From c47587cbfa829b03be1dc2e5a79dc96af6ea712f Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Fri, 15 Dec 2023 10:48:21 -0600
Subject: [PATCH 1/3] Reformat codebase with black

---
 docs/conf.py                                  |  55 +-
 stellarphot/__init__.py                       |   3 +-
 stellarphot/_astropy_init.py                  |   4 +-
 stellarphot/conftest.py                       |   3 +-
 stellarphot/core.py                           | 514 ++++++++------
 .../differential_photometry/aij_rel_fluxes.py |  88 +--
 .../tests/test_aij_rel_fluxes.py              | 123 ++--
 .../tests/test_vsx_mags.py                    |  45 +-
 .../differential_photometry/vsx_mags.py       |  23 +-
 stellarphot/gui_tools/comparison_functions.py | 384 +++++++----
 stellarphot/gui_tools/fits_opener.py          |   8 +-
 .../gui_tools/photometry_widget_functions.py  |  16 +-
 .../gui_tools/seeing_profile_functions.py     | 225 +++---
 .../gui_tools/tests/test_seeing_profile.py    |  54 +-
 stellarphot/io/__init__.py                    |   2 +-
 stellarphot/io/aij.py                         | 283 ++++----
 stellarphot/io/tess.py                        |  99 +--
 stellarphot/io/tests/test_aij_io.py           |  59 +-
 stellarphot/io/tests/test_tess_submission.py  |  18 +-
 stellarphot/photometry/photometry.py          | 554 ++++++++------
 stellarphot/photometry/source_detection.py    | 138 ++--
 stellarphot/photometry/tests/fake_image.py    | 115 ++--
 .../photometry/tests/test_detection.py        | 101 +--
 .../photometry/tests/test_photometry.py       | 542 ++++++++------
 stellarphot/plotting/aij_plots.py             |  99 ++-
 stellarphot/plotting/multi_night_plots.py     | 123 ++--
 stellarphot/plotting/transit_plots.py         |  99 ++-
 stellarphot/settings/autowidgets.py           |   5 +-
 stellarphot/settings/models.py                |  26 +-
 stellarphot/settings/tests/test_models.py     |  24 +-
 stellarphot/settings/views.py                 |   2 +-
 stellarphot/tests/make_wcs.py                 |   3 +-
 stellarphot/tests/test_core.py                | 647 ++++++++++++------
 stellarphot/transit_fitting/__init__.py       |   2 +-
 stellarphot/transit_fitting/core.py           | 218 +++---
 stellarphot/transit_fitting/gui.py            | 174 ++---
 stellarphot/transit_fitting/io.py             |   4 +-
 stellarphot/transit_fitting/plotting.py       |  33 +-
 .../tests/test_transit_fitting_gui.py         |   9 +-
 .../tests/test_transit_model_fit.py           | 165 ++---
 stellarphot/utils/__init__.py                 |   1 -
 stellarphot/utils/catalog_search.py           | 107 +--
 stellarphot/utils/comparison_utils.py         |  53 +-
 stellarphot/utils/magnitude_transforms.py     | 278 ++++----
 .../utils/tests/test_catalog_search.py        | 152 ++--
 .../utils/tests/test_magnitude_transforms.py  | 179 +++--
 stellarphot/version.py                        |   6 +-
 47 files changed, 3441 insertions(+), 2424 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index 4ca8ccd3..35726ce6 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -34,7 +34,9 @@
 try:
     from sphinx_astropy.conf.v1 import *  # noqa
 except ImportError:
-    print('ERROR: the documentation requires the sphinx-astropy package to be installed')
+    print(
+        "ERROR: the documentation requires the sphinx-astropy package to be installed"
+    )
     sys.exit(1)
 
 if sys.version_info < (3, 11):
@@ -51,10 +53,10 @@
 # -- General configuration ----------------------------------------------------
 
 # By default, highlight as Python 3.
-highlight_language = 'python3'
+highlight_language = "python3"
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.2'
+# needs_sphinx = '1.2'
 
 # To perform a Sphinx version check that needs to be more specific than
 # major.minor, call `check_sphinx_version("x.y.z")` here.
@@ -62,7 +64,7 @@
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns.append('_templates')
+exclude_patterns.append("_templates")
 
 # This is added to the end of RST files - a good place to put substitutions to
 # be used globally.
@@ -72,20 +74,19 @@
 # -- Project information ------------------------------------------------------
 
 # This does not *have* to match the package name, but typically does
-project = pyproject['project']['name']
-author = ", ".join(v['name'] for v in pyproject['project']['authors'])
-copyright = '{0}, {1}'.format(
-    datetime.datetime.now().year, author)
+project = pyproject["project"]["name"]
+author = ", ".join(v["name"] for v in pyproject["project"]["authors"])
+copyright = "{0}, {1}".format(datetime.datetime.now().year, author)
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 
-import_module(pyproject['project']['name'])
-package = sys.modules[pyproject['project']['name']]
+import_module(pyproject["project"]["name"])
+package = sys.modules[pyproject["project"]["name"]]
 
 # The short X.Y version.
-version = package.__version__.split('-', 1)[0]
+version = package.__version__.split("-", 1)[0]
 # The full version, including alpha/beta/rc tags.
 release = package.__version__
 
@@ -102,59 +103,59 @@
 
 # Add any paths that contain custom themes here, relative to this directory.
 # To use a different custom theme, add the directory containing the theme.
-#html_theme_path = []
+# html_theme_path = []
 
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes. To override the custom theme, set this to the
 # name of a builtin theme or the name of a custom theme in html_theme_path.
-#html_theme = None
+# html_theme = None
 
 html_theme_options = {
-    'logotext1': 'stellarphot',  # white, semi-bold
-    'logotext2': '',  # orange, light
-    'logotext3': ':docs'  # white, light
-    }
+    "logotext1": "stellarphot",  # white, semi-bold
+    "logotext2": "",  # orange, light
+    "logotext3": ":docs",  # white, light
+}
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = ''
+# html_logo = ''
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = ''
+# html_favicon = ''
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = ''
+# html_last_updated_fmt = ''
 
 # The name for this set of Sphinx documents. If None, it defaults to
 # "<project> v<release> documentation".
-html_title = '{0} v{1}'.format(project, release)
+html_title = "{0} v{1}".format(project, release)
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = project + 'doc'
+htmlhelp_basename = project + "doc"
 
 # -- Options for LaTeX output -------------------------------------------------
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [('index', project + '.tex', project + u' Documentation',
-                    author, 'manual')]
+latex_documents = [
+    ("index", project + ".tex", project + " Documentation", author, "manual")
+]
 
 # -- Options for manual page output -------------------------------------------
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [('index', project.lower(), project + u' Documentation',
-              [author], 1)]
+man_pages = [("index", project.lower(), project + " Documentation", [author], 1)]
 
 # -- Turn on nitpicky mode for sphinx (to warn about references not found) ----
 #
diff --git a/stellarphot/__init__.py b/stellarphot/__init__.py
index 1e813acd..76b345d9 100644
--- a/stellarphot/__init__.py
+++ b/stellarphot/__init__.py
@@ -3,7 +3,8 @@
 # Packages may add whatever they like to this file, but
 # should keep this content at the top.
 # ----------------------------------------------------------------------------
-from ._astropy_init import * # noqa
+from ._astropy_init import *  # noqa
+
 # ----------------------------------------------------------------------------
 
 from .core import *
diff --git a/stellarphot/_astropy_init.py b/stellarphot/_astropy_init.py
index 6d81902a..9a0c4c45 100644
--- a/stellarphot/_astropy_init.py
+++ b/stellarphot/_astropy_init.py
@@ -4,12 +4,12 @@
 
 from astropy.tests.runner import TestRunner
 
-__all__ = ['__version__', 'test']
+__all__ = ["__version__", "test"]
 
 try:
     from .version import version as __version__
 except ImportError:
-    __version__ = ''
+    __version__ = ""
 
 # Create the test function for self test
 test = TestRunner.make_test_runner_in(os.path.dirname(__file__))
diff --git a/stellarphot/conftest.py b/stellarphot/conftest.py
index b34d0664..d1087d17 100644
--- a/stellarphot/conftest.py
+++ b/stellarphot/conftest.py
@@ -12,9 +12,10 @@ def pytest_configure(config):
     iers_conf.auto_download = False
 
     config.option.astropy_header = True
-    PYTEST_HEADER_MODULES.pop('h5py', None)
+    PYTEST_HEADER_MODULES.pop("h5py", None)
 
     from .version import version
+
     packagename = os.path.basename(os.path.dirname(__file__))
     TESTED_VERSIONS[packagename] = version
 
diff --git a/stellarphot/core.py b/stellarphot/core.py
index cbabc1bd..e2e8cb2a 100644
--- a/stellarphot/core.py
+++ b/stellarphot/core.py
@@ -8,14 +8,20 @@
 
 import numpy as np
 
-__all__ = ['Camera', 'BaseEnhancedTable', 'PhotometryData', 'CatalogData',
-           'SourceListData']
+__all__ = [
+    "Camera",
+    "BaseEnhancedTable",
+    "PhotometryData",
+    "CatalogData",
+    "SourceListData",
+]
 
 # Approach to validation of units was inspired by the GammaPy project
 # which did it before we did:
 # https://docs.gammapy.org/dev/_modules/gammapy/analysis/config.html
 
+
 class QuantityType(Quantity):
     # Validator for Quantity type
     @classmethod
@@ -46,12 +52,15 @@ def validate(cls, v):
             v = Quantity(v)
         except TypeError:
             raise ValueError(f"Invalid value for Quantity: {v}")
-        if (len(v.unit.bases) != 2 or
-                v.unit.bases[0].physical_type != "angle" or
-                v.unit.bases[1].name != "pix" ):
+        if (
+            len(v.unit.bases) != 2
+            or v.unit.bases[0].physical_type != "angle"
+            or v.unit.bases[1].name != "pix"
+        ):
             raise ValueError(f"Invalid unit for pixel scale: {v.unit!r}")
         return v
 
+
 class Camera(BaseModel):
     """
     A class to represent a CCD-based camera.
@@ -113,6 +122,7 @@ class Camera(BaseModel):
 
     """
+
     gain: QuantityType
     read_noise: QuantityType
     dark_current: QuantityType
@@ -125,7 +135,7 @@ class Config:
         json_encoders = {
             Quantity: lambda v: f"{v.value} {v.unit}",
             QuantityType: lambda v: f"{v.value} {v.unit}",
-            PixelScaleType: lambda v: f"{v.value} {v.unit}"
+            PixelScaleType: lambda v: f"{v.value} {v.unit}",
         }
 
     # When the switch to pydantic v2 happens, this root_validator will need
@@ -134,40 +144,53 @@ class Config:
     @classmethod
     def validate_gain(cls, values):
         # Get read noise units
-        rn_unit = Quantity(values['read_noise']).unit
+        rn_unit = Quantity(values["read_noise"]).unit
 
         # Check that gain and read noise have compatible units, that is that
         # gain is read noise per adu.
-        gain = values['gain']
-        if (len(gain.unit.bases) != 2 or gain.unit.bases[0] != rn_unit or
-                gain.unit.bases[1] != u.adu):
-            raise ValueError(f"Gain units {gain.unit} are not compatible with "
-                             f"read noise units {rn_unit}.")
+        gain = values["gain"]
+        if (
+            len(gain.unit.bases) != 2
+            or gain.unit.bases[0] != rn_unit
+            or gain.unit.bases[1] != u.adu
+        ):
+            raise ValueError(
+                f"Gain units {gain.unit} are not compatible with "
+                f"read noise units {rn_unit}."
+            )
 
         # Check that dark current and read noise have compatible units, that is
         # that dark current is read noise per second.
-        dark_current = values['dark_current']
-        if (len(dark_current.unit.bases) != 2 or
-                dark_current.unit.bases[0] != rn_unit or
-                dark_current.unit.bases[1] != u.s):
-            raise ValueError(f"Dark current units {dark_current.unit} are not "
-                             f"compatible with read noise units {rn_unit}.")
+        dark_current = values["dark_current"]
+        if (
+            len(dark_current.unit.bases) != 2
+            or dark_current.unit.bases[0] != rn_unit
+            or dark_current.unit.bases[1] != u.s
+        ):
+            raise ValueError(
+                f"Dark current units {dark_current.unit} are not "
+                f"compatible with read noise units {rn_unit}."
+            )
 
         # Dark current validates against read noise
         return values
 
     def copy(self):
-        return Camera(gain=self.gain,
-                      read_noise=self.read_noise,
-                      dark_current=self.dark_current,
-                      pixel_scale=self.pixel_scale)
+        return Camera(
+            gain=self.gain,
+            read_noise=self.read_noise,
+            dark_current=self.dark_current,
+            pixel_scale=self.pixel_scale,
+        )
 
     def __copy__(self):
         return self.copy()
 
     def __repr__(self):
-        return f"Camera(gain={self.gain}, read_noise={self.read_noise}, " \
-               f"dark_current={self.dark_current}, pixel_scale={self.pixel_scale})"
+        return (
+            f"Camera(gain={self.gain}, read_noise={self.read_noise}, "
+            f"dark_current={self.dark_current}, pixel_scale={self.pixel_scale})"
+        )
 
 
 class BaseEnhancedTable(QTable):
@@ -212,8 +235,9 @@ class BaseEnhancedTable(QTable):
     validation of the inputs is performed and the resulting table is returned.
""" - def __init__(self, *args, input_data=None, table_description=None, colname_map=None, - **kwargs): + def __init__( + self, *args, input_data=None, table_description=None, colname_map=None, **kwargs + ): if (table_description is None) and (input_data is None): # Assume user is trying to create an empty table and let QTable # handle it @@ -224,16 +248,21 @@ def __init__(self, *args, input_data=None, table_description=None, colname_map=N try: self._table_description = {k: v for k, v in table_description.items()} except AttributeError: - raise TypeError("You must provide a dict as table_description (input " - f"table_description is type {type(self._table_description)}).") + raise TypeError( + "You must provide a dict as table_description (input " + f"table_description is type {type(self._table_description)})." + ) # Check data before copying to avoid recusive loop and non-QTable # data input. - if not isinstance(input_data, Table) or isinstance(input_data, - BaseEnhancedTable): - raise TypeError("You must provide an astropy Table and NOT a " - "BaseEnhancedTable as input_data (currently of " - f"type {type(input_data)}).") + if not isinstance(input_data, Table) or isinstance( + input_data, BaseEnhancedTable + ): + raise TypeError( + "You must provide an astropy Table and NOT a " + "BaseEnhancedTable as input_data (currently of " + f"type {type(input_data)})." + ) # Copy data before potential modification data = input_data.copy() @@ -244,9 +273,11 @@ def __init__(self, *args, input_data=None, table_description=None, colname_map=N try: self._colname_map = {k: v for k, v in colname_map.items()} except AttributeError: - raise TypeError("You must provide a dict as table_description " - "(input table_description is type " - f"{type(self._table_description)}).") + raise TypeError( + "You must provide a dict as table_description " + "(input table_description is type " + f"{type(self._table_description)})." + ) self._update_colnames(self._colname_map, data) @@ -275,18 +306,22 @@ def _validate_columns(self, data): # Check type try: if data[this_col].unit != this_unit: - raise ValueError(f"data['{this_col}'] is of wrong unit " - f"(should be {this_unit} but reported " - f"as {data[this_col].unit}).") + raise ValueError( + f"data['{this_col}'] is of wrong unit " + f"(should be {this_unit} but reported " + f"as {data[this_col].unit})." + ) except KeyError: - raise ValueError(f"data['{this_col}'] is missing from input " - "data.") - else: # Check that columns with no units but are required exist! + raise ValueError( + f"data['{this_col}'] is missing from input " "data." + ) + else: # Check that columns with no units but are required exist! try: tempvar = data[this_col] except KeyError: - raise ValueError(f"data['{this_col}'] is missing from input " - "data.") + raise ValueError( + f"data['{this_col}'] is missing from input " "data." + ) def _update_colnames(self, colname_map, data): # Change column names as desired, done before validating the columns, @@ -295,15 +330,17 @@ def _update_colnames(self, colname_map, data): try: data.rename_column(orig_name, new_name) except KeyError: - raise ValueError(f"data['{orig_name}'] is missing from input " - "data but listed in colname_map!") + raise ValueError( + f"data['{orig_name}'] is missing from input " + "data but listed in colname_map!" + ) def _update_passbands(self): # Converts filter names in filter column to AAVSO standard names # Assumes _passband_map is in namespace. 
         for orig_pb, aavso_pb in self._passband_map.items():
-            mask = self['passband'] == orig_pb
-            self['passband'][mask] = aavso_pb
+            mask = self["passband"] == orig_pb
+            self["passband"][mask] = aavso_pb
 
 
 class PhotometryData(BaseEnhancedTable):
@@ -410,193 +447,236 @@ class PhotometryData(BaseEnhancedTable):
     # Define columns that must be in table and provide information about their type, and
     # units.
     phot_descript = {
-        'star_id' : None,
-        'ra' : u.deg,
-        'dec' : u.deg,
-        'xcenter' : u.pix,
-        'ycenter' : u.pix,
-        'fwhm_x' : u.pix,
-        'fwhm_y' : u.pix,
-        'width' : u.pix,
-        'aperture' : u.pix,
-        'aperture_area' : u.pix,
-        'annulus_inner' : u.pix,
-        'annulus_outer' : u.pix,
-        'annulus_area' : u.pix,
-        'aperture_sum' : None,
-        'annulus_sum' : None,
-        'sky_per_pix_avg' : None,
-        'sky_per_pix_med' : None,
-        'sky_per_pix_std' : None,
-        'aperture_net_cnts' : None,
-        'noise_cnts' : None,
-        'noise_electrons' : u.electron,
-        'snr' : None,
-        'mag_inst' : None,
-        'mag_error' : None,
-        'exposure' : u.second,
-        'date-obs' : None,
-        'airmass' : None,
-        'passband' : None,
-        'file' : None
+        "star_id": None,
+        "ra": u.deg,
+        "dec": u.deg,
+        "xcenter": u.pix,
+        "ycenter": u.pix,
+        "fwhm_x": u.pix,
+        "fwhm_y": u.pix,
+        "width": u.pix,
+        "aperture": u.pix,
+        "aperture_area": u.pix,
+        "annulus_inner": u.pix,
+        "annulus_outer": u.pix,
+        "annulus_area": u.pix,
+        "aperture_sum": None,
+        "annulus_sum": None,
+        "sky_per_pix_avg": None,
+        "sky_per_pix_med": None,
+        "sky_per_pix_std": None,
+        "aperture_net_cnts": None,
+        "noise_cnts": None,
+        "noise_electrons": u.electron,
+        "snr": None,
+        "mag_inst": None,
+        "mag_error": None,
+        "exposure": u.second,
+        "date-obs": None,
+        "airmass": None,
+        "passband": None,
+        "file": None,
     }
     observatory = None
     camera = None
 
-    def __init__(self, *args, input_data=None, observatory=None, camera=None,
-                 colname_map=None, passband_map=None, retain_user_computed=False,
-                 **kwargs):
+    def __init__(
+        self,
+        *args,
+        input_data=None,
+        observatory=None,
+        camera=None,
+        colname_map=None,
+        passband_map=None,
+        retain_user_computed=False,
+        **kwargs,
+    ):
         if (observatory is None) and (camera is None) and (input_data is None):
             super().__init__(*args, **kwargs)
         else:
             # Perform input validation
             if not isinstance(observatory, EarthLocation):
-                raise TypeError("observatory must be an "
-                                "astropy.coordinates.EarthLocation object instead "
-                                f"of type {type(observatory)}.")
+                raise TypeError(
+                    "observatory must be an "
+                    "astropy.coordinates.EarthLocation object instead "
+                    f"of type {type(observatory)}."
+                )
             if not isinstance(camera, Camera):
-                raise TypeError("camera must be a stellarphot.Camera object instead "
-                                f"of type {type(camera)}.")
+                raise TypeError(
+                    "camera must be a stellarphot.Camera object instead "
+                    f"of type {type(camera)}."
+                )
 
             # Check the time column is correct format and scale
             try:
-                if (input_data['date-obs'][0].scale != 'utc'):
-                    raise ValueError("input_data['date-obs'] astropy.time.Time must "
-                                     "have scale='utc', "
-                                     f"not \'{input_data['date-obs'][0].scale}\'.")
+                if input_data["date-obs"][0].scale != "utc":
+                    raise ValueError(
+                        "input_data['date-obs'] astropy.time.Time must "
+                        "have scale='utc', "
+                        f"not '{input_data['date-obs'][0].scale}'."
+                    )
             except AttributeError:
                 # Happens if first item doesn't have a "scale"
-                raise ValueError("input_data['date-obs'] isn't column of "
-                                 "astropy.time.Time entries.")
+                raise ValueError(
+                    "input_data['date-obs'] isn't column of " "astropy.time.Time entries."
+                )
 
             # Convert input data to QTable (while also checking for required columns)
-            super().__init__(input_data=input_data,
-                             table_description=self.phot_descript,
-                             colname_map=colname_map, **kwargs)
+            super().__init__(
+                input_data=input_data,
+                table_description=self.phot_descript,
+                colname_map=colname_map,
+                **kwargs,
+            )
 
             # Add the TableAttributes directly to meta (and adding attribute
             # functions below) since using TableAttributes results in an
            # inability to access the values due to an
            # AttributeError: 'TableAttribute' object has no attribute 'name'
-            self.meta['lat'] = observatory.lat
-            self.meta['lon'] = observatory.lon
-            self.meta['height'] = observatory.height
-            self.meta['gain'] = camera.gain
-            self.meta['read_noise'] = camera.read_noise
-            self.meta['dark_current'] = camera.dark_current
-            self.meta['pixel_scale'] = camera.pixel_scale
+            self.meta["lat"] = observatory.lat
+            self.meta["lon"] = observatory.lon
+            self.meta["height"] = observatory.height
+            self.meta["gain"] = camera.gain
+            self.meta["read_noise"] = camera.read_noise
+            self.meta["dark_current"] = camera.dark_current
+            self.meta["pixel_scale"] = camera.pixel_scale
 
             # Check for consistency of counts-related columns
-            counts_columns = ['aperture_sum', 'annulus_sum', 'aperture_net_cnts',
-                              'noise_cnts']
-            counts_per_pixel_columns = ['sky_per_pix_avg', 'sky_per_pix_med',
-                                        'sky_per_pix_std']
+            counts_columns = [
+                "aperture_sum",
+                "annulus_sum",
+                "aperture_net_cnts",
+                "noise_cnts",
+            ]
+            counts_per_pixel_columns = [
+                "sky_per_pix_avg",
+                "sky_per_pix_med",
+                "sky_per_pix_std",
+            ]
             cnts_unit = self[counts_columns[0]].unit
             for this_col in counts_columns[1:]:
                 if input_data[this_col].unit != cnts_unit:
-                    raise ValueError(f"input_data['{this_col}'] has inconsistent units "
-                                     f"with input_data['{counts_columns[0]}'] (should "
-                                     f"be {cnts_unit} but it's "
-                                     f"{input_data[this_col].unit}).")
+                    raise ValueError(
+                        f"input_data['{this_col}'] has inconsistent units "
+                        f"with input_data['{counts_columns[0]}'] (should "
+                        f"be {cnts_unit} but it's "
+                        f"{input_data[this_col].unit})."
+                    )
             for this_col in counts_per_pixel_columns:
                 if cnts_unit is None:
                     perpixel = u.pixel**-1
                 else:
                     perpixel = cnts_unit * u.pixel**-1
                 if input_data[this_col].unit != perpixel:
-                    raise ValueError(f"input_data['{this_col}'] has inconsistent units "
-                                     f"with input_data['{counts_columns[0]}'] (should "
-                                     f"be {perpixel} but it's "
-                                     f"{input_data[this_col].unit}).")
+                    raise ValueError(
+                        f"input_data['{this_col}'] has inconsistent units "
+                        f"with input_data['{counts_columns[0]}'] (should "
+                        f"be {perpixel} but it's "
+                        f"{input_data[this_col].unit})."
+                    )
 
             # Compute additional columns (not done yet)
-            computed_columns = ['bjd', 'night']
+            computed_columns = ["bjd", "night"]
 
             # Check if columns exist already, if they do and retain_user_computed is
             # False, throw an error.
             for this_col in computed_columns:
                 if this_col in self.colnames:
                     if not retain_user_computed:
-                        raise ValueError(f"Computed column '{this_col}' already exists "
-                                         "in data. If you want to keep them, set "
-                                         "retain_user_computed=True.")
+                        raise ValueError(
+                            f"Computed column '{this_col}' already exists "
+                            "in data. If you want to keep them, set "
+                            "retain_user_computed=True."
+                        )
                 else:
                     # Compute the columns that need to be computed (match requires
                     # python>=3.10)
                     match this_col:
-                        case 'bjd':
-                            self['bjd'] = self.add_bjd_col(observatory)
+                        case "bjd":
+                            self["bjd"] = self.add_bjd_col(observatory)
 
-                        case 'night':
+                        case "night":
                            # Generate integer counter for nights. This should be
                            # approximately the MJD at noon local before the evening of
                            # the observation.
-                            hr_offset = int(observatory.lon.value/15)
+                            hr_offset = int(observatory.lon.value / 15)
                             # Compute offset to 12pm Local Time before evening
-                            LocalTime = Time(self['date-obs']) + hr_offset*u.hr
+                            LocalTime = Time(self["date-obs"]) + hr_offset * u.hr
                             hr = LocalTime.ymdhms.hour
                             # Compute number of hours to shift to arrive at 12 noon
                             # local time
                             shift_hr = hr.copy()
                             shift_hr[hr < 12] = shift_hr[hr < 12] + 12
                             shift_hr[hr >= 12] = shift_hr[hr >= 12] - 12
-                            delta = -shift_hr * u.hr - LocalTime.ymdhms.minute * u.min \
-                                    - LocalTime.ymdhms.second*u.s
-                            shift = Column(data = delta, name='shift')
+                            delta = (
+                                -shift_hr * u.hr
+                                - LocalTime.ymdhms.minute * u.min
+                                - LocalTime.ymdhms.second * u.s
+                            )
+                            shift = Column(data=delta, name="shift")
 
                             # Compute MJD at local noon before the evening of this
                             # observation.
-                            self['night'] = Column(data=
-                                                   np.array((Time(self['date-obs'])
-                                                             + shift).to_value('mjd'),
-                                                            dtype=int),
-                                                   name='night')
+                            self["night"] = Column(
+                                data=np.array(
+                                    (Time(self["date-obs"]) + shift).to_value("mjd"),
+                                    dtype=int,
+                                ),
+                                name="night",
+                            )
 
                         case _:
-                            raise ValueError(f"Trying to compute column ({this_col}). "
-                                             "This should never happen.")
+                            raise ValueError(
+                                f"Trying to compute column ({this_col}). "
+                                "This should never happen."
+                            )
 
             # Apply the filter/passband name update
             if passband_map is not None:
                 self._passband_map = passband_map.copy()
                 self._update_passbands()
 
-
     def add_bjd_col(self, observatory):
         """
         Returns an astropy column of barycentric Julian date times
        corresponding to the input observations. It modifies that table
        in place.
         """
-        if ( np.isnan(self['ra']).any() or np.isnan(self['dec']).any() ):
-            print("WARNING: BJD could not be computed in output PhotometryData object "
-                  "because some RA or Dec values are missing.")
+        if np.isnan(self["ra"]).any() or np.isnan(self["dec"]).any():
+            print(
+                "WARNING: BJD could not be computed in output PhotometryData object "
+                "because some RA or Dec values are missing."
+            )
             return np.full(len(self), np.nan)
         else:
             # Convert times at start of each observation to TDB (Barycentric Dynamical
             # Time)
-            times = Time(self['date-obs'])
+            times = Time(self["date-obs"])
             times_tdb = times.tdb
-            times_tdb.format='jd'   # Switch to JD format
+            times_tdb.format = "jd"  # Switch to JD format
 
             # Compute light travel time corrections
-            ip_peg = SkyCoord(ra=self['ra'], dec=self['dec'], unit='degree')
+            ip_peg = SkyCoord(ra=self["ra"], dec=self["dec"], unit="degree")
             ltt_bary = times.light_travel_time(ip_peg, location=observatory)
             time_barycenter = times_tdb + ltt_bary
 
             # Return BJD at midpoint of exposure at each location
-            return Time(time_barycenter + self['exposure'] / 2, scale='tdb')
+            return Time(time_barycenter + self["exposure"] / 2, scale="tdb")
 
     @property
     def camera(self):
-        return Camera(gain=self.meta['gain'],
-                      read_noise=self.meta['read_noise'],
-                      dark_current=self.meta['dark_current'],
-                      pixel_scale=self.meta['pixel_scale'])
+        return Camera(
+            gain=self.meta["gain"],
+            read_noise=self.meta["read_noise"],
+            dark_current=self.meta["dark_current"],
+            pixel_scale=self.meta["pixel_scale"],
+        )
 
     @property
     def observatory(self):
-        return EarthLocation(lat=self.meta['lat'], lon=self.meta['lon'],
-                             height=self.meta['height'])
+        return EarthLocation(
+            lat=self.meta["lat"], lon=self.meta["lon"], height=self.meta["height"]
+        )
 
 
 class CatalogData(BaseEnhancedTable):
@@ -661,34 +741,45 @@ class CatalogData(BaseEnhancedTable):
     # Define columns that must be in table and provide information about their type, and
     # units.
     catalog_descript = {
-        'id' : None,
-        'ra' : u.deg,
-        'dec' : u.deg,
-        'mag' : None,
-        'passband' : None
+        "id": None,
+        "ra": u.deg,
+        "dec": u.deg,
+        "mag": None,
+        "passband": None,
    }
    catalog_name = None
    catalog_source = None
 
-    def __init__(self, *args, input_data=None, catalog_name=None, catalog_source=None,
-                 colname_map=None, passband_map=None, **kwargs):
+    def __init__(
+        self,
+        *args,
+        input_data=None,
+        catalog_name=None,
+        catalog_source=None,
+        colname_map=None,
+        passband_map=None,
+        **kwargs,
+    ):
         if (input_data is None) and (catalog_name is None) and (catalog_source is None):
             super().__init__(*args, **kwargs)
         else:
             self._passband_map = passband_map
 
-            if (input_data is not None):
+            if input_data is not None:
                 # Convert input data to QTable (while also checking for required
                 # columns)
-                super().__init__(table_description=self.catalog_descript,
-                                 input_data=input_data,
-                                 colname_map=colname_map, **kwargs)
+                super().__init__(
+                    table_description=self.catalog_descript,
+                    input_data=input_data,
+                    colname_map=colname_map,
+                    **kwargs,
+                )
 
                 # Add the TableAttributes directly to meta (and adding attribute
                 # functions below) since using TableAttributes results in an
                # inability to access the values due to an
                # AttributeError: 'TableAttribute' object has no attribute 'name'
-                self.meta['catalog_name'] = str(catalog_name)
-                self.meta['catalog_source'] = str(catalog_source)
+                self.meta["catalog_name"] = str(catalog_name)
+                self.meta["catalog_source"] = str(catalog_source)
 
                 # Apply the filter/passband name update
                 if passband_map is not None:
@@ -700,11 +791,11 @@ def __init__(self, *args, input_data=None, catalog_name=None, catalog_source=Non
 
     @property
     def catalog_name(self):
-        return self.meta['catalog_name']
+        return self.meta["catalog_name"]
 
     @property
     def catalog_source(self):
-        return self.meta['catalog_source']
+        return self.meta["catalog_source"]
 
 
 class SourceListData(BaseEnhancedTable):
@@ -767,23 +858,26 @@ class SourceListData(BaseEnhancedTable):
     # Define columns that must be in table and provide information about their type, and
     # units.
     sourcelist_descript = {
-        'star_id' : None,
-        'ra' : u.deg,
-        'dec' : u.deg,
-        'xcenter' : u.pix,
-        'ycenter' : u.pix
+        "star_id": None,
+        "ra": u.deg,
+        "dec": u.deg,
+        "xcenter": u.pix,
+        "ycenter": u.pix,
     }
 
     def __init__(self, *args, input_data=None, colname_map=None, **kwargs):
-        if (input_data is None):
+        if input_data is None:
             super().__init__(*args, **kwargs)
         else:
             # Check data before copying to avoid recursive loop and non-QTable
             # data input.
-            if not isinstance(input_data, Table) or isinstance(input_data,
-                                                               BaseEnhancedTable):
-                raise TypeError("input_data must be an astropy Table (and not a "
-                                "BaseEnhancedTable) as data.")
+            if not isinstance(input_data, Table) or isinstance(
+                input_data, BaseEnhancedTable
+            ):
+                raise TypeError(
+                    "input_data must be an astropy Table (and not a "
+                    "BaseEnhancedTable) as data."
+                )
 
             # Process inputs and save as needed
             data = input_data.copy()
@@ -795,8 +889,10 @@ def __init__(self, *args, input_data=None, colname_map=None, **kwargs):
                 try:
                     self._colname_map = {k: v for k, v in colname_map.items()}
                 except AttributeError:
-                    raise TypeError("You must provide a dict as table_description (it "
-                                    f"is type {type(self._colname_map)}).")
+                    raise TypeError(
+                        "You must provide a dict as table_description (it "
+                        f"is type {type(self._colname_map)})."
+                    )
 
                 self._update_colnames(self._colname_map, data)
 
                 # No need to repeat this
@@ -807,56 +903,68 @@ def __init__(self, *args, input_data=None, colname_map=None, **kwargs):
             # Check if RA/Dec or xcenter/ycenter are missing
             ra_dec_present = True
             x_y_present = True
-            nosky_pos = ('ra' not in data.colnames or
-                         'dec' not in data.colnames or
-                         np.isnan(data['ra'].value).all() or
-                         np.isnan(data['dec'].value).all())
-            noimg_pos = ('xcenter' not in data.colnames or
-                         'ycenter' not in data.colnames or
-                         np.isnan(data['xcenter'].value).all() or
-                         np.isnan(data['ycenter'].value).all())
+            nosky_pos = (
+                "ra" not in data.colnames
+                or "dec" not in data.colnames
+                or np.isnan(data["ra"].value).all()
+                or np.isnan(data["dec"].value).all()
+            )
+            noimg_pos = (
+                "xcenter" not in data.colnames
+                or "ycenter" not in data.colnames
+                or np.isnan(data["xcenter"].value).all()
+                or np.isnan(data["ycenter"].value).all()
+            )
 
             if nosky_pos:
                 ra_dec_present = False
             if noimg_pos:
                 x_y_present = False
 
-            if (nosky_pos and noimg_pos):
-                raise ValueError("data must have either sky (ra, dec) or "+
-                                 "image (xcenter, ycenter) position.")
+            if nosky_pos and noimg_pos:
+                raise ValueError(
+                    "data must have either sky (ra, dec) or "
+                    + "image (xcenter, ycenter) position."
+                )
 
             # Create empty versions of any missing columns
-            for this_col in ['ra', 'dec', 'xcenter', 'ycenter']:
+            for this_col in ["ra", "dec", "xcenter", "ycenter"]:
                 # Create blank ra/dec columns
-                if (this_col not in data.colnames):
-                    data[this_col] = Column(data=np.full(len(data), np.nan),
-                                            name=this_col,
-                                            unit=self.sourcelist_descript[this_col])
+                if this_col not in data.colnames:
+                    data[this_col] = Column(
+                        data=np.full(len(data), np.nan),
+                        name=this_col,
+                        unit=self.sourcelist_descript[this_col],
+                    )
 
             # Convert input data to QTable (while also checking for required columns)
-            super().__init__(table_description=self.sourcelist_descript,
-                             input_data=data, colname_map=None, **kwargs)
-            self.meta['has_ra_dec'] = ra_dec_present
-            self.meta['has_x_y'] = x_y_present
+            super().__init__(
+                table_description=self.sourcelist_descript,
+                input_data=data,
+                colname_map=None,
+                **kwargs,
+            )
+            self.meta["has_ra_dec"] = ra_dec_present
+            self.meta["has_x_y"] = x_y_present
 
     @property
     def has_ra_dec(self):
-        return self.meta['has_ra_dec']
+        return self.meta["has_ra_dec"]
 
     @property
     def has_x_y(self):
-        return self.meta['has_x_y']
+        return self.meta["has_x_y"]
 
     def drop_ra_dec(self):
         # drop sky-based positions from existing SourceListData structure
-        self.meta['has_ra_dec'] = False
-        self['ra'] = Column(data=np.full(len(self), np.nan), name='ra', unit=u.deg)
-        self['dec'] = Column(data=np.full(len(self), np.nan), name='dec', unit=u.deg)
+        self.meta["has_ra_dec"] = False
+        self["ra"] = Column(data=np.full(len(self), np.nan), name="ra", unit=u.deg)
+        self["dec"] = Column(data=np.full(len(self), np.nan), name="dec", unit=u.deg)
 
     def drop_x_y(self):
         # drop image-based positions from existing SourceListData structure
-        self.meta['has_x_y'] = False
-        self['xcenter'] = Column(data=np.full(len(self), np.nan), name='ra',
-                                 unit=u.deg)
-        self['ycenter'] = Column(data=np.full(len(self), np.nan), name='dec',
-                                 unit=u.deg)
+        self.meta["has_x_y"] = False
+        self["xcenter"] = Column(data=np.full(len(self), np.nan), name="ra", unit=u.deg)
+        self["ycenter"] = Column(
+            data=np.full(len(self), np.nan), name="dec", unit=u.deg
+        )
diff --git a/stellarphot/differential_photometry/aij_rel_fluxes.py b/stellarphot/differential_photometry/aij_rel_fluxes.py
index ae56293b..97a7ad56 100644
--- a/stellarphot/differential_photometry/aij_rel_fluxes.py
+++ b/stellarphot/differential_photometry/aij_rel_fluxes.py
@@ -4,7 +4,7 @@
 from astropy.table import Table
 import astropy.units as u
 
-__all__ = ['add_in_quadrature', 'calc_aij_relative_flux']
+__all__ = ["add_in_quadrature", "calc_aij_relative_flux"]
 
 
 def add_in_quadrature(array):
@@ -14,10 +14,14 @@ def add_in_quadrature(array):
     return np.sqrt((array**2).sum())
 
 
-def calc_aij_relative_flux(star_data, comp_stars,
-                           in_place=True, coord_column=None,
-                           star_id_column='star_id',
-                           counts_column_name='aperture_net_cnts'):
+def calc_aij_relative_flux(
+    star_data,
+    comp_stars,
+    in_place=True,
+    coord_column=None,
+    star_id_column="star_id",
+    counts_column_name="aperture_net_cnts",
+):
     """
     Calculate AstroImageJ-style flux ratios.
 
@@ -63,27 +67,25 @@ def calc_aij_relative_flux(star_data, comp_stars,
     """
     # Match comparison star list to instrumental magnitude information
-    if star_data['ra'].unit is None:
-        unit = 'degree'
+    if star_data["ra"].unit is None:
+        unit = "degree"
     else:
         # Pulled this from the source code -- None is ok but need
         # to match the number of coordinates.
         unit = [None, None]
 
-    star_data_coords = SkyCoord(ra=star_data['ra'], dec=star_data['dec'],
-                                unit=unit)
+    star_data_coords = SkyCoord(ra=star_data["ra"], dec=star_data["dec"], unit=unit)
 
     if coord_column is not None:
         comp_coords = comp_stars[coord_column]
     else:
-        if comp_stars['ra'].unit is None:
-            unit = 'degree'
+        if comp_stars["ra"].unit is None:
+            unit = "degree"
         else:
             # Pulled this from the source code -- None is ok but need
             # to match the number of coordinates.
             unit = [None, None]
-        comp_coords = SkyCoord(ra=comp_stars['ra'], dec=comp_stars['dec'],
-                               unit=unit)
+        comp_coords = SkyCoord(ra=comp_stars["ra"], dec=comp_stars["dec"], unit=unit)
 
     # Check for matches of stars in star data to the stars in comp_stars
     # and eliminate as comps any stars for which the separation is bigger
@@ -93,35 +95,37 @@ def calc_aij_relative_flux(star_data, comp_stars,
     # Not sure this is really close enough for a good match...
     good = d2d < 1.2 * u.arcsec
 
-    check_for_bad = Table(data=[star_data[star_id_column].data, good],
-                          names=['star_id', 'good'])
-    check_for_bad = check_for_bad.group_by('star_id')
+    check_for_bad = Table(
+        data=[star_data[star_id_column].data, good], names=["star_id", "good"]
+    )
+    check_for_bad = check_for_bad.group_by("star_id")
     is_all_good = check_for_bad.groups.aggregate(np.all)
 
-    bad_comps = set(is_all_good['star_id'][~is_all_good['good']])
+    bad_comps = set(is_all_good["star_id"][~is_all_good["good"]])
 
     # Check whether any of the comp stars have NaN values and,
     # if they do, exclude them from the comp set.
-    check_for_nan = Table(data=[star_data[star_id_column].data,
-                                star_data[counts_column_name].data],
-                          names=['star_id', 'net_counts'])
-    check_for_nan = check_for_nan.group_by('star_id')
-    check_for_nan['good'] = ~np.isnan(check_for_nan['net_counts'])
+    check_for_nan = Table(
+        data=[star_data[star_id_column].data, star_data[counts_column_name].data],
+        names=["star_id", "net_counts"],
+    )
+    check_for_nan = check_for_nan.group_by("star_id")
+    check_for_nan["good"] = ~np.isnan(check_for_nan["net_counts"])
     is_all_good = check_for_nan.groups.aggregate(np.all)
 
-    bad_comps = bad_comps | set(is_all_good['star_id'][~is_all_good['good']])
+    bad_comps = bad_comps | set(is_all_good["star_id"][~is_all_good["good"]])
 
     for comp in bad_comps:
         this_comp = star_data[star_id_column] == comp
         good[this_comp] = False
 
-    error_column_name = 'noise_electrons'
+    error_column_name = "noise_electrons"
 
     # Calculate comp star counts for each time
 
     # Make a small table with just counts, errors and time for all of the comparison
     # stars.
-    comp_fluxes = star_data['date-obs', counts_column_name, error_column_name][good]
+    comp_fluxes = star_data["date-obs", counts_column_name, error_column_name][good]
     # print(np.isnan(comp_fluxes[flux_column_name]).sum(),
     #       np.isnan(comp_fluxes[error_column_name]).sum())
     # print(star_data[good][flux_column_name][np.isnan(comp_fluxes[flux_column_name])])
@@ -129,7 +133,7 @@ def calc_aij_relative_flux(star_data, comp_stars,
 
     # Check whether any of the columns are masked, but with no masked values,
     # and convert to regular column...eventually
 
-    comp_fluxes = comp_fluxes.group_by('date-obs')
+    comp_fluxes = comp_fluxes.group_by("date-obs")
     comp_totals = comp_fluxes.groups.aggregate(np.sum)[counts_column_name]
     comp_num_stars = comp_fluxes.groups.aggregate(np.count_nonzero)[counts_column_name]
     comp_errors = comp_fluxes.groups.aggregate(add_in_quadrature)[error_column_name]
@@ -138,7 +142,7 @@ def calc_aij_relative_flux(star_data, comp_stars,
     comp_error_vector = np.ones_like(star_data[counts_column_name])
 
     if len(set(comp_num_stars)) > 1:
-        raise RuntimeError('Different number of stars in comparison sets')
+        raise RuntimeError("Different number of stars in comparison sets")
 
     # Calculate relative flux for every star
@@ -149,32 +153,36 @@ def calc_aij_relative_flux(star_data, comp_stars,
     flux_offset = -star_data[counts_column_name] * is_comp
 
     # This seems a little hacky; there must be a better way
-    for date_obs, comp_total, comp_error in zip(comp_fluxes.groups.keys,
-                                                comp_totals, comp_errors):
-        this_time = star_data['date-obs'] == date_obs[0]
+    for date_obs, comp_total, comp_error in zip(
+        comp_fluxes.groups.keys, comp_totals, comp_errors
+    ):
+        this_time = star_data["date-obs"] == date_obs[0]
         comp_total_vector[this_time] *= comp_total
         comp_error_vector[this_time] = comp_error
 
     relative_flux = star_data[counts_column_name] / (comp_total_vector + flux_offset)
     relative_flux = relative_flux.flatten()
 
-    rel_flux_error = (star_data[counts_column_name] / comp_total_vector *
-                      np.sqrt((star_data[error_column_name] / star_data[counts_column_name])**2 +
-                              (comp_error_vector / comp_total_vector)**2
-                              )
-                      )
+    rel_flux_error = (
+        star_data[counts_column_name]
+        / comp_total_vector
+        * np.sqrt(
+            (star_data[error_column_name] / star_data[counts_column_name]) ** 2
+            + (comp_error_vector / comp_total_vector) ** 2
+        )
+    )
 
     # Add these columns to table
     if not in_place:
         star_data = star_data.copy()
 
-    star_data['relative_flux'] = relative_flux
-    star_data['relative_flux_error'] = rel_flux_error
-    star_data['relative_flux_snr'] = relative_flux / rel_flux_error
+    star_data["relative_flux"] = relative_flux
+    star_data["relative_flux_error"] = rel_flux_error
+    star_data["relative_flux_snr"] = relative_flux / rel_flux_error
 
     # AIJ records the total comparison counts even though that total is used
     # only for the targets, not the comparison.
-    star_data['comparison counts'] = comp_total_vector  # + flux_offset
-    star_data['comparison error'] = comp_error_vector
+    star_data["comparison counts"] = comp_total_vector  # + flux_offset
+    star_data["comparison error"] = comp_error_vector
     return star_data
diff --git a/stellarphot/differential_photometry/tests/test_aij_rel_fluxes.py b/stellarphot/differential_photometry/tests/test_aij_rel_fluxes.py
index e3189730..3341637f 100644
--- a/stellarphot/differential_photometry/tests/test_aij_rel_fluxes.py
+++ b/stellarphot/differential_photometry/tests/test_aij_rel_fluxes.py
@@ -24,115 +24,132 @@ def _raw_photometry_table():
     n_times = 10
     n_stars = 4
     # How about ten times...
-    times = Time('2018-06-25T01:00:00', format='isot', scale='utc')
+    times = Time("2018-06-25T01:00:00", format="isot", scale="utc")
     times = times + np.arange(n_times) * 30 * u.second
     times = times.value
     # and four stars
     star_ra = 250.0 * u.degree + np.arange(n_stars) * 10 * u.arcmin
     star_dec = np.array([45.0] * n_stars) * u.degree
-    fluxes = np.array([10000., 20000, 30000, 40000])
+    fluxes = np.array([10000.0, 20000, 30000, 40000])
     errors = np.sqrt(fluxes) + 50
-    star_ids = np.arange(1, 5, dtype='int')
+    star_ids = np.arange(1, 5, dtype="int")
 
     # Stars 2, 3 and 4 will be the comparison stars
     comp_stars = np.array([0, 1, 1, 1])
     expected_comp_fluxes = np.sum(fluxes[1:])
 
-    comp_flux_offset = - comp_stars * fluxes
+    comp_flux_offset = -comp_stars * fluxes
     expected_flux_ratios = fluxes / (expected_comp_fluxes + comp_flux_offset)
 
-    comp_error_total = np.sqrt((errors[1:]**2).sum())
-
-    expected_flux_error = (fluxes / expected_comp_fluxes *
-                           np.sqrt(errors**2 / fluxes**2 +
-                                   comp_error_total**2 / expected_comp_fluxes**2))
-
-    raw_table = Table(data=[np.sort(_repeat(times, n_stars)), _repeat(star_ra, n_times),
-                            _repeat(star_dec, n_times), _repeat(fluxes, n_times),
-                            _repeat(errors, n_times),
-                            _repeat(star_ids, n_times)],
-                      names=['date-obs', 'ra', 'dec', 'aperture_net_cnts',
-                             'noise_electrons', 'star_id'])
+    comp_error_total = np.sqrt((errors[1:] ** 2).sum())
+
+    expected_flux_error = (
+        fluxes
+        / expected_comp_fluxes
+        * np.sqrt(
+            errors**2 / fluxes**2
+            + comp_error_total**2 / expected_comp_fluxes**2
+        )
+    )
+
+    raw_table = Table(
+        data=[
+            np.sort(_repeat(times, n_stars)),
+            _repeat(star_ra, n_times),
+            _repeat(star_dec, n_times),
+            _repeat(fluxes, n_times),
+            _repeat(errors, n_times),
+            _repeat(star_ids, n_times),
+        ],
+        names=[
+            "date-obs",
+            "ra",
+            "dec",
+            "aperture_net_cnts",
+            "noise_electrons",
+            "star_id",
+        ],
+    )
 
     photom = PhotometryData(raw_table)
 
     return expected_flux_ratios, expected_flux_error, raw_table, raw_table[1:4]
 
 
-@pytest.mark.parametrize('comp_ra_dec_have_units', [True, False])
-@pytest.mark.parametrize('star_ra_dec_have_units', [True, False])
-@pytest.mark.parametrize('in_place', [True, False])
-def test_relative_flux_calculation(in_place,
-                                   star_ra_dec_have_units,
-                                   comp_ra_dec_have_units):
+@pytest.mark.parametrize("comp_ra_dec_have_units", [True, False])
+@pytest.mark.parametrize("star_ra_dec_have_units", [True, False])
+@pytest.mark.parametrize("in_place", [True, False])
+def test_relative_flux_calculation(
+    in_place, star_ra_dec_have_units, comp_ra_dec_have_units
+):
     expected_flux, expected_error, input_table, comp_star = _raw_photometry_table()
     # Try doing it all at once
-    n_times = len(np.unique(input_table['date-obs']))
+    n_times = len(np.unique(input_table["date-obs"]))
 
     all_expected_flux = _repeat(expected_flux, n_times)
     all_expected_error = _repeat(expected_error, n_times)
 
     if not star_ra_dec_have_units:
-        input_table['ra'] = input_table['ra'].data
-        input_table['dec'] = input_table['dec'].data
+        input_table["ra"] = input_table["ra"].data
+        input_table["dec"] = input_table["dec"].data
 
     if not comp_ra_dec_have_units:
-        comp_star['ra'] = comp_star['ra'].data
-        comp_star['dec'] = comp_star['dec'].data
+        comp_star["ra"] = comp_star["ra"].data
+        comp_star["dec"] = comp_star["dec"].data
 
-    output_table = calc_aij_relative_flux(input_table, comp_star,
-                                          in_place=in_place)
-    output_flux = output_table['relative_flux']
-    output_error = output_table['relative_flux_error']
+    output_table = calc_aij_relative_flux(input_table, comp_star, in_place=in_place)
+    output_flux = output_table["relative_flux"]
+    output_error = output_table["relative_flux_error"]
 
     print(all_expected_flux - output_flux)
     np.testing.assert_allclose(output_flux, all_expected_flux)
     np.testing.assert_allclose(output_error, all_expected_error)
 
     if in_place:
-        assert 'relative_flux' in input_table.colnames
+        assert "relative_flux" in input_table.colnames
     else:
-        assert 'relative_flux' not in input_table.colnames
+        assert "relative_flux" not in input_table.colnames
 
 
-@pytest.mark.parametrize('bad_thing', ['RA', 'NaN'])
+@pytest.mark.parametrize("bad_thing", ["RA", "NaN"])
 def test_bad_comp_star(bad_thing):
-    expected_flux, expected_error, input_table, comp_star = \
-        _raw_photometry_table()
+    expected_flux, expected_error, input_table, comp_star = _raw_photometry_table()
 
     # We'll modify the "bad" property for the last star in the last
     # image.
 
     # First, let's sort so the row we want to modify is the last one
-    input_table.sort(['date-obs', 'star_id'])
+    input_table.sort(["date-obs", "star_id"])
 
     # Force a copy of this row so we have access to the original values
     last_one = Table(input_table[-1])
 
-    if bad_thing == 'RA':
+    if bad_thing == "RA":
         # "Jiggle" one of the stars by moving it by a few arcsec in one image.
-        coord_inp = SkyCoord(ra=last_one['ra'][0], dec=last_one['dec'][0],
-                             unit=u.degree)
+        coord_inp = SkyCoord(
+            ra=last_one["ra"][0], dec=last_one["dec"][0], unit=u.degree
+        )
         coord_bad_ra = coord_inp.ra + 3 * u.arcsecond
         print(len(last_one), coord_inp)
-        input_table['ra'][-1] = coord_bad_ra.degree
-    elif bad_thing == 'NaN':
-        input_table['aperture_net_cnts'][-1] = np.nan
+        input_table["ra"][-1] = coord_bad_ra.degree
+    elif bad_thing == "NaN":
+        input_table["aperture_net_cnts"][-1] = np.nan
 
-    output_table = calc_aij_relative_flux(input_table, comp_star,
-                                          in_place=False)
+    output_table = calc_aij_relative_flux(input_table, comp_star, in_place=False)
 
-    old_total_flux = comp_star['aperture_net_cnts'].sum()
-    new_flux = old_total_flux - last_one['aperture_net_cnts']
+    old_total_flux = comp_star["aperture_net_cnts"].sum()
+    new_flux = old_total_flux - last_one["aperture_net_cnts"]
 
     # This works for target stars, i.e. those never in comparison set
     new_expected_flux = old_total_flux / new_flux * expected_flux
 
     # Oh wow, this is terrible....
     # Need to manually calculate for the only two that are still in comparison
-    new_expected_flux[1] = (comp_star['aperture_net_cnts'][0] /
-                            comp_star['aperture_net_cnts'][1])
-    new_expected_flux[2] = (comp_star['aperture_net_cnts'][1] /
-                            comp_star['aperture_net_cnts'][0])
+    new_expected_flux[1] = (
+        comp_star["aperture_net_cnts"][0] / comp_star["aperture_net_cnts"][1]
+    )
+    new_expected_flux[2] = (
+        comp_star["aperture_net_cnts"][1] / comp_star["aperture_net_cnts"][0]
+    )
 
     new_expected_flux[3] = expected_flux[3]
 
-    if bad_thing == 'NaN':
+    if bad_thing == "NaN":
         new_expected_flux[3] = np.nan
-    np.testing.assert_allclose(new_expected_flux, output_table['relative_flux'][-4:])
+    np.testing.assert_allclose(new_expected_flux, output_table["relative_flux"][-4:])
diff --git a/stellarphot/differential_photometry/tests/test_vsx_mags.py b/stellarphot/differential_photometry/tests/test_vsx_mags.py
index 947efc63..a9b7d4a6 100644
--- a/stellarphot/differential_photometry/tests/test_vsx_mags.py
+++ b/stellarphot/differential_photometry/tests/test_vsx_mags.py
@@ -7,35 +7,46 @@
 
 def test_one_vmag():
-    find_var_data = get_pkg_data_filename('data/variables.fits')
+    find_var_data = get_pkg_data_filename("data/variables.fits")
     var_stars = Table.read(find_var_data)
-    find_star_data = get_pkg_data_filename('data/2014-12-29-ey-uma-9.fits')
+    find_star_data = get_pkg_data_filename("data/2014-12-29-ey-uma-9.fits")
     star_data = Table.read(find_star_data)
-    find_comp_data = get_pkg_data_filename('data/comp_stars.fits')
+    find_comp_data = get_pkg_data_filename("data/comp_stars.fits")
     comp_stars = Table.read(find_comp_data)
-    comp_stars['coords'] = SkyCoord(ra=comp_stars['ra'], dec=comp_stars['dec'], unit='degree')
-    vmag, error = calc_vmag(var_stars, star_data, comp_stars, band='Rc', star_data_mag_column='mag_inst_R')
+    comp_stars["coords"] = SkyCoord(
+        ra=comp_stars["ra"], dec=comp_stars["dec"], unit="degree"
+    )
+    vmag, error = calc_vmag(
+        var_stars, star_data, comp_stars, band="Rc", star_data_mag_column="mag_inst_R"
+    )
     np.testing.assert_almost_equal(vmag, 14.54827, decimal=5)
     np.testing.assert_almost_equal(error, 0.028685, decimal=5)
 
 
 def test_multi_vmag():
-    find_var_data = get_pkg_data_filename('data/variables.fits')
+    find_var_data = get_pkg_data_filename("data/variables.fits")
     var_stars = Table.read(find_var_data)
-    find_star_data = get_pkg_data_filename('data/2014-12-29-ey-uma-9.fits')
+    find_star_data = get_pkg_data_filename("data/2014-12-29-ey-uma-9.fits")
     star_data = Table.read(find_star_data)
-    find_comp_data = get_pkg_data_filename('data/comp_stars.fits')
+    find_comp_data = get_pkg_data_filename("data/comp_stars.fits")
     comp_stars = Table.read(find_comp_data)
-    comp_stars['coords'] = SkyCoord(ra=comp_stars['ra'], dec=comp_stars['dec'], unit='degree')
+    comp_stars["coords"] = SkyCoord(
+        ra=comp_stars["ra"], dec=comp_stars["dec"], unit="degree"
+    )
 
-    vmag, error = calc_vmag(var_stars, star_data, comp_stars, band='Rc', star_data_mag_column='mag_inst_R')
-    del var_stars['coords']
+    vmag, error = calc_vmag(
+        var_stars, star_data, comp_stars, band="Rc", star_data_mag_column="mag_inst_R"
+    )
+    del var_stars["coords"]
 
     # We really don't care about the metadata here, so silently accept one
-    v_data = vstack([var_stars, var_stars], metadata_conflicts='silent')
-    v_data['coords'] = SkyCoord(ra=v_data['RAJ2000'],
-                                dec=v_data['DEJ2000'], unit='degree')
-    v_table = calc_multi_vmag(v_data, star_data, comp_stars, band='Rc', star_data_mag_column='mag_inst_R')
+    v_data = vstack([var_stars, var_stars], metadata_conflicts="silent")
+    v_data["coords"] = SkyCoord(
+        ra=v_data["RAJ2000"], dec=v_data["DEJ2000"], unit="degree"
+    )
+    v_table = calc_multi_vmag(
+        v_data, star_data, comp_stars, band="Rc", star_data_mag_column="mag_inst_R"
+    )
 
-    assert v_table['Mag'][0] == v_table['Mag'][1]
-    assert v_table['StDev'][0] == v_table['StDev'][1]
+    assert v_table["Mag"][0] == v_table["Mag"][1]
+    assert v_table["StDev"][0] == v_table["StDev"][1]
diff --git a/stellarphot/differential_photometry/vsx_mags.py b/stellarphot/differential_photometry/vsx_mags.py
index d65ff023..3f93de94 100644
--- a/stellarphot/differential_photometry/vsx_mags.py
+++ b/stellarphot/differential_photometry/vsx_mags.py
@@ -4,7 +4,7 @@
 from astropy.coordinates import SkyCoord
 from astropy import units as u
 
-__all__ = ['calc_multi_vmag', 'calc_vmag']
+__all__ = ["calc_multi_vmag", "calc_vmag"]
 
 
 def calc_multi_vmag(var_stars, star_data, comp_stars, **kwd):
@@ -35,16 +35,17 @@ def calc_multi_vmag(var_stars, star_data, comp_stars, **kwd):
     vmag = []
     stdev = []
     for vsx in var_stars:
-        name.append(vsx['Name'])
+        name.append(vsx["Name"])
         avg_vmag, error = calc_vmag(vsx, star_data, comp_stars, **kwd)
         vmag.append(avg_vmag)
         stdev.append(error)
 
-    vmag_table = Table([name, vmag, stdev], names=('Name', 'Mag', 'StDev'))
+    vmag_table = Table([name, vmag, stdev], names=("Name", "Mag", "StDev"))
     return vmag_table
 
 
-def calc_vmag(var_stars, star_data, comp_stars, band=None,
-              star_data_mag_column='mag_inst'):
+def calc_vmag(
+    var_stars, star_data, comp_stars, band=None, star_data_mag_column="mag_inst"
+):
     """
     Calculate the average magnitude and standard deviation of a variable star in field.
 
@@ -88,17 +89,17 @@ def calc_vmag(var_stars, star_data, comp_stars, band=None,
 
     # Match variable stars (essentially a list of targets) to instrumental
     # magnitude information.
-    var_coords = var_stars['coords']
-    star_data_coords = SkyCoord(ra=star_data['RA'], dec=star_data['Dec'])
+    var_coords = var_stars["coords"]
+    star_data_coords = SkyCoord(ra=star_data["RA"], dec=star_data["Dec"])
     v_index, v_d2d, _ = var_coords.match_to_catalog_sky(star_data_coords)
 
-    rcomps = comp_stars[comp_stars['band'] == band]
+    rcomps = comp_stars[comp_stars["band"] == band]
 
     # Match comparison star list to instrumental magnitude information
     try:
-        comp_coords = rcomps['coords']
+        comp_coords = rcomps["coords"]
     except KeyError:
-        comp_coords = SkyCoord(ra=rcomps['RAJ2000'], dec=rcomps['DEJ2000'])
+        comp_coords = SkyCoord(ra=rcomps["RAJ2000"], dec=rcomps["DEJ2000"])
     index, d2d, _ = comp_coords.match_to_catalog_sky(star_data_coords)
     good = d2d < 1 * u.arcsec
     good_index = index[good]
@@ -107,7 +108,7 @@ def calc_vmag(var_stars, star_data, comp_stars, band=None,
     comp_star_mag = star_data[good_index][star_data_mag_column]
     a_index, a_d2d, _ = comp_coords.match_to_catalog_sky(comp_coords)
     good_a_index = a_index[good]
-    accepted_comp = rcomps[good_a_index]['mag']
+    accepted_comp = rcomps[good_a_index]["mag"]
     new_mag = vmag_image - comp_star_mag + accepted_comp
     avg = np.nanmean(new_mag)
     stdev = np.nanstd(new_mag)
diff --git a/stellarphot/gui_tools/comparison_functions.py b/stellarphot/gui_tools/comparison_functions.py
index 5acdcf34..075c678e 100644
--- a/stellarphot/gui_tools/comparison_functions.py
+++ b/stellarphot/gui_tools/comparison_functions.py
@@ -22,11 +22,15 @@
 from stellarphot.gui_tools.seeing_profile_functions import set_keybindings
 from stellarphot.gui_tools.fits_opener import FitsOpener
 from stellarphot.io import TessSubmission, TOI, TessTargetFile
-from stellarphot.utils.comparison_utils import (set_up, crossmatch_APASS2VSX,
-                                                mag_scale, in_field)
+from stellarphot.utils.comparison_utils import (
+    set_up,
+    crossmatch_APASS2VSX,
+    mag_scale,
+    in_field,
+)
 
 
-__all__ = ['make_markers', 'wrap', 'ComparisonViewer']
+__all__ = ["make_markers", "wrap", "ComparisonViewer"]
 
 DESC_STYLE = {"description_width": "initial"}
 
@@ -64,7 +68,7 @@ def make_markers(iw, ccd, RD, vsx, ent, name_or_coord=None):
         Markers are added to the image in Ginga widget.
""" iw.load_nddata(ccd) - iw.zoom_level = 'fit' + iw.zoom_level = "fit" try: iw.reset_markers() @@ -72,9 +76,10 @@ def make_markers(iw, ccd, RD, vsx, ent, name_or_coord=None): iw.remove_all_markers() if RD: - iw.marker = {'type': 'circle', 'color': 'green', 'radius': 10} - iw.add_markers(RD, skycoord_colname='coords', - use_skycoord=True, marker_name='TESS Targets') + iw.marker = {"type": "circle", "color": "green", "radius": 10} + iw.add_markers( + RD, skycoord_colname="coords", use_skycoord=True, marker_name="TESS Targets" + ) if name_or_coord is not None: if isinstance(name_or_coord, str): @@ -83,13 +88,18 @@ def make_markers(iw, ccd, RD, vsx, ent, name_or_coord=None): iw.center_on(name_or_coord) if vsx: - iw.marker = {'type': 'circle', 'color': 'blue', 'radius': 10} - iw.add_markers(vsx, skycoord_colname='coords', - use_skycoord=True, marker_name='VSX') - iw.marker = {'type': 'circle', 'color': 'red', 'radius': 10} - iw.add_markers(ent, skycoord_colname='coords', - use_skycoord=True, marker_name='APASS comparison') - iw.marker = {'type': 'cross', 'color': 'red', 'radius': 6} + iw.marker = {"type": "circle", "color": "blue", "radius": 10} + iw.add_markers( + vsx, skycoord_colname="coords", use_skycoord=True, marker_name="VSX" + ) + iw.marker = {"type": "circle", "color": "red", "radius": 10} + iw.add_markers( + ent, + skycoord_colname="coords", + use_skycoord=True, + marker_name="APASS comparison", + ) + iw.marker = {"type": "cross", "color": "red", "radius": 6} def wrap(imagewidget, outputwidget): @@ -106,6 +116,7 @@ def wrap(imagewidget, outputwidget): Output widget for printing information. """ + def cb(viewer, event, data_x, data_y): i = imagewidget._viewer.get_image() @@ -120,22 +131,27 @@ def cb(viewer, event, data_x, data_y): out_skycoord = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree)) try: - all_table = imagewidget.get_markers(marker_name='all') + all_table = imagewidget.get_markers(marker_name="all") except AttributeError: all_table = imagewidget.get_all_markers() with outputwidget: - index, d2d, d3d = out_skycoord.match_to_catalog_sky( - all_table['coord']) + index, d2d, d3d = out_skycoord.match_to_catalog_sky(all_table["coord"]) if d2d < 10 * u.arcsec: - mouse = all_table['coord'][index].separation( - all_table['coord']) + mouse = all_table["coord"][index].separation(all_table["coord"]) rat = mouse < 1 * u.arcsec - elims = [name for name in all_table['marker name'] - [rat] if name.startswith('elim')] + elims = [ + name + for name in all_table["marker name"][rat] + if name.startswith("elim") + ] if not elims: - imagewidget.add_markers(all_table[rat], skycoord_colname='coord', use_skycoord=True, - marker_name=f'elim{imagewidget.next_elim}') + imagewidget.add_markers( + all_table[rat], + skycoord_colname="coord", + use_skycoord=True, + marker_name=f"elim{imagewidget.next_elim}", + ) else: for elim in elims: try: @@ -144,8 +160,8 @@ def cb(viewer, event, data_x, data_y): imagewidget.remove_markers(marker_name=elim) else: - print('sorry try again') - imagewidget._viewer.onscreen_message('Click closer to a star') + print("sorry try again") + imagewidget._viewer.onscreen_message("Click closer to a star") return cb @@ -219,19 +235,21 @@ class ComparisonViewer: variables : `astropy.table.Table` """ - def __init__(self, - file="", - directory='.', - target_mag=10, - bright_mag_limit=8, - dim_mag_limit=17, - targets_from_file=None, - object_coordinate=None, - photom_apertures_file=None, - overwrite_outputs=True): - - self._label_name = 'labels' - self._circle_name = 'target 
circle' + + def __init__( + self, + file="", + directory=".", + target_mag=10, + bright_mag_limit=8, + dim_mag_limit=17, + targets_from_file=None, + object_coordinate=None, + photom_apertures_file=None, + overwrite_outputs=True, + ): + self._label_name = "labels" + self._circle_name = "target circle" self._file_chooser = FitsOpener() self._directory = directory @@ -260,23 +278,33 @@ def _init(self): """ if self.tess_submission is not None: self._tess_object_info.layout.visibility = "visible" - self.ccd, self.vsx = \ - set_up(self._file_chooser.path.name, - directory_with_images=self._file_chooser.path.parent - ) + self.ccd, self.vsx = set_up( + self._file_chooser.path.name, + directory_with_images=self._file_chooser.path.parent, + ) - apass, vsx_apass_angle, targets_apass_angle = crossmatch_APASS2VSX(self.ccd, - self.targets_from_file, - self.vsx) + apass, vsx_apass_angle, targets_apass_angle = crossmatch_APASS2VSX( + self.ccd, self.targets_from_file, self.vsx + ) - apass_good_coord, good_stars = mag_scale(self.target_mag, apass, vsx_apass_angle, - targets_apass_angle, - brighter_dmag=self.target_mag - self.bright_mag_limit, - dimmer_dmag=self.dim_mag_limit - self.target_mag) + apass_good_coord, good_stars = mag_scale( + self.target_mag, + apass, + vsx_apass_angle, + targets_apass_angle, + brighter_dmag=self.target_mag - self.bright_mag_limit, + dimmer_dmag=self.dim_mag_limit - self.target_mag, + ) apass_comps = in_field(apass_good_coord, self.ccd, apass, good_stars) - make_markers(self.iw, self.ccd, self.targets_from_file, self.vsx, apass_comps, - name_or_coord=self.target_coord) + make_markers( + self.iw, + self.ccd, + self.targets_from_file, + self.vsx, + apass_comps, + name_or_coord=self.target_coord, + ) @property def variables(self): @@ -284,10 +312,12 @@ def variables(self): An `astropy.table.Table` of the variables in the class. """ comp_table = self.generate_table() - new_vsx_mark = comp_table['marker name'] == 'VSX' - idx, _, _ = comp_table['coord'][new_vsx_mark].match_to_catalog_sky(self.vsx['coords']) + new_vsx_mark = comp_table["marker name"] == "VSX" + idx, _, _ = comp_table["coord"][new_vsx_mark].match_to_catalog_sky( + self.vsx["coords"] + ) our_vsx = self.vsx[idx] - our_vsx['star_id'] = comp_table['star_id'][new_vsx_mark] + our_vsx["star_id"] = comp_table["star_id"][new_vsx_mark] return our_vsx def _set_object(self): @@ -295,7 +325,7 @@ def _set_object(self): Try to automatically set object name immediately after file is chosen. """ try: - self.object_name.value = self._file_chooser.header['object'] + self.object_name.value = self._file_chooser.header["object"] except KeyError: # No object, will show empty box for name self.object_name.disabled = False @@ -309,9 +339,7 @@ def _set_object(self): # Maybe this is a tess object? 
try: self.tess_submission = TessSubmission.from_header( - self._file_chooser.header, - telescope_code="Paul-P-Feder-0.4m", - planet=1 + self._file_chooser.header, telescope_code="Paul-P-Feder-0.4m", planet=1 ) except ValueError: # Guess not, time to turn on the coordinates box @@ -326,9 +354,9 @@ def _set_object(self): self.tess_save_toggle.disabled = False self.toi_info = TOI(self.tess_submission.tic_id) - self._target_file_info = TessTargetFile(self.toi_info.coord, - self.toi_info.tess_mag, - self.toi_info.depth) + self._target_file_info = TessTargetFile( + self.toi_info.coord, self.toi_info.tess_mag, self.toi_info.depth + ) self.target_coord = self.tess_submission.tic_coord self._tess_object_info.mag.value = self.toi_info.tess_mag @@ -342,7 +370,7 @@ def _set_file(self, change): self._update_tess_save_names() def _save_toggle_action(self, change): - activated = change['new'] + activated = change["new"] if activated: self._tess_save_box.layout.visibility = "visible" @@ -350,25 +378,26 @@ def _save_toggle_action(self, change): self._tess_save_box.layout.visibility = "hidden" def _make_observers(self): - self._show_labels_button.observe(self._show_label_button_handler, - names='value') + self._show_labels_button.observe(self._show_label_button_handler, names="value") self._save_var_info.on_click(self._save_variables_to_file) self._save_aperture_file.on_click(self._save_aperture_to_file) self._file_chooser.register_callback(self._set_file) self.tess_save_toggle.observe(self._save_toggle_action, "value") self.save_files.on_click(self.save_tess_files) - def _save_variables_to_file(self, button=None, filename=''): + def _save_variables_to_file(self, button=None, filename=""): if not filename: - filename = 'variables.csv' + filename = "variables.csv" # Export variables as CSV (overwrite existing file if it exists) try: self.variables.write(filename, overwrite=self.overwrite_outputs) except OSError: - raise OSError(f"Existing file ({filename}) can not be overwritten. Set overwrite_outputs=True to address this.") + raise OSError( + f"Existing file ({filename}) can not be overwritten. Set overwrite_outputs=True to address this." 
+ ) def _show_label_button_handler(self, change): - value = change['new'] + value = change["new"] if value: # Showing labels can take a bit, disable while in progress self._show_labels_button.disabled = True @@ -376,47 +405,53 @@ def _show_label_button_handler(self, change): self._show_labels_button.disabled = False else: self.remove_labels() - self._show_labels_button.description = self._show_labels_button.descriptions[value] + self._show_labels_button.description = self._show_labels_button.descriptions[ + value + ] - def _save_aperture_to_file(self, button=None, filename=''): + def _save_aperture_to_file(self, button=None, filename=""): if not filename: filename = self.photom_apertures_file # Convert aperture table into a SourceList objects and output targets_table = self.generate_table() # Assign units to columns - targets_table['ra'] = targets_table['ra'] * u.deg - targets_table['dec'] = targets_table['dec'] * u.deg - targets_table['x'] = targets_table['x'] * u.pixel - targets_table['y'] = targets_table['y'] * u.pixel + targets_table["ra"] = targets_table["ra"] * u.deg + targets_table["dec"] = targets_table["dec"] * u.deg + targets_table["x"] = targets_table["x"] * u.pixel + targets_table["y"] = targets_table["y"] * u.pixel # Drop redundant sky position column - targets_table.remove_columns(['coord']) + targets_table.remove_columns(["coord"]) # Build sources list - targets2sourcelist = {'x' : 'xcenter', 'y' : 'ycenter'} - sources = SourceListData(input_data=targets_table, colname_map=targets2sourcelist) + targets2sourcelist = {"x": "xcenter", "y": "ycenter"} + sources = SourceListData( + input_data=targets_table, colname_map=targets2sourcelist + ) # Export aperture file as CSV (overwrite existing file if it exists) try: sources.write(filename, overwrite=self.overwrite_outputs) except OSError: - raise OSError(f"Existing file ({filename}) can not be overwritten. Set overwrite_outputs=True to address this.") + raise OSError( + f"Existing file ({filename}) can not be overwritten. Set overwrite_outputs=True to address this." 
+ ) def _make_control_bar(self): - self._show_labels_button = ipw.ToggleButton(description='Click to show labels') + self._show_labels_button = ipw.ToggleButton(description="Click to show labels") self._show_labels_button.descriptions = { - True: 'Click to hide labels', - False: 'Click to show labels' + True: "Click to hide labels", + False: "Click to show labels", } - self._save_var_info = ipw.Button(description='Save variable info') + self._save_var_info = ipw.Button(description="Save variable info") - self._save_aperture_file = ipw.Button(description='Save aperture file') + self._save_aperture_file = ipw.Button(description="Save aperture file") controls = ipw.HBox( children=[ self._show_labels_button, self._save_var_info, - self._save_aperture_file + self._save_aperture_file, ] ) @@ -437,21 +472,25 @@ def _make_tess_object_info(self): self._tess_object_info.layout.visibility = "hidden" def _make_tess_save_box(self): - self.tess_save_toggle = ipw.ToggleButton(description="TESS files...", - disabled=True) + self.tess_save_toggle = ipw.ToggleButton( + description="TESS files...", disabled=True + ) self._tess_save_box = ipw.VBox() self._tess_save_box.layout.visibility = "hidden" - scope_name = ipw.Text(description="Telescope code", - value="Paul-P-Feder-0.4m", - style=DESC_STYLE) + scope_name = ipw.Text( + description="Telescope code", value="Paul-P-Feder-0.4m", style=DESC_STYLE + ) planet_num = ipw.IntText(description="Planet", value=1) dumb = [] dumb2 = [] - for save in ["Full field of view", "Zoomed filed of view"]: # , "Aperture file"]: + for save in [ + "Full field of view", + "Zoomed filed of view", + ]: # , "Aperture file"]: box = ipw.HBox() title = ipw.HTML(value=f"{save} file name") label = ipw.Label(value="") @@ -462,7 +501,9 @@ def _make_tess_save_box(self): # self._field_name, self._zoom_name, self._aper_name = dumb self._field_name, self._zoom_name = dumb self.save_files = ipw.Button(description="Save") - self._tess_save_box.children = [scope_name, planet_num] + dumb2 + [self.save_files] + self._tess_save_box.children = ( + [scope_name, planet_num] + dumb2 + [self.save_files] + ) def _update_tess_save_names(self): if self.tess_submission is not None: @@ -474,26 +515,30 @@ def _update_tess_save_names(self): self._zoom_name.value = "" def _viewer(self): - header = ipw.HTML(value=""" + header = ipw.HTML( + value="""

Click and drag or use arrow keys to pan, use +/- keys to zoom

Shift-left click (or Ctrl-left click) to exclude star as target or comp. Click again to include.

-        """)
+        """
+        )
 
-        legend = ipw.HTML(value="""
+        legend = ipw.HTML(
+            value="""

Green circles -- Gaia stars within 2.5 arcmin of target

Red circles -- APASS stars within 1 mag of target

Blue circles -- VSX variables

Red × -- Exclude as target or comp

- """) + """ + ) iw = ImageWidget() out = ipw.Output() set_keybindings(iw) bind_map = iw._viewer.get_bindmap() gvc = iw._viewer.get_canvas() - bind_map.map_event(None, ('shift',), 'ms_left', 'cursor') - gvc.add_callback('cursor-down', wrap(iw, out)) + bind_map.map_event(None, ("shift",), "ms_left", "cursor") + gvc.add_callback("cursor-down", wrap(iw, out)) self.object_name = ipw.Text(description="Object", disabled=True) controls = self._make_control_bar() @@ -502,8 +547,16 @@ def _viewer(self): box = ipw.VBox() inner_box = ipw.HBox() inner_box.children = [iw, legend] - box.children = [self._file_chooser.file_chooser, self.object_name, self._tess_object_info, header, - inner_box, controls, self.tess_save_toggle, self._tess_save_box] + box.children = [ + self._file_chooser.file_chooser, + self.object_name, + self._tess_object_info, + header, + inner_box, + controls, + self.tess_save_toggle, + self._tess_save_box, + ] return box, iw @@ -531,7 +584,9 @@ def save_tess_files(self, button=None): try: self.iw.save(self._field_name.value) except OSError: - raise OSError(f"Existing file ({self._field_name.value}) can not be overwritten. Set overwrite_outputs=True to address this.") + raise OSError( + f"Existing file ({self._field_name.value}) can not be overwritten. Set overwrite_outputs=True to address this." + ) if self._zoom_name.value: self.tess_field_zoom_view() @@ -541,7 +596,9 @@ def save_tess_files(self, button=None): try: self.iw.save(self._zoom_name.value) except OSError: - raise OSError(f"Existing file ({self._zoom_name.value}) can not be overwritten. Set overwrite_outputs=True to address this.") + raise OSError( + f"Existing file ({self._zoom_name.value}) can not be overwritten. Set overwrite_outputs=True to address this." + ) def generate_table(self): """ @@ -555,42 +612,41 @@ def generate_table(self): try: all_table = self.iw.get_all_markers() except AttributeError: - all_table = self.iw.get_markers(marker_name='all') + all_table = self.iw.get_markers(marker_name="all") - elims = np.array([name.startswith('elim') - for name in all_table['marker name']]) + elims = np.array([name.startswith("elim") for name in all_table["marker name"]]) elim_table = all_table[elims] comp_table = all_table[~elims] - index, d2d, d3d = elim_table['coord'].match_to_catalog_sky(comp_table['coord']) + index, d2d, d3d = elim_table["coord"].match_to_catalog_sky(comp_table["coord"]) comp_table.remove_rows(index) # Add separate RA and Dec columns for ease in processing later - comp_table['ra'] = comp_table['coord'].ra.degree - comp_table['dec'] = comp_table['coord'].dec.degree + comp_table["ra"] = comp_table["coord"].ra.degree + comp_table["dec"] = comp_table["coord"].dec.degree # Calculate how far each is from target - comp_table['separation'] = self.target_coord.separation(comp_table['coord']) + comp_table["separation"] = self.target_coord.separation(comp_table["coord"]) # Add dummy column for sorting in the order we want - comp_table['sort'] = np.zeros(len(comp_table)) + comp_table["sort"] = np.zeros(len(comp_table)) # Set sort order - apass_mark = comp_table['marker name'] == 'APASS comparison' - vsx_mark = comp_table['marker name'] == 'VSX' - tess_mark = ((comp_table['marker name'] == 'TESS Targets') | - (comp_table['separation'] < 0.3 * u.arcsec)) - + apass_mark = comp_table["marker name"] == "APASS comparison" + vsx_mark = comp_table["marker name"] == "VSX" + tess_mark = (comp_table["marker name"] == "TESS Targets") | ( + comp_table["separation"] < 0.3 * u.arcsec + ) - comp_table['sort'][apass_mark] = 2 - 
comp_table['sort'][vsx_mark] = 1 - comp_table['sort'][tess_mark] = 0 + comp_table["sort"][apass_mark] = 2 + comp_table["sort"][vsx_mark] = 1 + comp_table["sort"][tess_mark] = 0 # Sort the table - comp_table.sort(['sort', 'separation']) + comp_table.sort(["sort", "separation"]) # Assign the IDs - comp_table['star_id'] = range(1, len(comp_table) + 1) + comp_table["star_id"] = range(1, len(comp_table) + 1) return comp_table @@ -609,26 +665,50 @@ def show_labels(self): original_mark = self.iw._marker for star in comp_table: - star_id = star['star_id'] - if star['marker name'] == 'TESS Targets': - label = f'T{star_id}' - self.iw._marker = functools.partial(self.iw.dc.Text, text=label, fontsize=20, fontscale=False, color='green') - self.iw.add_markers(Table(data=[[star['x']+20], [star['y']-20]], names=['x', 'y']), - marker_name=self._label_name) - - elif star['marker name'] == 'APASS comparison': - label = f'C{star_id}' - self.iw._marker = functools.partial(self.iw.dc.Text, text=label, fontsize=20, fontscale=False, color='red') - self.iw.add_markers(Table(data=[[star['x']+20], [star['y']-20]], names=['x', 'y']), - marker_name=self._label_name) - - elif star['marker name'] == 'VSX': - label = f'V{star_id}' - self.iw._marker = functools.partial(self.iw.dc.Text, text=label, fontsize=20, fontscale=False, color='blue') - self.iw.add_markers(Table(data=[[star['x']+20], [star['y']-20]], names=['x', 'y']), - marker_name=self._label_name) + star_id = star["star_id"] + if star["marker name"] == "TESS Targets": + label = f"T{star_id}" + self.iw._marker = functools.partial( + self.iw.dc.Text, + text=label, + fontsize=20, + fontscale=False, + color="green", + ) + self.iw.add_markers( + Table(data=[[star["x"] + 20], [star["y"] - 20]], names=["x", "y"]), + marker_name=self._label_name, + ) + + elif star["marker name"] == "APASS comparison": + label = f"C{star_id}" + self.iw._marker = functools.partial( + self.iw.dc.Text, + text=label, + fontsize=20, + fontscale=False, + color="red", + ) + self.iw.add_markers( + Table(data=[[star["x"] + 20], [star["y"] - 20]], names=["x", "y"]), + marker_name=self._label_name, + ) + + elif star["marker name"] == "VSX": + label = f"V{star_id}" + self.iw._marker = functools.partial( + self.iw.dc.Text, + text=label, + fontsize=20, + fontscale=False, + color="blue", + ) + self.iw.add_markers( + Table(data=[[star["x"] + 20], [star["y"] - 20]], names=["x", "y"]), + marker_name=self._label_name, + ) else: - label = f'U{star_id}' + label = f"U{star_id}" print(f"Unrecognized marker name: {star['marker name']}") plot_names.append(label) self.iw._marker = original_mark @@ -652,9 +732,7 @@ def remove_labels(self): # No labels, keep going pass - def show_circle(self, - radius=2.5 * u.arcmin, - pixel_scale=0.56 * u.arcsec / u.pixel): + def show_circle(self, radius=2.5 * u.arcmin, pixel_scale=0.56 * u.arcsec / u.pixel): """ Show a circle around the target. @@ -673,15 +751,15 @@ def show_circle(self, None Circle is shown around the target. 
""" - radius_pixels = np.round((radius / pixel_scale).to(u.pixel).value, - decimals=0) + radius_pixels = np.round((radius / pixel_scale).to(u.pixel).value, decimals=0) orig_marker = self.iw.marker - self.iw.marker = {'color': 'yellow', - 'radius': radius_pixels, - 'type': 'circle'} - self.iw.add_markers(Table(data=[[self.target_coord]], names=['coords']), - skycoord_colname='coords', - use_skycoord=True, marker_name=self._circle_name) + self.iw.marker = {"color": "yellow", "radius": radius_pixels, "type": "circle"} + self.iw.add_markers( + Table(data=[[self.target_coord]], names=["coords"]), + skycoord_colname="coords", + use_skycoord=True, + marker_name=self._circle_name, + ) self.iw.marker = orig_marker def remove_circle(self): @@ -711,7 +789,7 @@ def tess_field_view(self): """ # Show whole field of view - self.iw.zoom_level = 'fit' + self.iw.zoom_level = "fit" # Show the circle self.show_circle() @@ -739,8 +817,10 @@ def tess_field_zoom_view(self, width=6 * u.arcmin): # Turn off labels -- too cluttered self.remove_labels() - left_side = self.ccd.wcs.pixel_to_world(0, self.ccd.shape[1]/2) - right_side = self.ccd.wcs.pixel_to_world(self.ccd.shape[0], self.ccd.shape[1]/2) + left_side = self.ccd.wcs.pixel_to_world(0, self.ccd.shape[1] / 2) + right_side = self.ccd.wcs.pixel_to_world( + self.ccd.shape[0], self.ccd.shape[1] / 2 + ) fov = left_side.separation(right_side) view_ratio = width / fov diff --git a/stellarphot/gui_tools/fits_opener.py b/stellarphot/gui_tools/fits_opener.py index a1d2b1f5..374e6317 100644 --- a/stellarphot/gui_tools/fits_opener.py +++ b/stellarphot/gui_tools/fits_opener.py @@ -7,7 +7,7 @@ from ipyfilechooser import FileChooser -__all__ = ['FitsOpener'] +__all__ = ["FitsOpener"] class FitsOpener: @@ -37,10 +37,11 @@ class FitsOpener: path : `pathlib.Path` """ + def __init__(self, title="Choose an image", filter_pattern=None, **kwargs): self._fc = FileChooser(title=title, **kwargs) if not filter_pattern: - self._fc.filter_pattern = ['*.fit*', '*.fit*.[bg]z'] + self._fc.filter_pattern = ["*.fit*", "*.fit*.[bg]z"] else: self._fc.filter_pattern = filter_pattern @@ -87,7 +88,7 @@ def _set_header(self): return try: - self.object = self._header['object'] + self.object = self._header["object"] except KeyError: pass @@ -105,6 +106,7 @@ def register_callback(self, callable): callable : function A function that takes one argument. """ + def wrap_call(change): self._set_header() callable(change) diff --git a/stellarphot/gui_tools/photometry_widget_functions.py b/stellarphot/gui_tools/photometry_widget_functions.py index 879b1b02..e6dc9b09 100644 --- a/stellarphot/gui_tools/photometry_widget_functions.py +++ b/stellarphot/gui_tools/photometry_widget_functions.py @@ -6,7 +6,7 @@ from stellarphot.settings import ApertureSettings, PhotometryFileSettings, ui_generator -__all__ = ['PhotometrySettings'] +__all__ = ["PhotometrySettings"] class PhotometrySettings: @@ -31,10 +31,12 @@ class PhotometrySettings: object_name : str The name of the object. 
""" + def __init__(self): self._file_loc_widget = ui_generator(PhotometryFileSettings) - self._object_name = ipw.Dropdown(description='Choose object', - style=dict(description_width='initial')) + self._object_name = ipw.Dropdown( + description="Choose object", style=dict(description_width="initial") + ) self._file_loc_widget.observe(self._update_locations) self.ifc = None @@ -81,9 +83,13 @@ def _update_ifc(self, change): def _update_object_list(self, change): if self.ifc.summary: - self._object_name.options = sorted(set(self.ifc.summary['object'][~self.ifc.summary['object'].mask])) + self._object_name.options = sorted( + set(self.ifc.summary["object"][~self.ifc.summary["object"].mask]) + ) else: self._object_name.options = [] def _update_aperture_settings(self, change): - self.aperture_settings = ApertureSettings.parse_file(self.file_locations.aperture_settings_file) + self.aperture_settings = ApertureSettings.parse_file( + self.file_locations.aperture_settings_file + ) diff --git a/stellarphot/gui_tools/seeing_profile_functions.py b/stellarphot/gui_tools/seeing_profile_functions.py index ff7b304c..81a9d3d5 100644 --- a/stellarphot/gui_tools/seeing_profile_functions.py +++ b/stellarphot/gui_tools/seeing_profile_functions.py @@ -22,8 +22,14 @@ from stellarphot.plotting import seeing_plot from stellarphot.settings import ApertureSettings, ui_generator -__all__ = ['set_keybindings', 'find_center', 'radial_profile', - 'RadialProfile', 'box', 'SeeingProfileWidget'] +__all__ = [ + "set_keybindings", + "find_center", + "radial_profile", + "RadialProfile", + "box", + "SeeingProfileWidget", +] desc_style = {"description_width": "initial"} @@ -61,31 +67,31 @@ def set_keybindings(image_widget, scroll_zoom=False): # Displays the event map... # bind_map.eventmap bind_map.clear_event_map() - bind_map.map_event(None, (), 'ms_left', 'pan') + bind_map.map_event(None, (), "ms_left", "pan") if scroll_zoom: - bind_map.map_event(None, (), 'pa_pan', 'zoom') + bind_map.map_event(None, (), "pa_pan", "zoom") # bind_map.map_event(None, (), 'ms_left', 'cursor') # contrast with right mouse - bind_map.map_event(None, (), 'ms_right', 'contrast') + bind_map.map_event(None, (), "ms_right", "contrast") # shift-right mouse to reset contrast - bind_map.map_event(None, ('shift',), 'ms_right', 'contrast_restore') - bind_map.map_event(None, ('ctrl',), 'ms_left', 'cursor') + bind_map.map_event(None, ("shift",), "ms_right", "contrast_restore") + bind_map.map_event(None, ("ctrl",), "ms_left", "cursor") # Bind +/- to zoom in/out - bind_map.map_event(None, (), 'kp_+', 'zoom_in') - bind_map.map_event(None, (), 'kp_=', 'zoom_in') - bind_map.map_event(None, (), 'kp_-', 'zoom_out') - bind_map.map_event(None, (), 'kp__', 'zoom_out') + bind_map.map_event(None, (), "kp_+", "zoom_in") + bind_map.map_event(None, (), "kp_=", "zoom_in") + bind_map.map_event(None, (), "kp_-", "zoom_out") + bind_map.map_event(None, (), "kp__", "zoom_out") # Bind arrow keys to panning # There is NOT a typo below. 
I want the keys to move the image in the # direction of the arrow - bind_map.map_event(None, (), 'kp_left', 'pan_right') - bind_map.map_event(None, (), 'kp_right', 'pan_left') - bind_map.map_event(None, (), 'kp_up', 'pan_down') - bind_map.map_event(None, (), 'kp_down', 'pan_up') + bind_map.map_event(None, (), "kp_left", "pan_right") + bind_map.map_event(None, (), "kp_right", "pan_left") + bind_map.map_event(None, (), "kp_up", "pan_down") + bind_map.map_event(None, (), "kp_down", "pan_up") # TODO: Can this be replaced by a properly masked call to centroid_com? @@ -125,7 +131,7 @@ def find_center(image, center_guess, cutout_size=30, max_iters=10): cnt = 0 # Grab the cutout... - sub_data = image[y - pad:y + pad, x - pad:x + pad] # - med + sub_data = image[y - pad : y + pad, x - pad : x + pad] # - med # ...do stats on it... _, sub_med, _ = sigma_clipped_stats(sub_data) @@ -140,14 +146,13 @@ def find_center(image, center_guess, cutout_size=30, max_iters=10): # ceno is the "original" center guess, set it to something nonsensical here ceno = np.array([-100, -100]) - while (cnt <= max_iters and - (np.abs(np.array([x_cm, y_cm]) - pad).max() > 3 - or np.abs(cen - ceno).max() > 0.1)): - + while cnt <= max_iters and ( + np.abs(np.array([x_cm, y_cm]) - pad).max() > 3 or np.abs(cen - ceno).max() > 0.1 + ): # Update x, y positions for subsetting x = int(np.floor(x_cm)) + x - pad y = int(np.floor(y_cm)) + y - pad - sub_data = image[y - pad:y + pad, x - pad:x + pad] # - med + sub_data = image[y - pad : y + pad, x - pad : x + pad] # - med _, sub_med, _ = sigma_clipped_stats(sub_data) # sub_med = 0 mask = (sub_data - sub_med) < 0 @@ -155,8 +160,10 @@ def find_center(image, center_guess, cutout_size=30, max_iters=10): ceno = cen cen = np.array([x_cm + x - pad, y_cm + y - pad]) if not np.all(~np.isnan(cen)): - raise RuntimeError('Centroid finding failed, ' - 'previous was {}, current is {}'.format(ceno, cen)) + raise RuntimeError( + "Centroid finding failed, " + "previous was {}, current is {}".format(ceno, cen) + ) cnt += 1 return cen @@ -200,10 +207,10 @@ def radial_profile(data, center, size=30, return_scaled=True): """ yd, xd = np.indices((size, size)) - sub_image = Cutout2D(data, center, size, mode='strict') + sub_image = Cutout2D(data, center, size, mode="strict") sub_center = sub_image.center_cutout - r = np.sqrt((xd - sub_center[0])**2 + (yd - sub_center[1])**2) + r = np.sqrt((xd - sub_center[0]) ** 2 + (yd - sub_center[1]) ** 2) r_exact = r.copy() r = r.astype(int) @@ -267,6 +274,7 @@ class RadialProfile: scaled_profile : numpy array Radial profile scaled to have a maximum of 1. """ + def __init__(self, data, x, y): self._cen = find_center(data, (x, y), cutout_size=30) self._data = data @@ -283,10 +291,8 @@ def profile(self, profile_size): """ self.profile_size = profile_size - self.r_exact, self.ravg, self.radialprofile = ( - radial_profile(self.data, - self.cen, - size=profile_size) + self.r_exact, self.ravg, self.radialprofile = radial_profile( + self.data, self.cen, size=profile_size ) self.sub_data = Cutout2D(self.data, self.cen, size=profile_size).data @@ -441,20 +447,21 @@ class SeeingProfileWidget: Box containing the TESS settings. 
""" + def __init__(self, imagewidget=None, width=500): if not imagewidget: - imagewidget = ImageWidget(image_width=width, - image_height=width, - use_opencv=True) + imagewidget = ImageWidget( + image_width=width, image_height=width, use_opencv=True + ) self.iw = imagewidget # Do some set up of the ImageWidget set_keybindings(self.iw, scroll_zoom=False) bind_map = self.iw._viewer.get_bindmap() - bind_map.map_event(None, ('shift',), 'ms_left', 'cursor') + bind_map.map_event(None, ("shift",), "ms_left", "cursor") gvc = self.iw._viewer.get_canvas() self._mse = self._make_show_event() - gvc.add_callback('cursor-down', self._mse) + gvc.add_callback("cursor-down", self._mse) # Outputs to hold the graphs self.out = ipw.Output() @@ -465,18 +472,21 @@ def __init__(self, imagewidget=None, width=500): self.fits_file = FitsOpener(title="Choose an image") big_box = ipw.HBox() big_box = ipw.GridspecLayout(1, 2) - layout = ipw.Layout(width='20ch') + layout = ipw.Layout(width="20ch") vb = ipw.VBox() self.aperture_settings_file_name = ipw.Text( description="Aperture settings file name", - style={'description_width': 'initial'}, - value="aperture_settings.json" + style={"description_width": "initial"}, + value="aperture_settings.json", ) self.aperture_settings = ui_generator(ApertureSettings) self.aperture_settings.show_savebuttonbar = True self.aperture_settings.path = Path(self.aperture_settings_file_name.value) self.save_aps = ipw.Button(description="Save settings") - vb.children = [self.aperture_settings_file_name, self.aperture_settings] #, self.save_aps] #, self.in_t, self.out_t] + vb.children = [ + self.aperture_settings_file_name, + self.aperture_settings, + ] # , self.save_aps] #, self.in_t, self.out_t] lil_box = ipw.VBox() lil_tabs = ipw.Tab() @@ -491,20 +501,20 @@ def __init__(self, imagewidget=None, width=500): imbox.children = [imagewidget, vb] big_box[0, 0] = imbox big_box[0, 1] = lil_box - big_box.layout.width = '100%' + big_box.layout.width = "100%" # Line below puts space between the image and the plots so the plots # don't jump around as the image value changes. 
- big_box.layout.justify_content = 'space-between' + big_box.layout.justify_content = "space-between" self.big_box = big_box self.container.children = [self.fits_file.file_chooser, self.big_box] self.box = self.container - self._aperture_name = 'aperture' + self._aperture_name = "aperture" self._tess_sub = None # Fill this in later with name of object from FITS file - self.object_name = '' + self.object_name = "" self._set_observers() self.aperture_settings.description = "" @@ -528,7 +538,7 @@ def _construct_tess_sub(self): self._tess_sub = TessSubmission.from_header( fits.getheader(file), telescope_code=self.setting_box.telescope_code.value, - planet=self.setting_box.planet_num.value + planet=self.setting_box.planet_num.value, ) def _set_seeing_profile_name(self, change): @@ -536,7 +546,7 @@ def _set_seeing_profile_name(self, change): self.seeing_file_name.value = self._tess_sub.seeing_profile def _save_toggle_action(self, change): - activated = change['new'] + activated = change["new"] if activated: self.setting_box.layout.visibility = "visible" @@ -548,7 +558,7 @@ def _save_seeing_plot(self, button): self._seeing_plot_fig.savefig(self.seeing_file_name.value) def _change_aperture_save_location(self, change): - new_name = change['new'] + new_name = change["new"] new_path = Path(new_name) self.aperture_settings.path = new_path self.aperture_settings.savebuttonbar.unsaved_changes = True @@ -556,36 +566,42 @@ def _change_aperture_save_location(self, change): def _set_observers(self): def aperture_obs(change): self._update_plots() - ape = ApertureSettings(**change['new']) - self.aperture_settings.description = ( - f"Inner annulus: {ape.inner_annulus}, outer annulus: {ape.outer_annulus}" - ) + ape = ApertureSettings(**change["new"]) + self.aperture_settings.description = f"Inner annulus: {ape.inner_annulus}, outer annulus: {ape.outer_annulus}" - self.aperture_settings.observe(aperture_obs, names='_value') + self.aperture_settings.observe(aperture_obs, names="_value") self.save_aps.on_click(self._save_ap_settings) - self.aperture_settings_file_name.observe(self._change_aperture_save_location, names='value') + self.aperture_settings_file_name.observe( + self._change_aperture_save_location, names="value" + ) self.fits_file.register_callback(self._update_file) - self.save_toggle.observe(self._save_toggle_action, names='value') + self.save_toggle.observe(self._save_toggle_action, names="value") self.save_seeing.on_click(self._save_seeing_plot) self.setting_box.planet_num.observe(self._set_seeing_profile_name) self.setting_box.telescope_code.observe(self._set_seeing_profile_name) def _save_ap_settings(self, button): - with open('aperture_settings.txt', 'w') as f: - f.write(f'{ap_rad},{ap_rad + 10},{ap_rad + 15}') + with open("aperture_settings.txt", "w") as f: + f.write(f"{ap_rad},{ap_rad + 10},{ap_rad + 15}") def _make_tess_box(self): box = ipw.VBox() setting_box = ipw.HBox() - self.save_toggle = ipw.ToggleButton(description="TESS seeing profile...", - disabled=True) - scope_name = ipw.Text(description="Telescope code", - value="Paul-P-Feder-0.4m", - style=desc_style) + self.save_toggle = ipw.ToggleButton( + description="TESS seeing profile...", disabled=True + ) + scope_name = ipw.Text( + description="Telescope code", value="Paul-P-Feder-0.4m", style=desc_style + ) planet_num = ipw.IntText(description="Planet", value=1) self.save_seeing = ipw.Button(description="Save") self.seeing_file_name = ipw.Label(value="Moo") - setting_box.children = (scope_name, planet_num, self.seeing_file_name, 
self.save_seeing) + setting_box.children = ( + scope_name, + planet_num, + self.seeing_file_name, + self.save_seeing, + ) # for kid in setting_box.children: # kid.disabled = True box.children = (self.save_toggle, setting_box) @@ -600,7 +616,6 @@ def _update_ap_settings(self, value): self.aperture_settings.value = value def _make_show_event(self): - def show_event(viewer, event=None, datax=None, datay=None, aperture=None): profile_size = 60 default_gap = 5 # pixels @@ -620,7 +635,7 @@ def show_event(viewer, event=None, datax=None, datay=None, aperture=None): rad_prof = RadialProfile(data, x, y) try: - try: # Remove previous marker + try: # Remove previous marker self.iw.remove_markers(marker_name=self._aperture_name) except AttributeError: self.iw.remove_markers_by_name(marker_name=self._aperture_name) @@ -629,9 +644,12 @@ def show_event(viewer, event=None, datax=None, datay=None, aperture=None): pass # ADD MARKER WHERE CLICKED - self.iw.add_markers(Table(data=[[rad_prof.cen[0]], [rad_prof.cen[1]]], - names=['x', 'y']), - marker_name=self._aperture_name) + self.iw.add_markers( + Table( + data=[[rad_prof.cen[0]], [rad_prof.cen[1]]], names=["x", "y"] + ), + marker_name=self._aperture_name, + ) # ----> MOVE PROFILE CONSTRUCTION INTO FUNCTION <---- @@ -644,14 +662,18 @@ def show_event(viewer, event=None, datax=None, datay=None, aperture=None): self.rad_prof = rad_prof # Make an aperture settings object, but don't update it's widget yet. - ap_settings = ApertureSettings(radius=aperture_radius, - gap=default_gap, - annulus_width=default_annulus_width) + ap_settings = ApertureSettings( + radius=aperture_radius, + gap=default_gap, + annulus_width=default_annulus_width, + ) update_aperture_settings = True else: # User changed aperture - aperture_radius = aperture['radius'] - ap_settings = ApertureSettings(**aperture) # Make an ApertureSettings object + aperture_radius = aperture["radius"] + ap_settings = ApertureSettings( + **aperture + ) # Make an ApertureSettings object rad_prof = self.rad_prof @@ -672,44 +694,54 @@ def _update_plots(self): ap_settings = ApertureSettings(**self.aperture_settings.value) with self.out: # sub_med += med - self._seeing_plot_fig = seeing_plot(rad_prof.r_exact, rad_prof.scaled_exact_counts, - rad_prof.ravg, - rad_prof.scaled_profile, rad_prof.HWHM, - self.object_name, - aperture_settings=ap_settings, - figsize=fig_size) + self._seeing_plot_fig = seeing_plot( + rad_prof.r_exact, + rad_prof.scaled_exact_counts, + rad_prof.ravg, + rad_prof.scaled_profile, + rad_prof.HWHM, + self.object_name, + aperture_settings=ap_settings, + figsize=fig_size, + ) plt.show() # CALCULATE AND DISPLAY NET COUNTS INSIDE RADIUS self.out2.clear_output(wait=True) with self.out2: - sub_blot = rad_prof.sub_data.copy().astype('float32') + sub_blot = rad_prof.sub_data.copy().astype("float32") min_idx = profile_size // 2 - 2 * rad_prof.FWHM max_idx = profile_size // 2 + 2 * rad_prof.FWHM sub_blot[min_idx:max_idx, min_idx:max_idx] = np.nan sub_std = np.nanstd(sub_blot) new_sub_med = np.nanmedian(sub_blot) - r_exact, ravg, tbin2 = radial_profile(rad_prof.data - new_sub_med, rad_prof.cen, - size=profile_size, - return_scaled=False) - r_exact_s, ravg_s, tbin2_s = radial_profile(rad_prof.data - new_sub_med, rad_prof.cen, - size=profile_size, - return_scaled=True) - #tbin2 = np.bincount(r.ravel(), (sub_data - sub_med).ravel()) + r_exact, ravg, tbin2 = radial_profile( + rad_prof.data - new_sub_med, + rad_prof.cen, + size=profile_size, + return_scaled=False, + ) + r_exact_s, ravg_s, tbin2_s = radial_profile( 
+ rad_prof.data - new_sub_med, + rad_prof.cen, + size=profile_size, + return_scaled=True, + ) + # tbin2 = np.bincount(r.ravel(), (sub_data - sub_med).ravel()) counts = np.cumsum(tbin2) plt.figure(figsize=fig_size) plt.plot(rad_prof.radius_values, counts) plt.xlim(0, 40) ylim = plt.ylim() - plt.vlines(ap_settings.radius, *plt.ylim(), colors=['red']) + plt.vlines(ap_settings.radius, *plt.ylim(), colors=["red"]) plt.ylim(*ylim) plt.grid() - plt.title('Net counts in aperture') + plt.title("Net counts in aperture") e_sky = np.nanmax([np.sqrt(new_sub_med), sub_std]) - plt.xlabel('Aperture radius (pixels)') - plt.ylabel('Net counts') + plt.xlabel("Aperture radius (pixels)") + plt.ylabel("Net counts") plt.show() # CALCULATE And DISPLAY SNR AS A FUNCTION OF RADIUS @@ -726,20 +758,23 @@ def _update_plots(self): nr = tbin2 / tbin2_s # This ignores dark current - error = np.sqrt(poisson ** 2 + np.cumsum(nr) - * (e_sky ** 2 + (read_noise / gain)** 2)) + error = np.sqrt( + poisson**2 + np.cumsum(nr) * (e_sky**2 + (read_noise / gain) ** 2) + ) snr = np.cumsum(tbin2) / error plt.figure(figsize=fig_size) plt.plot(rad_prof.radius_values + 1, snr) - plt.title(f'Signal to noise ratio max {snr.max():.1f} ' - f'at radius {snr.argmax() + 1}') + plt.title( + f"Signal to noise ratio max {snr.max():.1f} " + f"at radius {snr.argmax() + 1}" + ) plt.xlim(0, 40) ylim = plt.ylim() - plt.vlines(ap_settings.radius, *plt.ylim(), colors=['red']) + plt.vlines(ap_settings.radius, *plt.ylim(), colors=["red"]) plt.ylim(*ylim) - plt.xlabel('Aperture radius (pixels)') - plt.ylabel('SNR') + plt.xlabel("Aperture radius (pixels)") + plt.ylabel("SNR") plt.grid() plt.show() diff --git a/stellarphot/gui_tools/tests/test_seeing_profile.py b/stellarphot/gui_tools/tests/test_seeing_profile.py index 6cb8ac5e..dacbaff9 100644 --- a/stellarphot/gui_tools/tests/test_seeing_profile.py +++ b/stellarphot/gui_tools/tests/test_seeing_profile.py @@ -9,17 +9,20 @@ from stellarphot.gui_tools import seeing_profile_functions as spf # Make a few round stars -STARS = Table(dict(amplitude=[1000, 200, 300], - x_mean=[30, 100, 150], - y_mean=[40, 110, 160], - x_stddev=[4, 4, 4], - y_stddev=[4, 4, 4], - theta=[0, 0, 0] - ) +STARS = Table( + dict( + amplitude=[1000, 200, 300], + x_mean=[30, 100, 150], + y_mean=[40, 110, 160], + x_stddev=[4, 4, 4], + y_stddev=[4, 4, 4], + theta=[0, 0, 0], + ) ) SHAPE = (300, 300) RANDOM_SEED = 1230971 + def test_keybindings(): def simple_bindmap(bindmap): bound_keys = {} @@ -27,7 +30,7 @@ def simple_bindmap(bindmap): for key in bindmap.keys(): modifier = key[1] key_name = key[2] - bound_keys[str(key[0]) + ''.join(modifier) + key_name] = key + bound_keys[str(key[0]) + "".join(modifier) + key_name] = key return bound_keys # This test assumes the ginga widget backend... @@ -36,18 +39,18 @@ def simple_bindmap(bindmap): bound_keys = simple_bindmap(original_bindings) # Spot check a couple of things before we run our function - assert 'Nonekp_D' in bound_keys - assert 'Nonekp_+' in bound_keys - assert 'Nonekp_left' not in bound_keys + assert "Nonekp_D" in bound_keys + assert "Nonekp_+" in bound_keys + assert "Nonekp_left" not in bound_keys # rebind spf.set_keybindings(iw) new_bindings = iw._viewer.get_bindmap().eventmap bound_keys = simple_bindmap(new_bindings) - assert 'Nonekp_D' not in bound_keys - assert 'Nonekp_+' in bound_keys + assert "Nonekp_D" not in bound_keys + assert "Nonekp_+" in bound_keys # Yes, the line below is correct... 
- assert new_bindings[bound_keys['Nonekp_left']]['name'] == 'pan_right' + assert new_bindings[bound_keys["Nonekp_left"]]["name"] == "pan_right" def test_find_center_no_noise_good_guess(): @@ -59,8 +62,9 @@ def test_find_center_no_noise_good_guess(): def test_find_center_noise_bad_guess(): image = make_gaussian_sources_image(SHAPE, STARS) - noise = make_noise_image(SHAPE, distribution='gaussian', mean=0, stddev=5, - seed=RANDOM_SEED) + noise = make_noise_image( + SHAPE, distribution="gaussian", mean=0, stddev=5, seed=RANDOM_SEED + ) cen2 = spf.find_center(image + noise, [40, 50], max_iters=1) # Bad initial guess, noise, should take more than one try... with pytest.raises(AssertionError): @@ -69,8 +73,9 @@ def test_find_center_noise_bad_guess(): def test_find_center_noise_good_guess(): image = make_gaussian_sources_image(SHAPE, STARS) - noise = make_noise_image(SHAPE, distribution='gaussian', mean=0, stddev=5, - seed=RANDOM_SEED) + noise = make_noise_image( + SHAPE, distribution="gaussian", mean=0, stddev=5, seed=RANDOM_SEED + ) # Trying again with several iterations should work cen3 = spf.find_center(image + noise, [31, 41], max_iters=10) # Tolerance chosen based on some trial and error @@ -88,8 +93,9 @@ def test_find_center_no_star(): # No star anywhere near the original guess image = make_gaussian_sources_image(SHAPE, STARS) # Offset the mean from zero to avoid nan center - noise = make_noise_image(SHAPE, distribution='gaussian', - mean=1000, stddev=5, seed=RANDOM_SEED) + noise = make_noise_image( + SHAPE, distribution="gaussian", mean=1000, stddev=5, seed=RANDOM_SEED + ) cen = spf.find_center(image + noise, [50, 200], max_iters=10) assert (np.abs(cen[0] - 50) > 1) and (np.abs(cen[1] - 200) > 1) @@ -97,14 +103,12 @@ def test_find_center_no_star(): def test_radial_profile(): image = make_gaussian_sources_image(SHAPE, STARS) for row in STARS: - cen = spf.find_center(image, (row['x_mean'], row['y_mean']), - max_iters=10) + cen = spf.find_center(image, (row["x_mean"], row["y_mean"]), max_iters=10) print(row) r_ex, r_a, radprof = spf.radial_profile(image, cen) - r_exs, r_as, radprofs = spf.radial_profile(image, cen, - return_scaled=False) + r_exs, r_as, radprofs = spf.radial_profile(image, cen, return_scaled=False) # Numerical value below is integral of input 2D gaussian, 2pi A sigma^2 - expected_integral = 2 * np.pi * row['amplitude'] * row['x_stddev']**2 + expected_integral = 2 * np.pi * row["amplitude"] * row["x_stddev"] ** 2 print(expected_integral, radprofs.sum()) np.testing.assert_allclose(radprofs.sum(), expected_integral, atol=50) diff --git a/stellarphot/io/__init__.py b/stellarphot/io/__init__.py index a737443f..06033810 100644 --- a/stellarphot/io/__init__.py +++ b/stellarphot/io/__init__.py @@ -1,4 +1,4 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst from .aij import * -from .tess import * \ No newline at end of file +from .tess import * diff --git a/stellarphot/io/aij.py b/stellarphot/io/aij.py index 754ba1d2..2a5fd8f9 100644 --- a/stellarphot/io/aij.py +++ b/stellarphot/io/aij.py @@ -7,8 +7,14 @@ import numpy as np -__all__ = [ 'ApertureAIJ', 'MultiApertureAIJ', 'ApertureFileAIJ', - 'generate_aij_table', 'parse_aij_table', 'Star'] +__all__ = [ + "ApertureAIJ", + "MultiApertureAIJ", + "ApertureFileAIJ", + "generate_aij_table", + "parse_aij_table", + "Star", +] class ApertureAIJ: @@ -33,6 +39,7 @@ class ApertureAIJ: Whether to remove stars in the annulus from the photometry. 
Default: True """ + def __init__(self): # Outer annulus radius self.rback2 = 41.0 @@ -50,21 +57,19 @@ def __init__(self): self.backplane = False def __setattr__(self, attr, value): - floats = ['rback1', 'rback2', 'radius'] - bools = ['removebackstars', 'backplane'] + floats = ["rback1", "rback2", "radius"] + bools = ["removebackstars", "backplane"] if attr in floats: super().__setattr__(attr, float(value)) elif attr in bools: if isinstance(value, str): - value = True if value.lower() == 'true' else False + value = True if value.lower() == "true" else False super().__setattr__(attr, value) def __eq__(self, other): - attributes = ['rback1', 'rback2', 'radius', - 'removebackstars', 'backplane'] - equal = [getattr(self, attr) == getattr(other, attr) - for attr in attributes] + attributes = ["rback1", "rback2", "radius", "removebackstars", "backplane"] + equal = [getattr(self, attr) == getattr(other, attr) for attr in attributes] return all(equal) @@ -109,6 +114,7 @@ class MultiApertureAIJ: yapertures : list List of y positions of the apertures. """ + def __init__(self): # Default values for these chosen to match AIJ defaults # They are not used by stellarphot @@ -130,53 +136,68 @@ def __init__(self): self.decapertures = [] def __setattr__(self, name, value): - floats = ['naperturesmax', 'apfwhmfactor'] - bools = ['usevarsizeap'] - lists = ['isrefstar', 'centroidstar', 'isalignstar', 'xapertures', - 'yapertures', 'absmagapertures', 'raapertures', 'decapertures'] + floats = ["naperturesmax", "apfwhmfactor"] + bools = ["usevarsizeap"] + lists = [ + "isrefstar", + "centroidstar", + "isalignstar", + "xapertures", + "yapertures", + "absmagapertures", + "raapertures", + "decapertures", + ] - list_is_bool = ['isrefstar', 'centroidstar', 'isalignstar'] + list_is_bool = ["isrefstar", "centroidstar", "isalignstar"] if name not in (floats + bools + lists): - raise AttributeError(f'Attribute {name} does not exist') + raise AttributeError(f"Attribute {name} does not exist") if name in floats: value = float(value) elif name in bools: if isinstance(value, str): - value = True if value.lower() == 'true' else False + value = True if value.lower() == "true" else False else: value = bool(value) elif name in lists: - if (name in list_is_bool) and (len(value) > 0) and isinstance(value[0], str): - value = [True if v.lower() == 'true' else False for v in value] + if ( + (name in list_is_bool) + and (len(value) > 0) + and isinstance(value[0], str) + ): + value = [True if v.lower() == "true" else False for v in value] value = list(value) super().__setattr__(name, value) def __eq__(self, other): simple_attrs = [ - 'naperturesmax', - 'apfwhmfactor', - 'usevarsizeap', - 'isrefstar', - 'centroidstar', - 'isalignstar', + "naperturesmax", + "apfwhmfactor", + "usevarsizeap", + "isrefstar", + "centroidstar", + "isalignstar", ] float_attrs = [ - 'xapertures', - 'yapertures', - 'absmagapertures', - 'raapertures', - 'decapertures' + "xapertures", + "yapertures", + "absmagapertures", + "raapertures", + "decapertures", ] - simple_eq = [getattr(self, attr) == getattr(other, attr) - for attr in simple_attrs] + simple_eq = [ + getattr(self, attr) == getattr(other, attr) for attr in simple_attrs + ] - float_eq = [np.allclose(getattr(self, attr), getattr(other, attr), equal_nan=True) - for attr in float_attrs] + float_eq = [ + np.allclose(getattr(self, attr), getattr(other, attr), equal_nan=True) + for attr in float_attrs + ] equal = simple_eq + float_eq @@ -185,17 +206,18 @@ def __eq__(self, other): class ApertureFileAIJ: """ - Class 
to represent AstroImageJ aperture file. + Class to represent AstroImageJ aperture file. - Attributes - ---------- + Attributes + ---------- - aperture : `~stellarphot.io.ApertureAIJ` - Aperture information. -` - multiaperture : `~stellarphot.io.MultiApertureAIJ` - Multi-aperture information. + aperture : `~stellarphot.io.ApertureAIJ` + Aperture information. + ` + multiaperture : `~stellarphot.io.MultiApertureAIJ` + Multi-aperture information. """ + def __init__(self): self.aperture = ApertureAIJ() self.multiaperture = MultiApertureAIJ() @@ -208,22 +230,24 @@ def __str__(self): base_attrib = vars(attrib) for bname, battrib in base_attrib.items(): try: - value = ','.join([str(v).lower() for v in battrib]) + value = ",".join([str(v).lower() for v in battrib]) except TypeError: value = battrib if value is True: - value = 'true' + value = "true" elif value is False: - value = 'false' + value = "false" - lines.append(f'.{name}.{bname}={value}') + lines.append(f".{name}.{bname}={value}") # Add a trailing blank line - return '\n'.join(lines) + '\n' + return "\n".join(lines) + "\n" def __eq__(self, other): - return (self.aperture == other.aperture) and (self.multiaperture == other.multiaperture) + return (self.aperture == other.aperture) and ( + self.multiaperture == other.multiaperture + ) def write(self, file): """ @@ -255,15 +279,15 @@ def read(cls, file): aij_aps = cls() - with open(file, 'r') as f: + with open(file, "r") as f: for line in f: - class_path, value = line.strip().split('=') + class_path, value = line.strip().split("=") # There is always a leading dot - _, attr1, attr2 = class_path.split('.') + _, attr1, attr2 = class_path.split(".") obj = getattr(aij_aps, attr1) # value is either a single value or a list of values separated by commas - vals = value.split(',') + vals = value.split(",") if len(vals) == 1: val_to_set = vals[0] else: @@ -277,11 +301,17 @@ def read(cls, file): return aij_aps @classmethod - def from_table(cls, aperture_table, - aperture_rad=None, inner_annulus=None, outer_annulus=None, - default_absmag=99.999, default_isalign=True, - default_centroidstar=True, - y_size=4096): + def from_table( + cls, + aperture_table, + aperture_rad=None, + inner_annulus=None, + outer_annulus=None, + default_absmag=99.999, + default_isalign=True, + default_centroidstar=True, + y_size=4096, + ): """ Create an `~stellarphot.io.ApertureFileAIJ` instance from a stellarphot aperture table and info about the aperture sizes. @@ -327,37 +357,38 @@ def from_table(cls, aperture_table, apAIJ.multiaperture.naperturesmax = n_apertures + 1 # A boolean column for this would be better, but this will do # for now. - apAIJ.multiaperture.isrefstar = [('comparison' in name.lower()) - for name in aperture_table['marker name']] + apAIJ.multiaperture.isrefstar = [ + ("comparison" in name.lower()) for name in aperture_table["marker name"] + ] # These are not currently in the table but that could change... 
- if 'centroidstar' not in columns: + if "centroidstar" not in columns: apAIJ.multiaperture.centroidstar = [default_centroidstar] * n_apertures else: - apAIJ.multiaperture.centroidstar = aperture_table['centroidstar'] + apAIJ.multiaperture.centroidstar = aperture_table["centroidstar"] - if 'isalign' not in columns: + if "isalign" not in columns: apAIJ.multiaperture.isalignstar = [default_isalign] * n_apertures else: - apAIJ.multiaperture.isalignstar = aperture_table['isalign'] + apAIJ.multiaperture.isalignstar = aperture_table["isalign"] - apAIJ.multiaperture.xapertures = aperture_table['x'] - apAIJ.multiaperture.yapertures = y_size - aperture_table['y'] + apAIJ.multiaperture.xapertures = aperture_table["x"] + apAIJ.multiaperture.yapertures = y_size - aperture_table["y"] - if 'absmag' not in columns: + if "absmag" not in columns: apAIJ.multiaperture.absmagapertures = [default_absmag] * n_apertures else: - apAIJ.multiaperture.absmagapertures = aperture_table['absmag'] + apAIJ.multiaperture.absmagapertures = aperture_table["absmag"] - apAIJ.multiaperture.raapertures = aperture_table['coord'].ra.degree - apAIJ.multiaperture.decapertures = aperture_table['coord'].dec.degree + apAIJ.multiaperture.raapertures = aperture_table["coord"].ra.degree + apAIJ.multiaperture.decapertures = aperture_table["coord"].dec.degree return apAIJ def _is_comp(star_coord, comp_table): - idx, d2d, _ = star_coord.match_to_catalog_sky(comp_table['coord']) - return 'comparison' in comp_table['marker name'][idx] + idx, d2d, _ = star_coord.match_to_catalog_sky(comp_table["coord"]) + return "comparison" in comp_table["marker name"][idx] def generate_aij_table(table_name, comparison_table): @@ -380,48 +411,49 @@ def generate_aij_table(table_name, comparison_table): Table of photometry in AIJ format. 
""" info_columns = { - 'date-obs': 'DATE_OBS', - 'airmass': 'AIRMASS', - 'BJD': 'BJD_MOBS', - 'exposure': 'EXPOSURE', - 'filter': 'FILTER', - 'aperture': 'Source_Radius', - 'annulus_inner': 'Sky_Rad(min)', - 'annulus_outer': 'Sky_Rad(max)' + "date-obs": "DATE_OBS", + "airmass": "AIRMASS", + "BJD": "BJD_MOBS", + "exposure": "EXPOSURE", + "filter": "FILTER", + "aperture": "Source_Radius", + "annulus_inner": "Sky_Rad(min)", + "annulus_outer": "Sky_Rad(max)", } by_source_columns = { - 'xcenter': 'X(IJ)', - 'ycenter': 'Y(IJ)', - 'aperture_net_counts': 'Source-Sky', - 'aperture_area': 'N_Src_Pixels', - 'noise-aij': 'Source_Error', - 'snr': 'Source_SNR', - 'sky_per_pix_avg': 'Sky/Pixel', - 'annulus_area': 'N_Sky_Pixels', - 'fwhm_x': 'X-Width', - 'fwhm_y': 'Y-Width', - 'width': 'Width', - 'relative_flux': 'rel_flux', - 'relative_flux_error': 'rel_flux_err', - 'relative_flux_snr': 'rel_flux_SNR', - 'comparison counts': 'tot_C_cnts', - 'comparison error': 'tot_C_err' + "xcenter": "X(IJ)", + "ycenter": "Y(IJ)", + "aperture_net_counts": "Source-Sky", + "aperture_area": "N_Src_Pixels", + "noise-aij": "Source_Error", + "snr": "Source_SNR", + "sky_per_pix_avg": "Sky/Pixel", + "annulus_area": "N_Sky_Pixels", + "fwhm_x": "X-Width", + "fwhm_y": "Y-Width", + "width": "Width", + "relative_flux": "rel_flux", + "relative_flux_error": "rel_flux_err", + "relative_flux_snr": "rel_flux_SNR", + "comparison counts": "tot_C_cnts", + "comparison error": "tot_C_err", } - by_star = table_name.group_by('star_id') + by_star = table_name.group_by("star_id") base_table = by_star.groups[0][list(info_columns.keys())] for star_id, sub_table in zip(by_star.groups.keys, by_star.groups): - star_co = SkyCoord(ra=sub_table['RA'][0], dec=sub_table['Dec'][0], - unit='degree') + star_co = SkyCoord( + ra=sub_table["RA"][0], dec=sub_table["Dec"][0], unit="degree" + ) if _is_comp(star_co, comparison_table): - char = 'C' + char = "C" else: - char = 'T' + char = "T" - new_table = sub_table[list(by_source_columns.keys())] # + ['BJD'] + new_table = sub_table[list(by_source_columns.keys())] # + ['BJD'] for old_col, new_col in by_source_columns.items(): - new_column_name = new_col + f'_{char}{star_id[0]}' + new_column_name = new_col + f"_{char}{star_id[0]}" new_table.rename_column(old_col, new_column_name) # Add individual columns to the existing table instead of hstack # Turns out hstack is super slow. @@ -454,16 +486,16 @@ def parse_aij_table(table_name): """ # Read in the raw table. - if table_name.endswith('.csv'): + if table_name.endswith(".csv"): # The table may have been edited and changed to csv. raw = Table.read(table_name) else: # The default, though, is tab-separated text with a file extension xls. - raw = Table.read(table_name, format='ascii.tab') + raw = Table.read(table_name, format="ascii.tab") # Extract the names of all columns which are not specific to a source. # Source columns end with _TX or _CX where X is one or more digits. - source_column = r'.*_[CT]\d+' + source_column = r".*_[CT]\d+" common_columns = [] for name in raw.colnames: if not re.search(source_column, name): @@ -472,23 +504,21 @@ def parse_aij_table(table_name): # Get all of the source designations from the names of the net counts # columns. 
- flux_columns = [name for name in raw.colnames if - name.startswith('Source-Sky')] - source_column_ids = [c.split('_')[1] for c in flux_columns] + flux_columns = [name for name in raw.colnames if name.startswith("Source-Sky")] + source_column_ids = [c.split("_")[1] for c in flux_columns] # For the first source, grab all of the column names that are specific to # a source. These will be the same for all sources. - first_source_names = [name for name in raw.colnames if - name.endswith(source_column_ids[0])] - generic_source_columns = [name.rsplit('_', 1)[0] for name in - first_source_names] + first_source_names = [ + name for name in raw.colnames if name.endswith(source_column_ids[0]) + ] + generic_source_columns = [name.rsplit("_", 1)[0] for name in first_source_names] # Make a list of star objects. Not sure this is actually better than # a list of tables or something simple like that. stars = [] for idx, source in enumerate(source_column_ids): - specifc_column_names = ['_'.join([g, source]) for g in - generic_source_columns] + specifc_column_names = ["_".join([g, source]) for g in generic_source_columns] all_names = common_columns + specifc_column_names my_table = raw[all_names] for spec, gen in zip(specifc_column_names, generic_source_columns): @@ -546,9 +576,10 @@ class Star(object): snr : `astropy.units.Quantity` """ + def __init__(self, table, id_num): self._table = table - self._table['DEC'].unit = u.degree + self._table["DEC"].unit = u.degree self.id = id_num @property @@ -556,63 +587,63 @@ def airmass(self): """ Airmass at the time of observation. """ - return self._table['AIRMASS'] + return self._table["AIRMASS"] @property def counts(self): """ Net counts in the aperture. """ - return self._table['Source-Sky'] + return self._table["Source-Sky"] @property def ra(self): """ Right ascension of the star. """ - return self._table['RA'] / 24 * 360 * u.degree + return self._table["RA"] / 24 * 360 * u.degree @property def dec(self): """ Declination of the star. """ - return self._table['DEC'] + return self._table["DEC"] @property def error(self): """ Error in the net counts. """ - return self._table['Source_Error'] + return self._table["Source_Error"] @property def sky_per_pixel(self): """ Sky brightness per pixel. """ - return self._table['Sky/Pixel'] + return self._table["Sky/Pixel"] @property def peak(self): """ Peak counts in the aperture. """ - return self._table['Peak'] + return self._table["Peak"] @property def jd_utc_start(self): """ Julian date of the start of the observation. """ - return self._table['JD_UTC'] + return self._table["JD_UTC"] @property def mjd_start(self): """ Modified Julian date of the start of the observation. """ - return self._table['J.D.-2400000'] - 0.5 + return self._table["J.D.-2400000"] - 0.5 @property def exposure(self): @@ -620,9 +651,9 @@ def exposure(self): Exposure time of the observation. """ try: - return self._table['EXPOSURE'] + return self._table["EXPOSURE"] except KeyError: - return self._table['EXPTIME'] + return self._table["EXPTIME"] @property def magnitude(self): @@ -650,4 +681,4 @@ def bjd_tdb(self): """ Midpoint of the exposure as barycentric Julian date in Barycentric Dynamical Time. 
""" - return self._table['BJD_TDB'] + return self._table["BJD_TDB"] diff --git a/stellarphot/io/tess.py b/stellarphot/io/tess.py index 694c1688..fc563057 100644 --- a/stellarphot/io/tess.py +++ b/stellarphot/io/tess.py @@ -76,6 +76,7 @@ class TessSubmission: The UTC date of the first observation, in YYYYMMDD format """ + telescope_code: str filter: str utc_start: int @@ -106,21 +107,21 @@ def from_header(cls, header, telescope_code="", planet=0): filter = "" fails = {} try: - dateobs = header['date-obs'] + dateobs = header["date-obs"] except KeyError: fails["utc_start"] = "UTC date of first image" else: dateobs = dateobs.split("T")[0].replace("-", "") try: - filter = header['filter'] + filter = header["filter"] except KeyError: - fails["filter"] = ("filter/passband") + fails["filter"] = "filter/passband" try: - obj = header['object'] + obj = header["object"] except KeyError: - fails['tic_id'] = "TIC ID number" + fails["tic_id"] = "TIC ID number" else: result = TIC_regex.match(obj) if result: @@ -131,7 +132,7 @@ def from_header(cls, header, telescope_code="", planet=0): planet = int(result.group("planet")[1:]) else: # No star from the object after all - fails['tic_id'] = "TIC ID number" + fails["tic_id"] = "TIC ID number" fail = [] for k, v in fails.items(): @@ -142,11 +143,13 @@ def from_header(cls, header, telescope_code="", planet=0): if fail: raise ValueError(fail) - return cls(utc_start=dateobs, - filter=filter, - telescope_code=telescope_code, - tic_id=tic_id, - planet_number=planet) + return cls( + utc_start=dateobs, + filter=filter, + telescope_code=telescope_code, + tic_id=tic_id, + planet_number=planet, + ) def _valid_tele_code(self): return len(self.telescope_code) > 0 @@ -165,9 +168,7 @@ def _valid(self): + TIC ID is not more than 10 digits """ valid = ( - self._valid_tele_code() and - self._valid_planet() and - self._valid_tic_num() + self._valid_tele_code() and self._valid_planet() and self._valid_tic_num() ) return valid @@ -181,7 +182,7 @@ def base_name(self): f"TIC{self.tic_id}-{self.planet_number:02d}", self.utc_start, self.telescope_code, - self.filter + self.filter, ] return "_".join(pieces) @@ -220,7 +221,9 @@ def tic_coord(self): """ if not self._tic_info: self._tic_info = get_tic_info(self.tic_id) - return SkyCoord(ra=self._tic_info['ra'][0], dec=self._tic_info['dec'][0], unit='degree') + return SkyCoord( + ra=self._tic_info["ra"][0], dec=self._tic_info["dec"][0], unit="degree" + ) def invalid_parts(self): """ @@ -283,17 +286,22 @@ class TOI: tic_id : int """ + def __init__(self, tic_id, toi_table=DEFAULT_TABLE_LOCATION, allow_download=True): path = Path(toi_table) if not path.is_file(): if not allow_download: raise ValueError(f"File {toi_table} not found.") - toi_table = download_file(TOI_TABLE_URL, cache=True, show_progress=True, timeout=60) + toi_table = download_file( + TOI_TABLE_URL, cache=True, show_progress=True, timeout=60 + ) self._toi_table = Table.read(toi_table, format="ascii.csv") - self._toi_table = self._toi_table[self._toi_table['TIC ID'] == tic_id] + self._toi_table = self._toi_table[self._toi_table["TIC ID"] == tic_id] if len(self._toi_table) != 1: - raise RuntimeError(f"Found {len(self._toi_table)} rows in table, expected one.") + raise RuntimeError( + f"Found {len(self._toi_table)} rows in table, expected one." + ) self._tic_info = get_tic_info(tic_id) @property @@ -301,84 +309,87 @@ def tess_mag(self): """ The TESS magnitude of the target. 
""" - return self._toi_table['TESS Mag'][0] + return self._toi_table["TESS Mag"][0] @property def tess_mag_error(self): """ The uncertainty in the TESS magnitude. """ - return self._toi_table['TESS Mag err'][0] + return self._toi_table["TESS Mag err"][0] @property def depth(self): """ The transit depth of the target in parts per thousand. """ - return self._toi_table['Depth (ppm)'][0] / 1000 + return self._toi_table["Depth (ppm)"][0] / 1000 @property def depth_error(self): """ The uncertainty in the transit depth in parts per thousand. """ - return self._toi_table['Depth (ppm) err'][0] / 1000 + return self._toi_table["Depth (ppm) err"][0] / 1000 @property def epoch(self): """ The epoch of the transit. """ - return Time(self._toi_table['Epoch (BJD)'][0], scale='tdb', format='jd') + return Time(self._toi_table["Epoch (BJD)"][0], scale="tdb", format="jd") @property def epoch_error(self): """ The uncertainty in the epoch of the transit. """ - return self._toi_table['Epoch (BJD) err'][0] * u.day + return self._toi_table["Epoch (BJD) err"][0] * u.day @property def period(self): """ The period of the transit. """ - return self._toi_table['Period (days)'][0] * u.day + return self._toi_table["Period (days)"][0] * u.day @property def period_error(self): """ The uncertainty in the period of the transit. """ - return self._toi_table['Period (days) err'][0] * u.day + return self._toi_table["Period (days) err"][0] * u.day @property def duration(self): """ The duration of the transit. """ - return self._toi_table['Duration (hours)'][0] * u.hour + return self._toi_table["Duration (hours)"][0] * u.hour @property def duration_error(self): """ The uncertainty in the duration of the transit. """ - return self._toi_table['Duration (hours) err'][0] * u.hour + return self._toi_table["Duration (hours) err"][0] * u.hour @property def coord(self): """ The coordinates of the target. """ - return SkyCoord(ra=self._tic_info['ra'][0], dec=self._tic_info['dec'][0], unit='degree') + return SkyCoord( + ra=self._tic_info["ra"][0], dec=self._tic_info["dec"][0], unit="degree" + ) @property def tic_id(self): """ The TIC ID of the target. """ - return self._tic_info['ID'][0] + return self._tic_info["ID"][0] + @dataclass class TessTargetFile: @@ -433,10 +444,11 @@ class TessTargetFile: The target table. 
""" - coord : SkyCoord - magnitude : float - depth : float - file : str = "" + + coord: SkyCoord + magnitude: float + depth: float + file: str = "" def __post_init__(self): self.aperture_server = GAIA_APERTURE_SERVER @@ -450,20 +462,25 @@ def __post_init__(self): def _retrieve_target_file(self): params = dict( - ra = self.coord.ra.to_string(unit='hour', decimal=False, sep=":"), - dec = self.coord.dec.to_string(unit='degree', decimal=False, sep=":"), + ra=self.coord.ra.to_string(unit="hour", decimal=False, sep=":"), + dec=self.coord.dec.to_string(unit="degree", decimal=False, sep=":"), mag=self.magnitude, - depth=self.depth + depth=self.depth, + ) + result = requests.get( + self.aperture_server + "cgi-bin/gaia_to_aij/upload_request.cgi", + params=params, + ) + links = re.search( + 'href="(.+)"', + result.text.replace("\n", ""), ) - result = requests.get(self.aperture_server + "cgi-bin/gaia_to_aij/upload_request.cgi", params=params) - links = re.search('href="(.+)"', result.text.replace('\n', ''), ) download_link = self.aperture_server + links[1] target_file_contents = requests.get(download_link) # Write GAIA data to local file with open(self._path, "w") as f: f.write(target_file_contents.text) - def _build_table(self): from stellarphot.utils.comparison_utils import read_file diff --git a/stellarphot/io/tests/test_aij_io.py b/stellarphot/io/tests/test_aij_io.py index 88bea691..384ed235 100644 --- a/stellarphot/io/tests/test_aij_io.py +++ b/stellarphot/io/tests/test_aij_io.py @@ -21,26 +21,24 @@ def test_aperture_file_content(): # Test that the format of the generated aperture file # matches the expected format ap = ApertureFileAIJ() - ref_data = get_pkg_data_filename('data/apertures_as_table.csv') + ref_data = get_pkg_data_filename("data/apertures_as_table.csv") ref_table = Table.read(ref_data) - ap.multiaperture.xapertures = ref_table['x'] + ap.multiaperture.xapertures = ref_table["x"] # AIJ has origin in different place than the reference table. - ap.multiaperture.yapertures = np.around((4096 - ref_table['y']), - decimals=4) + ap.multiaperture.yapertures = np.around((4096 - ref_table["y"]), decimals=4) - ap.multiaperture.raapertures = ref_table['ra'] - ap.multiaperture.decapertures = ref_table['dec'] + ap.multiaperture.raapertures = ref_table["ra"] + ap.multiaperture.decapertures = ref_table["dec"] - ap.multiaperture.isrefstar = ref_table['isrefstar'] - ap.multiaperture.centroidstar = ref_table['centroidstar'] - ap.multiaperture.isalignstar = ref_table['isalignstar'] + ap.multiaperture.isrefstar = ref_table["isrefstar"] + ap.multiaperture.centroidstar = ref_table["centroidstar"] + ap.multiaperture.isalignstar = ref_table["isalignstar"] - ap.multiaperture.absmagapertures = ref_table['absmag'] + ap.multiaperture.absmagapertures = ref_table["absmag"] - ref_aperture_file = \ - get_pkg_data_filename('data/aij-sample-apertures.aperture') + ref_aperture_file = get_pkg_data_filename("data/aij-sample-apertures.aperture") ref_apertures = ApertureFileAIJ.read(ref_aperture_file) @@ -51,34 +49,39 @@ def test_aperture_creation_from_table(): # Check that generating an aperture object from an # aperture table gives the right result. 
- ref_data = get_pkg_data_filename('data/apertures_as_table.csv')
+ ref_data = get_pkg_data_filename("data/apertures_as_table.csv")
 ref_table = Table.read(ref_data)
 # Need to create a coord column
- coordinates = SkyCoord(ra=ref_table['ra'], dec=ref_table['dec'],
- unit='degree')
- ref_table['coord'] = coordinates
+ coordinates = SkyCoord(ra=ref_table["ra"], dec=ref_table["dec"], unit="degree")
+ ref_table["coord"] = coordinates
 # Delete some columns that are not in the usual aperture table
- del ref_table['ra'], ref_table['dec'], \
- ref_table['isalignstar'], ref_table['centroidstar']
+ del (
+ ref_table["ra"],
+ ref_table["dec"],
+ ref_table["isalignstar"],
+ ref_table["centroidstar"],
+ )
 # Generate marker names that match those in aperture file
 # Note that the csv reads in the True/False column as text,
 # not as bool.
- ref_table['marker name'] = ['APASS comparison'
- if v == 'True' else "TESS target"
- for v in ref_table['isrefstar']]
+ ref_table["marker name"] = [
+ "APASS comparison" if v == "True" else "TESS target"
+ for v in ref_table["isrefstar"]
+ ]
- del ref_table['isrefstar']
+ del ref_table["isrefstar"]
 ap_info = ApertureAIJ()
- ap_aij = ApertureFileAIJ.from_table(ref_table, aperture_rad=ap_info.radius,
- inner_annulus=ap_info.rback1,
- outer_annulus=ap_info.rback2,
- )
- ref_aperture_file = \
- get_pkg_data_filename('data/aij-sample-apertures.aperture')
+ ap_aij = ApertureFileAIJ.from_table(
+ ref_table,
+ aperture_rad=ap_info.radius,
+ inner_annulus=ap_info.rback1,
+ outer_annulus=ap_info.rback2,
+ )
+ ref_aperture_file = get_pkg_data_filename("data/aij-sample-apertures.aperture")
 ref_apertures = ApertureFileAIJ.read(ref_aperture_file)
diff --git a/stellarphot/io/tests/test_tess_submission.py b/stellarphot/io/tests/test_tess_submission.py
index afb1233a..b3836c44 100644
--- a/stellarphot/io/tests/test_tess_submission.py
+++ b/stellarphot/io/tests/test_tess_submission.py
@@ -9,13 +9,13 @@
 GOOD_HEADER = {
 "date-obs": "2022-06-04T05:44:28.010",
 "filter": "ip",
- "object": "TIC-237205154"
+ "object": "TIC-237205154",
 }
 GOOD_HEADER_WITH_PLANET = {
 "date-obs": "2022-06-04T05:44:28.010",
 "filter": "ip",
- "object": "TIC-237205154.01"
+ "object": "TIC-237205154.01",
 }
 BAD_HEADER = {}
@@ -79,15 +79,15 @@ def test_target_file():
 # The first point of this test is simply to succeed in creating the
 # object
- tic_742648307 = SkyCoord(ra=104.733225, dec=49.968739, unit='degree')
+ tic_742648307 = SkyCoord(ra=104.733225, dec=49.968739, unit="degree")
 tess_target = TessTargetFile(tic_742648307, magnitude=12, depth=10)
 # Check that the first thing in the list is the TIC object
 check_coords = SkyCoord(
- ra=tess_target.table['RA'][0],
- dec=tess_target.table['Dec'][0],
- unit=('hour', 'degree')
+ ra=tess_target.table["RA"][0],
+ dec=tess_target.table["Dec"][0],
+ unit=("hour", "degree"),
 )
 assert tic_742648307.separation(check_coords).arcsecond < 1
@@ -100,7 +100,7 @@ def test_target_file():
 # the object is deleted, so we need to do that here instead of
 # letting it happen at the end of the test.
with warnings.catch_warnings():
- warnings.filterwarnings("ignore",
- message="unclosed file",
- category=ResourceWarning)
+ warnings.filterwarnings(
+ "ignore", message="unclosed file", category=ResourceWarning
+ )
 del tess_target
diff --git a/stellarphot/photometry/photometry.py b/stellarphot/photometry/photometry.py
index 4beb4e18..1a9a1876 100644
--- a/stellarphot/photometry/photometry.py
+++ b/stellarphot/photometry/photometry.py
@@ -13,8 +13,7 @@
 from astropy.utils.exceptions import AstropyUserWarning
 from astropy.wcs import FITSFixedWarning
 from ccdproc import ImageFileCollection
-from photutils.aperture import (CircularAnnulus, CircularAperture,
- aperture_photometry)
+from photutils.aperture import CircularAnnulus, CircularAperture, aperture_photometry
 from photutils.centroids import centroid_sources
 from scipy.spatial.distance import cdist
@@ -23,28 +22,39 @@
 from .source_detection import compute_fwhm
-__all__ = ['single_image_photometry', 'multi_image_photometry',
- 'faster_sigma_clip_stats',
- 'find_too_close', 'clipped_sky_per_pix_stats',
- 'calculate_noise']
+__all__ = [
+ "single_image_photometry",
+ "multi_image_photometry",
+ "faster_sigma_clip_stats",
+ "find_too_close",
+ "clipped_sky_per_pix_stats",
+ "calculate_noise",
+]
 # Allowed FITS header keywords for exposure values
-EXPOSURE_KEYWORDS = ["EXPOSURE", "EXPTIME", "TELAPSE", "ELAPTIME", "ONTIME",
- "LIVETIME"]
-
-
-def single_image_photometry(ccd_image, sourcelist, camera, observatory_location,
- aperture_settings,
- shift_tolerance, max_adu, fwhm_estimate,
- use_coordinates='pixel',
- include_dig_noise=True,
- reject_too_close=True,
- reject_background_outliers=True,
- passband_map=None,
- fwhm_by_fit=True, fname=None,
- logline="single_image_photometry:",
- logfile=None,
- console_log = True):
+EXPOSURE_KEYWORDS = ["EXPOSURE", "EXPTIME", "TELAPSE", "ELAPTIME", "ONTIME", "LIVETIME"]
+
+
+def single_image_photometry(
+ ccd_image,
+ sourcelist,
+ camera,
+ observatory_location,
+ aperture_settings,
+ shift_tolerance,
+ max_adu,
+ fwhm_estimate,
+ use_coordinates="pixel",
+ include_dig_noise=True,
+ reject_too_close=True,
+ reject_background_outliers=True,
+ passband_map=None,
+ fwhm_by_fit=True,
+ fname=None,
+ logline="single_image_photometry:",
+ logfile=None,
+ console_log=True,
+):
 """
 Perform aperture photometry on a single image, with an option for
 estimating the local background from sigma-clipped stats of the counts in
 an annulus around
@@ -168,49 +178,60 @@ def single_image_photometry(ccd_image, sourcelist, camera, observatory_location,
 the `use_coordinates` parameter should be set to "sky".
 """
-
 # Check that the input parameters are valid
 if not isinstance(ccd_image, CCDData):
- raise TypeError("ccd_image must be a CCDData object, but it is "
- f"'{type(ccd_image)}'.")
+ raise TypeError(
+ "ccd_image must be a CCDData object, but it is " f"'{type(ccd_image)}'."
+ )
 if not isinstance(sourcelist, SourceListData):
- raise TypeError("sourcelist must be a SourceListData object, but it is "
- f"'{type(sourcelist)}'.")
+ raise TypeError(
+ "sourcelist must be a SourceListData object, but it is "
+ f"'{type(sourcelist)}'."
+ ) if not isinstance(camera, Camera): raise TypeError(f"camera must be a Camera object, but it is '{type(camera)}'.") if not isinstance(observatory_location, EarthLocation): - raise TypeError("observatory_location must be a EarthLocation object, but it " - f"is '{type(observatory_location)}'.") + raise TypeError( + "observatory_location must be a EarthLocation object, but it " + f"is '{type(observatory_location)}'." + ) if aperture_settings.inner_annulus >= aperture_settings.outer_annulus: - raise ValueError(f"outer_annulus ({aperture_settings.outer_annulus}) must be greater than " - f"inner_annulus ({aperture_settings.inner_annulus}).") + raise ValueError( + f"outer_annulus ({aperture_settings.outer_annulus}) must be greater than " + f"inner_annulus ({aperture_settings.inner_annulus})." + ) if aperture_settings.radius >= aperture_settings.inner_annulus: - raise ValueError(f"aperture_radius ({aperture_settings.radius}) must be greater than " - f"inner_annulus ({aperture_settings.inner_annulus}).") - if (shift_tolerance<=0): - raise ValueError(f"shift_tolerance ({shift_tolerance}) must be greater than 0 " - "(should be on order of FWHM).") - if (max_adu<=0): + raise ValueError( + f"aperture_radius ({aperture_settings.radius}) must be greater than " + f"inner_annulus ({aperture_settings.inner_annulus})." + ) + if shift_tolerance <= 0: + raise ValueError( + f"shift_tolerance ({shift_tolerance}) must be greater than 0 " + "(should be on order of FWHM)." + ) + if max_adu <= 0: raise ValueError(f"max_adu ({max_adu}) must be greater than 0.") - if (use_coordinates not in ['pixel', 'sky']): - raise ValueError(f"input_coordinates ({use_coordinates}) must be either " - "'pixel' or 'sky'.") + if use_coordinates not in ["pixel", "sky"]: + raise ValueError( + f"input_coordinates ({use_coordinates}) must be either " "'pixel' or 'sky'." + ) # Set up logging logger = logging.getLogger("single_image_photometry") - console_format = logging.Formatter('%(message)s') + console_format = logging.Formatter("%(message)s") if logger.hasHandlers() is False: logger.setLevel(logging.INFO) if logfile is not None: # by default this appends to existing logfile fh = logging.FileHandler(logfile) - log_format = logging.Formatter('%(levelname)s - %(message)s') + log_format = logging.Formatter("%(levelname)s - %(message)s") if console_log: ch = logging.StreamHandler() ch.setFormatter(console_format) ch.setLevel(logging.INFO) logger.addHandler(ch) - else: # Log to console + else: # Log to console fh = logging.StreamHandler() log_format = console_format fh.setFormatter(log_format) @@ -228,25 +249,31 @@ def single_image_photometry(ccd_image, sourcelist, camera, observatory_location, break if matched_kw is None: - logger.warning(f"{logline} None of the accepted exposure keywords " - f"({format(', '.join(EXPOSURE_KEYWORDS))}) found in the " - "header ... SKIPPING THIS IMAGE!") + logger.warning( + f"{logline} None of the accepted exposure keywords " + f"({format(', '.join(EXPOSURE_KEYWORDS))}) found in the " + "header ... SKIPPING THIS IMAGE!" + ) return None, None exposure = ccd_image.header[matched_kw] # Search for other keywords that are required try: - date_obs = ccd_image.header['DATE-OBS'] + date_obs = ccd_image.header["DATE-OBS"] except KeyError: - logger.warning(f"{logline} 'DATE-OBS' not found in CCD image header " - "... SKIPPING THIS IMAGE!") + logger.warning( + f"{logline} 'DATE-OBS' not found in CCD image header " + "... SKIPPING THIS IMAGE!" 
+ ) return None, None try: - filter = ccd_image.header['FILTER'] + filter = ccd_image.header["FILTER"] except KeyError: - logger.warning(f"{logline} 'FILTER' not found in CCD image header ... " - "SKIPPING THIS IMAGE!") + logger.warning( + f"{logline} 'FILTER' not found in CCD image header ... " + "SKIPPING THIS IMAGE!" + ) return None, None # Set high pixels to NaN (make sure ccd_image.data is a float array first) @@ -254,41 +281,47 @@ def single_image_photometry(ccd_image, sourcelist, camera, observatory_location, ccd_image.data[ccd_image.data > max_adu] = np.nan # Extract necessary values from sourcelist structure - star_ids = sourcelist['star_id'].value - xs = sourcelist['xcenter'].value - ys = sourcelist['ycenter'].value - ra = sourcelist['ra'].value - dec = sourcelist['dec'].value + star_ids = sourcelist["star_id"].value + xs = sourcelist["xcenter"].value + ys = sourcelist["ycenter"].value + ra = sourcelist["ra"].value + dec = sourcelist["dec"].value src_cnt = len(sourcelist) # If RA/Dec are available attempt to use them to determine the source positions - if use_coordinates == 'sky' and sourcelist.has_ra_dec: + if use_coordinates == "sky" and sourcelist.has_ra_dec: try: - imgpos = ccd_image.wcs.world_to_pixel(SkyCoord(ra, dec, unit=u.deg, - frame='icrs')) + imgpos = ccd_image.wcs.world_to_pixel( + SkyCoord(ra, dec, unit=u.deg, frame="icrs") + ) xs, ys = imgpos[0], imgpos[1] except AttributeError: # No WCS, skip this image msg = f"{logline} ccd_image must have a valid WCS to use RA/Dec!" logger.warning(msg) return None, None - elif use_coordinates == 'sky' and not sourcelist.has_ra_dec: - raise ValueError("use_coordinates='sky' but sourcelist does not have" - "RA/Dec coordinates!") + elif use_coordinates == "sky" and not sourcelist.has_ra_dec: + raise ValueError( + "use_coordinates='sky' but sourcelist does not have" "RA/Dec coordinates!" + ) # Reject sources that are within an aperture diameter of each other. dropped_sources = [] try: - too_close = find_too_close(sourcelist, aperture_settings.radius, - pixel_scale=camera.pixel_scale.value) + too_close = find_too_close( + sourcelist, aperture_settings.radius, pixel_scale=camera.pixel_scale.value + ) except Exception as e: # Any failure here is BAD, so raise an error raise RuntimeError( - f"Call to find_too_close() returned {type(e).__name__}: {str(e)}") + f"Call to find_too_close() returned {type(e).__name__}: {str(e)}" + ) too_close_cnt = np.sum(too_close) non_overlap = ~too_close - msg = (f"{logline} {too_close_cnt} of {src_cnt} sources within 2 aperture radii of " - "nearest neighbor") + msg = ( + f"{logline} {too_close_cnt} of {src_cnt} sources within 2 aperture radii of " + "nearest neighbor" + ) if reject_too_close: # Track dropped sources due to being too close together @@ -304,12 +337,15 @@ def single_image_photometry(ccd_image, sourcelist, camera, observatory_location, msg += " ... keeping them." logger.info(msg) - # Remove all source positions too close to edges of image (where the annulus would # extend beyond the image boundaries). 
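 # Using the outer annulus radius as the padding means a source survives this
 # cut only if its entire sky annulus fits inside the frame.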
padding = aperture_settings.outer_annulus
- out_of_bounds = ( (xs < padding) | (xs > (ccd_image.shape[1] - padding)) |
- (ys < padding) | (ys > (ccd_image.shape[0] - padding)) )
+ out_of_bounds = (
+ (xs < padding)
+ | (xs > (ccd_image.shape[1] - padding))
+ | (ys < padding)
+ | (ys > (ccd_image.shape[0] - padding))
+ )
 in_bounds = ~out_of_bounds
 # Track dropped sources due to out of bounds positions
 dropped_sources.extend(star_ids[out_of_bounds].tolist())
@@ -321,29 +357,33 @@ def single_image_photometry(ccd_image, sourcelist, camera, observatory_location,
 dec = dec[in_bounds]
 in_cnt = np.sum(in_bounds)
 out_cnt = np.sum(out_of_bounds)
- logger.info(f"{logline} {out_cnt} sources too close to image edge ... removed "
- "them.")
- logger.info(f"{logline} {in_cnt} of {src_cnt} original sources to have photometry "
- "done.")
-
+ logger.info(
+ f"{logline} {out_cnt} sources too close to image edge ... removed " "them."
+ )
+ logger.info(
+ f"{logline} {in_cnt} of {src_cnt} original sources to have photometry " "done."
+ )
 # If we are using x/y positions previously obtained from the ra/dec positions and
 # WCS, then recentroid the sources to refine the positions. This is
 # particularly useful when processing multiple images of the same field
 # and just passing the same sourcelist when calling single_image_photometry
 # on each image.
- if use_coordinates == 'sky':
+ if use_coordinates == "sky":
 try:
- xcen, ycen = centroid_sources(ccd_image.data, xs, ys,
- box_size=2 * aperture_settings.radius + 1)
+ xcen, ycen = centroid_sources(
+ ccd_image.data, xs, ys, box_size=2 * aperture_settings.radius + 1
+ )
 except NoOverlapError:
- logger.warning(f"{logline} Determining new centroids failed ... "
- "SKIPPING THIS IMAGE!")
+ logger.warning(
+ f"{logline} Determining new centroids failed ... "
+ "SKIPPING THIS IMAGE!"
+ )
 return None, None
- else: # Proceed
+ else: # Proceed
 # Calculate offset between centroid in this image and the positions
 # based on input RA/Dec.
- center_diff = np.sqrt((xs - xcen)**2 + (ys - ycen)**2)
+ center_diff = np.sqrt((xs - xcen) ** 2 + (ys - ycen) ** 2)
 # The center really shouldn't move more than about the fwhm, could
 # rework this in the future to use that instead.
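 # The comparison itself falls in the lines elided by the hunk boundary
 # below; as an illustrative sketch only (names follow the surrounding code,
 # and the handling of flagged sources is decided in the elided lines):
 #
 # too_shifted = center_diff > shift_tolerance
 #
 # i.e. a source whose refined centroid lands more than shift_tolerance
 # pixels (of order one FWHM) from its predicted position is flagged rather
 # than silently accepted.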
@@ -367,155 +407,168 @@ def single_image_photometry(ccd_image, sourcelist, camera, observatory_location, # Define apertures and annuli for the aperture photometry aper_locs = np.array([xs, ys]).T - apers = CircularAperture(aper_locs, - r=aperture_settings.radius) - anuls = CircularAnnulus(aper_locs, - r_in=aperture_settings.inner_annulus, - r_out=aperture_settings.outer_annulus) + apers = CircularAperture(aper_locs, r=aperture_settings.radius) + anuls = CircularAnnulus( + aper_locs, + r_in=aperture_settings.inner_annulus, + r_out=aperture_settings.outer_annulus, + ) # Perform the aperture photometry - photom = aperture_photometry(ccd_image.data, (apers, anuls), - mask=ccd_image.mask, method='center') + photom = aperture_photometry( + ccd_image.data, (apers, anuls), mask=ccd_image.mask, method="center" + ) # Add source ids to the photometry table - photom['star_id'] = star_ids - photom['ra'] = ra * u.deg - photom['dec'] = dec * u.deg + photom["star_id"] = star_ids + photom["ra"] = ra * u.deg + photom["dec"] = dec * u.deg # Drop ID column from aperture_photometry() - del photom['id'] + del photom["id"] # Add various CCD image parameters to the photometry table if fname is not None: - photom['file'] = fname + photom["file"] = fname else: - photom['file'] = [''] * len(photom) + photom["file"] = [""] * len(photom) # Set various columns based on CCDData headers (which we # checked for earlier) - photom['exposure'] = [exposure] * len(photom) * u.second - photom['date-obs'] = Time(Column(data=[date_obs] )) - photom['filter'] = [filter] * len(photom) - photom.rename_column('filter', 'passband') + photom["exposure"] = [exposure] * len(photom) * u.second + photom["date-obs"] = Time(Column(data=[date_obs])) + photom["filter"] = [filter] * len(photom) + photom.rename_column("filter", "passband") # Check for airmass keyword in header and set 'airmass' if found, # but accept it may not be available try: - photom['airmass'] = [ccd_image.header['AIRMASS']] * len(photom) + photom["airmass"] = [ccd_image.header["AIRMASS"]] * len(photom) except KeyError: - logger.warning(f"{logline} 'AIRMASS' not found in CCD " - "image header ... setting to NaN!") - photom['airmass'] = [np.nan] * len(photom) + logger.warning( + f"{logline} 'AIRMASS' not found in CCD " "image header ... setting to NaN!" + ) + photom["airmass"] = [np.nan] * len(photom) # Save aperture and annulus information - photom.rename_column('aperture_sum_0', 'aperture_sum') - photom.rename_column('aperture_sum_1', 'annulus_sum') - photom['aperture_sum'].unit = ccd_image.unit - photom['annulus_sum'].unit = ccd_image.unit - photom['aperture'] = apers.r * u.pixel - photom['annulus_inner'] = anuls.r_in * u.pixel - photom['annulus_outer'] = anuls.r_out * u.pixel + photom.rename_column("aperture_sum_0", "aperture_sum") + photom.rename_column("aperture_sum_1", "annulus_sum") + photom["aperture_sum"].unit = ccd_image.unit + photom["annulus_sum"].unit = ccd_image.unit + photom["aperture"] = apers.r * u.pixel + photom["annulus_inner"] = anuls.r_in * u.pixel + photom["annulus_outer"] = anuls.r_out * u.pixel # By convention, area is in units of pixels (not pixels squared) in a digital image - photom['aperture_area'] = apers.area * u.pixel - photom['annulus_area'] = anuls.area * u.pixel + photom["aperture_area"] = apers.area * u.pixel + photom["annulus_area"] = anuls.area * u.pixel if reject_background_outliers: msg = f"{logline} Computing clipped sky stats ... 
" try: - avg_sky_per_pix, med_sky_per_pix, std_sky_per_pix = \ - clipped_sky_per_pix_stats(ccd_image, anuls) + ( + avg_sky_per_pix, + med_sky_per_pix, + std_sky_per_pix, + ) = clipped_sky_per_pix_stats(ccd_image, anuls) except AttributeError: msg += "BAD ANNULUS ('sky_per_pix' stats set to np.nan) ... " - avg_sky_per_pix, med_sky_per_pix, std_sky_per_pix = \ - np.nan, np.nan, np.nan - photom['sky_per_pix_avg'] = avg_sky_per_pix / u.pixel - photom['sky_per_pix_med'] = med_sky_per_pix / u.pixel - photom['sky_per_pix_std'] = std_sky_per_pix / u.pixel + avg_sky_per_pix, med_sky_per_pix, std_sky_per_pix = np.nan, np.nan, np.nan + photom["sky_per_pix_avg"] = avg_sky_per_pix / u.pixel + photom["sky_per_pix_med"] = med_sky_per_pix / u.pixel + photom["sky_per_pix_std"] = std_sky_per_pix / u.pixel msg += "DONE." logger.info(msg) - else: # Don't reject outliers (but why would you do this?) - logger.warning(f"{logline} SUGGESTION: You are computing sky per pixel " - "without clipping (set reject_background_outliers=True " - "to perform clipping).") + else: # Don't reject outliers (but why would you do this?) + logger.warning( + f"{logline} SUGGESTION: You are computing sky per pixel " + "without clipping (set reject_background_outliers=True " + "to perform clipping)." + ) med_pp = [] std_pp = [] for mask in anuls.to_mask(): annulus_data = mask.cutout(ccd_image) med_pp.append(np.median(annulus_data)) std_pp.append(np.std(annulus_data)) - photom['sky_per_pix_avg'] = photom['annulus_sum'] / photom['annulus_area'] - photom['sky_per_pix_med'] = np.array(med_pp) * ccd_image.unit / u.pixel - photom['sky_per_pix_std'] = np.array(std_pp) * ccd_image.unit / u.pixel + photom["sky_per_pix_avg"] = photom["annulus_sum"] / photom["annulus_area"] + photom["sky_per_pix_med"] = np.array(med_pp) * ccd_image.unit / u.pixel + photom["sky_per_pix_std"] = np.array(std_pp) * ccd_image.unit / u.pixel # Compute counts using clipped stats on sky per pixel - photom['aperture_net_cnts'] = (photom['aperture_sum'].value - - (photom['aperture_area'].value * - photom['sky_per_pix_avg'].value)) - photom['aperture_net_cnts'].unit = ccd_image.unit + photom["aperture_net_cnts"] = photom["aperture_sum"].value - ( + photom["aperture_area"].value * photom["sky_per_pix_avg"].value + ) + photom["aperture_net_cnts"].unit = ccd_image.unit # Fit the FWHM of the sources (can result in many warrnings due to # failed FWHM fitting, capture those warnings and print a summary) msg = f"{logline} Fitting FWHM of all sources (may take a few minutes) ... " with warnings.catch_warnings(record=True) as warned: warnings.filterwarnings("always", category=AstropyUserWarning) - fwhm_x, fwhm_y = compute_fwhm(ccd_image, photom, - fwhm_estimate=fwhm_estimate, fit=fwhm_by_fit) + fwhm_x, fwhm_y = compute_fwhm( + ccd_image, photom, fwhm_estimate=fwhm_estimate, fit=fwhm_by_fit + ) num_warnings = len(warned) msg += f"fitting failed on {num_warnings} of {len(photom)} sources ... " msg += "DONE." 
logger.info(msg) # Deal with bad FWHM values - bad_fwhm = (fwhm_x < 1) | (fwhm_y < 1) # Set bad values to NaN now + bad_fwhm = (fwhm_x < 1) | (fwhm_y < 1) # Set bad values to NaN now fwhm_x[bad_fwhm] = np.nan fwhm_y[bad_fwhm] = np.nan - photom['fwhm_x'] = fwhm_x * u.pixel - photom['fwhm_y'] = fwhm_y * u.pixel - photom['width'] = ((fwhm_x + fwhm_y) / 2) * u.pixel + photom["fwhm_x"] = fwhm_x * u.pixel + photom["fwhm_y"] = fwhm_y * u.pixel + photom["width"] = ((fwhm_x + fwhm_y) / 2) * u.pixel if np.sum(bad_fwhm) > 0: - logger.info(f"{logline} Bad FWHM values (<1 pixel) for {np.sum(bad_fwhm)} " - "sources.") + logger.info( + f"{logline} Bad FWHM values (<1 pixel) for {np.sum(bad_fwhm)} " "sources." + ) # Flag sources with bad counts before computing noise. # This can happen, for example, when the object is faint and centroiding is # bad. It can also happen when the sky background is low. - bad_cnts = photom['aperture_net_cnts'].value < 0 + bad_cnts = photom["aperture_net_cnts"].value < 0 # This next line works because booleans are just 0/1 in numpy if np.sum(bad_cnts) > 0: - logger.info(f"{logline} Aperture net counts negative for {np.sum(bad_cnts)} " - "sources.") + logger.info( + f"{logline} Aperture net counts negative for {np.sum(bad_cnts)} " "sources." + ) all_bads = bad_cnts | bad_fwhm - photom['aperture_net_cnts'][all_bads] = np.nan - logger.info(f"{logline} {np.sum(all_bads)} sources with either bad FWHM fit " - "or bad aperture net counts had aperture_net_cnts set to NaN.") + photom["aperture_net_cnts"][all_bads] = np.nan + logger.info( + f"{logline} {np.sum(all_bads)} sources with either bad FWHM fit " + "or bad aperture net counts had aperture_net_cnts set to NaN." + ) # Compute instrumental magnitudes - photom['mag_inst'] = ( - -2.5 * np.log10(camera.gain.value * photom['aperture_net_cnts'].value / - photom['exposure'].value) + photom["mag_inst"] = -2.5 * np.log10( + camera.gain.value * photom["aperture_net_cnts"].value / photom["exposure"].value ) # Compute and save noise msg = f"{logline} Calculating noise for all sources ... " - noise = calculate_noise(camera=camera, - counts=photom['aperture_net_cnts'].value, - sky_per_pix=photom['sky_per_pix_avg'].value, - aperture_area=photom['aperture_area'].value, - annulus_area=photom['annulus_area'].value, - exposure=photom['exposure'].value, - include_digitization=include_dig_noise) - photom['noise_electrons'] = noise # Noise in electrons - photom['noise_electrons'].unit = u.electron - photom['noise_cnts'] = noise / camera.gain.value # Noise in counts - photom['noise_cnts'].unit = ccd_image.unit + noise = calculate_noise( + camera=camera, + counts=photom["aperture_net_cnts"].value, + sky_per_pix=photom["sky_per_pix_avg"].value, + aperture_area=photom["aperture_area"].value, + annulus_area=photom["annulus_area"].value, + exposure=photom["exposure"].value, + include_digitization=include_dig_noise, + ) + photom["noise_electrons"] = noise # Noise in electrons + photom["noise_electrons"].unit = u.electron + photom["noise_cnts"] = noise / camera.gain.value # Noise in counts + photom["noise_cnts"].unit = ccd_image.unit # Compute and save SNR - snr = camera.gain.value * photom['aperture_net_cnts'] / noise - photom['snr'] = snr - photom['mag_error'] = 1.085736205 / snr + snr = camera.gain.value * photom["aperture_net_cnts"] / noise + photom["snr"] = snr + photom["mag_error"] = 1.085736205 / snr msg += "DONE." 
logger.info(msg) @@ -527,26 +580,35 @@ def single_image_photometry(ccd_image, sourcelist, camera, observatory_location, logger.handlers.clear() # Create PhotometryData object to return - photom_data = PhotometryData(observatory=observatory_location, camera=camera, - input_data=photom, passband_map=passband_map) + photom_data = PhotometryData( + observatory=observatory_location, + camera=camera, + input_data=photom, + passband_map=passband_map, + ) return photom_data, dropped_sources -def multi_image_photometry(directory_with_images, - object_of_interest, - sourcelist, camera, - observatory_location, - aperture_settings, - shift_tolerance, max_adu, fwhm_estimate, - include_dig_noise=True, - reject_too_close=True, - reject_background_outliers=True, - reject_unmatched=True, - passband_map=None, - fwhm_by_fit=True, - logfile=None, - console_log=True): +def multi_image_photometry( + directory_with_images, + object_of_interest, + sourcelist, + camera, + observatory_location, + aperture_settings, + shift_tolerance, + max_adu, + fwhm_estimate, + include_dig_noise=True, + reject_too_close=True, + reject_background_outliers=True, + reject_unmatched=True, + passband_map=None, + fwhm_by_fit=True, + logfile=None, + console_log=True, +): """ Perform aperture photometry on a directory of images. @@ -655,13 +717,15 @@ def multi_image_photometry(directory_with_images, # Confirm sourcelist has ra/dec coordinates if not sourcelist.has_ra_dec: - raise ValueError("multi_image_photometry: sourcelist must have RA/Dec " - "coordinates to use this function.") + raise ValueError( + "multi_image_photometry: sourcelist must have RA/Dec " + "coordinates to use this function." + ) # Set up logging (retrieve a logger but purge any existing handlers) multilogger = logging.getLogger("multi_image_photometry") multilogger.setLevel(logging.INFO) - console_format = logging.Formatter('%(message)s') + console_format = logging.Formatter("%(message)s") for handler in multilogger.handlers[:]: multilogger.removeHandler(handler) @@ -671,13 +735,13 @@ def multi_image_photometry(directory_with_images, logfile = Path(directory_with_images) / logfile # by default this appends to existing logfile fh = logging.FileHandler(logfile) - log_format = logging.Formatter('%(levelname)s - %(message)s') + log_format = logging.Formatter("%(levelname)s - %(message)s") if console_log: ch = logging.StreamHandler() ch.setFormatter(console_format) ch.setLevel(logging.INFO) multilogger.addHandler(ch) - else: # Log to console + else: # Log to console fh = logging.StreamHandler() log_format = console_format fh.setFormatter(log_format) @@ -700,7 +764,7 @@ def multi_image_photometry(directory_with_images, n_files_processed = 0 msg = f"Starting photometry of files in {directory_with_images} ... " - if (logfile is not None): + if logfile is not None: msg += f"logging output to {orig_logfile}" # If not logging to console, print message here if not console_log: @@ -708,35 +772,43 @@ def multi_image_photometry(directory_with_images, multilogger.info(msg) # Suppress the FITSFixedWarning that is raised when reading a FITS file header - warnings.filterwarnings('ignore', category=FITSFixedWarning) + warnings.filterwarnings("ignore", category=FITSFixedWarning) # Process all the files for this_ccd, this_fname in ifc.ccds(object=object_of_interest, return_fname=True): multilogger.info(f"multi_image_photometry: Processing image {this_fname}") if this_ccd.wcs is None: - multilogger.warning(' .... SKIPPING THIS IMAGE (NO WCS)') + multilogger.warning(" .... 
SKIPPING THIS IMAGE (NO WCS)") continue # Call single_image_photometry on each image n_files_processed += 1 multilogger.info(" Calling single_image_photometry ...") - this_phot, this_missing_sources = \ - single_image_photometry(this_ccd, sourcelist, - camera, observatory_location, - aperture_settings, - shift_tolerance, max_adu, fwhm_estimate, - use_coordinates='sky', - include_dig_noise=include_dig_noise, - reject_too_close=reject_too_close, - reject_background_outliers=reject_background_outliers, - passband_map=passband_map, - fwhm_by_fit=fwhm_by_fit, fname=this_fname, - logline=" >", - logfile = logfile) + this_phot, this_missing_sources = single_image_photometry( + this_ccd, + sourcelist, + camera, + observatory_location, + aperture_settings, + shift_tolerance, + max_adu, + fwhm_estimate, + use_coordinates="sky", + include_dig_noise=include_dig_noise, + reject_too_close=reject_too_close, + reject_background_outliers=reject_background_outliers, + passband_map=passband_map, + fwhm_by_fit=fwhm_by_fit, + fname=this_fname, + logline=" >", + logfile=logfile, + ) if (this_phot is None) or (this_missing_sources is None): multilogger.info(" single_image_photometry failed for this image.") else: - multilogger.info(f" Done with single_image_photometry for {this_fname}\n\n") + multilogger.info( + f" Done with single_image_photometry for {this_fname}\n\n" + ) # Extend the list of missing stars missing_sources.extend(this_missing_sources) @@ -763,12 +835,12 @@ def multi_image_photometry(directory_with_images, else: uniques = set([missing_sources]) - msg = (f" Removing {len(uniques)} sources not observed in every image ... ") + msg = f" Removing {len(uniques)} sources not observed in every image ... " # Purge the photometry table of all sources that were eliminated # on at least one image - starid_to_remove = sorted([u for u in uniques if u in all_phot['star_id']]) + starid_to_remove = sorted([u for u in uniques if u in all_phot["star_id"]]) # add index to PhotometryData to speed up removal - all_phot.add_index('star_id') + all_phot.add_index("star_id") # Remove the starid for objects not observed in every image if starid_to_remove: bad_rows = all_phot.loc_indices[starid_to_remove] @@ -776,14 +848,16 @@ def multi_image_photometry(directory_with_images, bad_rows = list(bad_rows) except TypeError: bad_rows = [bad_rows] - all_phot.remove_indices('star_id') + all_phot.remove_indices("star_id") all_phot.remove_rows(sorted(bad_rows)) # Drop index from PhotometryData to save memory - all_phot.remove_indices('star_id') + all_phot.remove_indices("star_id") msg += "DONE." multilogger.info(msg) - multilogger.info(f" DONE processing all matching images in {directory_with_images}") + multilogger.info( + f" DONE processing all matching images in {directory_with_images}" + ) if logfile is not None and not console_log: print(f" DONE processing all matching images in {directory_with_images}") @@ -841,8 +915,11 @@ def faster_sigma_clip_stats(data, sigma=5, iters=5, axis=None): if np.nansum(clips) == 0: break data[clips] = np.nan - return (bn.nanmean(data, axis=axis), bn.nanmedian(data, axis=axis), - bn.nanstd(data, axis=axis)) + return ( + bn.nanmean(data, axis=axis), + bn.nanmedian(data, axis=axis), + bn.nanstd(data, axis=axis), + ) def find_too_close(sourcelist, aperture_rad, pixel_scale=None): @@ -875,29 +952,32 @@ def find_too_close(sourcelist, aperture_rad, pixel_scale=None): are closer than two aperture radii, ``False`` otherwise. 
""" if not isinstance(sourcelist, SourceListData): - raise TypeError("sourcelist must be of type SourceListData not " - f"'{type(sourcelist)}'") + raise TypeError( + "sourcelist must be of type SourceListData not " f"'{type(sourcelist)}'" + ) if not isinstance(pixel_scale, float): raise TypeError(f"pixel_scale must be a float not '{type(pixel_scale)}'") if sourcelist.has_x_y: - x, y = sourcelist['xcenter'], sourcelist['ycenter'] + x, y = sourcelist["xcenter"], sourcelist["ycenter"] # Find the pixel distance to the nearest neighbor for each source - dist_mat = cdist(np.array([x, y]).T, np.array([x, y]).T, metric='euclidean') + dist_mat = cdist(np.array([x, y]).T, np.array([x, y]).T, metric="euclidean") np.fill_diagonal(dist_mat, np.inf) # Return array with True where the distance is less than twice the aperture # radius - return (dist_mat.min(0) < 2 * aperture_rad) + return dist_mat.min(0) < 2 * aperture_rad elif sourcelist.has_ra_dec: - if (pixel_scale is None): - raise ValueError("pixel_scale must be provided if x/y coordinates are " - "not available in the sourcelist.") - star_coords = SkyCoord(ra=sourcelist['ra'], dec=sourcelist['dec'], - frame='icrs', unit='degree') - idxc, d2d, d3d = star_coords.match_to_catalog_sky(star_coords, - nthneighbor=2) - return (d2d < (aperture_rad * 2 * pixel_scale * u.arcsec)) + if pixel_scale is None: + raise ValueError( + "pixel_scale must be provided if x/y coordinates are " + "not available in the sourcelist." + ) + star_coords = SkyCoord( + ra=sourcelist["ra"], dec=sourcelist["dec"], frame="icrs", unit="degree" + ) + idxc, d2d, d3d = star_coords.match_to_catalog_sky(star_coords, nthneighbor=2) + return d2d < (aperture_rad * 2 * pixel_scale * u.arcsec) else: raise ValueError("sourcelist must have x/y or ra/dec coordinates") @@ -934,7 +1014,7 @@ def clipped_sky_per_pix_stats(data, annulus, sigma=5, iters=5): # Use the 'center' method because then pixels are either in or out. To use # 'partial' or 'exact' we would need to do a weighted sigma clip and # I'm not sure how to do that. - masks = annulus.to_mask(method='center') + masks = annulus.to_mask(method="center") anul_list = [] for mask in masks: @@ -945,20 +1025,26 @@ def clipped_sky_per_pix_stats(data, annulus, sigma=5, iters=5): anul_array = np.array(anul_list) # Turn all zeros into np.nan... anul_array[anul_array == 0] = np.nan - avg_sky_per_pix, med_sky_per_pix, std_sky_per_pix = \ - faster_sigma_clip_stats(anul_array, - sigma=sigma, - iters=iters, - axis=1 - ) + avg_sky_per_pix, med_sky_per_pix, std_sky_per_pix = faster_sigma_clip_stats( + anul_array, sigma=sigma, iters=iters, axis=1 + ) - return (avg_sky_per_pix * data.unit, med_sky_per_pix * data.unit, - std_sky_per_pix * data.unit) + return ( + avg_sky_per_pix * data.unit, + med_sky_per_pix * data.unit, + std_sky_per_pix * data.unit, + ) -def calculate_noise(camera=None, counts=0.0, sky_per_pix=0.0, - aperture_area=0, annulus_area=0, - exposure=0, include_digitization=False): +def calculate_noise( + camera=None, + counts=0.0, + sky_per_pix=0.0, + aperture_area=0, + annulus_area=0, + exposure=0, + include_digitization=False, +): """ Computes the noise in a photometric measurement. 
@@ -1045,7 +1131,7 @@ def calculate_noise(camera=None, counts=0.0, sky_per_pix=0.0,
 sky = area_ratio * gain * sky_per_pix
 dark = area_ratio * dark_current_per_sec * exposure
- rn_error = area_ratio * read_noise ** 2
+ rn_error = area_ratio * read_noise**2
 digitization = 0.0
diff --git a/stellarphot/photometry/source_detection.py b/stellarphot/photometry/source_detection.py
index 0cac1273..1e90b030 100644
--- a/stellarphot/photometry/source_detection.py
+++ b/stellarphot/photometry/source_detection.py
@@ -11,7 +11,7 @@
 from stellarphot.core import SourceListData
-__all__ = ['source_detection', 'compute_fwhm']
+__all__ = ["source_detection", "compute_fwhm"]
 def _fit_2dgaussian(data):
@@ -42,18 +42,19 @@ def _fit_2dgaussian(data):
 # or many of the returned properties will be NaN.
 props = data_properties(data - np.min(data[~mask]), mask=mask)
- init_const = 0. # subtracted data minimum above
+ init_const = 0.0 # subtracted data minimum above
 # ptp = peak-to-peak, i.e. max - min, need to also exclude non-finite
 # values here.
 init_amplitude = np.ptp(data[~mask])
- g_init = (Const2D(init_const)
- + Gaussian2D(amplitude=init_amplitude,
- x_mean=props.xcentroid,
- y_mean=props.ycentroid,
- x_stddev=props.semimajor_sigma.value,
- y_stddev=props.semiminor_sigma.value,
- theta=props.orientation.value))
+ g_init = Const2D(init_const) + Gaussian2D(
+ amplitude=init_amplitude,
+ x_mean=props.xcentroid,
+ y_mean=props.ycentroid,
+ x_stddev=props.semimajor_sigma.value,
+ y_stddev=props.semiminor_sigma.value,
+ theta=props.orientation.value,
+ )
 fitter = LevMarLSQFitter()
 y, x = np.indices(data.shape)
@@ -63,10 +64,15 @@ def _fit_2dgaussian(data):
 return gfit
-def compute_fwhm(ccd, sources, fwhm_estimate=5,
- x_column='xcenter', y_column='ycenter',
- fit=True,
- sky_per_pix_avg=0):
+def compute_fwhm(
+ ccd,
+ sources,
+ fwhm_estimate=5,
+ x_column="xcenter",
+ y_column="ycenter",
+ fit=True,
+ sky_per_pix_avg=0,
+):
 """
 Computes the FWHM in both x and y directions of sources in an image.
@@ -148,9 +154,16 @@ def compute_fwhm(ccd, sources, fwhm_estimate=5,
 return np.array(fwhm_x), np.array(fwhm_y)
-def source_detection(ccd, fwhm=8, sigma=3.0, iters=5,
- threshold=10.0, find_fwhm=True,
- sky_per_pix_avg=0, padding=0):
+def source_detection(
+ ccd,
+ fwhm=8,
+ sigma=3.0,
+ iters=5,
+ threshold=10.0,
+ find_fwhm=True,
+ sky_per_pix_avg=0,
+ padding=0,
+):
 """
 Returns a SourceListData object containing the positions of sources
 within the image, identified using the `photutils.DAOStarFinder` algorithm.
@@ -215,23 +228,29 @@ def source_detection(ccd, fwhm=8, sigma=3.0, iters=5,
 # if not provided). Using clipped stats should hopefully get rid of any
 # bright stars that might be in the image, so the mean should be a good
 # estimate of the sky background.
- print("source_detection: You may see a warning about invalid values in the "
- "input image. This is expected if any pixels are saturated and can be "
- "ignored.")
+ print(
+ "source_detection: You may see a warning about invalid values in the "
+ "input image. This is expected if any pixels are saturated and can be "
+ "ignored."
+ )
 mean, median, std = sigma_clipped_stats(ccd, sigma=sigma, maxiters=iters)
- print(f"source_detection: sigma_clipped_stats mean={mean:.4f}, median={median:.4f}, std={std:.4f}")
+ print(
+ f"source_detection: sigma_clipped_stats mean={mean:.4f}, median={median:.4f}, std={std:.4f}"
+ )
 if sky_per_pix_avg is None:
 sky_per_pix_avg = mean
 print(f"source_detection: sky_per_pix_avg set to {sky_per_pix_avg:.4f}")
 # Identify sources by applying DAOStarFinder to a "sky subtracted"
 # image.
- print(f"source_detection: threshold set to {threshold}* standard deviation "
- f"({std:.4f})")
+ print(
+ f"source_detection: threshold set to {threshold}* standard deviation "
+ f"({std:.4f})"
+ )
 print(f"source_detection: Assuming fwhm of {fwhm} for DAOStarFinder")
 # daofind should be run on background subtracted image
 # (fails, or at least returns garbage, if sky_per_pix_avg is too low)
- daofind = DAOStarFinder(fwhm = fwhm, threshold = threshold * std)
+ daofind = DAOStarFinder(fwhm=fwhm, threshold=threshold * std)
 if isinstance(ccd, CCDData):
 sources = daofind(ccd.data - sky_per_pix_avg)
 else:
@@ -240,11 +259,15 @@ def source_detection(ccd, fwhm=8, sigma=3.0, iters=5,
 # Identify sources near the edge of the image and remove them
 # from the source list.
 padding_smt = ""
- if (padding > 0):
+ if padding > 0:
 src_cnt0 = len(sources)
 y_lim, x_lim = ccd.shape
- keep = ((sources['xcentroid'].value >= padding) & (sources['ycentroid'].value >= padding) &
- (sources['xcentroid'].value < x_lim-padding) & (sources['ycentroid'].value < y_lim-padding))
+ keep = (
+ (sources["xcentroid"].value >= padding)
+ & (sources["ycentroid"].value >= padding)
+ & (sources["xcentroid"].value < x_lim - padding)
+ & (sources["ycentroid"].value < y_lim - padding)
+ )
 sources = sources[keep]
 padding_smt = f" (after removing {src_cnt0-len(sources)} sources near the edge)"
@@ -255,51 +278,54 @@ def source_detection(ccd, fwhm=8, sigma=3.0, iters=5,
 try:
 # Retrieve the RA and Dec of each source as SkyCoord objects, then convert to
 # arrays of floats to add to table
- skypos = ccd.wcs.pixel_to_world(sources['xcentroid'], sources['ycentroid'])
- sources['ra'] = skypos.ra.value
- sources['dec'] = skypos.dec.value
+ skypos = ccd.wcs.pixel_to_world(sources["xcentroid"], sources["ycentroid"])
+ sources["ra"] = skypos.ra.value
+ sources["dec"] = skypos.dec.value
 except AttributeError:
 # No WCS, so add empty columns
- sources['ra'] = np.nan * np.ones(src_cnt)
- sources['dec'] = np.nan * np.ones(src_cnt)
+ sources["ra"] = np.nan * np.ones(src_cnt)
+ sources["dec"] = np.nan * np.ones(src_cnt)
 # If requested, compute the FWHM of each source
 if find_fwhm:
- x, y = compute_fwhm(ccd, sources, fwhm_estimate=fwhm,
- x_column='xcentroid', y_column='ycentroid',
- sky_per_pix_avg=sky_per_pix_avg)
- sources['fwhm_x'] = x
- sources['fwhm_y'] = y
- sources['width'] = (x + y) / 2 # Average of x and y FWHM
+ x, y = compute_fwhm(
+ ccd,
+ sources,
+ fwhm_estimate=fwhm,
+ x_column="xcentroid",
+ y_column="ycentroid",
+ sky_per_pix_avg=sky_per_pix_avg,
+ )
+ sources["fwhm_x"] = x
+ sources["fwhm_y"] = y
+ sources["width"] = (x + y) / 2 # Average of x and y FWHM
 # Flag bogus fwhm values returned from fitting (no objects
 # have a fwhm less than 1 pixel)
- bad_src = (sources['fwhm_x']<1) | (sources['fwhm_y']<1)
- sources['fwhm_x'][bad_src] = np.nan
- sources['fwhm_y'][bad_src] = np.nan
- sources['width'][bad_src] = np.nan
- else: # add empty columns
- sources['fwhm_x'] = np.nan * np.ones(src_cnt)
- sources['fwhm_y'] = np.nan * np.ones(src_cnt)
- sources['width'] = 
np.nan * np.ones(src_cnt) + bad_src = (sources["fwhm_x"] < 1) | (sources["fwhm_y"] < 1) + sources["fwhm_x"][bad_src] = np.nan + sources["fwhm_y"][bad_src] = np.nan + sources["width"][bad_src] = np.nan + else: # add empty columns + sources["fwhm_x"] = np.nan * np.ones(src_cnt) + sources["fwhm_y"] = np.nan * np.ones(src_cnt) + sources["width"] = np.nan * np.ones(src_cnt) # Convert sources to SourceListData object by adding # units to the columns units_dict = { - 'id' : None, - 'ra' : u.deg, - 'dec' : u.deg, - 'xcentroid' : u.pix, - 'ycentroid' : u.pix, - 'fwhm_x' : u.pix, - 'fwhm_y' : u.pix, - 'width' : u.pix + "id": None, + "ra": u.deg, + "dec": u.deg, + "xcentroid": u.pix, + "ycentroid": u.pix, + "fwhm_x": u.pix, + "fwhm_y": u.pix, + "width": u.pix, } sources = Table(data=sources, units=units_dict) # Rename columns to match SourceListData - colnamemap = {'id' : 'star_id', - 'xcentroid' : 'xcenter', - 'ycentroid' : 'ycenter'} + colnamemap = {"id": "star_id", "xcentroid": "xcenter", "ycentroid": "ycenter"} sl_data = SourceListData(input_data=sources, colname_map=colnamemap) return sl_data diff --git a/stellarphot/photometry/tests/fake_image.py b/stellarphot/photometry/tests/fake_image.py index d932a996..577313c5 100644 --- a/stellarphot/photometry/tests/fake_image.py +++ b/stellarphot/photometry/tests/fake_image.py @@ -1,4 +1,3 @@ - import astropy.io.fits as fits import numpy as np from astropy.nddata import CCDData @@ -23,20 +22,19 @@ class FakeImage: The seed to use for the random number generator. If not specified, the seed will be randomly generated. """ + def __init__(self, noise_dev=1.0, seed=None): self.image_shape = [400, 500] - data_file = get_pkg_data_filename('data/test_sources.csv') + data_file = get_pkg_data_filename("data/test_sources.csv") self._sources = Table.read(data_file) - self.mean_noise = self.sources['amplitude'].max() / 100 + self.mean_noise = self.sources["amplitude"].max() / 100 self.noise_dev = noise_dev - self._stars = make_gaussian_sources_image(self.image_shape, - self.sources) - self._noise = make_noise_image(self._stars.shape, - mean=self.mean_noise, - stddev=noise_dev, - seed=seed) + self._stars = make_gaussian_sources_image(self.image_shape, self.sources) + self._noise = make_noise_image( + self._stars.shape, mean=self.mean_noise, stddev=noise_dev, seed=seed + ) # Sky background per pixel should be the mean level of the noise. - self._sources['sky_per_pix_avg'] = self.mean_noise + self._sources["sky_per_pix_avg"] = self.mean_noise @property def sources(self): @@ -57,14 +55,14 @@ class FakeCCDImage(CCDData): # Generates a fake CCDData object for testing purposes. def __init__(self, *args, **kwargs): # Pull off the seed argument if it exists. - seed = kwargs.pop('seed', None) + seed = kwargs.pop("seed", None) # If no arguments are passed, use the default FakeImage. # This dodge is necessary because otherwise we can't copy the CCDData # object apparently. if (len(args) == 0) and (len(kwargs) == 0): base_data = FakeImage(seed=seed) - super().__init__(base_data.image.copy(), unit='adu') + super().__init__(base_data.image.copy(), unit="adu") # Append attributes from the base data object. 
self.sources = base_data.sources.copy()
@@ -75,26 +73,43 @@ def __init__(self, *args, **kwargs):
 # Add some additional features to the CCDData object, like
 # a header and the sources used to create the image.
 self.header = fits.Header()
- self.header['OBJECT'] = 'Test Object'
- self.header['EXPOSURE'] = 1.0
- self.header['DATE-OBS'] = '2018-01-01T00:00:00.0'
- self.header['AIRMASS'] = 1.2
- self.header['FILTER'] = 'V'
+ self.header["OBJECT"] = "Test Object"
+ self.header["EXPOSURE"] = 1.0
+ self.header["DATE-OBS"] = "2018-01-01T00:00:00.0"
+ self.header["AIRMASS"] = 1.2
+ self.header["FILTER"] = "V"
 # Set up a WCS header for the CCDData object.
 (size_y, size_x) = base_data.image_shape
- pixel_scale = 0.75 # arcseconds per pixel
+ pixel_scale = 0.75 # arcseconds per pixel
 ra_center = 283.6165
 dec_center = 33.05857
 w = WCS(naxis=2)
- w.wcs.crpix = [size_x / 2, size_y / 2] # Reference pixel (center of the image)
- w.wcs.cdelt = [-pixel_scale / 3600, pixel_scale / 3600] # Pixel scale in degrees per pixel
- w.wcs.crval = [ra_center, dec_center] # RA and Dec of the reference pixel in degrees
- w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] # Coordinate type (TAN projection)
+ w.wcs.crpix = [
+ size_x / 2,
+ size_y / 2,
+ ] # Reference pixel (center of the image)
+ w.wcs.cdelt = [
+ -pixel_scale / 3600,
+ pixel_scale / 3600,
+ ] # Pixel scale in degrees per pixel
+ w.wcs.crval = [
+ ra_center,
+ dec_center,
+ ] # RA and Dec of the reference pixel in degrees
+ w.wcs.ctype = ["RA---TAN", "DEC--TAN"] # Coordinate type (TAN projection)
 # Rotate image to be slightly off of horizontal
 north_angle_deg = 8.4
- w.wcs.pc = [[np.cos(np.radians(north_angle_deg)), -np.sin(np.radians(north_angle_deg))],
- [np.sin(np.radians(north_angle_deg)), np.cos(np.radians(north_angle_deg))]]
+ w.wcs.pc = [
+ [
+ np.cos(np.radians(north_angle_deg)),
+ -np.sin(np.radians(north_angle_deg)),
+ ],
+ [
+ np.sin(np.radians(north_angle_deg)),
+ np.cos(np.radians(north_angle_deg)),
+ ],
+ ]
 self.wcs = w
 self.header.update(w.to_header())
@@ -103,10 +118,19 @@ def drop_wcs(self):
 # Convenience function to remove WCS information from the CCDData object
 # for testing purposes.
 self.wcs = None
- wcs_keywords = ['CTYPE', 'CRPIX', 'CRVAL', 'CDELT','CUNIT',
- 'CD1_', 'CD2_', 'PC1_', 'PC2_']
+ wcs_keywords = [
+ "CTYPE",
+ "CRPIX",
+ "CRVAL",
+ "CDELT",
+ "CUNIT",
+ "CD1_",
+ "CD2_",
+ "PC1_",
+ "PC2_",
+ ]
 for keyword in wcs_keywords:
- matching_keys = [key for key in self.header.keys() if keyword in key]
+ matching_keys = [key for key in self.header.keys() if keyword in key]
 for key in matching_keys:
 del self.header[key]
@@ -134,7 +158,7 @@ def shift_FakeCCDImage(ccd_data, x_shift, y_shift):
 `FakeCCDImage`: A new CCDData object.
""" # Copy WCS from original CCDData object - shifted_ccd_data = ccd_data.copy() + shifted_ccd_data = ccd_data.copy() for key, val in ccd_data.__dict__.items(): try: shifted_ccd_data.__dict__[key] = val.copy() @@ -145,31 +169,36 @@ def shift_FakeCCDImage(ccd_data, x_shift, y_shift): # Calculate the new RA and Dec center after shifting x_shift = int(x_shift) y_shift = int(y_shift) - ra_center_shifted, dec_center_shifted = \ - shifted_wcs.all_pix2world((shifted_ccd_data.data.shape[1]) / 2 + x_shift, - (shifted_ccd_data.data.shape[0]) / 2 + y_shift, 0) + ra_center_shifted, dec_center_shifted = shifted_wcs.all_pix2world( + (shifted_ccd_data.data.shape[1]) / 2 + x_shift, + (shifted_ccd_data.data.shape[0]) / 2 + y_shift, + 0, + ) # Shift source positions - shifted_ccd_data.sources['x_mean'] -= x_shift - shifted_ccd_data.sources['y_mean'] -= y_shift + shifted_ccd_data.sources["x_mean"] -= x_shift + shifted_ccd_data.sources["y_mean"] -= y_shift # Check shifted sources are still on the image - if ( (np.any(shifted_ccd_data.sources['x_mean'] < 0)) | - (np.any(shifted_ccd_data.sources['x_mean'] > shifted_ccd_data.data.shape[1])) | - (np.any(shifted_ccd_data.sources['y_mean'] < 0)) | - (np.any(shifted_ccd_data.sources['y_mean'] > shifted_ccd_data.data.shape[0])) ): - raise ValueError('Sources shifted off the edge of the image.') + if ( + (np.any(shifted_ccd_data.sources["x_mean"] < 0)) + | (np.any(shifted_ccd_data.sources["x_mean"] > shifted_ccd_data.data.shape[1])) + | (np.any(shifted_ccd_data.sources["y_mean"] < 0)) + | (np.any(shifted_ccd_data.sources["y_mean"] > shifted_ccd_data.data.shape[0])) + ): + raise ValueError("Sources shifted off the edge of the image.") # Update the new RA and Dec center in the shifted WCS shifted_wcs.wcs.crval = [ra_center_shifted, dec_center_shifted] shifted_ccd_data.wcs = shifted_wcs # Make image - srcs = make_gaussian_sources_image(shifted_ccd_data.image_shape, - shifted_ccd_data.sources) - background = make_noise_image(srcs.shape, - mean=shifted_ccd_data.mean_noise, - stddev=shifted_ccd_data.noise_dev) + srcs = make_gaussian_sources_image( + shifted_ccd_data.image_shape, shifted_ccd_data.sources + ) + background = make_noise_image( + srcs.shape, mean=shifted_ccd_data.mean_noise, stddev=shifted_ccd_data.noise_dev + ) shifted_ccd_data.data = srcs + background return shifted_ccd_data diff --git a/stellarphot/photometry/tests/test_detection.py b/stellarphot/photometry/tests/test_detection.py index c59e402b..84b96a4d 100644 --- a/stellarphot/photometry/tests/test_detection.py +++ b/stellarphot/photometry/tests/test_detection.py @@ -8,7 +8,7 @@ from astropy import units as u from astropy.utils.exceptions import AstropyUserWarning -from stellarphot.photometry import (source_detection, compute_fwhm) +from stellarphot.photometry import source_detection, compute_fwhm from fake_image import FakeImage @@ -16,7 +16,7 @@ SEED = 5432985 -@pytest.mark.parametrize('units', [u.pixel, None]) +@pytest.mark.parametrize("units", [u.pixel, None]) def test_compute_fwhm(units): fake_image = FakeImage(seed=SEED) sources = fake_image.sources @@ -26,13 +26,14 @@ def test_compute_fwhm(units): # that the source table values have units. # Do not try: sources['x_mean'] = sources['x_mean'] * units # Turns out individual values do NOT have units in that case. 
- sources['x_mean'] = [v * units for v in sources['x_mean']] - sources['y_mean'] = [v * units for v in sources['y_mean']] + sources["x_mean"] = [v * units for v in sources["x_mean"]] + sources["y_mean"] = [v * units for v in sources["y_mean"]] - fwhm_x, fwhm_y = compute_fwhm(fake_image.image, sources, - x_column='x_mean', y_column='y_mean') + fwhm_x, fwhm_y = compute_fwhm( + fake_image.image, sources, x_column="x_mean", y_column="y_mean" + ) - expected_fwhm = np.array(sources['x_stddev'] * gaussian_sigma_to_fwhm) + expected_fwhm = np.array(sources["x_stddev"] * gaussian_sigma_to_fwhm) assert np.allclose(fwhm_x, expected_fwhm, rtol=1e-2) @@ -41,7 +42,7 @@ def test_compute_fwhm_with_NaNs(): # We should be able to find FWHM for a source even with NaNs in the image. fake_image = FakeImage(seed=SEED) sources = fake_image.sources - x, y = sources['x_mean'].astype(int)[0], sources['y_mean'].astype(int)[0] + x, y = sources["x_mean"].astype(int)[0], sources["y_mean"].astype(int)[0] image = fake_image.image.copy() # Add a NaN to the image at the location of the first source. Note the @@ -50,13 +51,16 @@ def test_compute_fwhm_with_NaNs(): # We expect a warning about NaNs in the image, so catch it with warnings.catch_warnings(): - warnings.filterwarnings("ignore", - message="Non-Finite input data has been removed", - category=AstropyUserWarning) - fwhm_x, fwhm_y = compute_fwhm(image, sources, - x_column='x_mean', y_column='y_mean', fit=True) - - expected_fwhm = np.array(sources['x_stddev'] * gaussian_sigma_to_fwhm) + warnings.filterwarnings( + "ignore", + message="Non-Finite input data has been removed", + category=AstropyUserWarning, + ) + fwhm_x, fwhm_y = compute_fwhm( + image, sources, x_column="x_mean", y_column="y_mean", fit=True + ) + + expected_fwhm = np.array(sources["x_stddev"] * gaussian_sigma_to_fwhm) assert np.allclose(fwhm_x, expected_fwhm, rtol=1e-2) @@ -65,32 +69,41 @@ def test_detect_source_number_location(): Make sure we detect the sources in the input table.... """ fake_image = FakeImage(seed=SEED) - sources = QTable(fake_image.sources, units={'x_mean':u.pixel, 'y_mean':u.pixel, - 'x_stddev':u.pixel, 'y_stddev':u.pixel}) + sources = QTable( + fake_image.sources, + units={ + "x_mean": u.pixel, + "y_mean": u.pixel, + "x_stddev": u.pixel, + "y_stddev": u.pixel, + }, + ) # print(sources) # Pass only one value for the sky background for source detection - sky_per_pix = sources['sky_per_pix_avg'].mean() - found_sources = source_detection(fake_image.image, - fwhm=2 * sources['x_stddev'].mean(), - threshold=10, - sky_per_pix_avg=sky_per_pix) + sky_per_pix = sources["sky_per_pix_avg"].mean() + found_sources = source_detection( + fake_image.image, + fwhm=2 * sources["x_stddev"].mean(), + threshold=10, + sky_per_pix_avg=sky_per_pix, + ) # Sort by flux so we can reliably match them - sources.sort('amplitude') - found_sources.sort('flux') + sources.sort("amplitude") + found_sources.sort("flux") # Do we have the right number of sources? assert len(sources) == len(found_sources) for inp, out in zip(sources, found_sources): # Do the positions match? 
- np.testing.assert_allclose(out['xcenter'], inp['x_mean'], - rtol=1e-5, atol=0.05) - np.testing.assert_allclose(out['ycenter'], inp['y_mean'], - rtol=1e-5, atol=0.05) - np.testing.assert_allclose(gaussian_sigma_to_fwhm * (inp['x_stddev'] - + inp['y_stddev']) / 2, - out['width'], - rtol=1e-5, atol=0.05) + np.testing.assert_allclose(out["xcenter"], inp["x_mean"], rtol=1e-5, atol=0.05) + np.testing.assert_allclose(out["ycenter"], inp["y_mean"], rtol=1e-5, atol=0.05) + np.testing.assert_allclose( + gaussian_sigma_to_fwhm * (inp["x_stddev"] + inp["y_stddev"]) / 2, + out["width"], + rtol=1e-5, + atol=0.05, + ) def test_detect_source_with_padding(): @@ -98,16 +111,26 @@ def test_detect_source_with_padding(): Make sure we detect the sources in the input table.... """ fake_image = FakeImage(seed=SEED) - sources = QTable(fake_image.sources, units={'x_mean':u.pixel, 'y_mean':u.pixel, - 'x_stddev':u.pixel, 'y_stddev':u.pixel}) + sources = QTable( + fake_image.sources, + units={ + "x_mean": u.pixel, + "y_mean": u.pixel, + "x_stddev": u.pixel, + "y_stddev": u.pixel, + }, + ) # Pass only one value for the sky background for source detection - sky_per_pix = sources['sky_per_pix_avg'].mean() + sky_per_pix = sources["sky_per_pix_avg"].mean() # Padding was chosen to be large enough to ensure that one of the sources in # test_sources.csv would land too close to the edge of the image. - found_sources = source_detection(fake_image.image, - fwhm=2 * sources['x_stddev'].mean(), - threshold=10, - sky_per_pix_avg=sky_per_pix, padding=95) + found_sources = source_detection( + fake_image.image, + fwhm=2 * sources["x_stddev"].mean(), + threshold=10, + sky_per_pix_avg=sky_per_pix, + padding=95, + ) # Did we drop one source because it was too close to the edge? assert len(sources) - 1 == len(found_sources) diff --git a/stellarphot/photometry/tests/test_photometry.py b/stellarphot/photometry/tests/test_photometry.py index 3944082b..67cb9c2a 100644 --- a/stellarphot/photometry/tests/test_photometry.py +++ b/stellarphot/photometry/tests/test_photometry.py @@ -13,9 +13,13 @@ from fake_image import FakeCCDImage, shift_FakeCCDImage from stellarphot.core import Camera, SourceListData -from stellarphot.photometry import (calculate_noise, find_too_close, - multi_image_photometry, - single_image_photometry, source_detection) +from stellarphot.photometry import ( + calculate_noise, + find_too_close, + multi_image_photometry, + single_image_photometry, + source_detection, +) from stellarphot.settings import ApertureSettings GAINS = [1.0, 1.5, 2.0] @@ -30,8 +34,8 @@ def test_calc_noise_defaults(): assert calculate_noise() == 0 -@pytest.mark.parametrize('aperture_area', [5, 20]) -@pytest.mark.parametrize('gain', GAINS) +@pytest.mark.parametrize("aperture_area", [5, 20]) +@pytest.mark.parametrize("gain", GAINS) def test_calc_noise_source_only(gain, aperture_area): # If the only source of noise is Poisson error in the source # then the noise should be the square root of the counts. 
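As the comment above says, the source-only case is pure shot noise: counts recorded in ADU are converted to detected electrons with the gain, and the noise is the square root of that electron count. A quick sketch of the arithmetic with illustrative numbers, not the values the test uses:

    import numpy as np

    gain = 1.5      # e-/ADU; illustrative
    counts = 100.0  # ADU in the aperture
    # Poisson noise in the number of detected electrons
    noise_electrons = np.sqrt(gain * counts)  # ~12.25 e-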
@@ -39,18 +43,20 @@ def test_calc_noise_source_only(gain, aperture_area): expected = np.sqrt(gain * counts) # Create camera instance - camera = Camera(gain=gain*u.electron/u.adu, read_noise=0*u.electron, - dark_current=0*u.electron/u.second, - pixel_scale=1*u.arcsec/u.pixel) + camera = Camera( + gain=gain * u.electron / u.adu, + read_noise=0 * u.electron, + dark_current=0 * u.electron / u.second, + pixel_scale=1 * u.arcsec / u.pixel, + ) - np.testing.assert_allclose(calculate_noise(camera, - counts=counts, - aperture_area=aperture_area), - expected) + np.testing.assert_allclose( + calculate_noise(camera, counts=counts, aperture_area=aperture_area), expected + ) -@pytest.mark.parametrize('aperture_area', [5, 20]) -@pytest.mark.parametrize('gain', GAINS) +@pytest.mark.parametrize("aperture_area", [5, 20]) +@pytest.mark.parametrize("gain", GAINS) def test_calc_noise_dark_only(gain, aperture_area): # Gain should not affect this one. Dark current needs a couple other things, # but this is basically Poisson error. @@ -58,21 +64,23 @@ def test_calc_noise_dark_only(gain, aperture_area): exposure = 20 # Create camera instance - camera = Camera(gain=gain*u.electron/u.adu, - read_noise=0*u.electron, - dark_current=dark_current*u.electron/u.second, - pixel_scale=1*u.arcsec/u.pixel) + camera = Camera( + gain=gain * u.electron / u.adu, + read_noise=0 * u.electron, + dark_current=dark_current * u.electron / u.second, + pixel_scale=1 * u.arcsec / u.pixel, + ) expected = np.sqrt(dark_current * aperture_area * exposure) - np.testing.assert_allclose(calculate_noise(camera, - aperture_area=aperture_area, - exposure=exposure), - expected) + np.testing.assert_allclose( + calculate_noise(camera, aperture_area=aperture_area, exposure=exposure), + expected, + ) -@pytest.mark.parametrize('aperture_area', [5, 20]) -@pytest.mark.parametrize('gain', GAINS) +@pytest.mark.parametrize("aperture_area", [5, 20]) +@pytest.mark.parametrize("gain", GAINS) def test_calc_read_noise_only(gain, aperture_area): # The read noise per pixel IS the noise. The only multiplier is # the number of pixels. @@ -80,33 +88,36 @@ def test_calc_read_noise_only(gain, aperture_area): expected = np.sqrt(aperture_area * read_noise**2) # Create camera instance - camera = Camera(gain=gain*u.electron/u.adu, - read_noise=read_noise*u.electron, - dark_current=0*u.electron/u.second, - pixel_scale=1*u.arcsec/u.pixel) + camera = Camera( + gain=gain * u.electron / u.adu, + read_noise=read_noise * u.electron, + dark_current=0 * u.electron / u.second, + pixel_scale=1 * u.arcsec / u.pixel, + ) - np.testing.assert_allclose(calculate_noise(camera, - aperture_area=aperture_area), - expected) + np.testing.assert_allclose( + calculate_noise(camera, aperture_area=aperture_area), expected + ) -@pytest.mark.parametrize('aperture_area', [5, 20]) -@pytest.mark.parametrize('gain', GAINS) +@pytest.mark.parametrize("aperture_area", [5, 20]) +@pytest.mark.parametrize("gain", GAINS) def test_calc_sky_only(gain, aperture_area): # The sky noise per pixel is the poisson and per pixel. 
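The sky term exercised next behaves like the dark-current and read-noise terms in the tests above: each pixel contributes independently, so per-pixel variances add across the aperture. A hedged sketch with illustrative values:

    import numpy as np

    gain = 1.5   # e-/ADU; illustrative
    sky = 10.0   # ADU per pixel
    n_pix = 20   # pixels in the aperture
    # Each pixel adds Poisson noise in gain * sky electrons; independent
    # pixels combine in quadrature, giving a factor of sqrt(n_pix).
    sky_noise = np.sqrt(gain * n_pix * sky)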
sky = 10 expected = np.sqrt(gain * aperture_area * sky) # Create camera instance - camera = Camera(gain=gain*u.electron/u.adu, - read_noise=0*u.electron, - dark_current=0*u.electron/u.second, - pixel_scale=1*u.arcsec/u.pixel) + camera = Camera( + gain=gain * u.electron / u.adu, + read_noise=0 * u.electron, + dark_current=0 * u.electron / u.second, + pixel_scale=1 * u.arcsec / u.pixel, + ) - np.testing.assert_allclose(calculate_noise(camera, - aperture_area=aperture_area, - sky_per_pix=sky), - expected) + np.testing.assert_allclose( + calculate_noise(camera, aperture_area=aperture_area, sky_per_pix=sky), expected + ) def test_annulus_area_term(): @@ -117,24 +128,28 @@ def test_annulus_area_term(): annulus_area = 10 * aperture_area gain = 1.5 sky = 10 - expected = np.sqrt(gain * aperture_area * - (1 + aperture_area / annulus_area) * sky) + expected = np.sqrt(gain * aperture_area * (1 + aperture_area / annulus_area) * sky) # Create camera instance - camera = Camera(gain=gain*u.electron/u.adu, - read_noise=0*u.electron, - dark_current=0*u.electron/u.second, - pixel_scale=1*u.arcsec/u.pixel) + camera = Camera( + gain=gain * u.electron / u.adu, + read_noise=0 * u.electron, + dark_current=0 * u.electron / u.second, + pixel_scale=1 * u.arcsec / u.pixel, + ) - np.testing.assert_allclose(calculate_noise(camera, - aperture_area=aperture_area, - annulus_area=annulus_area, - sky_per_pix=sky), - expected) + np.testing.assert_allclose( + calculate_noise( + camera, + aperture_area=aperture_area, + annulus_area=annulus_area, + sky_per_pix=sky, + ), + expected, + ) -@pytest.mark.parametrize('digit,expected', - ((False, 89.078616), (True, 89.10182))) +@pytest.mark.parametrize("digit,expected", ((False, 89.078616), (True, 89.10182))) def test_calc_noise_messy_case(digit, expected): # Do a single test where all the parameters are set and compare with # what a calculator gave. 
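The annulus term tested just below accounts for the background itself being estimated from a finite number of annulus pixels, so its uncertainty propagates into the net counts; as the annulus grows, the factor tends to 1. A sketch mirroring the formula in `test_annulus_area_term`, with illustrative values:

    import numpy as np

    gain, sky = 1.5, 10.0  # e-/ADU and ADU per pixel; illustrative
    n_ap = 10              # aperture pixels
    n_ann = 10 * n_ap      # annulus pixels
    noise = np.sqrt(gain * n_ap * (1 + n_ap / n_ann) * sky)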
@@ -150,44 +165,49 @@ def test_calc_noise_messy_case(digit, expected): read_noise = 12 # Create camera instance - camera = Camera(gain=gain*u.electron/u.adu, - read_noise=read_noise*u.electron, - dark_current=dark_current*u.electron/u.second, - pixel_scale=1*u.arcsec/u.pixel) + camera = Camera( + gain=gain * u.electron / u.adu, + read_noise=read_noise * u.electron, + dark_current=dark_current * u.electron / u.second, + pixel_scale=1 * u.arcsec / u.pixel, + ) np.testing.assert_allclose( - calculate_noise(camera, - counts=counts, - sky_per_pix=sky, - exposure=exposure, - aperture_area=aperture_area, - annulus_area=annulus_area, - include_digitization=digit), - expected + calculate_noise( + camera, + counts=counts, + sky_per_pix=sky, + exposure=exposure, + aperture_area=aperture_area, + annulus_area=annulus_area, + include_digitization=digit, + ), + expected, ) def test_find_too_close(): # Load test sourcelist into memory - test_sl_data = ascii.read(get_pkg_data_filename('data/test_corner.ecsv'), - format='ecsv', - fast_reader=False) + test_sl_data = ascii.read( + get_pkg_data_filename("data/test_corner.ecsv"), format="ecsv", fast_reader=False + ) # Create no sky position sourcelist test_sl_data_nosky = test_sl_data.copy() - test_sl_data_nosky.remove_column('ra') - test_sl_data_nosky.remove_column('dec') + test_sl_data_nosky.remove_column("ra") + test_sl_data_nosky.remove_column("dec") # Create no image position sourcelist test_sl_data_noimgpos = test_sl_data.copy() - test_sl_data_noimgpos.remove_column('xcenter') - test_sl_data_noimgpos.remove_column('ycenter') + test_sl_data_noimgpos.remove_column("xcenter") + test_sl_data_noimgpos.remove_column("ycenter") # Create SourceListData objects sl_test = SourceListData(input_data=test_sl_data, colname_map=None) sl_test_nosky = SourceListData(input_data=test_sl_data_nosky, colname_map=None) - sl_test_noimgpos = SourceListData(input_data=test_sl_data_noimgpos, - colname_map=None) + sl_test_noimgpos = SourceListData( + input_data=test_sl_data_noimgpos, colname_map=None + ) assert sl_test.has_ra_dec is True assert sl_test.has_x_y is True @@ -217,32 +237,34 @@ def test_find_too_close(): shift_tolerance = 6 max_adu = 60000 fwhm_estimate = 5 -fake_camera = Camera(gain = 1.0*u.electron/u.adu, - read_noise = 0*u.electron, - dark_current = 0.1*u.electron/u.second, - pixel_scale = 1*u.arcsec/u.pixel) -fake_obs = EarthLocation(lat = 0*u.deg, - lon = 0*u.deg, - height = 0*u.m) -coords2use='pixel' +fake_camera = Camera( + gain=1.0 * u.electron / u.adu, + read_noise=0 * u.electron, + dark_current=0.1 * u.electron / u.second, + pixel_scale=1 * u.arcsec / u.pixel, +) +fake_obs = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m) +coords2use = "pixel" # The True case below is a regression test for #157 -@pytest.mark.parametrize('int_data', [True, False]) +@pytest.mark.parametrize("int_data", [True, False]) def test_aperture_photometry_no_outlier_rejection(int_data): fake_CCDimage = FakeCCDImage(seed=SEED) sources = fake_CCDimage.sources - aperture = sources['aperture'][0] + aperture = sources["aperture"][0] inner_annulus = 2 * aperture outer_annulus = 3 * aperture - aperture_settings = ApertureSettings(radius=aperture, - gap=inner_annulus - aperture, - width_annulus=outer_annulus - inner_annulus) + aperture_settings = ApertureSettings( + radius=aperture, + gap=inner_annulus - aperture, + width_annulus=outer_annulus - inner_annulus, + ) - found_sources = source_detection(fake_CCDimage, - fwhm=sources['x_stddev'].mean(), - threshold=10) + found_sources = 
source_detection( + fake_CCDimage, fwhm=sources["x_stddev"].mean(), threshold=10 + ) # The scale_factor is used to rescale data to integers if needed. It # needs to be set later on when the net counts are "unscaled" in the @@ -255,26 +277,33 @@ def test_aperture_photometry_no_outlier_rejection(int_data): data = scale_factor * fake_CCDimage.data fake_CCDimage.data = data.astype(int) - phot, missing_sources = single_image_photometry(fake_CCDimage, - found_sources, - fake_camera, - fake_obs, - aperture_settings, - shift_tolerance, - max_adu, fwhm_estimate, - use_coordinates=coords2use, - include_dig_noise=True, - reject_too_close=False, - reject_background_outliers=False) - - phot.sort('aperture_sum') - sources.sort('amplitude') + phot, missing_sources = single_image_photometry( + fake_CCDimage, + found_sources, + fake_camera, + fake_obs, + aperture_settings, + shift_tolerance, + max_adu, + fwhm_estimate, + use_coordinates=coords2use, + include_dig_noise=True, + reject_too_close=False, + reject_background_outliers=False, + ) + + phot.sort("aperture_sum") + sources.sort("amplitude") for inp, out in zip(sources, phot): - stdev = inp['x_stddev'] - expected_flux = (inp['amplitude'] * 2 * np.pi * - stdev**2 * - (1 - np.exp(-aperture**2 / (2 * stdev**2)))) + stdev = inp["x_stddev"] + expected_flux = ( + inp["amplitude"] + * 2 + * np.pi + * stdev**2 + * (1 - np.exp(-(aperture**2) / (2 * stdev**2))) + ) # This expected flux is correct IF there were no noise. With noise, the # standard deviation in the sum of the noise within in the aperture is # n_pix_in_aperture times the single-pixel standard deviation. @@ -288,11 +317,13 @@ def test_aperture_photometry_no_outlier_rejection(int_data): # less than the expected one sigma deviation. # We need to remove any scaling that has been done of the data values. - assert (np.abs(expected_flux - out['aperture_net_cnts'].value / scale_factor) < - np.pi * aperture**2 * fake_CCDimage.noise_dev) + assert ( + np.abs(expected_flux - out["aperture_net_cnts"].value / scale_factor) + < np.pi * aperture**2 * fake_CCDimage.noise_dev + ) -@pytest.mark.parametrize('reject', [True, False]) +@pytest.mark.parametrize("reject", [True, False]) def test_aperture_photometry_with_outlier_rejection(reject): """ Insert some really large pixel values in the annulus and check that @@ -301,52 +332,61 @@ def test_aperture_photometry_with_outlier_rejection(reject): """ fake_CCDimage = FakeCCDImage(seed=SEED) sources = fake_CCDimage.sources - aperture = sources['aperture'][0] + aperture = sources["aperture"][0] inner_annulus = 2 * aperture outer_annulus = 3 * aperture - aperture_settings = ApertureSettings(radius=aperture, - gap=inner_annulus - aperture, - width_annulus=outer_annulus - inner_annulus) + aperture_settings = ApertureSettings( + radius=aperture, + gap=inner_annulus - aperture, + width_annulus=outer_annulus - inner_annulus, + ) image = fake_CCDimage.data - print(f'{fake_CCDimage=}') + print(f"{fake_CCDimage=}") print(f"{sources['x_stddev'].mean()}") - found_sources = source_detection(fake_CCDimage, - fwhm=sources['x_stddev'].mean(), - threshold=10) + found_sources = source_detection( + fake_CCDimage, fwhm=sources["x_stddev"].mean(), threshold=10 + ) # Add some large pixel values to the annulus for each source. # adding these moves the average pixel value by quite a bit, # so we'll only get the correct net flux if these are removed. 
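The point of this test: a few very bright pixels in the annulus drag a plain mean far from the true background, while a robust, outlier-rejecting estimate barely moves. A standalone illustration using astropy's generic sigma clipping, which is not necessarily the exact code path stellarphot takes internally:

    import numpy as np
    from astropy.stats import sigma_clipped_stats

    rng = np.random.default_rng(5432985)
    annulus_pixels = rng.normal(loc=100.0, scale=5.0, size=500)
    annulus_pixels[:5] = 10000.0  # simulated hot pixels

    plain_mean = annulus_pixels.mean()  # ~199, badly biased
    clipped_mean, clipped_median, _ = sigma_clipped_stats(annulus_pixels, sigma=3.0)
    # clipped_mean stays near the true background of 100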
for source in fake_CCDimage.sources: - center_px = (int(source['x_mean']), int(source['y_mean'])) + center_px = (int(source["x_mean"]), int(source["y_mean"])) begin = center_px[0] + inner_annulus + 1 end = begin + (outer_annulus - inner_annulus - 1) # Yes, x and y are deliberately reversed below. image[center_px[1], begin:end] = 100 * fake_CCDimage.mean_noise - phot, missing_sources = single_image_photometry(fake_CCDimage, - found_sources, - fake_camera, - fake_obs, - aperture_settings, - shift_tolerance, - max_adu, fwhm_estimate, - use_coordinates=coords2use, - include_dig_noise=True, - reject_too_close=False, - reject_background_outliers=reject) - - phot.sort('aperture_sum') - sources.sort('amplitude') + phot, missing_sources = single_image_photometry( + fake_CCDimage, + found_sources, + fake_camera, + fake_obs, + aperture_settings, + shift_tolerance, + max_adu, + fwhm_estimate, + use_coordinates=coords2use, + include_dig_noise=True, + reject_too_close=False, + reject_background_outliers=reject, + ) + + phot.sort("aperture_sum") + sources.sort("amplitude") for inp, out in zip(sources, phot): - stdev = inp['x_stddev'] - expected_flux = (inp['amplitude'] * 2 * np.pi * - stdev**2 * - (1 - np.exp(-aperture**2 / (2 * stdev**2)))) + stdev = inp["x_stddev"] + expected_flux = ( + inp["amplitude"] + * 2 + * np.pi + * stdev**2 + * (1 - np.exp(-(aperture**2) / (2 * stdev**2))) + ) # This expected flux is correct IF there were no noise. With noise, the # standard deviation in the sum of the noise within in the aperture is # n_pix_in_aperture times the single-pixel standard deviation. @@ -361,12 +401,16 @@ def test_aperture_photometry_with_outlier_rejection(reject): # Here we just check whether any difference is consistent with # less than the expected one sigma deviation. if reject: - assert (np.abs(expected_flux - out['aperture_net_cnts'].value) < - expected_deviation) + assert ( + np.abs(expected_flux - out["aperture_net_cnts"].value) + < expected_deviation + ) else: with pytest.raises(AssertionError): - assert (np.abs(expected_flux - out['aperture_net_cnts'].value) < - expected_deviation) + assert ( + np.abs(expected_flux - out["aperture_net_cnts"].value) + < expected_deviation + ) def list_of_fakes(num_files): @@ -374,18 +418,18 @@ def list_of_fakes(num_files): fake_images = [FakeCCDImage(seed=SEED)] # Create additional images, each in a different position. - for i in range(num_files-1): - angle = 2*np.pi/(num_files-1) * i + for i in range(num_files - 1): + angle = 2 * np.pi / (num_files - 1) * i rad = 50 - dx, dy = rad*np.cos(angle), rad*np.sin(angle) - fake_images.append(shift_FakeCCDImage(fake_images[0], dx, dy) ) + dx, dy = rad * np.cos(angle), rad * np.sin(angle) + fake_images.append(shift_FakeCCDImage(fake_images[0], dx, dy)) - filters = ['U', 'B', 'V', 'R', 'I'] + filters = ["U", "B", "V", "R", "I"] for i in range(num_files): - if (i < 5): - fake_images[i].header['FILTER'] = filters[i] + if i < 5: + fake_images[i].header["FILTER"] = filters[i] else: - fake_images[i].header['FILTER'] = 'V' + fake_images[i].header["FILTER"] = "V" return fake_images @@ -401,66 +445,79 @@ def test_photometry_on_directory(): # when the temporary directory is deleted on Windows. 
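The `expected_flux` expression used throughout these tests is the closed-form integral of a circular Gaussian over an aperture of radius r: F(r) = A · 2πσ² · (1 − exp(−r² / 2σ²)), which approaches the total flux A·2πσ² as r grows. A small sanity check with illustrative numbers:

    import numpy as np

    amplitude, sigma, radius = 50.0, 2.0, 6.0  # illustrative; radius = 3 sigma
    total_flux = amplitude * 2 * np.pi * sigma**2
    in_aperture = total_flux * (1 - np.exp(-radius**2 / (2 * sigma**2)))
    # At 3 sigma the aperture captures ~98.9% of the total flux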
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as temp_dir: # Come up with Filenames - temp_file_names = [Path(temp_dir) / - f"tempfile_{i:02d}.fit" for i in range(1, num_files + 1)] + temp_file_names = [ + Path(temp_dir) / f"tempfile_{i:02d}.fit" for i in range(1, num_files + 1) + ] # Write the CCDData objects to files for i, image in enumerate(fake_images): - from time import sleep; sleep(1) + from time import sleep + + sleep(1) image.write(temp_file_names[i]) - object_name = fake_images[0].header['OBJECT'] + object_name = fake_images[0].header["OBJECT"] sources = fake_images[0].sources - aperture = sources['aperture'][0] + aperture = sources["aperture"][0] inner_annulus = 2 * aperture outer_annulus = 3 * aperture - aperture_settings = ApertureSettings(radius=aperture, - gap=inner_annulus - aperture, - width_annulus=outer_annulus - inner_annulus) + aperture_settings = ApertureSettings( + radius=aperture, + gap=inner_annulus - aperture, + width_annulus=outer_annulus - inner_annulus, + ) # Generate the sourcelist - found_sources = source_detection(fake_images[0], - fwhm=fake_images[0].sources['x_stddev'].mean(), - threshold=10) + found_sources = source_detection( + fake_images[0], fwhm=fake_images[0].sources["x_stddev"].mean(), threshold=10 + ) with warnings.catch_warnings(): - warnings.filterwarnings("ignore", - message="Cannot merge meta key", - category=MergeConflictWarning) - phot_data = multi_image_photometry(temp_dir, - object_name, - found_sources, - fake_camera, - fake_obs, - aperture_settings, - shift_tolerance, max_adu, fwhm_estimate, - include_dig_noise=True, - reject_too_close=True, - reject_background_outliers=True, - passband_map=None, - fwhm_by_fit=True) + warnings.filterwarnings( + "ignore", message="Cannot merge meta key", category=MergeConflictWarning + ) + phot_data = multi_image_photometry( + temp_dir, + object_name, + found_sources, + fake_camera, + fake_obs, + aperture_settings, + shift_tolerance, + max_adu, + fwhm_estimate, + include_dig_noise=True, + reject_too_close=True, + reject_background_outliers=True, + passband_map=None, + fwhm_by_fit=True, + ) # For following assertion to be true, rad must be small enough that # no source lies within outer_annulus of the edge of an image. - assert len(phot_data) == num_files*len(found_sources) + assert len(phot_data) == num_files * len(found_sources) # Sort all data by amount of signal - sources.sort('amplitude') - found_sources.sort('flux') + sources.sort("amplitude") + found_sources.sort("flux") # Get noise level from the first image noise_dev = fake_images[0].noise_dev for fnd, inp in zip(found_sources, sources): - star_id_chk = fnd['star_id'] + star_id_chk = fnd["star_id"] # Select the rows in phot_data that correspond to the current star # and compute the average of the aperture sums. - selected_rows = phot_data[phot_data['star_id'] == star_id_chk] - obs_avg_net_cnts = np.average(selected_rows['aperture_net_cnts'].value) - - stdev = inp['x_stddev'] - expected_flux = (inp['amplitude'] * 2 * np.pi * - stdev**2 * - (1 - np.exp(-aperture**2 / (2 * stdev**2)))) + selected_rows = phot_data[phot_data["star_id"] == star_id_chk] + obs_avg_net_cnts = np.average(selected_rows["aperture_net_cnts"].value) + + stdev = inp["x_stddev"] + expected_flux = ( + inp["amplitude"] + * 2 + * np.pi + * stdev**2 + * (1 - np.exp(-(aperture**2) / (2 * stdev**2))) + ) # This expected flux is correct IF there were no noise. 
With noise, the # standard deviation in the sum of the noise within in the aperture is # n_pix_in_aperture times the single-pixel standard deviation. @@ -475,8 +532,9 @@ def test_photometry_on_directory(): # Here we just check whether any difference is consistent with # less than the expected one sigma deviation. - assert (np.abs(expected_flux - obs_avg_net_cnts) < - np.pi * aperture**2 * noise_dev) + assert ( + np.abs(expected_flux - obs_avg_net_cnts) < np.pi * aperture**2 * noise_dev + ) def test_photometry_on_directory_with_no_ra_dec(): @@ -490,42 +548,49 @@ def test_photometry_on_directory_with_no_ra_dec(): # when the temporary directory is deleted on Windows. with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as temp_dir: # Come up with Filenames - temp_file_names = [Path(temp_dir) / - f"tempfile_{i:02d}.fits" for i in range(1, num_files + 1)] + temp_file_names = [ + Path(temp_dir) / f"tempfile_{i:02d}.fits" for i in range(1, num_files + 1) + ] # Write the CCDData objects to files for i, image in enumerate(fake_images): image.write(temp_file_names[i]) - object_name = fake_images[0].header['OBJECT'] + object_name = fake_images[0].header["OBJECT"] sources = fake_images[0].sources - aperture = sources['aperture'][0] + aperture = sources["aperture"][0] inner_annulus = 2 * aperture outer_annulus = 3 * aperture - aperture_settings = ApertureSettings(radius=aperture, - gap=inner_annulus - aperture, - width_annulus=outer_annulus - inner_annulus) + aperture_settings = ApertureSettings( + radius=aperture, + gap=inner_annulus - aperture, + width_annulus=outer_annulus - inner_annulus, + ) # Generate the sourcelist - found_sources = source_detection(fake_images[0], - fwhm=fake_images[0].sources['x_stddev'].mean(), - threshold=10) + found_sources = source_detection( + fake_images[0], fwhm=fake_images[0].sources["x_stddev"].mean(), threshold=10 + ) # Damage the sourcelist by removing the ra and dec columns found_sources.drop_ra_dec() with pytest.raises(ValueError): - phot_data = multi_image_photometry(temp_dir, - object_name, - found_sources, - fake_camera, - fake_obs, - aperture_settings, - shift_tolerance, max_adu, fwhm_estimate, - include_dig_noise=True, - reject_too_close=True, - reject_background_outliers=True, - passband_map=None, - fwhm_by_fit=True) + phot_data = multi_image_photometry( + temp_dir, + object_name, + found_sources, + fake_camera, + fake_obs, + aperture_settings, + shift_tolerance, + max_adu, + fwhm_estimate, + include_dig_noise=True, + reject_too_close=True, + reject_background_outliers=True, + passband_map=None, + fwhm_by_fit=True, + ) def test_photometry_on_directory_with_bad_fits(): @@ -540,39 +605,48 @@ def test_photometry_on_directory_with_bad_fits(): # when the temporary directory is deleted on Windows. 
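The tolerance in the assertions above is deliberately generous: the comments take the noise in the aperture sum to be n_pix times the per-pixel standard deviation, whereas for n independent pixels the statistical one-sigma value is sqrt(n) times it, so the bound is loose by roughly a factor of sqrt(n). A sketch of the bound as the tests compute it, with illustrative values:

    import numpy as np

    aperture_radius = 5.0  # pixels; illustrative
    noise_dev = 7.0        # per-pixel background standard deviation
    n_pix = np.pi * aperture_radius**2
    # The tests allow |expected - measured| < n_pix * noise_dev;
    # the strict one-sigma value would be np.sqrt(n_pix) * noise_dev.
    tolerance = n_pix * noise_dev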
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as temp_dir: # Come up with Filenames - temp_file_names = [Path(temp_dir) / - f"tempfile_{i:02d}.fits" for i in range(1, num_files + 1)] + temp_file_names = [ + Path(temp_dir) / f"tempfile_{i:02d}.fits" for i in range(1, num_files + 1) + ] # Write the CCDData objects to files for i, image in enumerate(fake_images): image.drop_wcs() image.write(temp_file_names[i]) - object_name = fake_images[0].header['OBJECT'] + object_name = fake_images[0].header["OBJECT"] sources = fake_images[0].sources - aperture = sources['aperture'][0] + aperture = sources["aperture"][0] inner_annulus = 2 * aperture outer_annulus = 3 * aperture - aperture_settings = ApertureSettings(radius=aperture, - gap=inner_annulus - aperture, - width_annulus=outer_annulus - inner_annulus) + aperture_settings = ApertureSettings( + radius=aperture, + gap=inner_annulus - aperture, + width_annulus=outer_annulus - inner_annulus, + ) # Generate the sourcelist with RA/Dec information from a clean image - found_sources = source_detection(clean_fake_images[0], - fwhm=clean_fake_images[0].sources['x_stddev'].mean(), - threshold=10) + found_sources = source_detection( + clean_fake_images[0], + fwhm=clean_fake_images[0].sources["x_stddev"].mean(), + threshold=10, + ) # Since none of the images will be valid, it should raise a RuntimeError with pytest.raises(RuntimeError): - phot_data = multi_image_photometry(temp_dir, - object_name, - found_sources, - fake_camera, - fake_obs, - aperture_settings, - shift_tolerance, max_adu, fwhm_estimate, - include_dig_noise=True, - reject_too_close=True, - reject_background_outliers=True, - passband_map=None, - fwhm_by_fit=True) + phot_data = multi_image_photometry( + temp_dir, + object_name, + found_sources, + fake_camera, + fake_obs, + aperture_settings, + shift_tolerance, + max_adu, + fwhm_estimate, + include_dig_noise=True, + reject_too_close=True, + reject_background_outliers=True, + passband_map=None, + fwhm_by_fit=True, + ) diff --git a/stellarphot/plotting/aij_plots.py b/stellarphot/plotting/aij_plots.py index 456b7e33..01d66ef7 100644 --- a/stellarphot/plotting/aij_plots.py +++ b/stellarphot/plotting/aij_plots.py @@ -2,12 +2,20 @@ from ..settings import ApertureSettings -__all__ = ['seeing_plot'] - - -def seeing_plot(raw_radius, raw_counts, binned_radius, binned_counts, HWHM, - plot_title='', file_name='', aperture_settings=None, - figsize=(20, 10)): +__all__ = ["seeing_plot"] + + +def seeing_plot( + raw_radius, + raw_counts, + binned_radius, + binned_counts, + HWHM, + plot_title="", + file_name="", + aperture_settings=None, + figsize=(20, 10), +): """ Show a seeing plot for data from an image with radius on the x axis and counts (ADU) on the y axis. 
@@ -50,10 +58,9 @@ def seeing_plot(raw_radius, raw_counts, binned_radius, binned_counts, HWHM,
     """
     if aperture_settings is None:
         radius = 4 * HWHM
-        aperture_settings = ApertureSettings(radius=radius,
-                                             inner_annulus=radius + 10,
-                                             outer_annulus=radius + 25)
-
+        aperture_settings = ApertureSettings(
+            radius=radius, inner_annulus=radius + 10, outer_annulus=radius + 25
+        )
 
     radius = aperture_settings.radius
     inner_annulus = aperture_settings.inner_annulus
@@ -63,46 +70,70 @@ def seeing_plot(raw_radius, raw_counts, binned_radius, binned_counts, HWHM,
     plt.grid(True)
 
     # plot the raw radius and raw counts
-    plt.plot(raw_radius, raw_counts, linestyle='none',
-             marker="s", markerfacecolor='none', color='blue')
+    plt.plot(
+        raw_radius,
+        raw_counts,
+        linestyle="none",
+        marker="s",
+        markerfacecolor="none",
+        color="blue",
+    )
 
     # plot the binned radius and binned counts
-    plt.plot(binned_radius, binned_counts, color='magenta', linewidth='1.0')
+    plt.plot(binned_radius, binned_counts, color="magenta", linewidth="1.0")
 
     # draw vertical line at HWHM and label it
-    plt.vlines(HWHM, -0.2, 1.2, linestyle=(0, (5, 10)), color='#00cc00')
-    plt.annotate(f"HWHM {HWHM:2.1f}", (HWHM, -0.25),
-                 color='#00cc00', horizontalalignment='center')
+    plt.vlines(HWHM, -0.2, 1.2, linestyle=(0, (5, 10)), color="#00cc00")
+    plt.annotate(
+        f"HWHM {HWHM:2.1f}",
+        (HWHM, -0.25),
+        color="#00cc00",
+        horizontalalignment="center",
+    )
 
     # label axis
-    plt.xlabel('Radius (pixels)')
-    plt.ylabel('ADU')
+    plt.xlabel("Radius (pixels)")
+    plt.ylabel("ADU")
 
     # draw vertical line at the radius and label it
-    plt.vlines(radius, -0.2, binned_counts[0], color='red')
-    plt.annotate(f"Radius {radius:2.1f}", (radius, -0.25),
-                 color='red', horizontalalignment='center')
-    plt.hlines(binned_counts[0], binned_counts[0], radius, color='red')
+    plt.vlines(radius, -0.2, binned_counts[0], color="red")
+    plt.annotate(
+        f"Radius {radius:2.1f}",
+        (radius, -0.25),
+        color="red",
+        horizontalalignment="center",
+    )
+    plt.hlines(binned_counts[0], binned_counts[0], radius, color="red")
 
     # label the source
     plt.annotate(
-        'SOURCE', (radius, binned_counts[0] + 0.02),
-        color='red', horizontalalignment='center')
+        "SOURCE",
+        (radius, binned_counts[0] + 0.02),
+        color="red",
+        horizontalalignment="center",
+    )
 
     # draw vertical lines at the background and label it
-    plt.vlines(inner_annulus, -0.2, binned_counts[0], color='red')
-    plt.vlines(outer_annulus, -0.2, binned_counts[0], color='red')
-    plt.hlines(binned_counts[0], inner_annulus, outer_annulus, color='red')
-    plt.annotate('BACKGROUND', (inner_annulus,
-                 binned_counts[0] + 0.02), color='red')
-    plt.annotate(f"Back> {inner_annulus:2.1f}",
-                 (inner_annulus, -0.25), color='red', horizontalalignment='center')
-    plt.annotate(f"<Back {outer_annulus:2.1f}",
-                 (outer_annulus, -0.25), color='red', horizontalalignment='center')
+    plt.vlines(inner_annulus, -0.2, binned_counts[0], color="red")
+    plt.vlines(outer_annulus, -0.2, binned_counts[0], color="red")
+    plt.hlines(binned_counts[0], inner_annulus, outer_annulus, color="red")
+    plt.annotate("BACKGROUND", (inner_annulus, binned_counts[0] + 0.02), color="red")
+    plt.annotate(
+        f"Back> {inner_annulus:2.1f}",
+        (inner_annulus, -0.25),
+        color="red",
+        horizontalalignment="center",
+    )
+    plt.annotate(
+        f"<Back {outer_annulus:2.1f}",
+        (outer_annulus, -0.25),
+        color="red",
+        horizontalalignment="center",
+    )
diff --git a/stellarphot/plotting/multi_night_plots.py b/stellarphot/plotting/multi_night_plots.py
--- a/stellarphot/plotting/multi_night_plots.py
+++ b/stellarphot/plotting/multi_night_plots.py
     if max_pow > 0.5:
-        color = 'green'
+        color = "green"
     elif max_pow > 0.4:
-        color = 'cyan'
+        color = "cyan"
     else:
-        color = 'gray'
+        color = "gray"
     bar_x = 0.25
-    plt.plot([bar_x, bar_x], [0, max_pow],
-             color=color, linewidth=10, label='LS power')
+    plt.plot(
+        [bar_x, bar_x], [0, max_pow], color=color, linewidth=10, label="LS power"
+    )
     plt.legend()
     # Add dot for magnitude of star.
-    size = 10000.
/ np.abs(10**((source_median - brightest_mag) / 2.5)) - plt.scatter([0.8], [0.2], c='red', marker='o', s=size) + size = 10000.0 / np.abs(10 ** ((source_median - brightest_mag) / 2.5)) + plt.scatter([0.8], [0.2], c="red", marker="o", s=size) plt.ylim(0, 1) diff --git a/stellarphot/plotting/transit_plots.py b/stellarphot/plotting/transit_plots.py index 8174c480..9b16cbf0 100644 --- a/stellarphot/plotting/transit_plots.py +++ b/stellarphot/plotting/transit_plots.py @@ -5,11 +5,11 @@ from astropy import units as u -__all__ = ['plot_many_factors', 'bin_data', 'scale_and_shift'] +__all__ = ["plot_many_factors", "bin_data", "scale_and_shift"] def plot_many_factors(photometry, low, high, shift, scale, ax=None): - """ Plots many factors of photometry against each other. + """Plots many factors of photometry against each other. Parameters ---------- @@ -38,12 +38,14 @@ def plot_many_factors(photometry, low, high, shift, scale, ax=None): None Added features to the plot directly. """ - airmass = photometry['airmass'] / np.mean(photometry['airmass']) - x = photometry['xcenter'] / np.mean(photometry['xcenter']) - y = photometry['ycenter'] / np.mean(photometry['ycenter']) - comp_counts = photometry['comparison counts'] / np.mean(photometry['comparison counts']) - sky_per_pix = photometry['sky_per_pix_avg'] / np.mean(photometry['sky_per_pix_avg']) - width = photometry['width'] / np.mean(photometry['width']) + airmass = photometry["airmass"] / np.mean(photometry["airmass"]) + x = photometry["xcenter"] / np.mean(photometry["xcenter"]) + y = photometry["ycenter"] / np.mean(photometry["ycenter"]) + comp_counts = photometry["comparison counts"] / np.mean( + photometry["comparison counts"] + ) + sky_per_pix = photometry["sky_per_pix_avg"] / np.mean(photometry["sky_per_pix_avg"]) + width = photometry["width"] / np.mean(photometry["width"]) scale_airmass = scale_and_shift(airmass, scale, 0.75 * shift, pos=False) scale_x = scale_and_shift(x, scale, shift, pos=True) @@ -52,28 +54,63 @@ def plot_many_factors(photometry, low, high, shift, scale, ax=None): scale_counts = scale_and_shift(comp_counts, scale, shift, pos=True) scale_width = scale_and_shift(width, scale, shift, pos=True) - x_times = (photometry['bjd'] - 2400000 * u.day).jd + x_times = (photometry["bjd"] - 2400000 * u.day).jd if ax is None: ax = plt.gca() - print(f'{scale_airmass.min()} {scale_airmass.max()}') - ax.plot(x_times, scale_counts, '.', c='brown', - label='tot_C_cnts (arbitrarily scaled and shifted)', alpha=0.5, ms=4) - ax.plot(x_times, scale_airmass, 'c-', - label="AIRMASS (arbitrarily scaled and shifted)", ms=4) - ax.plot(x_times, scale_sky_pix, c='gold', - label='Sky/Pixel_T1 (arbitrarily scaled and shifted)', ms=4) - ax.plot(x_times, scale_width, '-', c='gray', - label="Width_T1 (arbitrarily scaled and shifted)", ms=4) - ax.plot(x_times, scale_x, '-', c='pink', - label="X(FITS)_T1 (arbitrarily scaled and shifted)", ms=4) - ax.plot(x_times, scale_y, '-', c='lightblue', - label="Y(FITS)_T1 (arbitrarily scaled and shifted)", ms=4) + print(f"{scale_airmass.min()} {scale_airmass.max()}") + ax.plot( + x_times, + scale_counts, + ".", + c="brown", + label="tot_C_cnts (arbitrarily scaled and shifted)", + alpha=0.5, + ms=4, + ) + ax.plot( + x_times, + scale_airmass, + "c-", + label="AIRMASS (arbitrarily scaled and shifted)", + ms=4, + ) + ax.plot( + x_times, + scale_sky_pix, + c="gold", + label="Sky/Pixel_T1 (arbitrarily scaled and shifted)", + ms=4, + ) + ax.plot( + x_times, + scale_width, + "-", + c="gray", + label="Width_T1 (arbitrarily scaled and 
shifted)", + ms=4, + ) + ax.plot( + x_times, + scale_x, + "-", + c="pink", + label="X(FITS)_T1 (arbitrarily scaled and shifted)", + ms=4, + ) + ax.plot( + x_times, + scale_y, + "-", + c="lightblue", + label="Y(FITS)_T1 (arbitrarily scaled and shifted)", + ms=4, + ) def bin_data(data_set, num=3, error_set=None): - """ Bins data into groups of num. + """Bins data into groups of num. Parameters ---------- @@ -99,15 +136,15 @@ def bin_data(data_set, num=3, error_set=None): binned_set = [] error = [] for i in range(0, len(data_set), num): - binned_set.append(data_set[i:i+num].mean()) + binned_set.append(data_set[i : i + num].mean()) if error_set is not None: - error_bin = error_set[i:i+num]**2 - error.append(error_bin.sum()/num) + error_bin = error_set[i : i + num] ** 2 + error.append(error_bin.sum() / num) return np.array(binned_set), np.array(error) def scale_and_shift(data_set, scale, shift, pos=True): - """ Scales and shifts data set passed in. + """Scales and shifts data set passed in. Parameters ---------- @@ -131,9 +168,13 @@ def scale_and_shift(data_set, scale, shift, pos=True): The scaled and shifted data. """ if not pos: - data_set = 1 - scale * (data_set - data_set.min()) / (data_set.max() - data_set.min()) + data_set = 1 - scale * (data_set - data_set.min()) / ( + data_set.max() - data_set.min() + ) else: - data_set = 1 + scale * (data_set - data_set.min()) / (data_set.max() - data_set.min()) + data_set = 1 + scale * (data_set - data_set.min()) / ( + data_set.max() - data_set.min() + ) data_set += shift diff --git a/stellarphot/settings/autowidgets.py b/stellarphot/settings/autowidgets.py index 533e6fde..e6dccbeb 100644 --- a/stellarphot/settings/autowidgets.py +++ b/stellarphot/settings/autowidgets.py @@ -3,15 +3,14 @@ import ipywidgets as w from ipyautoui.autowidgets import create_widget_caller -__all__ = [ - 'CustomBoundedIntTex' -] +__all__ = ["CustomBoundedIntTex"] class CustomBoundedIntTex(w.BoundedIntText): """ A BoundedIntText widget adapted for use in ipyautoui. """ + def __init__(self, schema): self.schema = schema self.caller = create_widget_caller(schema) diff --git a/stellarphot/settings/models.py b/stellarphot/settings/models.py index e92dc19e..37451fa2 100644 --- a/stellarphot/settings/models.py +++ b/stellarphot/settings/models.py @@ -6,10 +6,7 @@ from .autowidgets import CustomBoundedIntTex -__all__ = [ - 'ApertureSettings', - 'PhotometryFileSettings' -] +__all__ = ["ApertureSettings", "PhotometryFileSettings"] class ApertureSettings(BaseModel): @@ -45,9 +42,10 @@ class ApertureSettings(BaseModel): >>> aperture_settings = ApertureSettings(radius=4, gap=10, annulus_width=15) """ - radius : conint(ge=1) = Field(autoui=CustomBoundedIntTex, default=1) - gap : conint(ge=1) = Field(autoui=CustomBoundedIntTex, default=1) - annulus_width : conint(ge=1) = Field(autoui=CustomBoundedIntTex, default=1) + + radius: conint(ge=1) = Field(autoui=CustomBoundedIntTex, default=1) + gap: conint(ge=1) = Field(autoui=CustomBoundedIntTex, default=1) + annulus_width: conint(ge=1) = Field(autoui=CustomBoundedIntTex, default=1) class Config: validate_assignment = True @@ -72,7 +70,13 @@ class PhotometryFileSettings(BaseModel): """ An evolutionary step on the way to having a monolithic set of photometry settings. 
""" - image_folder : Path = Field(show_only_dirs=True, default='', - description="Folder containing the calibrated images") - aperture_settings_file : Path = Field(filter_pattern='*.json', default='') - aperture_locations_file : Path = Field(filter_pattern=['*.ecsv', '*.csv'], default='') + + image_folder: Path = Field( + show_only_dirs=True, + default="", + description="Folder containing the calibrated images", + ) + aperture_settings_file: Path = Field(filter_pattern="*.json", default="") + aperture_locations_file: Path = Field( + filter_pattern=["*.ecsv", "*.csv"], default="" + ) diff --git a/stellarphot/settings/tests/test_models.py b/stellarphot/settings/tests/test_models.py index 2835827f..8f573740 100644 --- a/stellarphot/settings/tests/test_models.py +++ b/stellarphot/settings/tests/test_models.py @@ -5,23 +5,25 @@ from stellarphot.settings.models import ApertureSettings -DEFAULT_APERTURE_SETTINGS = dict(radius=5, - gap=10, - annulus_width=15) +DEFAULT_APERTURE_SETTINGS = dict(radius=5, gap=10, annulus_width=15) def test_create_aperture_settings_correctly(): ap_set = ApertureSettings(**DEFAULT_APERTURE_SETTINGS) - assert ap_set.radius == DEFAULT_APERTURE_SETTINGS['radius'] - assert (ap_set.inner_annulus == - DEFAULT_APERTURE_SETTINGS['radius'] + DEFAULT_APERTURE_SETTINGS['gap']) - assert (ap_set.outer_annulus == - DEFAULT_APERTURE_SETTINGS['radius'] + - DEFAULT_APERTURE_SETTINGS['gap'] + - DEFAULT_APERTURE_SETTINGS['annulus_width']) + assert ap_set.radius == DEFAULT_APERTURE_SETTINGS["radius"] + assert ( + ap_set.inner_annulus + == DEFAULT_APERTURE_SETTINGS["radius"] + DEFAULT_APERTURE_SETTINGS["gap"] + ) + assert ( + ap_set.outer_annulus + == DEFAULT_APERTURE_SETTINGS["radius"] + + DEFAULT_APERTURE_SETTINGS["gap"] + + DEFAULT_APERTURE_SETTINGS["annulus_width"] + ) -@pytest.mark.parametrize('bad_one', ['radius', 'gap', 'annulus_width']) +@pytest.mark.parametrize("bad_one", ["radius", "gap", "annulus_width"]) def test_create_invalid_values(bad_one): # Check that individual values that are bad raise an error bad_settings = DEFAULT_APERTURE_SETTINGS.copy() diff --git a/stellarphot/settings/views.py b/stellarphot/settings/views.py index cdfda705..91db3292 100644 --- a/stellarphot/settings/views.py +++ b/stellarphot/settings/views.py @@ -1,6 +1,6 @@ from ipyautoui import AutoUi -__all__ = ['ui_generator'] +__all__ = ["ui_generator"] def ui_generator(model): diff --git a/stellarphot/tests/make_wcs.py b/stellarphot/tests/make_wcs.py index 3a67ecd9..3ae299d8 100644 --- a/stellarphot/tests/make_wcs.py +++ b/stellarphot/tests/make_wcs.py @@ -1,9 +1,10 @@ from astropy.wcs import WCS + def make_wcs(): wcs = WCS(naxis=2) # Numbering of pixels for crpix starts at 1.... - wcs.wcs.crpix = [5., 5.] 
+ wcs.wcs.crpix = [5.0, 5.0] wcs.wcs.cdelt = [1, 1] wcs.wcs.crval = [10, 5] wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"] diff --git a/stellarphot/tests/test_core.py b/stellarphot/tests/test_core.py index d2a2f60f..add47e95 100644 --- a/stellarphot/tests/test_core.py +++ b/stellarphot/tests/test_core.py @@ -7,8 +7,13 @@ from astropy.coordinates import EarthLocation from astropy.utils.data import get_pkg_data_filename from pydantic import ValidationError -from stellarphot.core import (Camera, BaseEnhancedTable, PhotometryData, - CatalogData, SourceListData) +from stellarphot.core import ( + Camera, + BaseEnhancedTable, + PhotometryData, + CatalogData, + SourceListData, +) def test_camera_attributes(): @@ -16,10 +21,12 @@ def test_camera_attributes(): read_noise = 10 * u.electron dark_current = 0.01 * u.electron / u.second pixel_scale = 0.563 * u.arcsec / u.pix - c = Camera(gain=gain, - read_noise=read_noise, - dark_current=dark_current, - pixel_scale=pixel_scale) + c = Camera( + gain=gain, + read_noise=read_noise, + dark_current=dark_current, + pixel_scale=pixel_scale, + ) assert c.gain == gain assert c.dark_current == dark_current assert c.read_noise == read_noise @@ -32,26 +39,34 @@ def test_camera_unitscheck(): dark_current = 0.01 * u.electron / u.second pixel_scale = 0.563 * u.arcsec / u.pix - with pytest.raises(ValidationError, match="gain"): - c = Camera(gain=gain.value, - read_noise=read_noise, - dark_current=dark_current, - pixel_scale=pixel_scale) - with pytest.raises(ValidationError, match="read_noise"): - c = Camera(gain=gain, - read_noise=read_noise.value, - dark_current=dark_current, - pixel_scale=pixel_scale) - with pytest.raises(ValidationError, match="dark_current"): - c = Camera(gain=gain, - read_noise=read_noise, - dark_current=dark_current.value, - pixel_scale=pixel_scale) - with pytest.raises(ValidationError, match="pixel_scale"): - c = Camera(gain=gain, - read_noise=read_noise, - dark_current=dark_current, - pixel_scale=pixel_scale.value) + with pytest.raises(ValidationError, match="gain"): + c = Camera( + gain=gain.value, + read_noise=read_noise, + dark_current=dark_current, + pixel_scale=pixel_scale, + ) + with pytest.raises(ValidationError, match="read_noise"): + c = Camera( + gain=gain, + read_noise=read_noise.value, + dark_current=dark_current, + pixel_scale=pixel_scale, + ) + with pytest.raises(ValidationError, match="dark_current"): + c = Camera( + gain=gain, + read_noise=read_noise, + dark_current=dark_current.value, + pixel_scale=pixel_scale, + ) + with pytest.raises(ValidationError, match="pixel_scale"): + c = Camera( + gain=gain, + read_noise=read_noise, + dark_current=dark_current, + pixel_scale=pixel_scale.value, + ) def test_camera_altunitscheck(): @@ -60,10 +75,12 @@ def test_camera_altunitscheck(): read_noise = 10 * u.count dark_current = 0.01 * u.count / u.second pixel_scale = 0.563 * u.arcsec / u.pix - c = Camera(gain=gain, - read_noise=read_noise, - dark_current=dark_current, - pixel_scale=pixel_scale) + c = Camera( + gain=gain, + read_noise=read_noise, + dark_current=dark_current, + pixel_scale=pixel_scale, + ) assert c.gain == gain assert c.dark_current == dark_current assert c.read_noise == read_noise @@ -71,34 +88,68 @@ def test_camera_altunitscheck(): # Create several test descriptions for use in base_enhanced_table tests. 
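As the tests above establish, `Camera` is a pydantic model that requires each parameter to arrive as an astropy `Quantity` with the right physical type; a bare float raises `ValidationError`. A minimal usage sketch (the values are illustrative, echoing the Feder configuration defined just below):

    import astropy.units as u
    from stellarphot.core import Camera

    cam = Camera(
        gain=1.5 * u.electron / u.adu,
        read_noise=10 * u.electron,
        dark_current=0.01 * u.electron / u.second,
        pixel_scale=0.563 * u.arcsec / u.pix,
    )
    # Camera(gain=1.5, ...) without units raises pydantic.ValidationError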
-test_descript = {'id': None, - 'ra': u.deg, - 'dec' : u.deg, - 'sky_per_pix_avg' : u.adu, - 'sky_per_pix_med' : u.adu, - 'sky_per_pix_std' : u.adu, - 'fwhm_x' : u.pix, - 'fwhm_y' : u.pix, - 'width' : u.pix} +test_descript = { + "id": None, + "ra": u.deg, + "dec": u.deg, + "sky_per_pix_avg": u.adu, + "sky_per_pix_med": u.adu, + "sky_per_pix_std": u.adu, + "fwhm_x": u.pix, + "fwhm_y": u.pix, + "width": u.pix, +} # Define a realistic table of astronomical data contianing one row -data = np.array([[1, 78.17278712191920, 22.505771480719400, 31.798216414544900, - 31.658750534057600, 9.294325523269860, 13.02511260943810, - 13.02511260943810, 13.02511260943810]]) -colnames = ['id', 'ra', 'dec', 'sky_per_pix_avg', 'sky_per_pix_med', 'sky_per_pix_std', - 'fwhm_x', 'fwhm_y', 'width'] -coltypes = ['int', 'float', 'float', 'float', 'float', 'float', 'float', 'float', - 'float'] -colunits = [None, u.deg, u.deg, u.adu, u.adu, u.adu, u.pix, u.pix, u.pix] +data = np.array( + [ + [ + 1, + 78.17278712191920, + 22.505771480719400, + 31.798216414544900, + 31.658750534057600, + 9.294325523269860, + 13.02511260943810, + 13.02511260943810, + 13.02511260943810, + ] + ] +) +colnames = [ + "id", + "ra", + "dec", + "sky_per_pix_avg", + "sky_per_pix_med", + "sky_per_pix_std", + "fwhm_x", + "fwhm_y", + "width", +] +coltypes = [ + "int", + "float", + "float", + "float", + "float", + "float", + "float", + "float", + "float", +] +colunits = [None, u.deg, u.deg, u.adu, u.adu, u.adu, u.pix, u.pix, u.pix] testdata = Table(data, names=colnames, dtype=coltypes, units=colunits) # Define some configuration information assuming Feder telescope -feder_cg_16m = Camera(gain = 1.5 * u.electron / u.adu, - read_noise = 10.0 * u.electron, - dark_current=0.01 * u.electron / u.second, - pixel_scale = 0.563 * u.arcsec / u.pix) -feder_passbands = {'up':'SU', 'gp':'SG', 'rp':'SR', 'zp':'SZ', 'ip':'SI'} -feder_obs = EarthLocation(lat = 46.86678,lon=-96.45328, height=311) +feder_cg_16m = Camera( + gain=1.5 * u.electron / u.adu, + read_noise=10.0 * u.electron, + dark_current=0.01 * u.electron / u.second, + pixel_scale=0.563 * u.arcsec / u.pix, +) +feder_passbands = {"up": "SU", "gp": "SG", "rp": "SR", "zp": "SZ", "ip": "SI"} +feder_obs = EarthLocation(lat=46.86678, lon=-96.45328, height=311) def test_base_enhanced_table_blank(): @@ -111,118 +162,239 @@ def test_base_enhanced_table_blank(): def test_base_enhanced_table_from_existing_table(): # Should create a populated dataset properly and display the astropy data test_base2 = BaseEnhancedTable(table_description=test_descript, input_data=testdata) - assert len(test_base2['ra']) == 1 - assert len(test_base2['dec']) == 1 + assert len(test_base2["ra"]) == 1 + assert len(test_base2["dec"]) == 1 def test_base_enhanced_table_missing_column(): # Should raise exception because the RA data is missing from input data testdata_nora = testdata.copy() - testdata_nora.remove_column('ra') + testdata_nora.remove_column("ra") with pytest.raises(ValueError): - test_base = BaseEnhancedTable(table_description=test_descript, - input_data=testdata_nora) + test_base = BaseEnhancedTable( + table_description=test_descript, input_data=testdata_nora + ) def test_base_enhanced_table_missing_badunits(): # This will fail due to RA being in units of hours bad_ra_descript = test_descript.copy() - bad_ra_descript[1,2] = u.hr + bad_ra_descript[1, 2] = u.hr with pytest.raises(ValueError): - test_base = BaseEnhancedTable(table_description=bad_ra_descript, - input_data=testdata) + test_base = BaseEnhancedTable( + 
table_description=bad_ra_descript, input_data=testdata + ) def test_base_enhanced_table_recursive(): # Should create a populated dataset properly and display the astropy data test_base2 = BaseEnhancedTable(table_description=test_descript, input_data=testdata) - assert len(test_base2['ra']) == 1 - assert len(test_base2['dec']) == 1 + assert len(test_base2["ra"]) == 1 + assert len(test_base2["dec"]) == 1 # Attempt recursive call with pytest.raises(TypeError): - test_base3 = BaseEnhancedTable(table_description=test_descript, - input_data=test_base2) + test_base3 = BaseEnhancedTable( + table_description=test_descript, input_data=test_base2 + ) # Define a realistic table of photometry data (a bit corrupted) -photdata = np.array([[1, 2049.145245206124, 2054.0849947477964, 109070.60831212997, - 154443.9371254444, 78.17278712191924, 22.505771480719375, - 31.798216414544864, 31.658750534057617, 9.294325523269857, - 13.02511260943813, 13.02511260943813, 13.02511260943813, 29.0, - 2642.079421669016, 44.0, 59.0, 4853.760649796231, 120.0, - '2022-11-27T06:26:29.620', 59909, 25057.195077483062, - 2459910.7754060575, -6.239606167785804, 1.115, 'ip', - 'TIC_467615239.01-S001-R001-C001-ip.fit', 1, 0.02320185643388203, - 803.1970935659333, 535.4647290439556, 46.795229859903905]]) -photcolnames = ['id', 'xcenter', 'ycenter', 'aperture_sum', 'annulus_sum', 'ra', 'dec', - 'sky_per_pix_avg', 'sky_per_pix_med', 'sky_per_pix_std', 'fwhm_x', - 'fwhm_y', 'width', 'aperture', 'aperture_area', 'annulus_inner', - 'annulus_outer', 'annulus_area', 'exposure', 'date-obs', 'night', - 'aperture_net_cnts', 'bjd', 'mag_inst', 'airmass', 'passband', 'file', - 'star_id', 'mag_error', 'noise_electrons', 'noise_cnts', 'snr'] -photcoltypes = ['int', 'float', 'float', 'float', 'float', 'float', 'float', 'float', - 'float', 'float', 'float', 'float', 'float', 'float', 'float', 'float', - 'float', 'float', 'float', 'str', 'int', 'float', 'float', 'float', - 'float', 'str', 'str', 'int', 'float', 'float', 'float', 'float'] -photcolunits = [None, u.pix, u.pix, u.adu, None, u.deg, u.deg, u.adu, u.adu, u.adu, - None, None, None, u.pix, u.pix, u.pix, u.pix, u.pix, u.s, - None, None, u.adu, None, None, None, None, None, None, None, - u.electron, None, u.adu] +photdata = np.array( + [ + [ + 1, + 2049.145245206124, + 2054.0849947477964, + 109070.60831212997, + 154443.9371254444, + 78.17278712191924, + 22.505771480719375, + 31.798216414544864, + 31.658750534057617, + 9.294325523269857, + 13.02511260943813, + 13.02511260943813, + 13.02511260943813, + 29.0, + 2642.079421669016, + 44.0, + 59.0, + 4853.760649796231, + 120.0, + "2022-11-27T06:26:29.620", + 59909, + 25057.195077483062, + 2459910.7754060575, + -6.239606167785804, + 1.115, + "ip", + "TIC_467615239.01-S001-R001-C001-ip.fit", + 1, + 0.02320185643388203, + 803.1970935659333, + 535.4647290439556, + 46.795229859903905, + ] + ] +) +photcolnames = [ + "id", + "xcenter", + "ycenter", + "aperture_sum", + "annulus_sum", + "ra", + "dec", + "sky_per_pix_avg", + "sky_per_pix_med", + "sky_per_pix_std", + "fwhm_x", + "fwhm_y", + "width", + "aperture", + "aperture_area", + "annulus_inner", + "annulus_outer", + "annulus_area", + "exposure", + "date-obs", + "night", + "aperture_net_cnts", + "bjd", + "mag_inst", + "airmass", + "passband", + "file", + "star_id", + "mag_error", + "noise_electrons", + "noise_cnts", + "snr", +] +photcoltypes = [ + "int", + "float", + "float", + "float", + "float", + "float", + "float", + "float", + "float", + "float", + "float", + "float", + "float", + "float", + "float", 
+ "float", + "float", + "float", + "float", + "str", + "int", + "float", + "float", + "float", + "float", + "str", + "str", + "int", + "float", + "float", + "float", + "float", +] +photcolunits = [ + None, + u.pix, + u.pix, + u.adu, + None, + u.deg, + u.deg, + u.adu, + u.adu, + u.adu, + None, + None, + None, + u.pix, + u.pix, + u.pix, + u.pix, + u.pix, + u.s, + None, + None, + u.adu, + None, + None, + None, + None, + None, + None, + None, + u.electron, + None, + u.adu, +] # Define initial bad table -testphot_data = Table(photdata, names=photcolnames, dtype=photcoltypes, - units=photcolunits) +testphot_data = Table( + photdata, names=photcolnames, dtype=photcoltypes, units=photcolunits +) # Convert times to correct time format but leave bad units testphot_goodTime = testphot_data.copy() -testphot_goodTime['date-obs'] = Column(data=Time(testphot_goodTime['date-obs'], - format='isot', scale='utc'), - name='date-obs') +testphot_goodTime["date-obs"] = Column( + data=Time(testphot_goodTime["date-obs"], format="isot", scale="utc"), + name="date-obs", +) # Fix all the units for PhotometryData phot_descript = { - 'star_id' : None, - 'ra' : u.deg, - 'dec' : u.deg, - 'xcenter' : u.pix, - 'ycenter' : u.pix, - 'fwhm_x' : u.pix, - 'fwhm_y' : u.pix, - 'width' : u.pix, - 'aperture' : u.pix, - 'annulus_inner' : u.pix, - 'annulus_outer' : u.pix, - 'aperture_sum' : None, - 'annulus_sum' : None, - 'sky_per_pix_avg' : None, - 'sky_per_pix_med' : None, - 'sky_per_pix_std' : None, - 'aperture_net_cnts' : None, - 'noise_cnts' : None, - 'noise_electrons' : u.electron, - 'exposure' : u.second, - 'date-obs' : None, - 'airmass' : None, - 'passband' : None, - 'file' : None + "star_id": None, + "ra": u.deg, + "dec": u.deg, + "xcenter": u.pix, + "ycenter": u.pix, + "fwhm_x": u.pix, + "fwhm_y": u.pix, + "width": u.pix, + "aperture": u.pix, + "annulus_inner": u.pix, + "annulus_outer": u.pix, + "aperture_sum": None, + "annulus_sum": None, + "sky_per_pix_avg": None, + "sky_per_pix_med": None, + "sky_per_pix_std": None, + "aperture_net_cnts": None, + "noise_cnts": None, + "noise_electrons": u.electron, + "exposure": u.second, + "date-obs": None, + "airmass": None, + "passband": None, + "file": None, } testphot_goodUnits = testphot_goodTime.copy() for this_col, this_unit in phot_descript.items(): testphot_goodUnits[this_col].unit = this_unit # Fix the units for the counts-related columns -counts_columns = ['aperture_sum', 'annulus_sum', 'aperture_net_cnts', 'noise_cnts'] -counts_per_pixel_sqr_columns = ['sky_per_pix_avg', 'sky_per_pix_med', - 'sky_per_pix_std'] +counts_columns = ["aperture_sum", "annulus_sum", "aperture_net_cnts", "noise_cnts"] +counts_per_pixel_sqr_columns = ["sky_per_pix_avg", "sky_per_pix_med", "sky_per_pix_std"] for this_col in counts_columns: testphot_goodUnits[this_col].unit = u.adu for this_col in counts_per_pixel_sqr_columns: testphot_goodUnits[this_col].unit = u.adu * u.pixel**-1 # Remove calculated columns from the test data to produce clean data -computed_columns = ['bjd', 'night'] +computed_columns = ["bjd", "night"] testphot_clean = testphot_goodUnits.copy() for this_col in computed_columns: del testphot_clean[this_col] @@ -237,11 +409,15 @@ def test_photometry_blank(): def test_photometry_data(): # Create photometry data instance - phot_data = PhotometryData(observatory=feder_obs, camera=feder_cg_16m, - passband_map=feder_passbands, input_data=testphot_clean) + phot_data = PhotometryData( + observatory=feder_obs, + camera=feder_cg_16m, + passband_map=feder_passbands, + input_data=testphot_clean, 
+ ) # Check some aspects of that data are sound - assert phot_data.camera.gain == 1.5 * u.electron / u.adu + assert phot_data.camera.gain == 1.5 * u.electron / u.adu assert phot_data.camera.read_noise == 10.0 * u.electron assert phot_data.camera.dark_current == 0.01 * u.electron / u.second assert phot_data.camera.pixel_scale == 0.563 * u.arcsec / u.pix @@ -251,7 +427,7 @@ def test_photometry_data(): assert phot_data.observatory.lon.unit == u.deg assert round(phot_data.observatory.height.value) == 311 assert phot_data.observatory.height.unit == u.m - assert phot_data['night'][0] == 59909 + assert phot_data["night"][0] == 59909 # Checking the BJD computation against Ohio State online calculator for # UTC 2022 11 27 06 27 29.620 @@ -262,17 +438,21 @@ def test_photometry_data(): # Dec 22 30 20.77733059 # which returned 2459910.775405664 (Uses custom IDL, astropy is SOFA checked). # Demand a difference of less than 1/20 of a second. - assert (phot_data['bjd'][0].value - 2459910.775405664)*86400 < 0.05 + assert (phot_data["bjd"][0].value - 2459910.775405664) * 86400 < 0.05 def test_photometry_slicing(): # Create photometry data instance - phot_data = PhotometryData(observatory=feder_obs, camera=feder_cg_16m, - passband_map=feder_passbands, input_data=testphot_clean) + phot_data = PhotometryData( + observatory=feder_obs, + camera=feder_cg_16m, + passband_map=feder_passbands, + input_data=testphot_clean, + ) # Test slicing works as expected, leaving attributes intact - two_cols = phot_data[['ra','dec']] - assert two_cols.camera.gain == 1.5 * u.electron / u.adu + two_cols = phot_data[["ra", "dec"]] + assert two_cols.camera.gain == 1.5 * u.electron / u.adu assert two_cols.camera.read_noise == 10.0 * u.electron assert two_cols.camera.dark_current == 0.01 * u.electron / u.second assert two_cols.camera.pixel_scale == 0.563 * u.arcsec / u.pix @@ -286,141 +466,194 @@ def test_photometry_slicing(): def test_photometry_recursive(): # Create photometry data instance - phot_data = PhotometryData(observatory=feder_obs, camera=feder_cg_16m, - passband_map=feder_passbands, input_data=testphot_clean) + phot_data = PhotometryData( + observatory=feder_obs, + camera=feder_cg_16m, + passband_map=feder_passbands, + input_data=testphot_clean, + ) # Attempt recursive call with pytest.raises(TypeError): - phot_data = PhotometryData(observatory=feder_obs, camera=feder_cg_16m, - passband_map=feder_passbands, input_data=phot_data) + phot_data = PhotometryData( + observatory=feder_obs, + camera=feder_cg_16m, + passband_map=feder_passbands, + input_data=phot_data, + ) def test_photometry_badtime(): with pytest.raises(ValueError): - phot_data = PhotometryData(observatory=feder_obs, camera=feder_cg_16m, - passband_map=feder_passbands, - input_data=testphot_data) + phot_data = PhotometryData( + observatory=feder_obs, + camera=feder_cg_16m, + passband_map=feder_passbands, + input_data=testphot_data, + ) def test_photometry_inconsistent_count_units(): with pytest.raises(ValueError): - phot_data = PhotometryData(observatory=feder_obs, camera=feder_cg_16m, - passband_map=feder_passbands, - input_data=testphot_goodTime) + phot_data = PhotometryData( + observatory=feder_obs, + camera=feder_cg_16m, + passband_map=feder_passbands, + input_data=testphot_goodTime, + ) + def test_photometry_inconsistent_computed_col_exists(): with pytest.raises(ValueError): - phot_data = PhotometryData(observatory=feder_obs, camera=feder_cg_16m, - passband_map=feder_passbands, - input_data=testphot_goodUnits) - - phot_data = 
PhotometryData(observatory=feder_obs, camera=feder_cg_16m, - passband_map=feder_passbands, - input_data=testphot_goodUnits, - retain_user_computed=True) + phot_data = PhotometryData( + observatory=feder_obs, + camera=feder_cg_16m, + passband_map=feder_passbands, + input_data=testphot_goodUnits, + ) + + phot_data = PhotometryData( + observatory=feder_obs, + camera=feder_cg_16m, + passband_map=feder_passbands, + input_data=testphot_goodUnits, + retain_user_computed=True, + ) # This keeps a bad user column for 'snr' which has bogus units, so check the units # cause a crash in the math. with pytest.raises(u.core.UnitConversionError): - assert np.abs(phot_data['snr'][0] - 46.795229859903905) < 1e-6 - assert np.abs(phot_data['snr'][0].value - 46.795229859903905) < 1e-6 + assert np.abs(phot_data["snr"][0] - 46.795229859903905) < 1e-6 + assert np.abs(phot_data["snr"][0].value - 46.795229859903905) < 1e-6 # Load test catalog -test_cat = ascii.read(get_pkg_data_filename('data/test_vsx_table.ecsv'), format='ecsv', - fast_reader=False) +test_cat = ascii.read( + get_pkg_data_filename("data/test_vsx_table.ecsv"), format="ecsv", fast_reader=False +) def test_catalog_missing_col(): # Fails with ValueError due to not having 'ra' column with pytest.raises(ValueError): - catalog_dat = CatalogData(input_data=test_cat, catalog_name="VSX", - catalog_source="Vizier") + catalog_dat = CatalogData( + input_data=test_cat, catalog_name="VSX", catalog_source="Vizier" + ) def test_catalog_colname_map(): # Map column names - vsx_colname_map = {'Name':'id', 'RAJ2000':'ra', 'DEJ2000':'dec', 'max':'mag', - 'n_max':'passband'} - catalog_dat = CatalogData(input_data=test_cat, catalog_name="VSX", - catalog_source="Vizier", - colname_map=vsx_colname_map) - - assert catalog_dat['id'][0] == 'ASASSN-V J000052.03+002216.6' - assert np.abs(catalog_dat['mag'][0].value - 12.660) - assert catalog_dat['passband'][0] == 'g' - assert catalog_dat.catalog_name == 'VSX' - assert catalog_dat.catalog_source == 'Vizier' + vsx_colname_map = { + "Name": "id", + "RAJ2000": "ra", + "DEJ2000": "dec", + "max": "mag", + "n_max": "passband", + } + catalog_dat = CatalogData( + input_data=test_cat, + catalog_name="VSX", + catalog_source="Vizier", + colname_map=vsx_colname_map, + ) + + assert catalog_dat["id"][0] == "ASASSN-V J000052.03+002216.6" + assert np.abs(catalog_dat["mag"][0].value - 12.660) + assert catalog_dat["passband"][0] == "g" + assert catalog_dat.catalog_name == "VSX" + assert catalog_dat.catalog_source == "Vizier" def test_catalog_bandpassmap(): # Map column and bandpass names - vsx_colname_map = {'Name':'id', 'RAJ2000':'ra', 'DEJ2000':'dec', 'max':'mag', - 'n_max':'passband'} - passband_map = {'g' :'SG', 'r':'SR'} - catalog_dat = CatalogData(input_data=test_cat, catalog_name="VSX", - catalog_source="Vizier", colname_map=vsx_colname_map, - passband_map=passband_map) - - assert catalog_dat['passband'][0] == 'SG' - assert catalog_dat.catalog_name == 'VSX' - assert catalog_dat.catalog_source == 'Vizier' + vsx_colname_map = { + "Name": "id", + "RAJ2000": "ra", + "DEJ2000": "dec", + "max": "mag", + "n_max": "passband", + } + passband_map = {"g": "SG", "r": "SR"} + catalog_dat = CatalogData( + input_data=test_cat, + catalog_name="VSX", + catalog_source="Vizier", + colname_map=vsx_colname_map, + passband_map=passband_map, + ) + + assert catalog_dat["passband"][0] == "SG" + assert catalog_dat.catalog_name == "VSX" + assert catalog_dat.catalog_source == "Vizier" def test_catalog_recursive(): # Construct good objects - vsx_colname_map = 
{'Name':'id', 'RAJ2000':'ra', 'DEJ2000':'dec', 'max':'mag', - 'n_max':'passband'} - catalog_dat = CatalogData(input_data=test_cat, catalog_name="VSX", - catalog_source="Vizier", colname_map=vsx_colname_map) + vsx_colname_map = { + "Name": "id", + "RAJ2000": "ra", + "DEJ2000": "dec", + "max": "mag", + "n_max": "passband", + } + catalog_dat = CatalogData( + input_data=test_cat, + catalog_name="VSX", + catalog_source="Vizier", + colname_map=vsx_colname_map, + ) # Attempt recursive call with pytest.raises(TypeError): - catalog_dat2 = CatalogData(input_data=catalog_dat, catalog_name="VSX", - catalog_source="Vizier", colname_map=vsx_colname_map) + catalog_dat2 = CatalogData( + input_data=catalog_dat, + catalog_name="VSX", + catalog_source="Vizier", + colname_map=vsx_colname_map, + ) # Load test apertures -test_sl_data = ascii.read(get_pkg_data_filename('data/test_sourcelist.ecsv'), - format='ecsv', - fast_reader=False) +test_sl_data = ascii.read( + get_pkg_data_filename("data/test_sourcelist.ecsv"), format="ecsv", fast_reader=False +) def test_sourcelist(): sl_test = SourceListData(input_data=test_sl_data, colname_map=None) - assert sl_test['star_id'][0] == 0 + assert sl_test["star_id"][0] == 0 def test_sourcelist_no_skypos(): test_sl_data2 = test_sl_data.copy() - del test_sl_data2['ra'] - del test_sl_data2['dec'] + del test_sl_data2["ra"] + del test_sl_data2["dec"] sl_test = SourceListData(input_data=test_sl_data2, colname_map=None) - assert sl_test['star_id'][0] == 0 - assert np.isnan(sl_test['ra'][4]) - assert np.isnan(sl_test['dec'][2]) + assert sl_test["star_id"][0] == 0 + assert np.isnan(sl_test["ra"][4]) + assert np.isnan(sl_test["dec"][2]) def test_sourcelist_no_imgpos(): test_sl_data3 = test_sl_data.copy() - del test_sl_data3['xcenter'] - del test_sl_data3['ycenter'] + del test_sl_data3["xcenter"] + del test_sl_data3["ycenter"] sl_test = SourceListData(input_data=test_sl_data3, colname_map=None) - assert sl_test['star_id'][0] == 0 - assert np.isnan(sl_test['xcenter'][4]) - assert np.isnan(sl_test['ycenter'][2]) + assert sl_test["star_id"][0] == 0 + assert np.isnan(sl_test["xcenter"][4]) + assert np.isnan(sl_test["ycenter"][2]) def test_sourcelist_missing_cols(): test_sl_data4 = test_sl_data.copy() - del test_sl_data4['ra'] - del test_sl_data4['dec'] - del test_sl_data4['xcenter'] - del test_sl_data4['ycenter'] + del test_sl_data4["ra"] + del test_sl_data4["dec"] + del test_sl_data4["xcenter"] + del test_sl_data4["ycenter"] with pytest.raises(ValueError): sl_test = SourceListData(input_data=test_sl_data4, colname_map=None) test_sl_data5 = test_sl_data.copy() - del test_sl_data5['star_id'] + del test_sl_data5["star_id"] with pytest.raises(ValueError): sl_test = SourceListData(input_data=test_sl_data5, colname_map=None) @@ -428,7 +661,7 @@ def test_sourcelist_missing_cols(): def test_sourcelist_recursive(): # Create good sourcelist data instance sl_test = SourceListData(input_data=test_sl_data, colname_map=None) - assert sl_test['star_id'][0] == 0 + assert sl_test["star_id"][0] == 0 # Attempt recursive call with pytest.raises(TypeError): @@ -436,7 +669,7 @@ def test_sourcelist_recursive(): def test_sourcelist_dropping_skycoords(): - # Create good sourcelist data instance + # Create good sourcelist data instance sl_test = SourceListData(input_data=test_sl_data, colname_map=None) # Drop sky coordinates @@ -446,7 +679,7 @@ def test_sourcelist_dropping_skycoords(): def test_sourcelist_dropping_imagecoords(): - # Create good sourcelist data instance + # Create good sourcelist data instance 
sl_test = SourceListData(input_data=test_sl_data, colname_map=None) # Drop sky coordinates @@ -463,10 +696,10 @@ def test_sourcelist_slicing(): slicing_test = sl_test[:][1:3] # compare this slice to the original data table passed in - assert slicing_test['star_id'][0] == 1 - assert slicing_test['star_id'][1] == 2 - assert slicing_test['xcenter'][0] == sl_test['xcenter'][1] - assert slicing_test['xcenter'][1] == sl_test['xcenter'][2] + assert slicing_test["star_id"][0] == 1 + assert slicing_test["star_id"][1] == 2 + assert slicing_test["xcenter"][0] == sl_test["xcenter"][1] + assert slicing_test["xcenter"][1] == sl_test["xcenter"][2] # Checking attributes survive slicing assert slicing_test.has_ra_dec == True assert slicing_test.has_x_y == True diff --git a/stellarphot/transit_fitting/__init__.py b/stellarphot/transit_fitting/__init__.py index a8ce586c..bb67a43f 100644 --- a/stellarphot/transit_fitting/__init__.py +++ b/stellarphot/transit_fitting/__init__.py @@ -1 +1 @@ -from .core import * \ No newline at end of file +from .core import * diff --git a/stellarphot/transit_fitting/core.py b/stellarphot/transit_fitting/core.py index fda1b6ac..caf48df0 100644 --- a/stellarphot/transit_fitting/core.py +++ b/stellarphot/transit_fitting/core.py @@ -3,8 +3,7 @@ import numpy as np from astropy.modeling.models import custom_model -from astropy.modeling.fitting import (LevMarLSQFitter, - _validate_model) +from astropy.modeling.fitting import LevMarLSQFitter, _validate_model # Functions below changed from private to public in astropy 5 try: @@ -23,12 +22,15 @@ try: import batman except ImportError: - ImportError('You must install the batman exoplanet package. Try:\n' - 'conda install batman-package\n' - 'or\n' - 'pip install batman-package') + ImportError( + "You must install the batman exoplanet package. Try:\n" + "conda install batman-package\n" + "or\n" + "pip install batman-package" + ) + +__all__ = ["VariableArgsFitter", "TransitModelFit"] -__all__ = ['VariableArgsFitter', 'TransitModelFit'] class VariableArgsFitter(LevMarLSQFitter): """ @@ -37,18 +39,29 @@ class VariableArgsFitter(LevMarLSQFitter): astropy.modeling.fitting.LevMarLSQFitter fitter. """ + def __init__(self): super().__init__() # This is a straight copy-paste from the LevMarLSQFitter __call__. # The only modification is to allow any number of arguments. - def __call__(self, model, *args, weights=None, - maxiter=100, acc=1e-7, - epsilon=1.4901161193847656e-08, estimate_jacobian=False): + def __call__( + self, + model, + *args, + weights=None, + maxiter=100, + acc=1e-7, + epsilon=1.4901161193847656e-08, + estimate_jacobian=False, + ): from scipy import optimize model_copy = _validate_model(model, self.supported_constraints) - farg = (model_copy, weights, ) + args + farg = ( + model_copy, + weights, + ) + args if model_copy.fit_deriv is None or estimate_jacobian: dfunc = None else: @@ -60,31 +73,45 @@ def __call__(self, model, *args, weights=None, # convert simply into an array within scipy.optimize.leastsq, when called. # So we handle model_bounds here first to the scipy.optimize.leastsq format. # can handle the list of initial values we pass in. 
- init_values = np.concatenate((np.asarray(init_values0[0]).flatten(), - np.asarray(init_values0[1]).flatten(), - np.asarray(init_values0[2]).flatten()), axis=None) + init_values = np.concatenate( + ( + np.asarray(init_values0[0]).flatten(), + np.asarray(init_values0[1]).flatten(), + np.asarray(init_values0[2]).flatten(), + ), + axis=None, + ) fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq( - self.objective_function, init_values, args=farg, Dfun=dfunc, - col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon, - xtol=acc, full_output=True) + self.objective_function, + init_values, + args=farg, + Dfun=dfunc, + col_deriv=model_copy.col_fit_deriv, + maxfev=maxiter, + epsfcn=epsilon, + xtol=acc, + full_output=True, + ) fitter_to_model_params(model_copy, fitparams) self.fit_info.update(dinfo) - self.fit_info['cov_x'] = cov_x - self.fit_info['message'] = mess - self.fit_info['ierr'] = ierr + self.fit_info["cov_x"] = cov_x + self.fit_info["message"] = mess + self.fit_info["ierr"] = ierr if ierr not in [1, 2, 3, 4]: - warnings.warn("The fit may be unsuccessful; check " - "fit_info['message'] for more information.", - AstropyUserWarning) + warnings.warn( + "The fit may be unsuccessful; check " + "fit_info['message'] for more information.", + AstropyUserWarning, + ) # now try to compute the true covariance matrix if (len(args[-1]) > len(init_values)) and cov_x is not None: - sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2) + sum_sqrs = np.sum(self.objective_function(fitparams, *farg) ** 2) dof = len(args[-1]) - len(init_values) - self.fit_info['param_cov'] = cov_x * sum_sqrs / dof + self.fit_info["param_cov"] = cov_x * sum_sqrs / dof else: - self.fit_info['param_cov'] = None + self.fit_info["param_cov"] = None return model_copy @@ -113,6 +140,7 @@ class TransitModelFit: width : array-like Width of the star in pixels at each time. Must be set before fitting. """ + def __init__(self, batman_params=None): self._batman_params = batman.TransitParams() self._set_default_batman_params() @@ -125,7 +153,7 @@ def __init__(self, batman_params=None): self._batman_mod_for_fit = None self.weights = None self._detrend_parameters = set() - self._all_detrend_params = ['airmass', 'width', 'spp'] + self._all_detrend_params = ["airmass", "width", "spp"] def _check_consistent_lengths(self, proposed_value): """ @@ -137,8 +165,7 @@ def _check_consistent_lengths(self, proposed_value): return True new_length = len(proposed_value) - for independent_var in [self._times, self._airmass, - self._spp, self._width]: + for independent_var in [self._times, self._airmass, self._spp, self._width]: if independent_var is None: continue elif len(independent_var) != new_length: @@ -159,8 +186,9 @@ def times(self): @times.setter def times(self, value): if not self._check_consistent_lengths(value): - raise ValueError('Length of times not consistent with ' - 'other independent variables.') + raise ValueError( + "Length of times not consistent with " "other independent variables." + ) self._times = value try: @@ -180,14 +208,15 @@ def airmass(self): @airmass.setter def airmass(self, value): if not self._check_consistent_lengths(value): - raise ValueError('Length of airmass not consistent with ' - 'other independent variables.') + raise ValueError( + "Length of airmass not consistent with " "other independent variables." 
+ ) self._airmass = value if value is not None: - self._detrend_parameters.add('airmass') + self._detrend_parameters.add("airmass") else: - self._detrend_parameters.discard('airmass') + self._detrend_parameters.discard("airmass") @property def width(self): @@ -201,14 +230,15 @@ def width(self): @width.setter def width(self, value): if not self._check_consistent_lengths(value): - raise ValueError('Length of width not consistent with ' - 'other independent variables.') + raise ValueError( + "Length of width not consistent with " "other independent variables." + ) self._width = value if value is not None: - self._detrend_parameters.add('width') + self._detrend_parameters.add("width") else: - self._detrend_parameters.discard('width') + self._detrend_parameters.discard("width") @property def spp(self): @@ -221,14 +251,15 @@ def spp(self): @spp.setter def spp(self, value): if not self._check_consistent_lengths(value): - raise ValueError('Length of spp not consistent with ' - 'other independent variables.') + raise ValueError( + "Length of spp not consistent with " "other independent variables." + ) self._spp = value if value is not None: - self._detrend_parameters.add('spp') + self._detrend_parameters.add("spp") else: - self._detrend_parameters.discard('spp') + self._detrend_parameters.discard("spp") @property def data(self): @@ -241,8 +272,9 @@ def data(self): @data.setter def data(self, value): if not self._check_consistent_lengths(value): - raise ValueError('Length of data not consistent with ' - 'independent variables.') + raise ValueError( + "Length of data not consistent with " "independent variables." + ) self._data = value @property @@ -273,13 +305,13 @@ def _set_default_batman_params(self): self._batman_params.a = 12.2 # orbital inclination (in degrees) - self._batman_params.inc = 90. + self._batman_params.inc = 90.0 # eccentricity - self._batman_params.ecc = 0. + self._batman_params.ecc = 0.0 # longitude of periastron (in degrees) - self._batman_params.w = 90. + self._batman_params.w = 90.0 # limb darkening model self._batman_params.limb_dark = "quadratic" @@ -289,11 +321,8 @@ def _set_default_batman_params(self): def _set_up_batman_model_for_fitting(self): if self._times is None: - raise ValueError('times need to be set before setting up ' - 'transit model.') - self._batman_mod_for_fit = \ - batman.TransitModel(self._batman_params, - self.times) + raise ValueError("times need to be set before setting up " "transit model.") + self._batman_mod_for_fit = batman.TransitModel(self._batman_params, self.times) def _setup_transit_model(self): """ @@ -302,12 +331,23 @@ def _setup_transit_model(self): model. 
""" - def transit_model_with_trends(time, airmass, width, sky_per_pix, - t0=0.0, period=1.0, rp=0.1, a=10.0, - inclination=90.0, eccentricity=0.0, - limb_u1=0.3, limb_u2=0.3, - airmass_trend=0.0, width_trend=0.0, - spp_trend=0.0): + def transit_model_with_trends( + time, + airmass, + width, + sky_per_pix, + t0=0.0, + period=1.0, + rp=0.1, + a=10.0, + inclination=90.0, + eccentricity=0.0, + limb_u1=0.3, + limb_u2=0.3, + airmass_trend=0.0, + width_trend=0.0, + spp_trend=0.0, + ): self._batman_params.t0 = t0 self._batman_params.per = period self._batman_params.rp = rp @@ -338,11 +378,17 @@ def transit_model_with_trends(time, airmass, width, sky_per_pix, # Planet radius cannot be too small or too big self._model.rp.bounds = (0.01, 0.5) - def setup_model(self, t0=0, depth=0, duration=0, - period=1, inclination=90, - airmass_trend=0.0, - width_trend=0.0, - spp_trend=0.0): + def setup_model( + self, + t0=0, + depth=0, + duration=0, + period=1, + inclination=90, + airmass_trend=0.0, + width_trend=0.0, + spp_trend=0.0, + ): """ Configure a transit model for fitting. The ``duration`` and ``depth`` are used to estimate underlying fit parameters; they are not @@ -416,10 +462,9 @@ def fit(self): """ # Maybe do some correctness check of the model before starting. if self.times is None: - raise ValueError('The times must be set before trying ' - 'to fit.') + raise ValueError("The times must be set before trying " "to fit.") if self._model is None: - raise ValueError('Run setup_model() before trying to fit.') + raise ValueError("Run setup_model() before trying to fit.") if self._batman_mod_for_fit is None: self._set_up_batman_model_for_fitting() @@ -429,8 +474,7 @@ def fit(self): original_values = {} if self.spp is None: - original_values['spp_trend'] = \ - self.model.spp_trend.fixed + original_values["spp_trend"] = self.model.spp_trend.fixed self.model.spp_trend = 0 self.model.spp_trend.fixed = True spp = np.zeros_like(self.times) @@ -438,8 +482,7 @@ def fit(self): spp = self.spp if self.airmass is None: - original_values['airmass_trend'] = \ - self.model.airmass_trend.fixed + original_values["airmass_trend"] = self.model.airmass_trend.fixed self.model.airmass_trend = 0 self.model.airmass_trend.fixed = True airmass = np.zeros_like(self.times) @@ -447,8 +490,7 @@ def fit(self): airmass = self.airmass if self.width is None: - original_values['width_trend'] = \ - self.model.width_trend.fixed + original_values["width_trend"] = self.model.width_trend.fixed self.model.width_trend = 0 self.model.width_trend.fixed = True width = np.zeros_like(self.times) @@ -457,13 +499,9 @@ def fit(self): # Do the fitting - new_model = self._fitter(self.model, - self.times, - airmass, - width, - spp, - self.data, - weights=self.weights) + new_model = self._fitter( + self.model, self.times, airmass, width, spp, self.data, weights=self.weights + ) # Update the model (might not be necessary but can't hurt) self._model = new_model @@ -475,16 +513,18 @@ def fit(self): param.fixed = v def _detrend(self, model, detrend_by): - if detrend_by == 'all': - detrend_by = [p for p in self._all_detrend_params - if p in self._detrend_parameters] + if detrend_by == "all": + detrend_by = [ + p for p in self._all_detrend_params if p in self._detrend_parameters + ] elif isinstance(detrend_by, str): detrend_by = [detrend_by] detrended = model.copy() for trend in detrend_by: - detrended = detrended - (getattr(self.model, f'{trend}_trend') - * getattr(self, trend)) + detrended = detrended - ( + getattr(self.model, f"{trend}_trend") * getattr(self, 
trend) + ) return detrended @@ -549,8 +589,9 @@ def model_light_curve(self, at_times=None, detrend_by=None): # then restore it. original_model = self._batman_mod_for_fit - self._batman_mod_for_fit = batman.TransitModel(self._batman_params, - at_times) + self._batman_mod_for_fit = batman.TransitModel( + self._batman_params, at_times + ) model = self.model(at_times, airmass, width, spp) self._batman_mod_for_fit = original_model else: @@ -568,10 +609,11 @@ def n_fit_parameters(self): @property def BIC(self): residual = self.data - self.model_light_curve() - chi_sq = ((residual * self.weights)**2).sum() + chi_sq = ((residual * self.weights) ** 2).sum() BIC = chi_sq + self.n_fit_parameters * np.log(len(self.data)) return BIC + # example use # self.model.eccentricity.fixed = True diff --git a/stellarphot/transit_fitting/gui.py b/stellarphot/transit_fitting/gui.py index 217e026d..51d0d81a 100644 --- a/stellarphot/transit_fitting/gui.py +++ b/stellarphot/transit_fitting/gui.py @@ -10,29 +10,36 @@ from stellarphot.transit_fitting.io import get_tic_info -__all__ = ['MyValid', 'make_checker','validate_exposure_time', - 'populate_TIC_boxes', 'populate_TOI_boxes', 'exotic_settings_widget', - 'set_values_from_json_file', 'get_values_from_widget', - 'generate_json_file_name'] - - -template_types = ['known', 'candidate'] +__all__ = [ + "MyValid", + "make_checker", + "validate_exposure_time", + "populate_TIC_boxes", + "populate_TOI_boxes", + "exotic_settings_widget", + "set_values_from_json_file", + "get_values_from_widget", + "generate_json_file_name", +] + + +template_types = ["known", "candidate"] template_json = {} to_fill = {} for template in template_types: - template_name = get_pkg_data_filename('data/tic-template-for-exotic-' - f'{template}.json') + template_name = get_pkg_data_filename( + "data/tic-template-for-exotic-" f"{template}.json" + ) with open(template_name) as f: template_json[template] = json.load(f) - template_name = get_pkg_data_filename(f'data/exotic-to-mod-{template}.json') + template_name = get_pkg_data_filename(f"data/exotic-to-mod-{template}.json") with open(Path(template_name)) as f: to_fill[template] = json.load(f) exotic_arguments = dict( - known=['--nasaexoarch', '--pre'], - candidate=['--override', '--pre'] + known=["--nasaexoarch", "--pre"], candidate=["--override", "--pre"] ) # Nested keys are flattened by joining them with this character @@ -58,21 +65,22 @@ class MyValid(ipw.Button): Current value of the indicator. Initizlized to False. """ + value = Bool(False, help="Bool value").tag(sync=True) def __init__(self, **kwd): super().__init__(**kwd) - self.layout.width = '40px' + self.layout.width = "40px" self._set_properties(None) - @observe('value') + @observe("value") def _set_properties(self, change): if self.value: - self.style.button_color = 'green' - self.icon = 'check' + self.style.button_color = "green" + self.icon = "check" else: - self.style.button_color = 'red' - self.icon = 'times' + self.style.button_color = "red" + self.icon = "times" def make_checker(indicator_widget, value_widget): @@ -99,16 +107,17 @@ def make_checker(indicator_widget, value_widget): Function with the correct signature for use as an observer on an ipywidget. 
""" + def check_name(change): # Valid TIC number is 9 digits - ticced = re.compile(r'TIC \d{9,10}$') - owner = change['owner'] - is_tic = ticced.match(change['new']) + ticced = re.compile(r"TIC \d{9,10}$") + owner = change["owner"] + is_tic = ticced.match(change["new"]) if is_tic: if indicator_widget is not None: indicator_widget.value = True owner.disabled = True - tic_info = get_tic_info(change['new'][-9:]) + tic_info = get_tic_info(change["new"][-9:]) if not tic_info: indicator_widget.value = False indicator_widget.tooltip = "Not a valid TIC number" @@ -119,13 +128,13 @@ def check_name(change): owner.disabled = False if indicator_widget is not None: indicator_widget.value = False - indicator_widget.tooltip = 'TIC numbers have 9 digits' + indicator_widget.tooltip = "TIC numbers have 9 digits" return check_name def validate_exposure_time(indicator_widget, value_widget): - """ Validates the exposure time input. + """Validates the exposure time input. Parameters ---------- @@ -143,14 +152,16 @@ def validate_exposure_time(indicator_widget, value_widget): indicator_widget to indicate if the value of the exposure time is valid. This can be used as an observer for an ipywidget """ + def check_exposure(change): # Valid Exposure time is greater than zero - if change['new'] > 0: + if change["new"] > 0: if indicator_widget is not None: indicator_widget.value = True else: if indicator_widget is not None: indicator_widget.value = False + return check_exposure @@ -186,15 +197,14 @@ def populate_TIC_boxes(tic_info, value_widget): "Host Star Name": "UCAC", "Star Metallicity ([FE/H])": "MH", "Star Metallicity (+) Uncertainty": "e_MH", - "Star Metallicity (-) Uncertainty": "e_MH" + "Star Metallicity (-) Uncertainty": "e_MH", } for k, v in exotic_tic.items(): exotic_key = join_char.join(["planetary_parameters", k]) if k == "Host Star Name": - value_widget['candidate'][exotic_key].value = \ - f'UCAC4 {tic_info[v][0]}' + value_widget["candidate"][exotic_key].value = f"UCAC4 {tic_info[v][0]}" elif not np.isnan(tic_info[v][0]): - value_widget['candidate'][exotic_key].value = tic_info[v][0] + value_widget["candidate"][exotic_key].value = tic_info[v][0] def populate_TOI_boxes(toi, exotic_widget): @@ -221,40 +231,41 @@ def populate_TOI_boxes(toi, exotic_widget): # Match EXOTIC json keys to columns in the result returned from # astroquery exotic_toi = { - "Planet Name": "tic_id", - "Target Star RA": "coord", - "Target Star Dec": "coordP", - "Orbital Period (days)": "period", - "Orbital Period Uncertainty": "period_error", - "Published Mid-Transit Time (BJD-UTC)": "epoch", - "Mid-Transit Time Uncertainty": "epoch_error", - # Could maybe get these from TOI information, but not straightforward - #"Ratio of Planet to Stellar Radius (Rp/Rs)": 0.0, - #"Ratio of Planet to Stellar Radius (Rp/Rs) Uncertainty": 0.0, - #"Ratio of Distance to Stellar Radius (a/Rs)": 0.0, - #"Ratio of Distance to Stellar Radius (a/Rs) Uncertainty": 0.0, + "Planet Name": "tic_id", + "Target Star RA": "coord", + "Target Star Dec": "coordP", + "Orbital Period (days)": "period", + "Orbital Period Uncertainty": "period_error", + "Published Mid-Transit Time (BJD-UTC)": "epoch", + "Mid-Transit Time Uncertainty": "epoch_error", + # Could maybe get these from TOI information, but not straightforward + # "Ratio of Planet to Stellar Radius (Rp/Rs)": 0.0, + # "Ratio of Planet to Stellar Radius (Rp/Rs) Uncertainty": 0.0, + # "Ratio of Distance to Stellar Radius (a/Rs)": 0.0, + # "Ratio of Distance to Stellar Radius (a/Rs) Uncertainty": 0.0, } for k, v in 
exotic_toi.items(): exotic_key = join_char.join(["planetary_parameters", k]) if k == "Planet Name": - exotic_widget['candidate'][exotic_key].value = \ - f'TIC {toi.tic_id}' + exotic_widget["candidate"][exotic_key].value = f"TIC {toi.tic_id}" elif k == "Target Star RA": - exotic_widget['candidate'][exotic_key].value = \ - toi.coord.ra.to_string(unit='hour', decimal=False, sep=":") + exotic_widget["candidate"][exotic_key].value = toi.coord.ra.to_string( + unit="hour", decimal=False, sep=":" + ) elif k == "Target Star Dec": - exotic_widget['candidate'][exotic_key].value = \ - toi.coord.dec.to_string(unit='degree', decimal=False, sep=":") + exotic_widget["candidate"][exotic_key].value = toi.coord.dec.to_string( + unit="degree", decimal=False, sep=":" + ) else: - exotic_widget['candidate'][exotic_key].value = getattr(toi, v).value + exotic_widget["candidate"][exotic_key].value = getattr(toi, v).value # This sets up the specific widgets whose values get validated. # That includes the widget that contains the TIC number for candidates. validators = dict(known={}, candidate={}) -validators['candidate']['Planet Name'] = make_checker +validators["candidate"]["Planet Name"] = make_checker for k in validators: - validators[k]['Exposure Time (s)'] = validate_exposure_time + validators[k]["Exposure Time (s)"] = validate_exposure_time def exotic_settings_widget(): @@ -280,8 +291,8 @@ def exotic_settings_widget(): widget_list = {} # Each widget has the same layout for its description and its value/input - layout_description = ipw.Layout(width='70%') - layout_input = ipw.Layout(width='30%') + layout_description = ipw.Layout(width="70%") + layout_input = ipw.Layout(width="30%") # Maintain a separate dict of just the value widgets value_widget = {} @@ -308,8 +319,9 @@ def exotic_settings_widget(): # Each horizontal box below is one "cell" and the input grid. # The HTML widget has the label, and input_widget is the text or # float widget. - hb = ipw.HBox([ipw.HTML(value=k2, layout=layout_description), - input_widget]) + hb = ipw.HBox( + [ipw.HTML(value=k2, layout=layout_description), input_widget] + ) # Widgets with validators need some observers added try: @@ -326,8 +338,9 @@ def exotic_settings_widget(): kids.append(indicator) # Add an observer to the widget to watch for changes - input_widget.observe(validator(indicator, value_widget), - names='value') + input_widget.observe( + validator(indicator, value_widget), names="value" + ) hb.children = kids @@ -337,24 +350,29 @@ def exotic_settings_widget(): hb2 = {} for template in template_types: - hb2[template] = ipw.HBox([ipw.VBox(widget_list[template][:16], - layout=ipw.Layout(padding='10px')), - ipw.VBox(widget_list[template][16:])]) + hb2[template] = ipw.HBox( + [ + ipw.VBox(widget_list[template][:16], layout=ipw.Layout(padding="10px")), + ipw.VBox(widget_list[template][16:]), + ] + ) select_planet_type = ipw.ToggleButtons( - description='Known or candidate exoplanet?', + description="Known or candidate exoplanet?", options=template_types, - style={'description_width': 'initial'} + style={"description_width": "initial"}, ) - lookup_link_text = dict(known='https://exoplanetarchive.ipac.caltech.edu/', - candidate='https://exofop.ipac.caltech.edu/tess/') + lookup_link_text = dict( + known="https://exoplanetarchive.ipac.caltech.edu/", + candidate="https://exofop.ipac.caltech.edu/tess/", + ) lookup_link_html = {} for k, v in lookup_link_text.items(): lookup_link_html[k] = ipw.HTML( - f'
<h3>For some information about this '
+            f"<h3>For some information about this "
             f'object: <a href="{v}" target="_blank">{v}</a></h3>
' ) @@ -363,17 +381,19 @@ def exotic_settings_widget(): whole_thing = ipw.VBox(children=[select_planet_type, input_container]) whole_thing.planet_type = select_planet_type whole_thing.value_widget = value_widget - pre_reduced_file = join_char.join(['optional_info', 'Pre-reduced File:']) + pre_reduced_file = join_char.join(["optional_info", "Pre-reduced File:"]) whole_thing.data_file_widget = { - 'candidate': value_widget['candidate'][pre_reduced_file], - 'known': value_widget['known'][pre_reduced_file] + "candidate": value_widget["candidate"][pre_reduced_file], + "known": value_widget["known"][pre_reduced_file], } def observe_select(change): - input_container.children = [lookup_link_html[select_planet_type.value], - hb2[select_planet_type.value]] + input_container.children = [ + lookup_link_html[select_planet_type.value], + hb2[select_planet_type.value], + ] - select_planet_type.observe(observe_select, names='value') + select_planet_type.observe(observe_select, names="value") observe_select(select_planet_type.value) return whole_thing @@ -460,11 +480,11 @@ def generate_json_file_name(exotic_widget, key=None): key = exotic_widget.planet_type.value get_values_from_widget(exotic_widget, key=key) - user_info = 'user_info' - planet = 'planetary_parameters' - filter_key = 'Filter Name (aavso.org/filters)' - date = template_json[key][user_info]['Observation date'] - planet_name = template_json[key][planet]['Planet Name'] + user_info = "user_info" + planet = "planetary_parameters" + filter_key = "Filter Name (aavso.org/filters)" + date = template_json[key][user_info]["Observation date"] + planet_name = template_json[key][planet]["Planet Name"] filter_name = template_json[key][user_info][filter_key] - name = f'{planet_name}-{date}-{filter_name}' - return name.replace(' ', '_') + '.json' + name = f"{planet_name}-{date}-{filter_name}" + return name.replace(" ", "_") + ".json" diff --git a/stellarphot/transit_fitting/io.py b/stellarphot/transit_fitting/io.py index 7f9264a2..80c89965 100644 --- a/stellarphot/transit_fitting/io.py +++ b/stellarphot/transit_fitting/io.py @@ -1,7 +1,7 @@ from astroquery.mast import Catalogs -__all__ = ['get_tic_info'] +__all__ = ["get_tic_info"] def get_tic_info(TIC_ID): @@ -23,4 +23,4 @@ def get_tic_info(TIC_ID): """ catalog_data = Catalogs.query_criteria(catalog="Tic", ID=TIC_ID) - return catalog_data \ No newline at end of file + return catalog_data diff --git a/stellarphot/transit_fitting/plotting.py b/stellarphot/transit_fitting/plotting.py index f786457c..334a792a 100644 --- a/stellarphot/transit_fitting/plotting.py +++ b/stellarphot/transit_fitting/plotting.py @@ -1,10 +1,16 @@ import matplotlib.pyplot as plt -__all__ = ['plot_predict_ingress_egress'] +__all__ = ["plot_predict_ingress_egress"] -def plot_predict_ingress_egress(ingress_time, egress_time, end_line=1, - ingress_x_pos=1, egress_x_pos=1, labels_y_pos=1): +def plot_predict_ingress_egress( + ingress_time, + egress_time, + end_line=1, + ingress_x_pos=1, + egress_x_pos=1, + labels_y_pos=1, +): """ Plot vertical lines at the ingress and egress times and label them. 
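A minimal usage sketch of the reformatted plot_predict_ingress_egress
signature above (not part of the patch; the light curve and the BJD
ingress/egress times below are invented for illustration):

    import numpy as np
    import matplotlib.pyplot as plt

    from stellarphot.transit_fitting.plotting import plot_predict_ingress_egress

    # Invented normalized light curve with a 1% dip between ingress and egress
    times = np.linspace(2459910.70, 2459910.90, 200)
    flux = 1.0 - 0.01 * ((times > 2459910.75) & (times < 2459910.83))

    plt.plot(times, flux, ".")
    plot_predict_ingress_egress(
        ingress_time=2459910.75,
        egress_time=2459910.83,
        end_line=0.01,        # extend the vertical lines 0.01 below the ylim
        ingress_x_pos=0.02,   # shift the "Predicted Ingress" label left
        egress_x_pos=0.02,    # shift the "Predicted Egress" label left
        labels_y_pos=0.005,   # drop the labels slightly below the lines
    )
    plt.show()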
@@ -37,13 +43,18 @@ def plot_predict_ingress_egress(ingress_time, egress_time, end_line=1, ymin, ymax = plt.ylim() # create a vertical line at the ingress time and label it - plt.vlines(ingress_time, ymin - end_line, ymax, - linestyle=(0, (5, 10)), color='red') - plt.annotate("Predicted Ingress", (ingress_time - ingress_x_pos, - ymin - labels_y_pos), color='red') + plt.vlines(ingress_time, ymin - end_line, ymax, linestyle=(0, (5, 10)), color="red") + plt.annotate( + "Predicted Ingress", + (ingress_time - ingress_x_pos, ymin - labels_y_pos), + color="red", + ) # create a vertical line at the egress time and label it - plt.vlines(egress_time, ymin - end_line, ymax, - linestyle=(0, (5, 10)), color='red') - plt.annotate("Predicted Egress", (egress_time - egress_x_pos, - ymin - labels_y_pos), fontsize=10, color='red') + plt.vlines(egress_time, ymin - end_line, ymax, linestyle=(0, (5, 10)), color="red") + plt.annotate( + "Predicted Egress", + (egress_time - egress_x_pos, ymin - labels_y_pos), + fontsize=10, + color="red", + ) diff --git a/stellarphot/transit_fitting/tests/test_transit_fitting_gui.py b/stellarphot/transit_fitting/tests/test_transit_fitting_gui.py index a6a9bf45..118e2751 100644 --- a/stellarphot/transit_fitting/tests/test_transit_fitting_gui.py +++ b/stellarphot/transit_fitting/tests/test_transit_fitting_gui.py @@ -1,7 +1,10 @@ -from stellarphot.transit_fitting.gui import exotic_settings_widget, get_values_from_widget +from stellarphot.transit_fitting.gui import ( + exotic_settings_widget, + get_values_from_widget, +) def test_json_has_stuff(): widget = exotic_settings_widget() - stuff = get_values_from_widget(widget, key='known') - assert 'user_info' in stuff.keys() + stuff = get_values_from_widget(widget, key="known") + assert "user_info" in stuff.keys() diff --git a/stellarphot/transit_fitting/tests/test_transit_model_fit.py b/stellarphot/transit_fitting/tests/test_transit_model_fit.py index 9daf28a3..84c2caab 100644 --- a/stellarphot/transit_fitting/tests/test_transit_model_fit.py +++ b/stellarphot/transit_fitting/tests/test_transit_model_fit.py @@ -5,18 +5,22 @@ from stellarphot.transit_fitting import TransitModelFit -batman = pytest.importorskip('batman') +batman = pytest.importorskip("batman") DEFAULT_TESTING_PARAMS = dict( - t0=2, period=5, duration=0.1, - depth=10, inclination=90, + t0=2, + period=5, + duration=0.1, + depth=10, + inclination=90, airmass_trend=0.005, width_trend=0.01, - spp_trend=0.002 + spp_trend=0.002, ) -pytestmark = pytest.mark.skipif(sys.platform.startswith('win'), - reason="Intermittent hard crash on windows") +pytestmark = pytest.mark.skipif( + sys.platform.startswith("win"), reason="Intermittent hard crash on windows" +) def _create_data_from_model_with_trends(transit_model, noise_dev=0.01): @@ -24,9 +28,7 @@ def _create_data_from_model_with_trends(transit_model, noise_dev=0.01): # data with a touch of noise. 
# Make our own batman model and data from it - model = batman.TransitModel(transit_model._batman_params, - transit_model.times - ) + model = batman.TransitModel(transit_model._batman_params, transit_model.times) data = model.light_curve(transit_model._batman_params) if transit_model.airmass is not None: @@ -61,26 +63,26 @@ def test_transit_fit_value_length_check(): # Each of these should raise an error, has different length with pytest.raises(ValueError) as e: tmod.spp = list(range(3)) - assert 'Length of spp not consistent' in str(e.value) + assert "Length of spp not consistent" in str(e.value) with pytest.raises(ValueError) as e: tmod.width = list(range(3)) - assert 'Length of width not consistent' in str(e.value) + assert "Length of width not consistent" in str(e.value) with pytest.raises(ValueError) as e: tmod.times = list(range(3)) - assert 'Length of times not consistent' in str(e.value) + assert "Length of times not consistent" in str(e.value) with pytest.raises(ValueError) as e: tmod.airmass = list(range(3)) - assert 'Length of airmass not consistent' in str(e.value) + assert "Length of airmass not consistent" in str(e.value) with pytest.raises(ValueError) as e: tmod.data = list(range(3)) - assert 'Length of data not consistent' in str(e.value) + assert "Length of data not consistent" in str(e.value) -@pytest.mark.parametrize('detrend_by', ['airmass', 'spp', 'width']) +@pytest.mark.parametrize("detrend_by", ["airmass", "spp", "width"]) def test_setting_unsetting_parameters_updates_detrend(detrend_by): tmod = TransitModelFit() @@ -100,7 +102,7 @@ def test_transit_fit_setting_independent_vars(): values = np.array(list(range(5))) - for attr in ['times', 'airmass', 'width', 'spp']: + for attr in ["times", "airmass", "width", "spp"]: setattr(tmod, attr, values) assert (getattr(tmod, attr) == values).all() @@ -114,11 +116,11 @@ def test_transit_fit_setting_independent_vars_to_none(): values = np.array(list(range(5))) # First set values to something that is not None - for attr in ['times', 'airmass', 'width', 'spp', 'data']: + for attr in ["times", "airmass", "width", "spp", "data"]: setattr(tmod, attr, values) # Now set to None and check value - for attr in ['times', 'airmass', 'width', 'spp', 'data']: + for attr in ["times", "airmass", "width", "spp", "data"]: setattr(tmod, attr, None) assert getattr(tmod, attr) is None @@ -130,29 +132,28 @@ def test_transit_create_model(): tmod.setup_model(**DEFAULT_TESTING_PARAMS) - duration = DEFAULT_TESTING_PARAMS['duration'] - period = DEFAULT_TESTING_PARAMS['period'] + duration = DEFAULT_TESTING_PARAMS["duration"] + period = DEFAULT_TESTING_PARAMS["period"] expected_a = 1 / np.sin(duration * np.pi / period) - assert tmod.model.t0 == DEFAULT_TESTING_PARAMS['t0'] - assert tmod.model.rp == np.sqrt(DEFAULT_TESTING_PARAMS['depth'] / 1000) + assert tmod.model.t0 == DEFAULT_TESTING_PARAMS["t0"] + assert tmod.model.rp == np.sqrt(DEFAULT_TESTING_PARAMS["depth"] / 1000) assert np.abs(tmod.model.a - expected_a) < 1e-7 - assert tmod.model.period == DEFAULT_TESTING_PARAMS['period'] - assert tmod.model.airmass_trend == DEFAULT_TESTING_PARAMS['airmass_trend'] - assert tmod.model.width_trend == DEFAULT_TESTING_PARAMS['width_trend'] - assert tmod.model.spp_trend == DEFAULT_TESTING_PARAMS['spp_trend'] + assert tmod.model.period == DEFAULT_TESTING_PARAMS["period"] + assert tmod.model.airmass_trend == DEFAULT_TESTING_PARAMS["airmass_trend"] + assert tmod.model.width_trend == DEFAULT_TESTING_PARAMS["width_trend"] + assert tmod.model.spp_trend == 
DEFAULT_TESTING_PARAMS["spp_trend"] -def _make_transit_model_with_data(noise_dev=1e-1, - with_airmass=False, - with_width=False, - with_spp=False): +def _make_transit_model_with_data( + noise_dev=1e-1, with_airmass=False, with_width=False, with_spp=False +): tmod = TransitModelFit() tmod.setup_model(**DEFAULT_TESTING_PARAMS) - t0 = DEFAULT_TESTING_PARAMS['t0'] - duration = DEFAULT_TESTING_PARAMS['duration'] + t0 = DEFAULT_TESTING_PARAMS["t0"] + duration = DEFAULT_TESTING_PARAMS["duration"] # Last factor ensures some out of transit data start = t0 - duration / 2 * 1.3 @@ -163,7 +164,7 @@ def _make_transit_model_with_data(noise_dev=1e-1, # Make an airmass that starts at 1.4 and decreases to 1.0 # over the duration and is parabolic centered on the final time. - airmass = 0.4 / (times[0] - times[-1])**2 * (times - times[-1]) ** 2 + 1.0 + airmass = 0.4 / (times[0] - times[-1]) ** 2 * (times - times[-1]) ** 2 + 1.0 tmod.airmass = airmass if with_airmass else None @@ -187,13 +188,12 @@ def _make_transit_model_with_data(noise_dev=1e-1, def test_transit_fit_all_parameters(): - tmod = _make_transit_model_with_data(noise_dev=1e-5, - with_airmass=True, - with_width=True, - with_spp=True) + tmod = _make_transit_model_with_data( + noise_dev=1e-5, with_airmass=True, with_width=True, with_spp=True + ) - duration = DEFAULT_TESTING_PARAMS['duration'] - period = DEFAULT_TESTING_PARAMS['period'] + duration = DEFAULT_TESTING_PARAMS["duration"] + period = DEFAULT_TESTING_PARAMS["period"] expected_a = 1 / np.sin(duration * np.pi / period) @@ -202,77 +202,78 @@ def test_transit_fit_all_parameters(): tmod.fit() # Check the non-exoplanet trends - for fit_trend in ['airmass_trend', 'width_trend', 'spp_trend']: - assert (np.abs(getattr(tmod.model, fit_trend) - - DEFAULT_TESTING_PARAMS[fit_trend]) < 1e-5) + for fit_trend in ["airmass_trend", "width_trend", "spp_trend"]: + assert ( + np.abs(getattr(tmod.model, fit_trend) - DEFAULT_TESTING_PARAMS[fit_trend]) + < 1e-5 + ) # Check a few of the exoplanet parameters - assert np.abs(tmod.model.t0 - DEFAULT_TESTING_PARAMS['t0']) < 1e-3 + assert np.abs(tmod.model.t0 - DEFAULT_TESTING_PARAMS["t0"]) < 1e-3 assert np.abs(tmod.model.a - expected_a) < 1e-2 - expected_rp = np.sqrt(DEFAULT_TESTING_PARAMS['depth'] / 1000) + expected_rp = np.sqrt(DEFAULT_TESTING_PARAMS["depth"] / 1000) assert np.abs(tmod.model.rp - expected_rp) < 1e-4 - assert 'airmass' in tmod._detrend_parameters - assert 'width' in tmod._detrend_parameters - assert 'spp' in tmod._detrend_parameters + assert "airmass" in tmod._detrend_parameters + assert "width" in tmod._detrend_parameters + assert "spp" in tmod._detrend_parameters def test_transit_model_detrend(): - tmod = _make_transit_model_with_data(noise_dev=0, - with_airmass=True, - with_width=True, - with_spp=True) - - no_trends = _make_transit_model_with_data(noise_dev=0, - with_airmass=False, - with_width=False, - with_spp=False) + tmod = _make_transit_model_with_data( + noise_dev=0, with_airmass=True, with_width=True, with_spp=True + ) + + no_trends = _make_transit_model_with_data( + noise_dev=0, with_airmass=False, with_width=False, with_spp=False + ) full_model = tmod.model_light_curve() - np.testing.assert_allclose(full_model - no_trends.model_light_curve(), - tmod.model.airmass_trend * tmod.airmass + - tmod.model.width_trend * tmod.width + - tmod.model.spp_trend * tmod.spp) + np.testing.assert_allclose( + full_model - no_trends.model_light_curve(), + tmod.model.airmass_trend * tmod.airmass + + tmod.model.width_trend * tmod.width + + tmod.model.spp_trend 
* tmod.spp, + ) for trend in tmod._all_detrend_params: detrended_model = tmod.model_light_curve(detrend_by=trend) - trend_param = getattr(tmod.model, f'{trend}_trend') + trend_param = getattr(tmod.model, f"{trend}_trend") trend_data = getattr(tmod, trend) assert trend_param != 0 assert trend_data is not None - np.testing.assert_allclose(full_model - detrended_model, - trend_param * trend_data) + np.testing.assert_allclose( + full_model - detrended_model, trend_param * trend_data + ) - detrended_model = tmod.model_light_curve(detrend_by='all') + detrended_model = tmod.model_light_curve(detrend_by="all") np.testing.assert_allclose(detrended_model, no_trends.model_light_curve()) def test_transit_data_detrend(): - tmod = _make_transit_model_with_data(noise_dev=0, - with_airmass=True, - with_width=True, - with_spp=True) + tmod = _make_transit_model_with_data( + noise_dev=0, with_airmass=True, with_width=True, with_spp=True + ) - no_trends = _make_transit_model_with_data(noise_dev=0, - with_airmass=False, - with_width=False, - with_spp=False) + no_trends = _make_transit_model_with_data( + noise_dev=0, with_airmass=False, with_width=False, with_spp=False + ) - np.testing.assert_allclose(tmod.data_light_curve(detrend_by='all'), - no_trends.data) - np.testing.assert_allclose(tmod.data - no_trends.data, - tmod.model.airmass_trend * tmod.airmass + - tmod.model.width_trend * tmod.width + - tmod.model.spp_trend * tmod.spp) + np.testing.assert_allclose(tmod.data_light_curve(detrend_by="all"), no_trends.data) + np.testing.assert_allclose( + tmod.data - no_trends.data, + tmod.model.airmass_trend * tmod.airmass + + tmod.model.width_trend * tmod.width + + tmod.model.spp_trend * tmod.spp, + ) def test_transit_fit_parameters_unfreeze_as_expected(): - tmod = _make_transit_model_with_data(noise_dev=1e-5, - with_airmass=False, - with_width=False, - with_spp=False) + tmod = _make_transit_model_with_data( + noise_dev=1e-5, with_airmass=False, with_width=False, with_spp=False + ) # None of these are fixed by default assert not tmod.model.airmass_trend.fixed diff --git a/stellarphot/utils/__init__.py b/stellarphot/utils/__init__.py index 2fb0a214..42d65e62 100644 --- a/stellarphot/utils/__init__.py +++ b/stellarphot/utils/__init__.py @@ -3,4 +3,3 @@ from .comparison_utils import * from .catalog_search import * from .magnitude_transforms import * - diff --git a/stellarphot/utils/catalog_search.py b/stellarphot/utils/catalog_search.py index 75dbfe3b..916b19f8 100644 --- a/stellarphot/utils/catalog_search.py +++ b/stellarphot/utils/catalog_search.py @@ -8,12 +8,12 @@ import astropy.units as units __all__ = [ - 'in_frame', - 'catalog_search', - 'catalog_clean', - 'find_apass_stars', - 'find_known_variables', - 'filter_catalog' + "in_frame", + "catalog_search", + "catalog_clean", + "find_apass_stars", + "find_known_variables", + "filter_catalog", ] @@ -49,12 +49,16 @@ def in_frame(frame_wcs, coordinates, padding=0): return in_x & in_y -def catalog_search(frame_wcs_or_center, shape, desired_catalog, - ra_column='RAJ2000', - dec_column='DEJ2000', - radius=0.5, - clip_by_frame=True, - padding=100): +def catalog_search( + frame_wcs_or_center, + shape, + desired_catalog, + ra_column="RAJ2000", + dec_column="DEJ2000", + radius=0.5, + clip_by_frame=True, + padding=100, +): """ Return the items from catalog that are within the search radius and (optionally) within the field of view of a frame. @@ -103,12 +107,12 @@ def catalog_search(frame_wcs_or_center, shape, desired_catalog, # Center was passed in, just use it. 
center = frame_wcs_or_center if clip_by_frame: - raise ValueError('To clip entries by frame you must use ' - 'a WCS as the first argument.') + raise ValueError( + "To clip entries by frame you must use " "a WCS as the first argument." + ) else: # Find the center of the frame - center = frame_wcs_or_center.pixel_to_world(shape[1] / 2, - shape[0] / 2) + center = frame_wcs_or_center.pixel_to_world(shape[1] / 2, shape[0] / 2) # Get catalog via cone search Vizier.ROW_LIMIT = -1 # Set row_limit to have no limit @@ -124,8 +128,7 @@ def catalog_search(frame_wcs_or_center, shape, desired_catalog, return cat[in_fov] -def catalog_clean(catalog, remove_rows_with_mask=True, - **other_restrictions): +def catalog_clean(catalog, remove_rows_with_mask=True, **other_restrictions): """ Return a catalog with only the rows that meet the criteria specified. @@ -154,15 +157,15 @@ def catalog_clean(catalog, remove_rows_with_mask=True, """ comparisons = { - '<': np.less, - '=': np.equal, - '>': np.greater, - '<=': np.less_equal, - '>=': np.greater_equal, - '!=': np.not_equal + "<": np.less, + "=": np.equal, + ">": np.greater, + "<=": np.less_equal, + ">=": np.greater_equal, + "!=": np.not_equal, } - recognized_comparison_ops = '|'.join(comparisons.keys()) + recognized_comparison_ops = "|".join(comparisons.keys()) keepers = np.ones([len(catalog)], dtype=bool) if remove_rows_with_mask and catalog.masked: @@ -170,24 +173,25 @@ def catalog_clean(catalog, remove_rows_with_mask=True, keepers &= ~catalog[c].mask for column, restriction in other_restrictions.items(): - criteria_re = re.compile(r'({})([-+a-zA-Z0-9]+)'.format(recognized_comparison_ops)) + criteria_re = re.compile( + r"({})([-+a-zA-Z0-9]+)".format(recognized_comparison_ops) + ) results = criteria_re.match(restriction) if not results: - raise ValueError("Criteria {}{} not " - "understood.".format(column, restriction)) + raise ValueError( + "Criteria {}{} not " "understood.".format(column, restriction) + ) comparison_func = comparisons[results.group(1)] comparison_value = results.group(2) - new_keepers = comparison_func(catalog[column], - float(comparison_value)) + new_keepers = comparison_func(catalog[column], float(comparison_value)) keepers = keepers & new_keepers return catalog[keepers] -def find_apass_stars(image_or_center, - radius=1, - max_mag_error=0.05, - max_color_error=0.1): +def find_apass_stars( + image_or_center, radius=1, max_mag_error=0.05, max_color_error=0.1 +): """ Get APASS data from Vizer. @@ -224,14 +228,21 @@ def find_apass_stars(image_or_center, cen_wcs = image_or_center.wcs shape = image_or_center.shape # use the catalog_search function to find the APASS stars in the frame of the image read above - all_apass = catalog_search(cen_wcs, shape, 'II/336/apass9', - ra_column='RAJ2000', dec_column='DEJ2000', radius=radius, - clip_by_frame=False) + all_apass = catalog_search( + cen_wcs, + shape, + "II/336/apass9", + ra_column="RAJ2000", + dec_column="DEJ2000", + radius=radius, + clip_by_frame=False, + ) # Creates a boolean array of the APASS stars that have well defined # magnitudes and color. - apass_lower_error = (all_apass['e_r_mag'] < max_mag_error) & ( - all_apass['e_B-V'] < max_color_error) + apass_lower_error = (all_apass["e_r_mag"] < max_mag_error) & ( + all_apass["e_B-V"] < max_color_error + ) # create new table of APASS stars that meet error restrictions apass_lower_error = all_apass[apass_lower_error] @@ -256,11 +267,17 @@ def find_known_variables(image): Table of known variable stars in the field of view. 
""" try: - vsx = catalog_search(image.wcs, image.shape, 'B/vsx/vsx', - ra_column='RAJ2000', dec_column='DEJ2000') + vsx = catalog_search( + image.wcs, + image.shape, + "B/vsx/vsx", + ra_column="RAJ2000", + dec_column="DEJ2000", + ) except IndexError: - raise RuntimeError('No variables found in this field of view ' - f'centered on {image.wcs}') + raise RuntimeError( + "No variables found in this field of view " f"centered on {image.wcs}" + ) return vsx @@ -286,10 +303,10 @@ def filter_catalog(catalog, **kwd): One value for each row in the catalog; values are ``True`` if the row meets the criteria, ``False`` otherwise. """ - good_ones = np.ones(len(catalog), dtype='bool') + good_ones = np.ones(len(catalog), dtype="bool") for key, value in kwd.items(): print(key, value, catalog[key] <= value) - good_ones &= (catalog[key] <= value) + good_ones &= catalog[key] <= value return good_ones diff --git a/stellarphot/utils/comparison_utils.py b/stellarphot/utils/comparison_utils.py index df66bd06..0d2271ee 100644 --- a/stellarphot/utils/comparison_utils.py +++ b/stellarphot/utils/comparison_utils.py @@ -11,8 +11,7 @@ from stellarphot.photometry import * -__all__ = ['read_file', 'set_up', 'crossmatch_APASS2VSX', 'mag_scale', - 'in_field'] +__all__ = ["read_file", "set_up", "crossmatch_APASS2VSX", "mag_scale", "in_field"] DESC_STYLE = {"description_width": "initial"} @@ -34,15 +33,15 @@ def read_file(radec_file): Table with target information, including a `astropy.coordinates.SkyCoord` column. """ - df = pd.read_csv(radec_file, names=['RA', 'Dec', 'a', 'b', 'Mag']) + df = pd.read_csv(radec_file, names=["RA", "Dec", "a", "b", "Mag"]) target_table = Table.from_pandas(df) - ra = target_table['RA'] - dec = target_table['Dec'] - target_table['coords'] = SkyCoord(ra=ra, dec=dec, unit=(u.hour, u.degree)) + ra = target_table["RA"] + dec = target_table["Dec"] + target_table["coords"] = SkyCoord(ra=ra, dec=dec, unit=(u.hour, u.degree)) return target_table -def set_up(sample_image_for_finding_stars, directory_with_images='.'): +def set_up(sample_image_for_finding_stars, directory_with_images="."): """ Read in sample image and find known variables in the field of view. @@ -66,7 +65,7 @@ def set_up(sample_image_for_finding_stars, directory_with_images='.'): Table with known variables in the field of view. """ - if sample_image_for_finding_stars.startswith('http'): + if sample_image_for_finding_stars.startswith("http"): path = sample_image_for_finding_stars else: path = Path(directory_with_images) / sample_image_for_finding_stars @@ -77,9 +76,9 @@ def set_up(sample_image_for_finding_stars, directory_with_images='.'): except RuntimeError: vsx = [] else: - ra = vsx['RAJ2000'] - dec = vsx['DEJ2000'] - vsx['coords'] = SkyCoord(ra=ra, dec=dec, unit=(u.hour, u.degree)) + ra = vsx["RAJ2000"] + dec = vsx["DEJ2000"] + vsx["coords"] = SkyCoord(ra=ra, dec=dec, unit=(u.hour, u.degree)) return ccd, vsx @@ -114,28 +113,25 @@ def crossmatch_APASS2VSX(CCD, RD, vsx): Angular separation between APASS stars and input targets. 
""" apass, apass_in_bright = find_apass_stars(CCD) - ra = apass['RAJ2000'] - dec = apass['DEJ2000'] - apass['coords'] = SkyCoord(ra=ra, dec=dec, unit=(u.hour, u.degree)) - apass_coord = apass['coords'] + ra = apass["RAJ2000"] + dec = apass["DEJ2000"] + apass["coords"] = SkyCoord(ra=ra, dec=dec, unit=(u.hour, u.degree)) + apass_coord = apass["coords"] if vsx: - v_index, v_angle, v_dist = \ - apass_coord.match_to_catalog_sky(vsx['coords']) + v_index, v_angle, v_dist = apass_coord.match_to_catalog_sky(vsx["coords"]) else: v_angle = [] if RD: - RD_index, RD_angle, RD_dist = \ - apass_coord.match_to_catalog_sky(RD['coords']) + RD_index, RD_angle, RD_dist = apass_coord.match_to_catalog_sky(RD["coords"]) else: RD_angle = [] return apass, v_angle, RD_angle -def mag_scale(cmag, apass, v_angle, RD_angle, - brighter_dmag=0.44, dimmer_dmag=0.75): +def mag_scale(cmag, apass, v_angle, RD_angle, brighter_dmag=0.44, dimmer_dmag=0.75): """ Select comparison stars that are 1) not close the VSX stars or to other target stars and 2) fall within a particular magnitude range. @@ -170,21 +166,21 @@ def mag_scale(cmag, apass, v_angle, RD_angle, good_stars : `astropy.table.Table` Table with the comparison stars. """ - high_mag = apass['r_mag'] < cmag + dimmer_dmag - low_mag = apass['r_mag'] > cmag - brighter_dmag - if len(v_angle)>0: + high_mag = apass["r_mag"] < cmag + dimmer_dmag + low_mag = apass["r_mag"] > cmag - brighter_dmag + if len(v_angle) > 0: good_v_angle = v_angle > 1.0 * u.arcsec else: good_v_angle = True - if len(RD_angle)>0: + if len(RD_angle) > 0: good_RD_angle = RD_angle > 1.0 * u.arcsec else: good_RD_angle = True good_stars = high_mag & low_mag & good_RD_angle & good_v_angle good_apass = apass[good_stars] - apass_good_coord = good_apass['coords'] + apass_good_coord = good_apass["coords"] return apass_good_coord, good_stars @@ -213,8 +209,7 @@ def in_field(apass_good_coord, ccd, apass, good_stars): ent : `astropy.table.Table` Table with APASS stars in the field of view. """ - apassx, apassy = ccd.wcs.all_world2pix( - apass_good_coord.ra, apass_good_coord.dec, 0) + apassx, apassy = ccd.wcs.all_world2pix(apass_good_coord.ra, apass_good_coord.dec, 0) ccdx, ccdy = ccd.shape xin = (apassx < ccdx) & (0 < apassx) @@ -223,4 +218,4 @@ def in_field(apass_good_coord, ccd, apass, good_stars): apass_good_coord[xy_in] nt = apass[good_stars] ent = nt[xy_in] - return ent \ No newline at end of file + return ent diff --git a/stellarphot/utils/magnitude_transforms.py b/stellarphot/utils/magnitude_transforms.py index be08ea5c..6047a3a1 100644 --- a/stellarphot/utils/magnitude_transforms.py +++ b/stellarphot/utils/magnitude_transforms.py @@ -13,11 +13,14 @@ __all__ = [ - 'f', 'get_cat', 'opts_to_str', 'calc_residual', - 'filter_transform', - 'calculate_transform_coefficients', - 'transform_magnitudes', - 'transform_to_catalog', + "f", + "get_cat", + "opts_to_str", + "calc_residual", + "filter_transform", + "calculate_transform_coefficients", + "transform_magnitudes", + "transform_to_catalog", ] @@ -42,7 +45,7 @@ def f(X, a, b, c, d, z): """ mag_inst, color = X - return a * mag_inst + b * mag_inst ** 2 + c * color + d * color**2 + z + return a * mag_inst + b * mag_inst**2 + c * color + d * color**2 + z def get_cat(image): @@ -64,15 +67,15 @@ def get_cat(image): Table containing the APASS catalog entries within 1 degree of first object in Astropy table. 
""" - our_coords = SkyCoord(image['RA'], image['Dec'], unit='degree') + our_coords = SkyCoord(image["RA"], image["Dec"], unit="degree") # Get catalog via cone search Vizier.ROW_LIMIT = -1 # Set row_limit to have no limit - desired_catalog = 'II/336/apass9' + desired_catalog = "II/336/apass9" a_star = our_coords[0] rad = 1 * u.degree cat = Vizier.query_region(a_star, radius=rad, catalog=desired_catalog) cat = cat[0] - cat_coords = SkyCoord(cat['RAJ2000'], cat['DEJ2000']) + cat_coords = SkyCoord(cat["RAJ2000"], cat["DEJ2000"]) return cat, cat_coords @@ -92,11 +95,11 @@ def opts_to_str(opts): str String representation of the options. """ - opt_names = ['a', 'b', 'c', 'd', 'z'] + opt_names = ["a", "b", "c", "d", "z"] names = [] for name, value in zip(opt_names, opts): - names.append(f'{name}={value:.4f}') - return ', '.join(names) + names.append(f"{name}={value:.4f}") + return ", ".join(names) def calc_residual(new_cal, catalog): @@ -124,9 +127,7 @@ def calc_residual(new_cal, catalog): return resid.std() -def filter_transform(mag_data, output_filter, - g=None, r=None, i=None, - transform=None): +def filter_transform(mag_data, output_filter, g=None, r=None, i=None, transform=None): """ Transform SDSS magnitudes to BVRI using either the transforms from Jester et al or Ivezic et al. @@ -167,47 +168,43 @@ def filter_transform(mag_data, output_filter, http://aspbooks.org/custom/publications/paper/364-0165.html """ - supported_transforms = ['jester', 'ivezic'] + supported_transforms = ["jester", "ivezic"] if transform not in supported_transforms: - raise ValueError('Transform {} is not known. Must be one of ' - '{}'.format(transform, supported_transforms)) + raise ValueError( + "Transform {} is not known. Must be one of " + "{}".format(transform, supported_transforms) + ) transform_ivezic = { - 'B': [0.2628, -0.7952, 1.0544, 0.0268], - 'V': [0.0688, -0.2056, -0.3838, -0.0534], - 'R': [-0.0107, 0.0050, -0.2689, -0.1540], - 'I': [-0.0307, 0.1163, -0.3341, -0.3584] - } - base_mag_ivezic = { - 'B': g, - 'V': g, - 'R': r, - 'I': i + "B": [0.2628, -0.7952, 1.0544, 0.0268], + "V": [0.0688, -0.2056, -0.3838, -0.0534], + "R": [-0.0107, 0.0050, -0.2689, -0.1540], + "I": [-0.0307, 0.1163, -0.3341, -0.3584], } + base_mag_ivezic = {"B": g, "V": g, "R": r, "I": i} # For jester, using the transform for "all stars with Rc-Ic < 1.15" # from # http://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php#Jester2005 jester_transforms = { - 'B': [1.39, -0.39, 0, 0.21], - 'V': [0.41, 0.59, 0, -0.01], - 'R': [0.41, -0.5, 1.09, -0.23], - 'I': [0.41, -1.5, 2.09, -0.44] + "B": [1.39, -0.39, 0, 0.21], + "V": [0.41, 0.59, 0, -0.01], + "R": [0.41, -0.5, 1.09, -0.23], + "I": [0.41, -1.5, 2.09, -0.44], } if output_filter not in base_mag_ivezic.keys(): - raise ValueError('the desired filter must be a string R B V or I') + raise ValueError("the desired filter must be a string R B V or I") - if transform == 'ivezic': - if output_filter == 'R' or output_filter == 'I': + if transform == "ivezic": + if output_filter == "R" or output_filter == "I": # This will throw a KeyError if the column is missing c = mag_data[r] - mag_data[i] - if output_filter == 'B' or output_filter == 'V': + if output_filter == "B" or output_filter == "V": # This will throw a KeyError if the column is missing c = mag_data[g] - mag_data[r] transform_poly = np.poly1d(transform_ivezic[output_filter]) - out_mag = transform_poly(c) + \ - mag_data[base_mag_ivezic[output_filter]] + out_mag = transform_poly(c) + mag_data[base_mag_ivezic[output_filter]] # poly1d ignores 
masks. Add masks back in here if necessary. try: input_mask = c.mask @@ -215,25 +212,33 @@ def filter_transform(mag_data, output_filter, pass else: out_mag = np.ma.array(out_mag, mask=input_mask) - elif transform == 'jester': + elif transform == "jester": coeff = jester_transforms[output_filter] - out_mag = (coeff[0] * mag_data[g] + coeff[1] * mag_data[r] + - coeff[2] * mag_data[i] + coeff[3]) + out_mag = ( + coeff[0] * mag_data[g] + + coeff[1] * mag_data[r] + + coeff[2] * mag_data[i] + + coeff[3] + ) - out_mag.name = '{}_mag'.format(output_filter) - out_mag.description = ('{}-band magnitude transformed ' - 'from gri'.format(output_filter)) + out_mag.name = "{}_mag".format(output_filter) + out_mag.description = "{}-band magnitude transformed " "from gri".format( + output_filter + ) return out_mag -def calculate_transform_coefficients(input_mag, catalog_mag, color, - input_mag_error=None, - catalog_mag_error=None, - faintest_mag=None, - order=1, - sigma=2.0, - gain=None, - ): +def calculate_transform_coefficients( + input_mag, + catalog_mag, + color, + input_mag_error=None, + catalog_mag_error=None, + faintest_mag=None, + order=1, + sigma=2.0, + gain=None, +): """ Calculate linear transform coefficients from input magnitudes to catalog magnitudes. @@ -340,8 +345,7 @@ def calculate_transform_coefficients(input_mag, catalog_mag, color, g_init = models.Polynomial1D(order) fit = fitting.LinearLSQFitter() - or_fit = fitting.FittingWithOutlierRemoval(fit, sigma_clip, - niter=2, sigma=sigma) + or_fit = fitting.FittingWithOutlierRemoval(fit, sigma_clip, niter=2, sigma=sigma) if faintest_mag is not None: bright = catalog_mag < faintest_mag @@ -351,20 +355,20 @@ def calculate_transform_coefficients(input_mag, catalog_mag, color, # Might not have had a masked array... pass else: - bright = np.ones_like(mag_diff, dtype='bool') + bright = np.ones_like(mag_diff, dtype="bool") bright_index = np.nonzero(bright) # get fitted model and filtered data - or_fitted_model, filtered_data_mask = or_fit(g_init, - color[bright], - mag_diff[bright]) + or_fitted_model, filtered_data_mask = or_fit( + g_init, color[bright], mag_diff[bright] + ) # Restore the filtered_data to the same size as the input # magnitudes. Unmasked values were included in the fit, # masked were not, either because they were too faint # or because they were sigma clipped out. - restored_mask = np.zeros_like(mag_diff, dtype='bool') + restored_mask = np.zeros_like(mag_diff, dtype="bool") restored_mask[bright_index] = filtered_data_mask restored_mask[~bright] = True @@ -374,15 +378,18 @@ def calculate_transform_coefficients(input_mag, catalog_mag, color, return (restored_filtered, or_fitted_model) -def transform_magnitudes(input_mags, catalog, - transform_catalog, - input_mag_colum='mag_inst_r', - catalog_mag_column='r_mag', - catalog_color_column='B-V', - faintest_mag_for_transform=14, - sigma=2, - order=1, - gain=None): +def transform_magnitudes( + input_mags, + catalog, + transform_catalog, + input_mag_colum="mag_inst_r", + catalog_mag_column="r_mag", + catalog_color_column="B-V", + faintest_mag_for_transform=14, + sigma=2, + order=1, + gain=None, +): """ Calculate catalog magnitudes and transform coefficients from instrumental magnitudes. @@ -445,24 +452,22 @@ def transform_magnitudes(input_mags, catalog, of the term ``x**i``. Warning: This returns a namedtuple if the fit fails. 
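    Notes
    -----
    A hedged sketch of a typical call; the variable names are
    illustrative, and the defaults assume APASS-style ``RAJ2000`` /
    ``DEJ2000`` catalog columns::

        cal_mags, matched, transform = transform_magnitudes(
            input_mags,        # table with "RA", "Dec" and "mag_inst_r"
            apass,             # catalog used for the final magnitudes
            apass_low_error,   # cleaner subset used to fit the transform
            faintest_mag_for_transform=14,
        )
        # cal_mags holds one calibrated magnitude per matched star;
        # matched flags which input rows matched within 5 arcsec.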
""" - catalog_all_coords = SkyCoord(catalog['RAJ2000'], - catalog['DEJ2000'], - unit='deg') + catalog_all_coords = SkyCoord(catalog["RAJ2000"], catalog["DEJ2000"], unit="deg") - transform_catalog_coords = SkyCoord(transform_catalog['RAJ2000'], - transform_catalog['DEJ2000'], - unit='deg') - input_coords = SkyCoord(input_mags['RA'], input_mags['Dec']) + transform_catalog_coords = SkyCoord( + transform_catalog["RAJ2000"], transform_catalog["DEJ2000"], unit="deg" + ) + input_coords = SkyCoord(input_mags["RA"], input_mags["Dec"]) - transform_catalog_index, d2d, _ = \ - match_coordinates_sky(input_coords, transform_catalog_coords) + transform_catalog_index, d2d, _ = match_coordinates_sky( + input_coords, transform_catalog_coords + ) # create a boolean of all of the matches that have a discrepancy of less # than 5 arcseconds good_match_for_transform = d2d < 2 * u.arcsecond - catalog_index, d2d, _ = match_coordinates_sky(input_coords, - catalog_all_coords) + catalog_index, d2d, _ = match_coordinates_sky(input_coords, catalog_all_coords) good_match_all = d2d < 5 * u.arcsecond @@ -472,10 +477,8 @@ def transform_magnitudes(input_mags, catalog, catalog_match_indexes = transform_catalog_index[good_match_for_transform] - catalog_match_mags = \ - transform_catalog[catalog_mag_column][catalog_match_indexes] - catalog_match_color = \ - transform_catalog[catalog_color_column][catalog_match_indexes] + catalog_match_mags = transform_catalog[catalog_mag_column][catalog_match_indexes] + catalog_match_color = transform_catalog[catalog_color_column][catalog_match_indexes] good_mags = ~np.isnan(input_match_mags) @@ -484,32 +487,44 @@ def transform_magnitudes(input_mags, catalog, catalog_match_color = catalog_match_color[good_mags] try: - matched_data, transforms = \ - calculate_transform_coefficients(input_match_mags, - catalog_match_mags, - catalog_match_color, - sigma=sigma, - faintest_mag=faintest_mag_for_transform, - order=order, - gain=gain) + matched_data, transforms = calculate_transform_coefficients( + input_match_mags, + catalog_match_mags, + catalog_match_color, + sigma=sigma, + faintest_mag=faintest_mag_for_transform, + order=order, + gain=gain, + ) except np.linalg.LinAlgError as e: - print('Danger! LinAlgError: {}'.format(str(e))) - Transform = namedtuple('Transform', ['parameters']) + print("Danger! LinAlgError: {}".format(str(e))) + Transform = namedtuple("Transform", ["parameters"]) transforms = Transform(parameters=(np.nan,) * (order + 1)) - our_cat_mags = (input_mags[input_mag_colum][good_match_all] + - transforms(catalog[catalog_color_column][catalog_all_indexes])) + our_cat_mags = input_mags[input_mag_colum][good_match_all] + transforms( + catalog[catalog_color_column][catalog_all_indexes] + ) return our_cat_mags, good_match_all, transforms -def transform_to_catalog(observed_mags_grouped, obs_mag_col, obs_filter, - obs_error_column=None, - cat_filter='r_mag', cat_color=('r_mag', 'i_mag'), - a_delta=0.5, a_cen=0, b_delta=1e-6, c_delta=0.5, d_delta=1e-6, - zero_point_range=(18, 22), - in_place=True, fit_diff=True, - verbose=True): +def transform_to_catalog( + observed_mags_grouped, + obs_mag_col, + obs_filter, + obs_error_column=None, + cat_filter="r_mag", + cat_color=("r_mag", "i_mag"), + a_delta=0.5, + a_cen=0, + b_delta=1e-6, + c_delta=0.5, + d_delta=1e-6, + zero_point_range=(18, 22), + in_place=True, + fit_diff=True, + verbose=True, +): """ Transform a set of intrumental magnitudes to a standard system using either instrumental colors or catalog colors. 
@@ -572,19 +587,19 @@ def transform_to_catalog(observed_mags_grouped, obs_mag_col, obs_filter, print("are you sure you want to do that? Error weighting is important!") fit_bounds_lower = [ - a_cen - a_delta, # a - -b_delta, # b - -c_delta, # c - -d_delta, # d - zero_point_range[0], # z + a_cen - a_delta, # a + -b_delta, # b + -c_delta, # c + -d_delta, # d + zero_point_range[0], # z ] fit_bounds_upper = [ - a_cen + a_delta, # a + a_cen + a_delta, # a b_delta, # b c_delta, # c d_delta, # d - zero_point_range[1], # z + zero_point_range[1], # z ] fit_bounds = (fit_bounds_lower, fit_bounds_upper) @@ -605,28 +620,33 @@ def transform_to_catalog(observed_mags_grouped, obs_mag_col, obs_filter, cat = None cat_coords = None - for file, one_image in zip(observed_mags_grouped.groups.keys, observed_mags_grouped.groups): - our_coords = SkyCoord(one_image['RA'], one_image['Dec'], unit='degree') + for file, one_image in zip( + observed_mags_grouped.groups.keys, observed_mags_grouped.groups + ): + our_coords = SkyCoord(one_image["RA"], one_image["Dec"], unit="degree") if cat is None or cat_coords is None: cat, cat_coords = get_cat(one_image) - cat['color'] = cat[cat_color[0]] - cat[cat_color[1]] + cat["color"] = cat[cat_color[0]] - cat[cat_color[1]] cat_idx, d2d, _ = our_coords.match_to_catalog_sky(cat_coords) mag_inst = one_image[obs_mag_col] cat_mag = cat[cat_filter][cat_idx] - color = cat['color'][cat_idx] + color = cat["color"][cat_idx] # Impose some constraints on what is included in the fit good_cat = ~(color.mask | cat_mag.mask) & (d2d.arcsecond < 1) - good_dat = ((one_image[obs_mag_col] < -3) & - (one_image[obs_mag_col] > -20) & - ~np.isnan(one_image[obs_mag_col]) - ) + good_dat = ( + (one_image[obs_mag_col] < -3) + & (one_image[obs_mag_col] > -20) + & ~np.isnan(one_image[obs_mag_col]) + ) mag_diff = cat_mag - mag_inst - good_data = good_dat & (np.abs(mag_diff - np.nanmedian(mag_diff[good_dat & ~mag_diff.mask])) < 1) + good_data = good_dat & ( + np.abs(mag_diff - np.nanmedian(mag_diff[good_dat & ~mag_diff.mask])) < 1 + ) try: good_data = good_data & ~one_image[obs_mag_col].mask except AttributeError: @@ -651,9 +671,9 @@ def transform_to_catalog(observed_mags_grouped, obs_mag_col, obs_filter, offset = 0 # Do the fit - popt, pcov = curve_fit(f, X, catm - offset, - p0=init_guess, bounds=fit_bounds, - sigma=errors) + popt, pcov = curve_fit( + f, X, catm - offset, p0=init_guess, bounds=fit_bounds, sigma=errors + ) # Accumulate the parameters for param, value in zip(all_params, popt): @@ -675,9 +695,9 @@ def transform_to_catalog(observed_mags_grouped, obs_mag_col, obs_filter, cat_colors.extend(color) # Keep the user entertained.... 
- print(f'{file[0]} has fit {opts_to_str(popt)} with {residual=:.4f}') + print(f"{file[0]} has fit {opts_to_str(popt)} with {residual=:.4f}") - mag_col_name = obs_mag_col + '_cal' + mag_col_name = obs_mag_col + "_cal" if not in_place: result = observed_mags_grouped.copy() else: @@ -685,14 +705,14 @@ def transform_to_catalog(observed_mags_grouped, obs_mag_col, obs_filter, result[mag_col_name] = cal_mags if obs_error_column is not None: - result[mag_col_name + '_error'] = ( - (1 + np.asarray(all_params[0])) * result[obs_error_column] - ) - opt_names = ['a', 'b', 'c', 'd', 'z'] + result[mag_col_name + "_error"] = (1 + np.asarray(all_params[0])) * result[ + obs_error_column + ] + opt_names = ["a", "b", "c", "d", "z"] for name, values in zip(opt_names, all_params): result[name] = values - result['mag_cat'] = cat_mags - result['color_cat'] = cat_colors + result["mag_cat"] = cat_mags + result["color_cat"] = cat_colors return result diff --git a/stellarphot/utils/tests/test_catalog_search.py b/stellarphot/utils/tests/test_catalog_search.py index 35386d31..aca8acc0 100644 --- a/stellarphot/utils/tests/test_catalog_search.py +++ b/stellarphot/utils/tests/test_catalog_search.py @@ -11,17 +11,21 @@ from astropy.nddata import CCDData from astropy.wcs.wcs import FITSFixedWarning -from ..catalog_search import catalog_clean, in_frame, \ - catalog_search, find_known_variables, \ - find_apass_stars, filter_catalog +from ..catalog_search import ( + catalog_clean, + in_frame, + catalog_search, + find_known_variables, + find_apass_stars, + filter_catalog, +) from ...tests.make_wcs import make_wcs CCD_SHAPE = [2048, 3073] def a_table(masked=False): - test_table = Table([(1, 2, 3), (1, -1, -1)], names=('a', 'b'), - masked=masked) + test_table = Table([(1, 2, 3), (1, -1, -1)], names=("a", "b"), masked=masked) return test_table @@ -30,32 +34,30 @@ def test_clean_criteria_none_removed(): If all rows satisfy the criteria, none should be removed. """ inp = a_table() - criteria = {'a': '>0'} + criteria = {"a": ">0"} out = catalog_clean(inp, **criteria) assert len(out) == len(inp) assert (out == inp).all() -@pytest.mark.parametrize("condition", - ['>0', '=1', '!=-1', '>=1']) +@pytest.mark.parametrize("condition", [">0", "=1", "!=-1", ">=1"]) def test_clean_criteria_some_removed(condition): """ Try a few filters which remove the second row and check that it is removed. """ inp = a_table() - criteria = {'b': condition} + criteria = {"b": condition} out = catalog_clean(inp, **criteria) assert len(out) == 1 assert (out[0] == inp[0]).all() -@pytest.mark.parametrize("clean_masked", - [False, True]) +@pytest.mark.parametrize("clean_masked", [False, True]) def test_clean_masked_handled_correctly(clean_masked): inp = a_table(masked=True) # Mask negative values - inp['b'].mask = inp['b'] < 0 + inp["b"].mask = inp["b"] < 0 out = catalog_clean(inp, remove_rows_with_mask=clean_masked) if clean_masked: assert len(out) == 1 @@ -72,11 +74,11 @@ def test_clean_masked_and_criteria(): """ inp = a_table(masked=True) # Mask the first row. - inp['b'].mask = inp['b'] > 0 + inp["b"].mask = inp["b"] > 0 inp_copy = inp.copy() # This should remove the third row. 
- criteria = {'a': '<=2'} + criteria = {"a": "<=2"} out = catalog_clean(inp, remove_rows_with_mask=True, **criteria) @@ -90,9 +92,10 @@ def test_clean_masked_and_criteria(): assert (inp == inp_copy).all() -@pytest.mark.parametrize("criteria,error_msg", [ - ({'a': '5'}, "not understood"), - ({'a': ' 0 assert len(vsx_vars) == len(vsx_vars2) @@ -207,51 +216,58 @@ def test_catalog_search_with_coord_and_frame_clip_fails(): # Check that calling catalog_search with a coordinate instead # of WCS and with clip_by_frame = True generates an appropriate # error. - data_file = 'data/sample_wcs_ey_uma.fits' + data_file = "data/sample_wcs_ey_uma.fits" data = get_pkg_data_filename(data_file) with fits.open(data) as hdulist: with warnings.catch_warnings(): # Ignore the warning about the WCS having a different number of # axes than the (non-existent) image. - warnings.filterwarnings("ignore", - message="The WCS transformation has more", - category=FITSFixedWarning) + warnings.filterwarnings( + "ignore", + message="The WCS transformation has more", + category=FITSFixedWarning, + ) wcs = WCS(hdulist[0].header) cen_coord = wcs.pixel_to_world(4096 / 2, 4096 / 2) with pytest.raises(ValueError) as e: - _ = catalog_search(cen_coord, [4096, 4096], 'B/vsx/vsx', - clip_by_frame=True) - assert 'To clip entries by frame' in str(e.value) + _ = catalog_search(cen_coord, [4096, 4096], "B/vsx/vsx", clip_by_frame=True) + assert "To clip entries by frame" in str(e.value) @pytest.mark.remote_data def test_find_apass(): # This is really checking from APASS DR9 on Vizier, or at least that # is where the "expected" data is drawn from. - expected_all = Table.read(get_pkg_data_filename('data/all_apass_ey_uma_sorted_ra_first_20.fits')) - expected_low_error = Table.read(get_pkg_data_filename('data/low_error_apass_ey_uma_sorted_ra_first_20.fits')) - wcs_file = get_pkg_data_filename('data/sample_wcs_ey_uma.fits') + expected_all = Table.read( + get_pkg_data_filename("data/all_apass_ey_uma_sorted_ra_first_20.fits") + ) + expected_low_error = Table.read( + get_pkg_data_filename("data/low_error_apass_ey_uma_sorted_ra_first_20.fits") + ) + wcs_file = get_pkg_data_filename("data/sample_wcs_ey_uma.fits") with fits.open(wcs_file) as hdulist: with warnings.catch_warnings(): # Ignore the warning about the WCS having a different number of # axes than the (non-existent) image. - warnings.filterwarnings("ignore", - message="The WCS transformation has more", - category=FITSFixedWarning) + warnings.filterwarnings( + "ignore", + message="The WCS transformation has more", + category=FITSFixedWarning, + ) wcs = WCS(hdulist[0].header) wcs.pixel_shape = list(reversed(CCD_SHAPE)) - ccd = CCDData(data=np.zeros(CCD_SHAPE), wcs=wcs, unit='adu') + ccd = CCDData(data=np.zeros(CCD_SHAPE), wcs=wcs, unit="adu") all_apass, apass_low_error = find_apass_stars(ccd) # print(all_apass) # REference data was sorted by RA, first 20 entries kept - all_apass.sort('RAJ2000') + all_apass.sort("RAJ2000") all_apass = all_apass[:20] - apass_low_error.sort('RAJ2000') + apass_low_error.sort("RAJ2000") apass_low_error = apass_low_error[:20] # It is hard to imagine the RAs matching and other entries not matching, # so just check the RAs. 
- assert all(all_apass['RAJ2000'] == expected_all['RAJ2000']) - assert all(apass_low_error['RAJ2000'] == expected_low_error['RAJ2000']) + assert all(all_apass["RAJ2000"] == expected_all["RAJ2000"]) + assert all(apass_low_error["RAJ2000"] == expected_low_error["RAJ2000"]) def test_filter_catalog(): diff --git a/stellarphot/utils/tests/test_magnitude_transforms.py b/stellarphot/utils/tests/test_magnitude_transforms.py index 68786857..ccb2923c 100644 --- a/stellarphot/utils/tests/test_magnitude_transforms.py +++ b/stellarphot/utils/tests/test_magnitude_transforms.py @@ -2,9 +2,11 @@ import numpy as np -from ..magnitude_transforms import (filter_transform, - calculate_transform_coefficients, - transform_magnitudes) +from ..magnitude_transforms import ( + filter_transform, + calculate_transform_coefficients, + transform_magnitudes, +) import pytest @@ -19,8 +21,7 @@ def generate_input_mags(n_stars): # Generate n_stars with magnitude in range 10 to 15 rg = np.random.default_rng(1024) input_mags = rg.integers(0, high=50, size=n_stars) / 10 + 10 - instr_mags = Column(name='instrumental', - data=input_mags) + instr_mags = Column(name="instrumental", data=input_mags) return instr_mags @@ -32,10 +33,9 @@ def generate_catalog_mags(instrument_mags, color, model): return instrument_mags + model(color) -def generate_star_coordinates(n_stars, - ra_start=180 * u.degree, - dec_start=45 * u.degree, - separation=10 * u.arcsec): +def generate_star_coordinates( + n_stars, ra_start=180 * u.degree, dec_start=45 * u.degree, separation=10 * u.arcsec +): """ Generate RA/Dec coordinates for a set of stars. """ @@ -61,86 +61,85 @@ def generate_tables(n_stars, mag_model): instr_mags = generate_input_mags(n_stars) # Set name to match default value in function. - instr_mags.name = 'mag_inst_r' + instr_mags.name = "mag_inst_r" # Set name to be default name for color. - color = Column(name='B-V', - data=np.linspace(0.0, 1.0, num=len(instr_mags))) + color = Column(name="B-V", data=np.linspace(0.0, 1.0, num=len(instr_mags))) catalog = generate_catalog_mags(instr_mags, color, mag_model) # Again, set default name. - catalog.name = 'r_mag' + catalog.name = "r_mag" # We'll use the same RA/Dec for the catalog and and the instrumental # magnitudes. 
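    # (Identical coordinates guarantee that the match_coordinates_sky
    # call inside transform_magnitudes pairs star i with star i, so the
    # tests built on these tables start from a perfect cross-match and
    # exercise only the fitting behavior.)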
ra, dec = generate_star_coordinates(n_stars) # Instrumental magnitudes - ra_col = Column(name='RA', data=ra) - dec_col = Column(name='Dec', data=dec) + ra_col = Column(name="RA", data=ra) + dec_col = Column(name="Dec", data=dec) instrumental = Table([instr_mags, ra_col, dec_col]) # Yes, these really do need to be renamed for the catalog table - ra_col.name = 'RAJ2000' - dec_col.name = 'DEJ2000' + ra_col.name = "RAJ2000" + dec_col.name = "DEJ2000" catalog_table = Table([catalog, ra_col, dec_col, color]) return instrumental, catalog_table -@pytest.mark.parametrize('bad_system', [None, 'monkeys']) +@pytest.mark.parametrize("bad_system", [None, "monkeys"]) def test_filter_transform_bad_system(bad_system): fake_data = Table() with pytest.raises(ValueError) as e: - filter_transform(fake_data, 'B', transform=bad_system) - assert 'Must be one of' in str(e.value) + filter_transform(fake_data, "B", transform=bad_system) + assert "Must be one of" in str(e.value) assert str(bad_system) in str(e.value) -@pytest.mark.parametrize('system', ['ivezic', 'jester']) +@pytest.mark.parametrize("system", ["ivezic", "jester"]) def test_filter_transform(system): - data_file = get_pkg_data_filename('data/mag_transform.csv') + data_file = get_pkg_data_filename("data/mag_transform.csv") data = Table.read(data_file) - in_system = data['system'] == system + in_system = data["system"] == system data = data[in_system] - for output_filter in ['B', 'V', 'R', 'I']: - f = filter_transform(data, output_filter, g='g', r='r', i='i', - transform=system) + for output_filter in ["B", "V", "R", "I"]: + f = filter_transform(data, output_filter, g="g", r="r", i="i", transform=system) np.testing.assert_allclose(f, data[output_filter]) def test_filter_transform_bad_filter(): with pytest.raises(ValueError) as e: - filter_transform([], 'not a filter', transform='jester') - assert 'the desired filter must be a string R B V or I' in str(e) + filter_transform([], "not a filter", transform="jester") + assert "the desired filter must be a string R B V or I" in str(e) -@pytest.mark.parametrize('order', [1, 2, 5]) +@pytest.mark.parametrize("order", [1, 2, 5]) def test_catalog_same_as_input(order): # Check that we get the correct transform when catalog magnitudes # are identical to instrument magnitudes. - instr_mags = Column(name='instrumental', data=[10, 12.5, 11]) + instr_mags = Column(name="instrumental", data=[10, 12.5, 11]) zero = models.Const1D(0.0) - color = Column(name='color', data=[1.0] * len(instr_mags)) + color = Column(name="color", data=[1.0] * len(instr_mags)) catalog = generate_catalog_mags(instr_mags, color, zero) # We expect these fits to be poorly conditioned because the two # sets of magnitudes are identical. 
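    # (More precisely: the color column is the constant 1.0, so every
    # column of the polynomial design matrix is identical, the
    # least-squares problem is rank-deficient, and astropy's
    # LinearLSQFitter warns that the fit may be poorly conditioned.)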
with warnings.catch_warnings(): - warnings.filterwarnings('ignore', - message='The fit may be poorly conditioned', - category=AstropyUserWarning) - _, fit_model = calculate_transform_coefficients(instr_mags, - catalog, - color, - order=order) + warnings.filterwarnings( + "ignore", + message="The fit may be poorly conditioned", + category=AstropyUserWarning, + ) + _, fit_model = calculate_transform_coefficients( + instr_mags, catalog, color, order=order + ) assert len(fit_model.parameters) == order + 1 assert all(fit_model.parameters == 0) -@pytest.mark.parametrize('order', [1, 2, 5]) +@pytest.mark.parametrize("order", [1, 2, 5]) def test_catalog_linear_to_input(order): # Check that we recover the correct relationship between # the catalog and instrumental magnitudes when the relationship @@ -148,14 +147,12 @@ def test_catalog_linear_to_input(order): n_stars = 100 instr_mags = generate_input_mags(n_stars) true_relationship = models.Polynomial1D(1, c0=0.5, c1=0.75) - color = Column(name='color', - data=np.linspace(0.0, 1.0, num=len(instr_mags))) + color = Column(name="color", data=np.linspace(0.0, 1.0, num=len(instr_mags))) catalog = generate_catalog_mags(instr_mags, color, true_relationship) - _, fit_model = calculate_transform_coefficients(instr_mags, - catalog, - color, - order=order) + _, fit_model = calculate_transform_coefficients( + instr_mags, catalog, color, order=order + ) assert len(fit_model.parameters) == order + 1 assert np.abs(fit_model.c0 - true_relationship.c0) < 1e-7 assert np.abs(fit_model.c1 - true_relationship.c1) < 1e-7 @@ -165,7 +162,7 @@ def test_catalog_linear_to_input(order): assert all(np.abs(fit_model.parameters[2:]) < 1e-7) -@pytest.mark.parametrize('order', [1, 2, 5]) +@pytest.mark.parametrize("order", [1, 2, 5]) def test_catalog_quadratic_to_input(order): # Check that we recover the correct relationship between # the catalog and instrumental magnitudes when the relationship @@ -173,13 +170,11 @@ def test_catalog_quadratic_to_input(order): n_stars = 100 instr_mags = generate_input_mags(n_stars) true_relationship = models.Polynomial1D(2, c0=0.5, c1=0.75, c2=0.25) - color = Column(name='color', - data=np.linspace(0.0, 1.0, num=len(instr_mags))) + color = Column(name="color", data=np.linspace(0.0, 1.0, num=len(instr_mags))) catalog = generate_catalog_mags(instr_mags, color, true_relationship) - _, fit_model = calculate_transform_coefficients(instr_mags, - catalog, - color, - order=order) + _, fit_model = calculate_transform_coefficients( + instr_mags, catalog, color, order=order + ) assert len(fit_model.parameters) == order + 1 if order >= 2: # We expect a good fit in this case @@ -195,7 +190,7 @@ def test_catalog_quadratic_to_input(order): assert all(np.abs(fit_model.parameters[3:]) < 1e-7) -@pytest.mark.parametrize('faintest_magnitude', [None, 14]) +@pytest.mark.parametrize("faintest_magnitude", [None, 14]) def test_faintest_magnitude_has_effect(faintest_magnitude): # Check that the limit on magnitude when doing fits is respected. 
# We'll do this by setting up a linear relationship then @@ -208,8 +203,7 @@ def test_faintest_magnitude_has_effect(faintest_magnitude): instr_mags = generate_input_mags(n_stars) true_relationship = models.Polynomial1D(1, c0=0.5, c1=0.75) - color = Column(name='color', - data=np.linspace(0.0, 1.0, num=len(instr_mags))) + color = Column(name="color", data=np.linspace(0.0, 1.0, num=len(instr_mags))) catalog = generate_catalog_mags(instr_mags, color, true_relationship) faint_ones = catalog >= 14 @@ -218,17 +212,16 @@ def test_faintest_magnitude_has_effect(faintest_magnitude): # Scramble the faint ones rg = np.random.default_rng(40482) - catalog[faint_ones] = (catalog[faint_ones] - + 5 * rg.random(faint_ones.sum())) + catalog[faint_ones] = catalog[faint_ones] + 5 * rg.random(faint_ones.sum()) _, fit_model = calculate_transform_coefficients( - instr_mags, - catalog, - color, - order=1, - faintest_mag=faintest_magnitude, - sigma=5000 # So that nothing is clipped - ) + instr_mags, + catalog, + color, + order=1, + faintest_mag=faintest_magnitude, + sigma=5000, # So that nothing is clipped + ) if faintest_magnitude: assert np.abs(fit_model.c0 - true_relationship.c0) < 1e-7 @@ -238,7 +231,7 @@ def test_faintest_magnitude_has_effect(faintest_magnitude): assert np.abs(fit_model.c1 - true_relationship.c1) > 1e-2 -@pytest.mark.parametrize('order', [1, 2, 5]) +@pytest.mark.parametrize("order", [1, 2, 5]) def test_transform_magnitudes_identical_input(order): # Analogous to the test case for calculate_transform_coefficients # above where the input magnitudes are identical, except the input @@ -249,18 +242,18 @@ def test_transform_magnitudes_identical_input(order): instrumental, catalog_table = generate_tables(n_stars, zero) - calib_mags, stars_with_match, transform = \ - transform_magnitudes(instrumental, catalog_table, catalog_table, - order=order) + calib_mags, stars_with_match, transform = transform_magnitudes( + instrumental, catalog_table, catalog_table, order=order + ) print(calib_mags) - assert all(calib_mags == catalog_table['r_mag']) + assert all(calib_mags == catalog_table["r_mag"]) assert all(stars_with_match) assert len(transform.parameters) == order + 1 assert all(transform.parameters == 0) -@pytest.mark.parametrize('order', [1, 2, 5]) +@pytest.mark.parametrize("order", [1, 2, 5]) def test_transform_magnitudes_identical_coord_quad_mags(order): # Analogous to the test case for calculate_transform_coefficients # above where the input magnitudes are identical, except the input @@ -271,22 +264,23 @@ def test_transform_magnitudes_identical_coord_quad_mags(order): instrumental, catalog_table = generate_tables(n_stars, true_relationship) - calib_mags, stars_with_match, transform = \ - transform_magnitudes(instrumental, catalog_table, catalog_table, - order=order) + calib_mags, stars_with_match, transform = transform_magnitudes( + instrumental, catalog_table, catalog_table, order=order + ) assert all(stars_with_match) assert len(transform.parameters) == order + 1 if order >= 2: # We expect a good fit in this case - np.testing.assert_allclose(calib_mags, catalog_table['r_mag'], - rtol=1e-7, atol=1e-7) + np.testing.assert_allclose( + calib_mags, catalog_table["r_mag"], rtol=1e-7, atol=1e-7 + ) assert np.abs(transform.c0 - true_relationship.c0) < 1e-7 assert np.abs(transform.c1 - true_relationship.c1) < 1e-7 assert np.abs(transform.c2 - true_relationship.c2) < 1e-7 else: # But a line just can't fit a quadratic that well - assert (np.abs(calib_mags - catalog_table['r_mag']) > 1e-5).all() + assert 
(np.abs(calib_mags - catalog_table["r_mag"]) > 1e-5).all() assert np.abs(transform.c0 - true_relationship.c0) > 1e-7 assert np.abs(transform.c1 - true_relationship.c1) > 1e-7 if order >= 2: @@ -304,12 +298,11 @@ def test_coordinate_mismatches(): instrumental, catalog_table = generate_tables(n_stars, true_relationship) # Mess up the coordinates of half of the stars so that they don't match. - catalog_table['RAJ2000'][50:] = (catalog_table['RAJ2000'][50:] + - 0.5 * u.degree) + catalog_table["RAJ2000"][50:] = catalog_table["RAJ2000"][50:] + 0.5 * u.degree - calib_mags, stars_with_match, transform = \ - transform_magnitudes(instrumental, catalog_table, catalog_table[:50], - order=2) + calib_mags, stars_with_match, transform = transform_magnitudes( + instrumental, catalog_table, catalog_table[:50], order=2 + ) assert all(stars_with_match[:50]) assert all(~stars_with_match[50:]) @@ -324,21 +317,25 @@ def test_coordinate_all_mismatches(): instrumental, catalog_table = generate_tables(n_stars, true_relationship) # Mess up the coordinates of half of the stars so that they don't match. - catalog_table['RAJ2000'] = catalog_table['RAJ2000'] + 0.5 * u.degree + catalog_table["RAJ2000"] = catalog_table["RAJ2000"] + 0.5 * u.degree # Since no stars match we expect a divide by zero in the fitting, # so we'll ignore that. # # We also expect the fit to be poorly conditioned in this case. with warnings.catch_warnings(): - warnings.filterwarnings('ignore', - message='invalid value encountered in divide', - category=RuntimeWarning) - warnings.filterwarnings('ignore', - message='The fit may be poorly conditioned', - category=AstropyUserWarning) - calib_mags, stars_with_match, transform = \ - transform_magnitudes(instrumental, catalog_table, catalog_table[:50], - order=2) + warnings.filterwarnings( + "ignore", + message="invalid value encountered in divide", + category=RuntimeWarning, + ) + warnings.filterwarnings( + "ignore", + message="The fit may be poorly conditioned", + category=AstropyUserWarning, + ) + calib_mags, stars_with_match, transform = transform_magnitudes( + instrumental, catalog_table, catalog_table[:50], order=2 + ) assert not any(stars_with_match) diff --git a/stellarphot/version.py b/stellarphot/version.py index 71b9634c..a4736c1a 100644 --- a/stellarphot/version.py +++ b/stellarphot/version.py @@ -1,9 +1,11 @@ -version = 'unknown.dev' +version = "unknown.dev" try: from importlib_metadata import version as _version, PackageNotFoundError - version = _version('my-package') + + version = _version("my-package") except ImportError: from pkg_resources import get_distribution, DistributionNotFound + try: version = get_distribution("my-package").version except DistributionNotFound: From ea7f3e72ac710f37893d9de091221cb450b75974 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Fri, 15 Dec 2023 10:50:02 -0600 Subject: [PATCH 2/3] Ignore black formatting changes in git blame --- .git-blame-ignore-revs | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .git-blame-ignore-revs diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000..08b7c4c2 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Ignore black formatting changes +c47587cbfa829b03be1dc2e5a79dc96af6ea712f From 519e26761ef1ac4f2a0ff321a51dfa1403f35108 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Fri, 15 Dec 2023 10:51:34 -0600 Subject: [PATCH 3/3] Configure git pre-commit --- .pre-commit-config.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .pre-commit-config.yaml 
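A usage note on the hook configured below (standard pre-commit workflow,
nothing specific to this patch): contributors opt in once per clone with
`pre-commit install`, after which black checks staged files at every commit;
`pre-commit run --all-files` formats the whole tree on demand.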
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..7445cb20 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,11 @@ +repos: + # Using this mirror lets us use mypyc-compiled black, which is about 2x faster + - repo: https://github.com/psf/black-pre-commit-mirror + rev: 23.12.0 + hooks: + - id: black + # It is recommended to specify the latest version of Python + # supported by your project here, or alternatively use + # pre-commit's default_language_version, see + # https://pre-commit.com/#top_level-default_language_version + language_version: python3.11
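One companion step for the .git-blame-ignore-revs file added in patch 2/3:
plain `git blame` only consults it when asked, either per invocation with
`git blame --ignore-revs-file .git-blame-ignore-revs <path>` or once per
clone via `git config blame.ignoreRevsFile .git-blame-ignore-revs`.
GitHub's blame view picks the file up by name automatically.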