From c5c330f644ef0fa9aa9891351a8af044856463d9 Mon Sep 17 00:00:00 2001 From: Meg Schwamb Date: Sun, 31 Dec 2023 13:55:55 +0000 Subject: [PATCH] more and more docstring updates updating remaining file docstrings to try and get them to be as standarized as possible and generate in readthedocs --- src/sorcha/ephemeris/simulation_constants.py | 11 ++-- src/sorcha/ephemeris/simulation_data_files.py | 12 ++-- src/sorcha/ephemeris/simulation_driver.py | 44 +++++++------- src/sorcha/ephemeris/simulation_setup.py | 14 +++-- src/sorcha/modules/miniDifi.py | 46 +++++++++++++-- src/sorcha/readers/CSVReader.py | 37 ++++++++---- src/sorcha/readers/CombinedDataReader.py | 26 ++++---- src/sorcha/readers/DatabaseReader.py | 28 ++++++--- src/sorcha/readers/HDF5Reader.py | 25 +++++--- src/sorcha/readers/OIFReader.py | 36 +++++++---- src/sorcha/readers/ObjectDataReader.py | 32 ++++++---- src/sorcha/readers/OrbitAuxReader.py | 26 +++++--- src/sorcha/sorcha.py | 44 +++++++------- src/sorcha/utilities/citation_text.py | 8 +++ .../utilities/createResultsSQLDatabase.py | 59 +++++++++++-------- src/sorcha/utilities/dataUtilitiesForTests.py | 12 ++-- src/sorcha/utilities/diffTestUtils.py | 18 ++++-- src/sorcha/utilities/generateGoldens.py | 5 ++ src/sorcha/utilities/generate_meta_kernel.py | 12 ++-- .../retrieve_ephemeris_data_files.py | 38 ++++++------ src/sorcha/utilities/sorchaArguments.py | 14 ++++- 21 files changed, 359 insertions(+), 188 deletions(-) diff --git a/src/sorcha/ephemeris/simulation_constants.py b/src/sorcha/ephemeris/simulation_constants.py index 190c56d7..716cee47 100644 --- a/src/sorcha/ephemeris/simulation_constants.py +++ b/src/sorcha/ephemeris/simulation_constants.py @@ -14,13 +14,16 @@ def create_ecl_to_eq_rotation_matrix(ecl): system's ecliptic obliquity is already provided as `ECL_TO_EQ_ROTATION_MATRIX`. - Parameters: + Parameters ----------- - ecl (float): The ecliptical obliquity. + ecl : float + The ecliptical obliquity. 
- Returns: + Returns ----------- - `numpy` array with shape (3,3). + rotmat: numpy array/matrix of floats + rotation matrix for transofmring ecliptical coordinates to equatorial coordinates. + Array has shape (3,3). """ ce = np.cos(-ecl) diff --git a/src/sorcha/ephemeris/simulation_data_files.py b/src/sorcha/ephemeris/simulation_data_files.py index 2569f3e1..61d0f059 100644 --- a/src/sorcha/ephemeris/simulation_data_files.py +++ b/src/sorcha/ephemeris/simulation_data_files.py @@ -74,16 +74,16 @@ def make_retriever(directory_path: str = None, registry: dict = REGISTRY) -> poo Parameters ---------- - directory_path : str, optional - The base directory to place all downloaded files, by default None - registry : dict, optional + directory_path : string, optional + The base directory to place all downloaded files. Default = None + registry : dictionary, optional A dictionary of file names to SHA hashes. Generally we'll not use SHA=None - because the files we're tracking change frequently, by default REGISTRY + because the files we're tracking change frequently. Default = REGISTRY Returns ------- - pooch.Pooch - The Pooch object used to track and retrieve files. + : pooch + The instance of a Pooch object used to track and retrieve files. """ dir_path = pooch.os_cache("sorcha") if directory_path: diff --git a/src/sorcha/ephemeris/simulation_driver.py b/src/sorcha/ephemeris/simulation_driver.py index 0e2ffa4c..38562664 100644 --- a/src/sorcha/ephemeris/simulation_driver.py +++ b/src/sorcha/ephemeris/simulation_driver.py @@ -35,29 +35,15 @@ def create_ephemeris(orbits_df, pointings_df, args, configs): """Generate a set of observations given a collection of orbits and set of pointings. - This works by calculating and regularly updating the sky-plane - locations (unit vectors) of all the objects in the collection - of orbits. The HEALPix index for each of the locations is calculated. 
- A dictionary with pixel indices as keys and lists of ObjIDs for - those objects in each HEALPix tile as values. One of these - calculations is called a 'picket', as one element of a long picket - fence. At present, - - Given a specific pointing, the set of HEALPix tiles that are overlapped - by the pointing (and a buffer region) is computed. These the precise - locations of just those objects within that set of HEALPix tiles are - computed. Details for those that actually do land within the field - of view are passed along. - Parameters ---------- - orbits_df : pd.DataFrame + orbits_df : pandas dataframe The dataframe containing the collection of orbits. - pointings_df : pd.DataFrame + pointings_df : pandas dataframe The dataframe containing the collection of telescope/camera pointings. args : Various arguments necessary for the calculation - configs : dict + configs : dictionary Various configuration parameters necessary for the calculation ang_fov : float The angular size (deg) of the field of view @@ -74,14 +60,30 @@ def create_ephemeris(orbits_df, pointings_df, args, configs): The MPC code for the observatory. (This is current a configuration parameter, but these should be included in the visit information, to allow for multiple observatories. - nside : int + nside : integer The nside value used for the HEALPIx calculations. Must be a power of 2 (1, 2, 4, ...) nside=64 is current default. Returns ------- - pd.DataFrame + observations: pandas dataframe The dataframe of observations needed for Sorcha to continue + + Notes + ------- + This works by calculating and regularly updating the sky-plane + locations (unit vectors) of all the objects in the collection + of orbits. The HEALPix index for each of the locations is calculated. + A dictionary with pixel indices as keys and lists of ObjIDs for + those objects in each HEALPix tile as values. One of these + calculations is called a 'picket', as one element of a long picket + fence. 
At present, + + Given a specific pointing, the set of HEALPix tiles that are overlapped + by the pointing (and a buffer region) is computed. These the precise + locations of just those objects within that set of HEALPix tiles are + computed. Details for those that actually do land within the field + of view are passed along. """ verboselog = args.pplogger.info if args.verbose else lambda *a, **k: None @@ -254,14 +256,14 @@ def calculate_rates_and_geometry(pointing: pd.DataFrame, ephem_geom_params: Ephe Parameters ---------- - pointing : pd.DataFrame + pointing : pandas dataframe The dataframe containing the pointing database. ephem_geom_params : EphemerisGeometryParameters Various parameters necessary to calculate the ephemeris Returns ------- - tuple + : tuple Tuple containing the ephemeris parameters needed for Sorcha post processing. """ ra0, dec0 = vec2ra_dec(ephem_geom_params.rho_hat) diff --git a/src/sorcha/ephemeris/simulation_setup.py b/src/sorcha/ephemeris/simulation_setup.py index ef51b1cc..5c30c906 100644 --- a/src/sorcha/ephemeris/simulation_setup.py +++ b/src/sorcha/ephemeris/simulation_setup.py @@ -36,8 +36,12 @@ def create_assist_ephemeris(args) -> tuple: Returns ------- - Ephem, gm_sun + Ephem : ASSIST ephemeris obejct The ASSIST ephemeris object + gm_sun : float + value for the GM_SUN value + gm_total : float + value for gm_total """ pplogger = logging.getLogger(__name__) @@ -133,16 +137,16 @@ def precompute_pointing_information(pointings_df, args, configs): Parameters ---------- - pointings_df : pd.dataframe + pointings_df : pandas dataframe Contains the telescope pointing database. - args : dict + args : dictionary Command line arguments needed for initialization. - configs : dict + configs : dictionary Configuration settings. Returns ------- - pointings_df : pd.dataframe + pointings_df : pandas dataframe The original dataframe with several additional columns of precomputed values. 
""" ephem, _, _ = create_assist_ephemeris(args) diff --git a/src/sorcha/modules/miniDifi.py b/src/sorcha/modules/miniDifi.py index fe65bf15..fe33aaf8 100755 --- a/src/sorcha/modules/miniDifi.py +++ b/src/sorcha/modules/miniDifi.py @@ -10,6 +10,28 @@ def haversine_np(lon1, lat1, lon2, lat2): Calculate the great circle distance between two points on the earth (specified in decimal degrees) + Parameters + ----------------- + + lon1 : float or array of floats + longitude of point 1 + + lat1 : float or array of floats + latitude of point 1 + + lon2 : float or array of floats + longitude of point 2 + + lat1 : float or array of floats + latitude of point 1 + + Returns + ---------- + : float or array of floats + Great distance between the two points [Units: Decimal degrees] + + Notes + ------- All args must be of equal length. Because SkyCoord is slow AF. @@ -29,12 +51,28 @@ def haversine_np(lon1, lat1, lon2, lat2): @njit(cache=True) def hasTracklet(mjd, ra, dec, maxdt_minutes, minlen_arcsec): """ - Given a set of observations in one night, calculate it has at least one - detectable tracklet. + Given a set of observations in one night, calculate it has at least one + detectable tracklet. + + Parameters + ------------- + mjd : numpy array of floats + Modified Julian date time [Units: days] + + ra : numpy array of floats + Object's RA at given mjd [Units: degrees] + + dec : numpy array of floats + Object's dec at given mjd [Units: degrees] + + maxdt_mintes: float - Inputs: numpy arrays of mjd (time, days), ra (degrees), dec(degrees). + minlen_arcsec : float - Output: True or False + Returns + --------- + : boolean + True if tracklet can be made else False """ ## a tracklet must be longer than some minimum separation (1arcsec) ## and shorter than some maximum time (90 minutes). 
We find diff --git a/src/sorcha/readers/CSVReader.py b/src/sorcha/readers/CSVReader.py index 2f9de978..6c5ce6fb 100755 --- a/src/sorcha/readers/CSVReader.py +++ b/src/sorcha/readers/CSVReader.py @@ -17,14 +17,19 @@ def __init__(self, filename, sep="csv", header=-1, **kwargs): Parameters ----------- - filename : str + filename : string Location/name of the data file. - sep : str, optional + sep : string, optional Format of input file ("whitespace"/"comma"/"csv"). + Default = csv - header : int, optional + header : integer, optional The row number of the header. If not provided, does an automatic search. + Default = -1 + + **kwargs: dictionary, optional + Extra arguments """ super().__init__(**kwargs) self.filename = filename @@ -50,7 +55,7 @@ def get_reader_info(self): Returns -------- - name : str + name : string The reader information. """ return f"CSVDataReader:{self.filename}" @@ -61,7 +66,7 @@ def _find_header_line(self): Returns -------- - int + : integer The line index of the header. """ @@ -86,19 +91,23 @@ def _read_rows_internal(self, block_start=0, block_size=None, **kwargs): Parameters ----------- - block_start : int, optional + block_start : integer, optional The 0-indexed row number from which to start reading the data. For example in a CSV file block_start=2 would skip the first two lines after the header - and return data starting on row=2. [Default=0] + and return data starting on row=2. Default =0 - block_size: int, optional, default=None + block_size: integer, optional, default=None The number of rows to read in. Use block_size=None to read in all available data. + default =None + + **kwargs : dictionary, optional + Extra arguments Returns ----------- - Pandas dataframe + res_df : pandas dataframe Dataframe of the object data. """ # Skip the rows before the header and then begin_loc rows after the header. @@ -155,9 +164,12 @@ def _read_objects_internal(self, obj_ids, **kwargs): obj_ids : list A list of object IDs to use. 
+ **kwargs : dictionary, optional + Extra arguments + Returns ----------- - Pandas dataframe + res_df : pandas dataframe The dataframe for the object data. """ self._build_id_map() @@ -197,9 +209,12 @@ def _process_and_validate_input_table(self, input_table, **kwargs): input_table : Pandas dataframe A loaded table. + **kwargs : dictionary, optional + Extra arguments + Returns ----------- - Pandas dataframe + input_table: pandas dataframe Returns the input dataframe modified in-place. """ # Perform the parent class's validation (checking object ID column). diff --git a/src/sorcha/readers/CombinedDataReader.py b/src/sorcha/readers/CombinedDataReader.py index f3796f26..d10a94e8 100755 --- a/src/sorcha/readers/CombinedDataReader.py +++ b/src/sorcha/readers/CombinedDataReader.py @@ -25,12 +25,12 @@ def __init__(self, ephem_primary=False, **kwargs): """ Parameters ---------- - ephem_primary: bool, optional + ephem_primary: boolean, optional Use the ephemeris reader as the primary reader. Otherwise uses the first auxiliary data reader. - Default = false + Default = False - **kwargs : dict, optional + **kwargs : dictionary, optional Extra arguments """ @@ -69,21 +69,21 @@ def read_block(self, block_size=None, verbose=False, **kwargs): Parameters ----------- - block_size: int, optional + block_size: integer, optional the number of rows to read in. Use block_size=None to read in all available data. Default = None - verbose : bool, optional + verbose : boolean, optional Use verbose logging. - Default = None + Default = False - **kwargs : dict, optional + **kwargs : dictionary, optional Extra arguments Returns ----------- - res_df : Pandas dataframe + res_df : pandas dataframe dataframe of the combined object data. """ @@ -160,21 +160,21 @@ def read_aux_block(self, block_size=None, verbose=False, **kwargs): Parameters ----------- - block_size : int, optional + block_size : integer, optional the number of rows to read in. Use block_size=None to read in all available data. 
- [Default = None] + Default = None - verbose : bool, optional + verbose : boolean, optional use verbose logging. Default = False - **kwargs : dict, optional + **kwargs : dictionary, optional Extra arguments Returns ----------- - res_df : Pandas dataframe + res_df : pandas dataframe dataframe of the combined object data, excluding any ephemeris data. """ pplogger = logging.getLogger(__name__) diff --git a/src/sorcha/readers/DatabaseReader.py b/src/sorcha/readers/DatabaseReader.py index a8eb9b7d..53bcfe1f 100755 --- a/src/sorcha/readers/DatabaseReader.py +++ b/src/sorcha/readers/DatabaseReader.py @@ -26,6 +26,11 @@ def __init__(self, intermdb, **kwargs): ----------- intermdb : string filepath/name of temporary database. + + Default = None + + **kwargs : dictionary, optional + Extra arguments """ super().__init__(**kwargs) self.intermdb = intermdb @@ -46,23 +51,24 @@ def _read_rows_internal(self, block_start=0, block_size=None, **kwargs): Parameters ----------- - block_start : int, optional + block_start : integer, optional The 0-indexed row number from which to start reading the data. For example in a CSV file block_start=2 would skip the first two lines after the header - and return data starting on row=2. [Default=0] + and return data starting on row=2. Default=0 block_size : int, optional the number of rows to read in. Use block_size=None to read in all available data. A non-None block size must be provided if block_start > 0. + Default = None - validate_data : bool, optional - if True then checks the data for NaNs or nulls. + **kwargs : dictionary, optional + Extra arguments Returns ---------- - res_df : Pandas dataframe + res_df : pandas dataframe dataframe of the object data. Notes @@ -90,9 +96,12 @@ def _read_objects_internal(self, obj_ids, **kwargs): obj_ids : list A list of object IDs to use. + **kwargs : dictionary, optional + Extra arguments + Returns ----------- - res_df : Pandas dataframe + res_df : pandas dataframe The dataframe for the object data. 
""" con = sqlite3.connect(self.intermdb) @@ -114,12 +123,15 @@ def _process_and_validate_input_table(self, input_table, **kwargs): Parameters ----------- - input_table : Pandas dataframe + input_table : pandas dataframe A loaded table. + **kwargs : dictionary, optional + Extra arguments + Returns ----------- - input_table : Pandas dataframe + input_table : pandas dataframe Returns the input dataframe modified in-place. """ # Perform the parent class's validation (checking object ID column). diff --git a/src/sorcha/readers/HDF5Reader.py b/src/sorcha/readers/HDF5Reader.py index a559eb17..4d51a879 100755 --- a/src/sorcha/readers/HDF5Reader.py +++ b/src/sorcha/readers/HDF5Reader.py @@ -37,23 +37,24 @@ def _read_rows_internal(self, block_start=0, block_size=None, **kwargs): Parameters ----------- - block_start : int, optional + block_start : integer, optional The 0-indexed row number from which to start reading the data. For example in a CSV file block_start=2 would skip the first two lines after the header - and return data starting on row=2. [Default=0] + and return data starting on row=2. Default=0 - block_size : int, optional + block_size : integer, optional the number of rows to read in. Use block_size=None to read in all available data. - [Default = None] + Default = None - validate_data : bool, optional - if True then checks the data for NaNs or nulls. + **kwargs : dictionary, optional + Extra arguments Returns ----------- - res_df (Pandas dataframe): dataframe of the object data. + res_df : pandas dataframe + Dataframe of the object data. """ if block_size is None: res_df = pd.read_hdf( @@ -83,6 +84,9 @@ def _read_objects_internal(self, obj_ids, **kwargs): obj_ids : list A list of object IDs to use. 
+ **kwargs : dictionary, optional + Extra arguments + Returns ----------- res_df : Pandas dataframe @@ -106,12 +110,15 @@ def _process_and_validate_input_table(self, input_table, **kwargs): Parameters ----------- - input_table : Pandas dataframe + input_table : pandas dataframe A loaded table. + **kwargs : dictionary, optional + Extra arguments + Returns ----------- - input_table : Pandas dataframe + input_table : pandas dataframe Returns the input dataframe modified in-place. """ # Perform the parent class's validation (checking object ID column). diff --git a/src/sorcha/readers/OIFReader.py b/src/sorcha/readers/OIFReader.py index e85b2a0e..0539478b 100755 --- a/src/sorcha/readers/OIFReader.py +++ b/src/sorcha/readers/OIFReader.py @@ -27,6 +27,10 @@ def __init__(self, filename, inputformat, **kwargs): inputformat : string format of input file ("whitespace"/"comma"/"csv"/"h5"/"hdf5"). + + **kwargs : dictionary, optional + Extra arguments + """ super().__init__(**kwargs) @@ -50,7 +54,7 @@ def get_reader_info(self): Returns -------- - name : string + : string The reader information. """ return f"OIFDataReader|{self.reader.get_reader_info()}" @@ -64,12 +68,15 @@ def _read_rows_internal(self, block_start=0, block_size=None, **kwargs): The 0-indexed row number from which to start reading the data. For example in a CSV file block_start=2 would skip the first two lines after the header - and return data starting on row=2. [Default=0] + and return data starting on row=2. Default =0 block_size : int, optional the number of rows to read in. Use block_size=None to read in all available data. - [Default = None] + Default = None + + **kwargs : dictionary, optional + Extra arguments Returns ----------- @@ -89,9 +96,12 @@ def _read_objects_internal(self, obj_ids, **kwargs): obj_ids : list A list of object IDs to use. + **kwargs : dictionary, optional + Extra arguments + Returns ----------- - res_df : Pandas dataframe + res_df : pandas dataframe The dataframe for the object data. 
""" res_df = self.reader.read_objects(obj_ids, **kwargs) @@ -101,21 +111,25 @@ def _process_and_validate_input_table(self, input_table, **kwargs): """Perform any input-specific processing and validation on the input table. Modifies the input dataframe in place. - Notes - ----- - The base implementation includes filtering that is common to most - input types. Subclasses should call super.process_and_validate() - to ensure that the ancestor’s validation is also applied. - Parameters ----------- input_table : Pandas dataframe A loaded table. + **kwargs : dictionary, optional + Extra arguments + Returns ----------- input_table : Pandas dataframe Returns the input dataframe modified in-place. + + Notes + ----- + The base implementation includes filtering that is common to most + input types. Subclasses should call super.process_and_validate() + to ensure that the ancestor’s validation is also applied. + """ # We do not call reader.process_and_validate_input_table() or # super().process_and_validate_input_table() because reader's read functions have @@ -179,7 +193,7 @@ def read_full_oif_table(filename, inputformat): Returns ----------- - res_df : Pandas dataframe + res_df : pandas dataframe dataframe of the object data. """ diff --git a/src/sorcha/readers/ObjectDataReader.py b/src/sorcha/readers/ObjectDataReader.py index c2c593ac..b95730da 100755 --- a/src/sorcha/readers/ObjectDataReader.py +++ b/src/sorcha/readers/ObjectDataReader.py @@ -57,12 +57,15 @@ def read_rows(self, block_start=0, block_size=None, **kwargs): The 0-indexed row number from which to start reading the data. For example in a CSV file block_start=2 would skip the first two lines after the header - and return data starting on row=2. [Default=0] + and return data starting on row=2. Default=0 block_size : int (optional) the number of rows to read in. Use block_size=None to read in all available data. 
- [Default = None] + Default = None + + **kwargs : dictionary, optional + Extra arguments Returns ----------- @@ -102,6 +105,9 @@ def read_objects(self, obj_ids, **kwargs): obj_ids : list A list of object IDs to use. + **kwargs : dictionary, optional + Extra arguments + Returns ----------- res_df : Pandas dataframe @@ -153,24 +159,30 @@ def _process_and_validate_input_table(self, input_table, **kwargs): """Perform any input-specific processing and validation on the input table. Modifies the input dataframe in place. - Note - ---- - The base implementation includes filtering that is common to most - input types. Subclasses should call super.process_and_validate() - to ensure that the ancestor’s validation is also applied. - Parameters ----------- input_table : Pandas dataframe A loaded table. - disallow_nan : bool (optional) - if True then checks the data for NaNs or nulls. + **kwargs : dictionary, optional + Extra arguments Returns ----------- input_table :Pandas dataframe Returns the input dataframe modified in-place. + + Notes + -------- + The base implementation includes filtering that is common to most + input types. Subclasses should call super.process_and_validate() + to ensure that the ancestor’s validation is also applied. + + Additional arguments to use: + + disallow_nan : boolean + if True then checks the data for NaNs or nulls. + """ input_table = self._validate_object_id_column(input_table) diff --git a/src/sorcha/readers/OrbitAuxReader.py b/src/sorcha/readers/OrbitAuxReader.py index 17b17f46..1840c66d 100755 --- a/src/sorcha/readers/OrbitAuxReader.py +++ b/src/sorcha/readers/OrbitAuxReader.py @@ -18,9 +18,14 @@ def __init__(self, filename, sep="csv", header=-1, **kwargs): sep : string, optional format of input file ("whitespace"/"csv"). + Default = "csv" header : int The row number of the header. If not provided, does an automatic search. 
+ Default = -1 + + **kwargs : dictionary, optional + Extra arguments """ super().__init__(filename, sep, header, **kwargs) @@ -30,7 +35,7 @@ def get_reader_info(self): Returns -------- - name : string + : string The reader information. """ return f"OrbitAuxReader:{self.filename}" @@ -39,21 +44,24 @@ def _process_and_validate_input_table(self, input_table, **kwargs): """Perform any input-specific processing and validation on the input table. Modifies the input dataframe in place. - Notes - ------ - The base implementation includes filtering that is common to most - input types. Subclasses should call super.process_and_validate() - to ensure that the ancestor’s validation is also applied. - Parameters ----------- - input_table : Pandas dataframe + input_table : pandas dataframe A loaded table. + **kwargs : dictionary, optional + Returns ----------- - res_df : Pandas dataframe + res_df : pandas dataframe Returns the input dataframe modified in-place. + + Notes + ------ + The base implementation includes filtering that is common to most + input types. Subclasses should call super.process_and_validate() + to ensure that the ancestor’s validation is also applied. + """ # Do standard CSV file processing super()._process_and_validate_input_table(input_table, **kwargs) diff --git a/src/sorcha/sorcha.py b/src/sorcha/sorcha.py index 764c98cb..c93fe7f1 100755 --- a/src/sorcha/sorcha.py +++ b/src/sorcha/sorcha.py @@ -65,13 +65,14 @@ def runLSSTSimulation(args, configs, pplogger=None): Parameters ----------- - args (dictionary or `sorchaArguments` object): + args : dictionary or `sorchaArguments` object dictionary of command-line arguments. pplogger : logging.Logger, optional The logger to use in this function. If None creates a new one. + Default = None - Returns: + Returns ----------- None. @@ -307,32 +308,33 @@ def main(): model Solar System small body population to what the specified wide-field survey would observe. 
- usage: sorcha [-h] -c C -o O -ob OB -p P -pd PD [-er E] [-ew E] [-cp CP] [-dw [DW]] [-dr DR] [-dl] [-f] [-s S] [-t T] [-v] + usage: sorcha [-h] -c C -o O -ob OB -p P -pd PD [-er ER] [-ew EW] [-ar AR] [-cp CP] [-f] [-s S] [-t T] [-v] options: - -h, --help show this help message and exit + -h, --help show this help message and exit Required arguments: - -c C, --config C Input configuration file name (default: None) - -o O, --outfile O Path to store output and logs. (default: None) - -ob OB, --orbit OB Orbit file name (default: None) - -p P, --params P Physical parameters file name (default: None) - -pd PD, --pointing_database PD + -c C, --config C Input configuration file name (default: None) + -o O, --outfile O Path to store output and logs. (default: None) + -ob OB, --orbit OB + Orbit file name (default: None) + -p P, --params P Physical parameters file name (default: None) + -pd PD, --pointing_database PD Survey pointing information (default: None) Optional arguments: - -er E, --ephem_read E Existing ephemeris simulation output file name (default: None) - -ew E, --ephem_write E - Output file name for newly generated ephemeris simulation (default: None) - -cp CP, --complex_physical_parameters CP - Complex physical parameters file name (default: None) - -dw [DW] Make temporary ephemeris database. If no filepath/name supplied, default name and ephemeris input location used. (default: None) - -dr DR Location of existing/previous temporary ephemeris database to read from if wanted. (default: None) - -dl Delete the temporary ephemeris database after code has completed. (default: False) - -f, --force Force deletion/overwrite of existing output file(s). (default: False) - -s S, --survey S Survey to simulate (default: LSST) - -t T, --stem T Output file name stem. (default: SSPPOutput) - -v, --verbose Verbosity. Default currently true; include to turn off verbosity. 
(default: True) + -er ER, --ephem_read ER + Previously generated ephemeris simulation file name, required if ephemerides_type in config file is 'external'. (default: None) + -ew EW, --ephem_write EW + Output file name for newly generated ephemeris simulation, required if ephemerides_type in config file is not 'external'. (default: None) + -ar AR, --ar_data_path AR + Directory path where Assist+Rebound data files where stored when running bootstrap_sorcha_data_files from the command line. (default: None) + -cp CP, --complex_physical_parameters CP + Complex physical parameters file name (default: None) + -f, --force Force deletion/overwrite of existing output file(s). Default False. (default: False) + -s S, --survey S Survey to simulate (default: LSST) + -t T, --stem T Output file name stem. (default: SSPPOutput) + -v, --verbose Verbosity. Default currently true; include to turn off verbosity. (default: True) """ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) diff --git a/src/sorcha/utilities/citation_text.py b/src/sorcha/utilities/citation_text.py index 7dfe4e28..12750638 100644 --- a/src/sorcha/utilities/citation_text.py +++ b/src/sorcha/utilities/citation_text.py @@ -4,6 +4,14 @@ def cite_sorcha(): """Providing the bibtex, AAS Journals software latex command, and acknowledgement statements for Sorcha and the associated packages that power it. + + Parameters + ----------- + None + + Returns + ----------- + None """ print("\nSorcha: \n") print("Merritt et al. 
(in prep)") diff --git a/src/sorcha/utilities/createResultsSQLDatabase.py b/src/sorcha/utilities/createResultsSQLDatabase.py index 40d1ac3a..9652cd24 100644 --- a/src/sorcha/utilities/createResultsSQLDatabase.py +++ b/src/sorcha/utilities/createResultsSQLDatabase.py @@ -16,25 +16,30 @@ from sorcha.modules.PPConfigParser import PPFindDirectoryOrExit -def create_results_table(cnx_out, filename, output_path, output_stem, table_name="pp_results"): +def create_results_table(cnx_out, filename, output_path, output_stem, table_name="sorcha_results"): """ Creates a table in a SQLite database from SSPP results. - Parameters: + Parameters ----------- - cnx_out (sqlite3 connection): Connection to sqlite3 database. + cnx_out : sqlite3 connection + Connection to sqlite3 database. - filename (string): filepath/name of sqlite3 database. + filename : string + filepath/name of sqlite3 database. - output_path (string): filepath of directory containing SSPP output folders. + output_path : string + filepath of directory containing SSPP output folders. - output_stem (string): stem filename for SSPP outputs. + output_stem : string + stem filename for SSPP outputs. - table_name (string): name of table of SSPP results. + table_name : string, optional + name of table of for storing sorcha results. Default ="sorcha_results" - Returns: + Returns ----------- - None. + None """ @@ -83,17 +88,20 @@ def create_inputs_table(cnx_out, input_path, table_type): Creates a table in a SQLite database from the input files (i.e. orbits, physical parameters, etc). - Parameters: + Parameters ----------- - cnx_out (sqlite3 connection): Connection to sqlite3 database. + cnx_out : sqlite3 connection + Connection to sqlite3 database. - input_path (string): filepath of directory containing input files. + input_path : string + Filepath of directory containing input files. - table_type (string): type of file. Should be "orbits"/"params"/"comet". + table_type : string + Type of file. 
Should be "orbits"/"params"/"comet". - Returns: + Returns ----------- - None. + None """ @@ -121,13 +129,14 @@ def create_results_database(args): Creates a SQLite database with tables of SSPP results and all orbit/physical parameters/comet files. - Parameters: + Parameters ----------- - args (argparse ArgumentParser object): command line arguments. + args : ArgumentParser + argparse ArgumentParser object; command line arguments. - Returns: + Returns ----------- - None. + None """ @@ -147,17 +156,19 @@ def create_results_database(args): create_inputs_table(cnx_out, "comet") -def get_column_names(filename, table_name="pp_results"): +def get_column_names(filename, table_name="sorcha_results"): """ Obtains column names from a table in a SQLite database. - Parameters: + Parameters ----------- - filename (string): filepath/name of sqlite3 database. + filename : string + Filepath/name of sqlite3 database. - table_name (string): name of table. + table_name : string, optional + Name of table. Default = "sorcha_results" - Returns: + Returns ----------- col_names (list): list of column names. diff --git a/src/sorcha/utilities/dataUtilitiesForTests.py b/src/sorcha/utilities/dataUtilitiesForTests.py index 360d644f..04847629 100644 --- a/src/sorcha/utilities/dataUtilitiesForTests.py +++ b/src/sorcha/utilities/dataUtilitiesForTests.py @@ -18,12 +18,12 @@ def get_test_filepath(filename): Parameters ---------- - filename : `str` + filename : string The name of the file inside the ``tests/data`` directory. Returns ------- - filepath : `str` + : string The full path to the file. """ @@ -40,12 +40,12 @@ def get_demo_filepath(filename): Parameters ---------- - filename : `str` + filename : string The name of the file inside the ``demo`` directory. Returns ------- - filepath : `str` + : string The full path to the file. 
""" @@ -62,12 +62,12 @@ def get_data_out_filepath(filename): Parameters ---------- - filename : `str` + filename : string The name of the file inside the ``data/out`` directory. Returns ------- - filepath : `str` + : string The full path to the file. """ diff --git a/src/sorcha/utilities/diffTestUtils.py b/src/sorcha/utilities/diffTestUtils.py index 9c025640..c42c8fe9 100644 --- a/src/sorcha/utilities/diffTestUtils.py +++ b/src/sorcha/utilities/diffTestUtils.py @@ -15,13 +15,16 @@ def compare_result_files(test_output, golden_output): Parameters ---------- - test_output (str): The path and file name of the test results. + test_output : string + The path and file name of the test results. - golden_output (str): The path and file name of the golden set results. + golden_output : string + The path and file name of the golden set results. Returns ------- - bool : Indicates whether the results are the same. + : bool + Indicates whether the results are the same. """ test_data = pd.read_csv(test_output) golden_data = pd.read_csv(golden_output) @@ -79,7 +82,14 @@ def override_seed_and_run(outpath, arg_set="baseline"): Parameters ---------- - outpath (str): The path for the output files. + outpath : string + The path for the output files. + + arg_set : string, optional + set of arguments for setting up the run. Options: "baseline" or "with_ephemeris". + "baseline" run does not do ephemeris generation. "with_ephemeris" is a full end to end run + of all main components of sorcha. + Default = "baseline" """ if arg_set == "baseline": diff --git a/src/sorcha/utilities/generateGoldens.py b/src/sorcha/utilities/generateGoldens.py index ae816b07..096a5582 100644 --- a/src/sorcha/utilities/generateGoldens.py +++ b/src/sorcha/utilities/generateGoldens.py @@ -8,6 +8,11 @@ from sorcha.utilities.dataUtilitiesForTests import get_demo_filepath if __name__ == "__main__": + """ + Generates "golden" output for sorcha runs for testing. 
This should only be run sparingly when confident all changes have been confirmed and tested with + unit tests + """ # Create a goldens directory if it does not exist. golden_dir = get_demo_filepath("goldens") if not os.path.exists(golden_dir): diff --git a/src/sorcha/utilities/generate_meta_kernel.py b/src/sorcha/utilities/generate_meta_kernel.py index 2aacf9b9..45a5d34a 100644 --- a/src/sorcha/utilities/generate_meta_kernel.py +++ b/src/sorcha/utilities/generate_meta_kernel.py @@ -35,8 +35,12 @@ def build_meta_kernel_file(retriever: pooch.Pooch) -> None: Parameters ---------- - retriever : pooch.Pooch + retriever : pooch Pooch object that maintains the registry of files to download + + Returns + --------- + None """ # build meta_kernel file path meta_kernel_file_path = os.path.join(retriever.abspath, META_KERNEL) @@ -62,16 +66,16 @@ def _build_file_name(cache_dir: str, file_path: str) -> str: Parameters ---------- - cache_dir : str + cache_dir : string The full path to the cache directory used when retrieving files for Assist and Rebound. - file_path : str + file_path : string The full file path for a given file that will have the cache directory segment replace. Returns ------- - str + : string Shortened file path, appropriate for use in kernel_meta files. """ diff --git a/src/sorcha/utilities/retrieve_ephemeris_data_files.py b/src/sorcha/utilities/retrieve_ephemeris_data_files.py index 1f7738ff..3a55e1f9 100644 --- a/src/sorcha/utilities/retrieve_ephemeris_data_files.py +++ b/src/sorcha/utilities/retrieve_ephemeris_data_files.py @@ -19,12 +19,16 @@ def _decompress(fname, action, pup): Parameters ---------- - fname : str + fname : string Original filename - action : str + action : string One of []"download", "update", "fetch"] - pup : pooch.Pooch + pup : pooch The Pooch object that defines the location of the file. 
+ + Returns + ---------- + None """ known_extentions = [".gz", ".bz2", ".xz"] if os.path.splitext(fname)[-1] in known_extentions: @@ -39,7 +43,7 @@ def _remove_files(retriever: pooch.Pooch) -> None: Parameters ---------- - retriever : pooch.Pooch + retriever : pooch Pooch object that maintains the registry of files to download. """ @@ -51,19 +55,19 @@ def _check_for_existing_files(retriever: pooch.Pooch, file_list: list[str]) -> bool: """Will check for existing local files, any file not found will be printed - to the terminal. - - Parameters - ---------- - retriever : pooch.Pooch - Pooch object that maintains the registry of files to download. - file_list : list[str] - A list of file names look for in the local cache. - - Returns - ------- - bool - Returns True if all files are found in the local cache, False otherwise. + to the terminal. + + Parameters + ---------- + retriever : pooch + Pooch object that maintains the registry of files to download. + file_list : list of strings + A list of file names to look for in the local cache. + + Returns + ------- + : bool + Returns True if all files are found in the local cache, False otherwise. """ # choosing clarity over brevity with these variables. diff --git a/src/sorcha/utilities/sorchaArguments.py b/src/sorcha/utilities/sorchaArguments.py index c3067bff..baedff80 100644 --- a/src/sorcha/utilities/sorchaArguments.py +++ b/src/sorcha/utilities/sorchaArguments.py @@ -45,7 +45,19 @@ def __init__(self, cmd_args_dict=None, pplogger=None): self.read_from_dict(cmd_args_dict) def read_from_dict(self, args): - """set the parameters from a cmd_args dict.""" + """set the parameters from a cmd_args dict. + + Parameters + --------------- + args : dictionary + dictionary of configuration parameters + + Returns + ---------- + None + + """ + self.paramsinput = args["paramsinput"] self.orbinfile = args["orbinfile"] self.oifoutput = args.get("oifoutput")