Skip to content

Commit

Permalink
more and more docstring updates
Browse files Browse the repository at this point in the history
updating remaining file docstrings to try to get them to be as standardized as possible and generate in readthedocs
  • Loading branch information
mschwamb committed Dec 31, 2023
1 parent f48d84e commit c5c330f
Show file tree
Hide file tree
Showing 21 changed files with 359 additions and 188 deletions.
11 changes: 7 additions & 4 deletions src/sorcha/ephemeris/simulation_constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,16 @@ def create_ecl_to_eq_rotation_matrix(ecl):
system's ecliptic obliquity is already provided as
`ECL_TO_EQ_ROTATION_MATRIX`.
Parameters:
Parameters
-----------
ecl (float): The ecliptical obliquity.
ecl : float
The ecliptical obliquity.
Returns:
Returns
-----------
`numpy` array with shape (3,3).
rotmat: numpy array/matrix of floats
rotation matrix for transforming ecliptical coordinates to equatorial coordinates.
Array has shape (3,3).
"""
ce = np.cos(-ecl)
Expand Down
12 changes: 6 additions & 6 deletions src/sorcha/ephemeris/simulation_data_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,16 +74,16 @@ def make_retriever(directory_path: str = None, registry: dict = REGISTRY) -> poo
Parameters
----------
directory_path : str, optional
The base directory to place all downloaded files, by default None
registry : dict, optional
directory_path : string, optional
The base directory to place all downloaded files. Default = None
registry : dictionary, optional
A dictionary of file names to SHA hashes. Generally we'll not use SHA=None
because the files we're tracking change frequently, by default REGISTRY
because the files we're tracking change frequently. Default = REGISTRY
Returns
-------
pooch.Pooch
The Pooch object used to track and retrieve files.
: pooch
The instance of a Pooch object used to track and retrieve files.
"""
dir_path = pooch.os_cache("sorcha")
if directory_path:
Expand Down
44 changes: 23 additions & 21 deletions src/sorcha/ephemeris/simulation_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,29 +35,15 @@ def create_ephemeris(orbits_df, pointings_df, args, configs):
"""Generate a set of observations given a collection of orbits
and set of pointings.
This works by calculating and regularly updating the sky-plane
locations (unit vectors) of all the objects in the collection
of orbits. The HEALPix index for each of the locations is calculated.
A dictionary with pixel indices as keys and lists of ObjIDs for
those objects in each HEALPix tile as values. One of these
calculations is called a 'picket', as one element of a long picket
fence. At present,
Given a specific pointing, the set of HEALPix tiles that are overlapped
by the pointing (and a buffer region) is computed. These the precise
locations of just those objects within that set of HEALPix tiles are
computed. Details for those that actually do land within the field
of view are passed along.
Parameters
----------
orbits_df : pd.DataFrame
orbits_df : pandas dataframe
The dataframe containing the collection of orbits.
pointings_df : pd.DataFrame
pointings_df : pandas dataframe
The dataframe containing the collection of telescope/camera pointings.
args :
Various arguments necessary for the calculation
configs : dict
configs : dictionary
Various configuration parameters necessary for the calculation
ang_fov : float
The angular size (deg) of the field of view
Expand All @@ -74,14 +60,30 @@ def create_ephemeris(orbits_df, pointings_df, args, configs):
The MPC code for the observatory. (This is currently a configuration
parameter, but these should be included in the visit information,
to allow for multiple observatories.)
nside : int
nside : integer
The nside value used for the HEALPIx calculations. Must be a
power of 2 (1, 2, 4, ...) nside=64 is current default.
Returns
-------
pd.DataFrame
observations: pandas dataframe
The dataframe of observations needed for Sorcha to continue
Notes
-------
This works by calculating and regularly updating the sky-plane
locations (unit vectors) of all the objects in the collection
of orbits. The HEALPix index for each of the locations is calculated.
A dictionary with pixel indices as keys and lists of ObjIDs for
those objects in each HEALPix tile as values. One of these
calculations is called a 'picket', as one element of a long picket
fence. At present,
Given a specific pointing, the set of HEALPix tiles that are overlapped
by the pointing (and a buffer region) is computed. Then the precise
locations of just those objects within that set of HEALPix tiles are
computed. Details for those that actually do land within the field
of view are passed along.
"""
verboselog = args.pplogger.info if args.verbose else lambda *a, **k: None

Expand Down Expand Up @@ -254,14 +256,14 @@ def calculate_rates_and_geometry(pointing: pd.DataFrame, ephem_geom_params: Ephe
Parameters
----------
pointing : pd.DataFrame
pointing : pandas dataframe
The dataframe containing the pointing database.
ephem_geom_params : EphemerisGeometryParameters
Various parameters necessary to calculate the ephemeris
Returns
-------
tuple
: tuple
Tuple containing the ephemeris parameters needed for Sorcha post processing.
"""
ra0, dec0 = vec2ra_dec(ephem_geom_params.rho_hat)
Expand Down
14 changes: 9 additions & 5 deletions src/sorcha/ephemeris/simulation_setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,12 @@ def create_assist_ephemeris(args) -> tuple:
Returns
-------
Ephem, gm_sun
Ephem : ASSIST ephemeris object
The ASSIST ephemeris object
gm_sun : float
value for the GM_SUN value
gm_total : float
value for gm_total
"""
pplogger = logging.getLogger(__name__)

Expand Down Expand Up @@ -133,16 +137,16 @@ def precompute_pointing_information(pointings_df, args, configs):
Parameters
----------
pointings_df : pd.dataframe
pointings_df : pandas dataframe
Contains the telescope pointing database.
args : dict
args : dictionary
Command line arguments needed for initialization.
configs : dict
configs : dictionary
Configuration settings.
Returns
-------
pointings_df : pd.dataframe
pointings_df : pandas dataframe
The original dataframe with several additional columns of precomputed values.
"""
ephem, _, _ = create_assist_ephemeris(args)
Expand Down
46 changes: 42 additions & 4 deletions src/sorcha/modules/miniDifi.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,28 @@ def haversine_np(lon1, lat1, lon2, lat2):
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
Parameters
-----------------
lon1 : float or array of floats
longitude of point 1
lat1 : float or array of floats
latitude of point 1
lon2 : float or array of floats
longitude of point 2
lat2 : float or array of floats
    latitude of point 2
Returns
----------
: float or array of floats
    Great circle distance between the two points [Units: decimal degrees]
Notes
-------
All args must be of equal length.
Because SkyCoord is slow AF.
Expand All @@ -29,12 +51,28 @@ def haversine_np(lon1, lat1, lon2, lat2):
@njit(cache=True)
def hasTracklet(mjd, ra, dec, maxdt_minutes, minlen_arcsec):
"""
Given a set of observations in one night, calculate it has at least one
detectable tracklet.
Given a set of observations in one night, calculate whether it has at least one
detectable tracklet.
Parameters
-------------
mjd : numpy array of floats
Modified Julian date time [Units: days]
ra : numpy array of floats
Object's RA at given mjd [Units: degrees]
dec : numpy array of floats
Object's dec at given mjd [Units: degrees]
maxdt_minutes : float
    Maximum time separation between detections in a tracklet [Units: minutes]
minlen_arcsec : float
    Minimum angular separation between detections in a tracklet [Units: arcsec]
Returns
---------
: boolean
True if tracklet can be made else False
"""
## a tracklet must be longer than some minimum separation (1arcsec)
## and shorter than some maximum time (90 minutes). We find
Expand Down
37 changes: 26 additions & 11 deletions src/sorcha/readers/CSVReader.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,19 @@ def __init__(self, filename, sep="csv", header=-1, **kwargs):
Parameters
-----------
filename : str
filename : string
Location/name of the data file.
sep : str, optional
sep : string, optional
Format of input file ("whitespace"/"comma"/"csv").
Default = csv
header : int, optional
header : integer, optional
The row number of the header. If not provided, does an automatic search.
Default = -1
**kwargs: dictionary, optional
Extra arguments
"""
super().__init__(**kwargs)
self.filename = filename
Expand All @@ -50,7 +55,7 @@ def get_reader_info(self):
Returns
--------
name : str
name : string
The reader information.
"""
return f"CSVDataReader:{self.filename}"
Expand All @@ -61,7 +66,7 @@ def _find_header_line(self):
Returns
--------
int
: integer
The line index of the header.
"""
Expand All @@ -86,19 +91,23 @@ def _read_rows_internal(self, block_start=0, block_size=None, **kwargs):
Parameters
-----------
block_start : int, optional
block_start : integer, optional
The 0-indexed row number from which
to start reading the data. For example in a CSV file
block_start=2 would skip the first two lines after the header
and return data starting on row=2. [Default=0]
and return data starting on row=2. Default =0
block_size: int, optional, default=None
block_size: integer, optional, default=None
The number of rows to read in.
Use block_size=None to read in all available data.
default =None
**kwargs : dictionary, optional
Extra arguments
Returns
-----------
Pandas dataframe
res_df : pandas dataframe
Dataframe of the object data.
"""
# Skip the rows before the header and then begin_loc rows after the header.
Expand Down Expand Up @@ -155,9 +164,12 @@ def _read_objects_internal(self, obj_ids, **kwargs):
obj_ids : list
A list of object IDs to use.
**kwargs : dictionary, optional
Extra arguments
Returns
-----------
Pandas dataframe
res_df : pandas dataframe
The dataframe for the object data.
"""
self._build_id_map()
Expand Down Expand Up @@ -197,9 +209,12 @@ def _process_and_validate_input_table(self, input_table, **kwargs):
input_table : Pandas dataframe
A loaded table.
**kwargs : dictionary, optional
Extra arguments
Returns
-----------
Pandas dataframe
input_table: pandas dataframe
Returns the input dataframe modified in-place.
"""
# Perform the parent class's validation (checking object ID column).
Expand Down
26 changes: 13 additions & 13 deletions src/sorcha/readers/CombinedDataReader.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,12 @@ def __init__(self, ephem_primary=False, **kwargs):
"""
Parameters
----------
ephem_primary: bool, optional
ephem_primary: boolean, optional
Use the ephemeris reader as the primary
reader. Otherwise uses the first auxiliary data reader.
Default = false
Default = False
**kwargs : dict, optional
**kwargs : dictionary, optional
Extra arguments
"""
Expand Down Expand Up @@ -69,21 +69,21 @@ def read_block(self, block_size=None, verbose=False, **kwargs):
Parameters
-----------
block_size: int, optional
block_size: integer, optional
the number of rows to read in.
Use block_size=None to read in all available data.
Default = None
verbose : bool, optional
verbose : boolean, optional
Use verbose logging.
Default = None
Default = False
**kwargs : dict, optional
**kwargs : dictionary, optional
Extra arguments
Returns
-----------
res_df : Pandas dataframe
res_df : pandas dataframe
dataframe of the combined object data.
"""
Expand Down Expand Up @@ -160,21 +160,21 @@ def read_aux_block(self, block_size=None, verbose=False, **kwargs):
Parameters
-----------
block_size : int, optional
block_size : integer, optional
the number of rows to read in.
Use block_size=None to read in all available data.
[Default = None]
Default = None
verbose : bool, optional
verbose : boolean, optional
use verbose logging.
Default = False
**kwargs : dict, optional
**kwargs : dictionary, optional
Extra arguments
Returns
-----------
res_df : Pandas dataframe
res_df : pandas dataframe
dataframe of the combined object data, excluding any ephemeris data.
"""
pplogger = logging.getLogger(__name__)
Expand Down
Loading

0 comments on commit c5c330f

Please sign in to comment.