Skip to content

Commit

Permalink
Standardising randomisation, small fixes. (#777)
Browse files Browse the repository at this point in the history
* Standardising randomisation, small fixes.

* Fixing randomisation notebook, oops.

* Editing pyproject.toml.
  • Loading branch information
astronomerritt authored Jan 11, 2024
1 parent 1ba0367 commit 32308bb
Show file tree
Hide file tree
Showing 8 changed files with 27 additions and 623 deletions.
6 changes: 2 additions & 4 deletions docs/notebooks/demo_UncertaintiesAndRandomization.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -280,9 +280,7 @@
"metadata": {},
"outputs": [],
"source": [
"observations['AstRATrue(deg)'] = observations['AstRA(deg)']\n",
"observations['AstDecTrue(deg)'] = observations['AstDec(deg)']\n",
"observations['AstRA(deg)'], observations['AstDec(deg)'] = randomizeAstrometry(observations, rng, sigName='AstrometricSigma(deg)', sigUnits='deg')"
"observations = randomizeAstrometry(observations, rng, sigName='AstrometricSigma(deg)', sigUnits='deg')"
]
},
{
Expand Down Expand Up @@ -326,7 +324,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.10.13"
}
},
"nbformat": 4,
Expand Down
1 change: 0 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,6 @@ dependencies = [

[project.scripts]
sorcha = "sorcha.sorcha:main"
makeConfigSorcha = "sorcha.utilities.makeConfigPP:main"
makeSLURMscript = "sorcha.utilities.makeSLURMscript:main"
createResultsSQLDatabase = "sorcha.utilities.createResultsSQLDatabase:main"
bootstrap_sorcha_data_files = "sorcha.utilities.retrieve_ephemeris_data_files:main"
Expand Down
97 changes: 0 additions & 97 deletions src/sorcha/modules/PPLinkingFilter.py
Original file line number Diff line number Diff line change
@@ -1,103 +1,6 @@
import pandas as pd
import numpy as np

# from difi.metrics import NightlyLinkagesMetric


def PPLinkingFilter_OLD(
    observations,
    detection_efficiency,
    min_observations,
    min_tracklets,
    tracklet_interval,
    minimum_separation,
    maximum_time,
    survey_name="lsst",
):
    """
    Mimic the effects of the SSP linking process by looking for valid
    tracklets within valid tracks and only outputting observations which
    would be thus successfully "linked" by SSP.

    Parameters
    -----------
    observations : pandas dataframe
        dataframe of observations; must contain the columns "ObjID",
        "FieldID", "FieldMJD_TAI", "AstRA(deg)" and "AstDec(deg)".
        NOTE: this function adds a "night" column to the dataframe in place.

    detection_efficiency : float
        the fractional percentage of successfully linked detections.

    min_observations : int
        the minimum number of observations in a night required to form a tracklet.

    min_tracklets : int
        the minimum number of tracklets required to form a valid track.

    tracklet_interval : int
        the time window (in days) in which the minimum number of tracklets
        must occur to form a valid track.

    minimum_separation : float
        the minimum separation inside a tracklet for it to be recognised as
        motion between images (in arcseconds).

    maximum_time : float
        maximum time separation (in days) between subsequent observations in a tracklet.

    survey_name : str, optional
        a string with the survey name, used for time-zone purposes.
        Currently only accepts "lsst", "LSST". Default = "lsst"

    Returns
    -----------
    linked_object_observations : pandas dataframe
        a pandas dataframe containing observations of linked objects only.

    Raises
    -----------
    ValueError
        if survey_name is not one of the supported surveys.
    """

    # we need to take into account timezones when we determine whether an observation
    # occurs on a specific night. this is implemented via survey_name
    # i.e. LSST is on Chile time.
    # we then calculate the boundary time between one night and the next in UTC MJD.

    if survey_name in ["lsst", "LSST"]:
        UTC_night_boundary = 17.0 / 24.0  # this corresponds to 5pm UTC, or 2pm Chile time.
    else:
        # previously an unsupported survey left UTC_night_boundary unbound and the
        # function later crashed with an opaque NameError: fail fast and clearly instead.
        raise ValueError(f"Unsupported survey_name: {survey_name!r}. Only 'lsst'/'LSST' is supported.")

    # calculate night number from FieldMJD_TAI, counting from the first night of the survey
    first_day = np.min(observations["FieldMJD_TAI"].values)
    observations["night"] = (
        np.floor(observations["FieldMJD_TAI"].values - np.floor(first_day) + UTC_night_boundary).astype(int)
        + 1
    )

    # create a small dataframe for difi to work on with only the relevant columns

    difi_dataframe = pd.DataFrame(
        {
            "object_id": observations["ObjID"].astype(object),
            "obs_id": observations["FieldID"],
            "time": observations["FieldMJD_TAI"],
            "night": observations["night"],
            "ra": observations["AstRA(deg)"],
            "dec": observations["AstDec(deg)"],
        }
    )

    # NOTE(review): NightlyLinkagesMetric comes from difi.metrics, whose import is
    # commented out at the top of this file — this function cannot run as-is.
    metric = NightlyLinkagesMetric(
        linkage_min_obs=min_observations,  # min observations per tracklet
        max_obs_separation=maximum_time,  # maximum time separation between subsequent observations in a tracklet [days]
        min_linkage_nights=min_tracklets,  # minimum number of nights on which a tracklet must be detected
        min_obs_angular_separation=minimum_separation,  # minimum angular separation between observations in a tracklet [arcseconds]
    )

    findable, _ = metric.run(
        difi_dataframe,
        detection_window=tracklet_interval,  # number of nights in a rolling detection window
        min_window_nights=min_tracklets,  # minimum size of a window as the rolling window is moved
        by_object=True,  # split observations by object instead of by window [set to True if you want ignore_after_discovery=True]
        discovery_probability=detection_efficiency,  # probability of discovery for any discovery chance
        num_jobs=1,  # number of parallel jobs
        ignore_after_discovery=True,  # ignore observations of an object after it has been discovered
    )

    # keep only the observations of objects that difi deemed findable ("linked")
    linked_object_observations = observations[observations["ObjID"].isin(findable["object_id"])]

    return linked_object_observations


def PPLinkingFilter(
observations,
Expand Down
45 changes: 22 additions & 23 deletions src/sorcha/modules/PPRandomizeMeasurements.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,15 +27,16 @@ def randomizeAstrometry(
module_rngs,
raName="AstRA(deg)",
decName="AstDec(deg)",
raRndName="AstRARnd(deg)",
decRndName="AstDecRnd(deg)",
raOrigName="AstRATrue(deg)",
decOrigName="AstDecTrue(deg)",
sigName="AstSig(deg)",
radecUnits="deg",
sigUnits="mas",
):
"""
Randomize astrometry with a normal distribution around the actual RADEC pointing.
The randomized values are added to the input pandas data frame.
The randomized values replace the original astrometry, with the original values
stored in separate columns.
Parameters
-----------
Expand All @@ -52,13 +53,13 @@ def randomizeAstrometry(
dec_Name : string, optional
"df" dataframe column name for the declination. Default = "AstDec(deg)"
raRndName : string, optional
"df" dataframe column name for where to store new randomized right
ascension. Default = "AstRARnd(deg)"
raOrigName : string, optional
"df" dataframe column name for where to store original right
ascension. Default = "AstRATrue(deg)"
decRndName : string, optional
"df" dataframe column name for where to store new randomized declination.
Default = "AstDecRnd(deg)"
decOrigName : string, optional
"df" dataframe column name for where to store original declination.
Default = "AstDecTrue(deg)"
sigName : string, optional
"df" dataframe column name for the standard deviation, uncertainty in the
Expand All @@ -74,25 +75,20 @@ def randomizeAstrometry(
Returns
---------
ra : numpy array of floats
Randomized right ascension values for each entry in "df"
dec : array of floats
Randomized dec values for each entry in "df"
df : pandas dataframe
original input dataframe with RA and Dec columns randomized around
astrometric sigma and original RA and Dec stored in separate columns
Notes
-----------
Covariances in RADEC are currently not supported. The routine calculates
a normal distribution on the unit sphere, so as to allow for a correct modeling of
the poles. Distributions close to the poles may look odd in RADEC.
"df" dataframe is also modified with raRndName and decRndName columns added
with the new randomized right ascension and declination values stored
"""

deg2rad = np.deg2rad
zeros = np.zeros
df[raOrigName] = df[raName]
df[decOrigName] = df[decName]

if radecUnits == "deg":
center = radec2icrf(df[raName], df[decName]).T
Expand All @@ -104,16 +100,16 @@ def randomizeAstrometry(
print("Bad units were provided for RA and Dec.")

if sigUnits == "deg":
sigmarad = deg2rad(df[sigName])
sigmarad = np.deg2rad(df[sigName])
elif sigUnits == "mas":
sigmarad = deg2rad(df[sigName] / 3600000.0)
sigmarad = np.deg2rad(df[sigName] / 3600000.0)
elif sigUnits == "rad":
sigmarad = df[sigName]
else:
print("Bad units were provided for astrometric uncertainty.")

n = len(df.index)
xyz = zeros([n, 3])
xyz = np.zeros([n, 3])

xyz = sampleNormalFOV(center, sigmarad, module_rngs, ndim=3)

Expand All @@ -123,7 +119,10 @@ def randomizeAstrometry(
else:
[ra, dec] = icrf2radec(xyz[:, 0], xyz[:, 1], xyz[:, 2], deg=False)

return ra, dec
df[raName] = ra
df[decName] = dec

return df


def sampleNormalFOV(center, sigma, module_rngs, ndim=3):
Expand Down
4 changes: 1 addition & 3 deletions src/sorcha/sorcha.py
Original file line number Diff line number Diff line change
Expand Up @@ -218,9 +218,7 @@ def runLSSTSimulation(args, configs):
)

verboselog("Randomising astrometry...")
observations["AstRATrue(deg)"] = observations["AstRA(deg)"]
observations["AstDecTrue(deg)"] = observations["AstDec(deg)"]
observations["AstRA(deg)"], observations["AstDec(deg)"] = PPRandomizeMeasurements.randomizeAstrometry(
observations = PPRandomizeMeasurements.randomizeAstrometry(
observations, args._rngs, sigName="AstrometricSigma(deg)", sigUnits="deg"
)

Expand Down
Loading

0 comments on commit 32308bb

Please sign in to comment.