Backwards compatibility with deprecation warning (#46)
* introduce data product instead of lightcurves and metadata separately

* update env

* fix type timewise_lightcurve

* use new data_product in luminosity and flux_density stuff

* bugfix: right use of data_product

* only wait 5 seconds after submitting to TAP server

* make indices integers not string

* use integer indexing when building data_product

* remove making metadata

* use gzip to save data as compressed bytes (see the sketch after this list)

* remove todos

* make data_product file before submitting to cluster, assert that all objects have downloaded WISE data

* fix ids in submit_cluster

* check if old data products are None

* use string indices because JSON object keys are strings

* fix data product setup

* adjust logging message

* load from bigdata dir

* use string indices when making data_product

* bring back save and load lightcurves

* bring back load and save lightcurves
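
The recurring theme in these commits is storing the combined data product as gzip-compressed JSON keyed by string indices. A minimal, self-contained sketch of that pattern, assuming illustrative function and key names (`save_compressed`, `W1_mean_mag`) that are not part of the timewise API:

```python
import gzip
import json

def save_compressed(data_product, fn):
    # serialise to JSON, then write the UTF-8 encoded bytes through gzip
    with gzip.open(fn, "w") as f:
        f.write(json.dumps(data_product).encode("utf-8"))

def load_compressed(fn):
    # gzip.open transparently decompresses on read
    with gzip.open(fn, "r") as f:
        return json.loads(f.read().decode("utf-8"))

# JSON object keys are always strings, so integer object indices have to be
# written and looked up as strings (the "string indices" fixes above)
product = {"0": {"W1_mean_mag": [16.2, 16.3]}, "1": {"W1_mean_mag": [17.1]}}
save_compressed(product, "data_product.json.gz")
assert load_compressed("data_product.json.gz") == product
```
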
JannisNe authored Nov 6, 2022
1 parent cc3f43e commit f8a4634
Showing 3 changed files with 67 additions and 1 deletion.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "timewise"
-version = "0.3.0.a"
+version = "0.3.0a1"
 description = "A small package to download infrared data from the WISE satellite"
 authors = ["Jannis Necker <[email protected]>"]
 license = "MIT"
23 changes: 23 additions & 0 deletions timewise/wise_bigdata_desy_cluster.py
@@ -108,6 +108,29 @@ def _save_data_product(self, data_product, service, chunk_number=None, jobID=None):
         with gzip.open(fn, 'w') as f:
             f.write(json.dumps(data_product).encode('utf-8'))
 
+    def _load_lightcurves(
+            self,
+            service,
+            chunk_number=None,
+            jobID=None,
+            return_filename=False,
+            load_from_bigdata_dir=False
+    ):
+        fn = self._lightcurve_filename(service, chunk_number, jobID)
+
+        if load_from_bigdata_dir:
+            fn = fn.replace(data_dir, bigdata_dir)  # re-root the path from data_dir into bigdata_dir
+
+        logger.debug(f"loading {fn}")
+        try:
+            with open(fn, "r") as f:
+                lcs = json.load(f)
+            if return_filename:
+                return lcs, fn
+            return lcs
+        except FileNotFoundError:
+            logger.warning(f"No file {fn}")
+
     # ----------------------------------------------------- #
     #       END using gzip to compress the data when saving #
     # ---------------------------------------------------------------------------------- #
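
The new `load_from_bigdata_dir` flag simply re-roots the cached filename by swapping the `data_dir` prefix for `bigdata_dir`. A small illustration of that path logic, with placeholder directories and an assumed chunk-file name standing in for timewise's module-level settings:

```python
import os

# placeholder stand-ins for timewise's module-level directory settings
data_dir = "/path/to/data"
bigdata_dir = "/path/to/bigdata"

def resolve_lightcurve_path(fn, load_from_bigdata_dir=False):
    # the flag re-roots the cached filename from data_dir into bigdata_dir;
    # the relative layout underneath stays the same
    return fn.replace(data_dir, bigdata_dir) if load_from_bigdata_dir else fn

fn = os.path.join(data_dir, "lightcurves", "binned_lightcurves_tap_c0.json")
print(resolve_lightcurve_path(fn, load_from_bigdata_dir=True))
# /path/to/bigdata/lightcurves/binned_lightcurves_tap_c0.json
```
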
43 changes: 43 additions & 0 deletions timewise/wise_data_base.py
@@ -614,6 +614,49 @@ def _save_data_product(self, data_product, service, chunk_number=None, jobID=None):
         with open(fn, "w") as f:
             json.dump(data_product, f, indent=4)
 
+    def _lightcurve_filename(self, service, chunk_number=None, jobID=None):
+
+        warnings.warn("Separate `binned_lightcurves` and `metadata` will be deprecated in v0.3.0!", DeprecationWarning)
+
+        if (chunk_number is None) and (jobID is None):
+            return os.path.join(self.lightcurve_dir, f"binned_lightcurves_{service}.json")
+        else:
+            fn = f"binned_lightcurves_{service}{self._split_chunk_key}{chunk_number}"
+            if (chunk_number is not None) and (jobID is None):
+                return os.path.join(self._cache_photometry_dir, fn + ".json")
+            else:
+                return os.path.join(self._cache_photometry_dir, fn + f"_{jobID}.json")
+
+    def _load_lightcurves(self, service, chunk_number=None, jobID=None, return_filename=False):
+        fn = self._lightcurve_filename(service, chunk_number, jobID)
+        logger.debug(f"loading {fn}")
+        try:
+            with open(fn, "r") as f:
+                lcs = json.load(f)
+            if return_filename:
+                return lcs, fn
+            return lcs
+        except FileNotFoundError:
+            logger.warning(f"No file {fn}")
+
+    def _save_lightcurves(self, lcs, service, chunk_number=None, jobID=None, overwrite=False):
+        fn = self._lightcurve_filename(service, chunk_number, jobID)
+        logger.debug(f"saving {len(lcs)} new lightcurves to {fn}")
+
+        if fn == self._lightcurve_filename(service):
+            self._cached_final_products['lightcurves'][service] = lcs
+
+        if not overwrite:
+            old_lcs = self._load_lightcurves(service=service, chunk_number=chunk_number, jobID=jobID)
+            if old_lcs is not None:  # _load_lightcurves logs a warning and returns None when no file exists
+                logger.debug(f"Found {len(old_lcs)} existing lightcurves. Combining.")
+                lcs = {**lcs, **old_lcs}  # dict.update() returns None, so build the merged dict explicitly
+            else:
+                logger.info("No old lightcurves found. Making new binned lightcurves.")
+
+        with open(fn, "w") as f:
+            json.dump(lcs, f)
+
     def load_binned_lcs(self, service):
         """Loads the binned lightcurves. For any int `ID` the lightcurves can conveniently be read into a pandas.DataFrame
         via
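
Because Python ignores `DeprecationWarning` raised from library code by default, users migrating to the new data product format may want to surface the notice explicitly. A minimal sketch, with a stub standing in for the real `_lightcurve_filename`:

```python
import warnings

def lightcurve_filename_stub():
    # mirrors the warning added in this commit
    warnings.warn(
        "Separate `binned_lightcurves` and `metadata` will be deprecated in v0.3.0!",
        DeprecationWarning,
    )
    return "binned_lightcurves_tap.json"

# opt in to seeing the deprecation notice while migrating
with warnings.catch_warnings():
    warnings.simplefilter("always", DeprecationWarning)
    lightcurve_filename_stub()
```
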
