
Commit

some more formatting
TeaganKing committed May 16, 2024
1 parent e4a6206 commit f6be20b
Showing 6 changed files with 26 additions and 26 deletions.
1 change: 0 additions & 1 deletion .pre-commit-config.yaml
@@ -6,7 +6,6 @@ repos:
- id: end-of-file-fixer
- id: check-yaml
- id: debug-statements
- - id: double-quote-string-fixer
- id: check-docstring-first
- id: check-json

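The Python edits in the rest of this commit are the same kind of mechanical formatting pre-commit applies on commit; an illustrative before/after mirroring the read.py hunk below (not taken verbatim from any single file):

    # before: no trailing comma on the last argument of a split call
    dsets = cat_subset.to_dataset_dict(
        xarray_open_kwargs={"chunks": {"time": -1}}, preprocess=preprocess
    )

    # after: trailing comma added (the commit also collapses ###/## comments to a single #)
    dsets = cat_subset.to_dataset_dict(
        xarray_open_kwargs={"chunks": {"time": -1}}, preprocess=preprocess,
    )
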
12 changes: 6 additions & 6 deletions cupid/read.py
@@ -21,21 +21,21 @@ def read_yaml(path_to_yaml):
def get_collection(path_to_catalog, **kwargs):
"""Get collection of datasets from intake catalog"""
cat = intake.open_esm_datastore(path_to_catalog)
- ### note that the json file points to the csv, so the path that the
- ### yaml file contains does not actually get used. this can cause issues
+ # note that the json file points to the csv, so the path that the
+ # yaml file contains does not actually get used. this can cause issues

cat_subset = cat.search(**kwargs)

if "variable" in kwargs.keys():
# pylint: disable=invalid-name
def preprocess(ds):
- ## the double brackets return a Dataset rather than a DataArray
- ## this is fragile and could cause issues, not sure what subsetting on time_bound does
+ # the double brackets return a Dataset rather than a DataArray
+ # this is fragile and could cause issues, not sure what subsetting on time_bound does
return ds[[kwargs["variable"], "time_bound"]]

- ## not sure what the chunking kwarg is doing here either
+ # not sure what the chunking kwarg is doing here either
dsets = cat_subset.to_dataset_dict(
- xarray_open_kwargs={"chunks": {"time": -1}}, preprocess=preprocess
+ xarray_open_kwargs={"chunks": {"time": -1}}, preprocess=preprocess,
)

else:
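For reference, the code being reformatted above follows the standard intake-esm flow; a minimal sketch of that flow with a made-up catalog path and variable name:

    import intake

    # open the catalog: the JSON header points at a CSV holding the actual file listing,
    # which is why a path stored elsewhere (e.g. in a YAML config) does not get used
    cat = intake.open_esm_datastore("/path/to/catalog.json")  # placeholder path
    cat_subset = cat.search(variable="TS")  # hypothetical variable name


    def preprocess(ds):
        # double brackets keep a Dataset rather than a DataArray
        return ds[["TS", "time_bound"]]


    dsets = cat_subset.to_dataset_dict(
        xarray_open_kwargs={"chunks": {"time": -1}},  # one dask chunk along time
        preprocess=preprocess,
    )
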
3 changes: 2 additions & 1 deletion cupid/run.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-
"""
Main script for running all notebooks and scripts specified in the configuration file.
@@ -24,9 +23,11 @@

import os
import warnings
+
import click
import intake
import ploomber
+
import cupid.timeseries
import cupid.util

19 changes: 10 additions & 9 deletions cupid/timeseries.py
@@ -14,6 +14,7 @@

import xarray as xr

+
def call_ncrcat(cmd):
"""This is an internal function to `create_time_series`
It just wraps the subprocess.call() function, so it can be
@@ -124,7 +125,7 @@ def create_time_series(
for year in range(start_year, end_year + 1):
# Add files to main file list:
for fname in starting_location.glob(
f"*{hist_str}.*{str(year).zfill(4)}*.nc"
f"*{hist_str}.*{str(year).zfill(4)}*.nc",
):
files_list.append(fname)
# End for
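
For a given year, the glob pattern above expands as follows (values are placeholders):

    hist_str = "cam.h0"  # hypothetical history stream name
    year = 7
    pattern = f"*{hist_str}.*{str(year).zfill(4)}*.nc"
    # pattern == "*cam.h0.*0007*.nc", matching files such as case.cam.h0.0007-01.nc
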
@@ -135,7 +136,7 @@

# Open an xarray dataset from the first model history file:
hist_file_ds = xr.open_dataset(
- hist_files[0], decode_cf=False, decode_times=False
+ hist_files[0], decode_cf=False, decode_times=False,
)

# Get a list of data variables in the 1st hist file:
@@ -227,7 +228,7 @@ def create_time_series(
if var not in hist_file_var_list:
if component == "ocn":
print(
"ocean vars seem to not be present in all files and thus cause errors"
"ocean vars seem to not be present in all files and thus cause errors",
)
continue
if (
@@ -325,7 +326,7 @@ def create_time_series(
if vars_to_derive:
if component == "atm":
derive_cam_variables(
- vars_to_derive=vars_to_derive, ts_dir=ts_dir[case_idx]
+ vars_to_derive=vars_to_derive, ts_dir=ts_dir[case_idx],
)

if serial:
@@ -357,7 +358,7 @@ def derive_cam_variables(vars_to_derive=None, ts_dir=None, overwrite=None):
# PRECT can be found by simply adding PRECL and PRECC
# grab file names for the PRECL and PRECC files from the case ts directory
if glob.glob(os.path.join(ts_dir, "*PRECC*")) and glob.glob(
os.path.join(ts_dir, "*PRECL*")
os.path.join(ts_dir, "*PRECL*"),
):
constit_files = sorted(glob.glob(os.path.join(ts_dir, "*PREC*")))
else:
@@ -374,7 +375,7 @@ def derive_cam_variables(vars_to_derive=None, ts_dir=None, overwrite=None):
else:
print(
f"[{__name__}] Warning: PRECT file was found and overwrite is False"
+ "Will use existing file."
+ "Will use existing file.",
)
continue
# append PRECC to the file containing PRECL
@@ -385,7 +386,7 @@ def derive_cam_variables(vars_to_derive=None, ts_dir=None, overwrite=None):
# RESTOM = FSNT-FLNT
# Have to be more precise than with PRECT because FSNTOA, FSTNC, etc are valid variables
if glob.glob(os.path.join(ts_dir, "*.FSNT.*")) and glob.glob(
os.path.join(ts_dir, "*.FLNT.*")
os.path.join(ts_dir, "*.FLNT.*"),
):
input_files = [
sorted(glob.glob(os.path.join(ts_dir, f"*.{v}.*")))
@@ -408,12 +409,12 @@ def derive_cam_variables(vars_to_derive=None, ts_dir=None, overwrite=None):
else:
print(
f"[{__name__}] Warning: RESTOM file was found and overwrite is False."
+ "Will use existing file."
+ "Will use existing file.",
)
continue
# append FSNT to the file containing FLNT
os.system(f"ncks -A -v FLNT {constit_files[0]} {constit_files[1]}")
# create new file with the difference of FLNT and FSNT
os.system(
f"ncap2 -s 'RESTOM=(FSNT-FLNT)' {constit_files[1]} {derived_file}"
f"ncap2 -s 'RESTOM=(FSNT-FLNT)' {constit_files[1]} {derived_file}",
)
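
To make the two NCO calls above concrete, here is the same RESTOM pattern with placeholder file names and the flags spelled out:

    import os

    constit_files = ["case.FLNT.nc", "case.FSNT.nc"]  # placeholders; sorted() puts FLNT before FSNT
    derived_file = "case.RESTOM.nc"  # placeholder output name

    # ncks -A appends variable FLNT from the first file into the second,
    # so the FSNT file ends up holding both constituents
    os.system(f"ncks -A -v FLNT {constit_files[0]} {constit_files[1]}")
    # ncap2 -s evaluates the expression and writes RESTOM to a new file
    os.system(f"ncap2 -s 'RESTOM=(FSNT-FLNT)' {constit_files[1]} {derived_file}")
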
15 changes: 7 additions & 8 deletions cupid/util.py
@@ -23,9 +23,8 @@
import papermill as pm
import ploomber
import yaml
- from papermill.engines import NBClientEngine
from jinja2 import Template
-
+ from papermill.engines import NBClientEngine

class MarkdownJinjaEngine(NBClientEngine):
"""Class for using the Jinja Engine to run notebooks"""
@@ -133,7 +132,7 @@ def setup_book(config_path):


def create_ploomber_nb_task(
- nb, info, cat_path, nb_path_root, output_dir, global_params, dag, dependency=None
+ nb, info, cat_path, nb_path_root, output_dir, global_params, dag, dependency=None,
):
"""
Creates a ploomber task for running a notebook, including necessary parameters.
@@ -154,7 +153,7 @@

parameter_groups = info["parameter_groups"]

- ### passing in subset kwargs if they're provided
+ # passing in subset kwargs if they're provided
if "subset" in info:
subset_kwargs = info["subset"]
else:
@@ -170,7 +169,7 @@

output_path = f"{output_dir}/{output_name}"

- ### all of these things should be optional
+ # all of these things should be optional
parms_in = dict(**default_params)
parms_in.update(**global_params)
parms_in.update(dict(**parms))
@@ -207,7 +206,7 @@


def create_ploomber_script_task(
- script, info, cat_path, nb_path_root, global_params, dag, dependency=None
+ script, info, cat_path, nb_path_root, global_params, dag, dependency=None,
):
"""
Creates a Ploomber task for running a script, including necessary parameters.
@@ -230,7 +229,7 @@

parameter_groups = info["parameter_groups"]

- ### passing in subset kwargs if they're provided
+ # passing in subset kwargs if they're provided
if "subset" in info:
subset_kwargs = info["subset"]
else:
@@ -246,7 +245,7 @@

# output_path = f"{output_dir}/{output_name}"

- ### all of these things should be optional
+ # all of these things should be optional
parms_in = dict(**default_params)
parms_in.update(**global_params)
parms_in.update(dict(**parms))
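The "all of these things should be optional" comment refers to the parameter merge just below it; a sketch with invented values showing the precedence:

    default_params = {"obs_data_dir": "/default/obs"}  # hypothetical defaults
    global_params = {"CESM_output_dir": "/scratch/output", "obs_data_dir": "/shared/obs"}  # hypothetical
    parms = {"case_name": "my_case"}  # hypothetical per-notebook parameters

    parms_in = dict(**default_params)
    parms_in.update(**global_params)
    parms_in.update(dict(**parms))
    # later updates win: per-notebook parms override global_params, which override the defaults
    # parms_in == {"obs_data_dir": "/shared/obs", "CESM_output_dir": "/scratch/output", "case_name": "my_case"}
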
2 changes: 1 addition & 1 deletion docs/conf.py
@@ -40,7 +40,7 @@
project = "CUPiD"

current_year = datetime.datetime.now().year
copyright = "{}, University Corporation for Atmospheric Research".format(current_year)
copyright = f"{current_year}, University Corporation for Atmospheric Research"

author = "NSF NCAR"

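Both forms of the copyright line build the same string; for example:

    current_year = 2024
    old = "{}, University Corporation for Atmospheric Research".format(current_year)
    new = f"{current_year}, University Corporation for Atmospheric Research"
    assert old == new == "2024, University Corporation for Atmospheric Research"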
