diff --git a/.github/ISSUE_TEMPLATE/enhancement_request.md b/.github/ISSUE_TEMPLATE/enhancement_request.md index e1134781cd..78cd208f15 100644 --- a/.github/ISSUE_TEMPLATE/enhancement_request.md +++ b/.github/ISSUE_TEMPLATE/enhancement_request.md @@ -39,8 +39,8 @@ Consider breaking the enhancement down into sub-issues. - [ ] Select **requestor(s)** ### Milestone and Projects ### -- [ ] Select **Milestone** as the next official version or **Backlog of Development Ideas** -- [ ] For the next official version, select the **MET-X.Y.Z Development** project +- [ ] Select **Milestone** as a **MET-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** +- [ ] For a **MET-X.Y.Z** version, select the **MET-X.Y.Z Development** project ## Define Related Issue(s) ## Consider the impact to the other METplus components. diff --git a/.github/ISSUE_TEMPLATE/new_feature_request.md b/.github/ISSUE_TEMPLATE/new_feature_request.md index e4adb302c7..ea95d70c13 100644 --- a/.github/ISSUE_TEMPLATE/new_feature_request.md +++ b/.github/ISSUE_TEMPLATE/new_feature_request.md @@ -43,8 +43,8 @@ Consider breaking the new feature down into sub-issues. - [ ] Select **requestor(s)** ### Milestone and Projects ### -- [ ] Select **Milestone** as the next official version or **Backlog of Development Ideas** -- [ ] For the next official version, select the **MET-X.Y.Z Development** project +- [ ] Select **Milestone** as a **MET-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** +- [ ] For a **MET-X.Y.Z** version, select the **MET-X.Y.Z Development** project ## Define Related Issue(s) ## Consider the impact to the other METplus components. diff --git a/.github/ISSUE_TEMPLATE/sub-issue.md b/.github/ISSUE_TEMPLATE/sub-issue.md index e02109e963..45ee00432d 100644 --- a/.github/ISSUE_TEMPLATE/sub-issue.md +++ b/.github/ISSUE_TEMPLATE/sub-issue.md @@ -29,5 +29,5 @@ This is a sub-issue of #*List the parent issue number here*. 
- [ ] Select **requestor(s)** ### Milestone and Projects ### -- [ ] Select **Milestone** as the next official version or **Backlog of Development Ideas** -- [ ] For the next official version, select the **MET-X.Y.Z Development** project +- [ ] Select **Milestone** as a **MET-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** +- [ ] For a **MET-X.Y.Z** version, select the **MET-X.Y.Z Development** project diff --git a/.github/ISSUE_TEMPLATE/task.md b/.github/ISSUE_TEMPLATE/task.md index 05ad99baf8..33685adf25 100644 --- a/.github/ISSUE_TEMPLATE/task.md +++ b/.github/ISSUE_TEMPLATE/task.md @@ -39,8 +39,8 @@ Consider breaking the task down into sub-issues. - [ ] Select **requestor(s)** ### Milestone and Projects ### -- [ ] Select **Milestone** as the next official version or **Backlog of Development Ideas** -- [ ] For the next official version, select the **MET-X.Y.Z Development** project +- [ ] Select **Milestone** as a **MET-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** +- [ ] For a **MET-X.Y.Z** version, select the **MET-X.Y.Z Development** project ## Define Related Issue(s) ## Consider the impact to the other METplus components. diff --git a/.github/jobs/run_unit_tests.sh b/.github/jobs/run_unit_tests.sh index e866b2cc61..b85bac84fe 100755 --- a/.github/jobs/run_unit_tests.sh +++ b/.github/jobs/run_unit_tests.sh @@ -15,7 +15,7 @@ source ${MET_REPO_DIR}/.github/jobs/test_env_vars.sh echo "Running MET unit tests..." for testname in $TESTS_TO_RUN; do CMD_LOGFILE=/met/logs/unit_${testname}.log - time_command ${MET_TEST_BASE}/perl/unit.pl ${MET_TEST_BASE}/xml/unit_${testname}.xml + time_command ${MET_TEST_BASE}/python/unit.py ${MET_TEST_BASE}/xml/unit_${testname}.xml if [ $? 
!= 0 ]; then echo "ERROR: Unit test ${testname} failed" cat /met/logs/unit_${testname}.log diff --git a/.github/jobs/set_job_controls.sh b/.github/jobs/set_job_controls.sh index 9c39a50716..aa72ad4162 100755 --- a/.github/jobs/set_job_controls.sh +++ b/.github/jobs/set_job_controls.sh @@ -6,7 +6,7 @@ run_unit_tests=false run_diff=false run_update_truth=false met_base_repo=met-base -met_base_tag=v3.2 +met_base_tag=v3.3 input_data_version=develop truth_data_version=develop @@ -57,6 +57,12 @@ elif [ "${GITHUB_EVENT_NAME}" == "push" ]; then input_data_version=${branch_name:6} fi + # check for main_vX.Y in the branch name + elif [[ "${branch_name}" =~ .*(main_v)([0-9]+\.[0-9]+).* ]]; then + + truth_data_version=${BASH_REMATCH[1]}${BASH_REMATCH[2]} + input_data_version=${BASH_REMATCH[2]} + fi # check commit messages for skip or force keywords @@ -87,14 +93,27 @@ elif [ "${GITHUB_EVENT_NAME}" == "push" ]; then elif [ "${GITHUB_EVENT_NAME}" == "workflow_dispatch" ]; then + branch_name=`cut -d "/" -f3 <<< "${GITHUB_REF}"` + + # check for main_vX.Y in the branch name + if [[ "${branch_name}" =~ .*(main_v)([0-9]+\.[0-9]+).* ]]; then + + truth_data_version=${BASH_REMATCH[1]}${BASH_REMATCH[2]} + input_data_version=${BASH_REMATCH[2]} + + fi + if [ "${force_tests}" == "true" ]; then + run_diff=true + fi fi # if updating truth or running diff, run unit tests -if [ "$run_update_truth" == "true" ] || [ "$run_diff" == "true" ]; then +if [ "$run_update_truth" == "true" ] || + [ "$run_diff" == "true" ]; then run_unit_tests=true diff --git a/.github/workflows/build_docker_and_trigger_metplus.yml b/.github/workflows/build_docker_and_trigger_metplus.yml index 13606d5ed3..7d1ab738d8 100644 --- a/.github/workflows/build_docker_and_trigger_metplus.yml +++ b/.github/workflows/build_docker_and_trigger_metplus.yml @@ -5,7 +5,7 @@ on: branches: - develop paths-ignore: - - 'met/docs/**' + - 'docs/**' workflow_dispatch: @@ -29,7 +29,7 @@ jobs: env: SOURCE_BRANCH: ${{ 
steps.get_branch_name.outputs.branch_name }}-lite MET_BASE_REPO: met-base - MET_BASE_TAG: v3.2 + MET_BASE_TAG: v3.3 - name: Push Docker Image run: .github/jobs/push_docker_image.sh diff --git a/.github/workflows/compilation_options.yml b/.github/workflows/compilation_options.yml index 7a8dc73cf0..cf6c5fc97a 100644 --- a/.github/workflows/compilation_options.yml +++ b/.github/workflows/compilation_options.yml @@ -63,6 +63,10 @@ jobs: config: '--enable-mode_graphics' - jobid: 'job8' config: '--enable-modis' + - jobid: 'job9' + config: '--enable-all MET_CXX_STANDARD=11' + - jobid: 'job10' + config: '--enable-all MET_CXX_STANDARD=14' fail-fast: false steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/release-checksum.yml b/.github/workflows/release-checksum.yml index 683f258a6c..55d8b41cb0 100644 --- a/.github/workflows/release-checksum.yml +++ b/.github/workflows/release-checksum.yml @@ -9,6 +9,6 @@ jobs: add-checksum: runs-on: ubuntu-latest steps: - - uses: dtcenter/metplus-action-release-checksum@v1 + - uses: dtcenter/metplus-action-release-checksum@v2 with: token: ${{ secrets.METPLUS_BOT_TOKEN }} diff --git a/.github/workflows/sonarqube.yml b/.github/workflows/sonarqube.yml index 6a6627fb69..b476a488b6 100644 --- a/.github/workflows/sonarqube.yml +++ b/.github/workflows/sonarqube.yml @@ -61,7 +61,7 @@ jobs: run: .github/jobs/build_sonarqube_image.sh env: MET_BASE_REPO: met-base - MET_BASE_TAG: v3.2 + MET_BASE_TAG: v3.3 SOURCE_BRANCH: ${{ steps.get_branch_name.outputs.branch_name }} WD_REFERENCE_BRANCH: ${{ github.event.inputs.reference_branch }} SONAR_SCANNER_VERSION: 5.0.1.3006 diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 1fccc357fa..4e6e3872b7 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -176,7 +176,7 @@ jobs: - jobid: 'job1' tests: 'ascii2nc_indy pb2nc_indy tc_dland tc_pairs tc_stat plot_tc tc_rmw rmw_analysis tc_diag tc_gen' - jobid: 'job2' - tests: 'met_test_scripts 
mode_multivar mode_graphics mtd regrid airnow gsi_tools netcdf modis series_analysis wwmca_regrid gen_vx_mask grid_weight interp_shape grid_diag grib_tables lidar2nc shift_data_plane trmm2nc aeronet wwmca_plot ioda2nc gaussian' + tests: 'met_test_scripts mode_multivar mode_graphics mtd regrid airnow gsi_tools netcdf modis series_analysis wwmca_regrid gen_vx_mask interp_shape grid_diag grib_tables lidar2nc shift_data_plane trmm2nc aeronet wwmca_plot ioda2nc gaussian' fail-fast: false steps: - uses: actions/checkout@v4 @@ -310,6 +310,8 @@ jobs: tests: 'ensemble_stat stat_analysis_es' - jobid: 'job5' tests: 'ugrid' + - jobid: 'job6' + tests: 'grid_weight point_weight' fail-fast: false steps: - uses: actions/checkout@v4 @@ -360,6 +362,8 @@ jobs: - jobid: 'job3' tests: 'climatology_2.5deg' - jobid: 'job4' + tests: 'climatology_mixed' + - jobid: 'job5' tests: 'python point2grid plot_data_plane mode mode_analysis perc_thresh hira plot_point_obs quality_filter obs_summary duplicate_flag' fail-fast: false steps: diff --git a/.github/workflows/update_truth.yml b/.github/workflows/update_truth.yml index adb67ecacc..a5ce785489 100644 --- a/.github/workflows/update_truth.yml +++ b/.github/workflows/update_truth.yml @@ -12,7 +12,7 @@ on: jobs: update_truth: - name: "Update or create truth reference branch" + name: "Update reference branch truth data" runs-on: ubuntu-latest steps: - name: Check if branch is develop or main_vX.Y @@ -31,7 +31,8 @@ jobs: with: fetch-depth: 0 token: ${{ secrets.METPLUS_BOT_TOKEN }} - - name: Resolve conflicts between branch and branch-ref + - name: Resolve conflicts with an update branch + id: resolve_conflicts run: | branch_name=${{ env.branch_name }} cd ${GITHUB_WORKSPACE} @@ -39,23 +40,34 @@ jobs: echo ERROR: ${branch_name}-ref does not exist exit 1 fi - + echo ${branch_name}-ref does exist -- update it git config --global user.name "metplus-bot" git config --global user.email "97135045+metplus-bot@users.noreply.github.com" + + # checkout branch 
(develop or main_vX.Y) echo git checkout ${branch_name} git checkout ${branch_name} - echo git checkout -b update_truth_for_${branch_name} - git checkout -b update_truth_for_${branch_name} + + # create unique branch name to update *-ref branch + update_branch=update_${branch_name}_$(uuidgen | cut -d "-" -f1) + echo "update_branch=${update_branch}" >> $GITHUB_OUTPUT + echo git checkout -b ${update_branch} + git checkout -b ${update_branch} + + # merge -ref branch into the update branch (favoring update branch changes) echo git merge -s ours origin/${branch_name}-ref git merge -s ours origin/${branch_name}-ref - echo git push origin update_truth_for_${branch_name} - git push origin update_truth_for_${branch_name} + + # push update branch to origin + echo git push origin ${update_branch} + git push origin ${update_branch} - name: Create Pull Request - run: gh pr create --base $BASE --body "$BODY" --title "$TITLE" + run: gh pr create --head $HEAD --base $BASE --body "$BODY" --title "$TITLE" env: GH_TOKEN: ${{ github.token }} + HEAD: ${{ steps.resolve_conflicts.outputs.update_branch }} BASE: ${{ env.branch_name }}-ref - BODY: ${{ github.event.inputs.change_summary }}
Created by @${{ github.actor}} + BODY: ${{ github.event.inputs.change_summary }}
Created by @${{ github.actor }} TITLE: Update ${{ env.branch_name }}-ref after ${{ github.event.inputs.pull_requests }} diff --git a/.gitignore b/.gitignore index d26e2e4a32..bbd21255b1 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,5 @@ make.log make_install.log .idea cmake-build-debug + +__pycache__ \ No newline at end of file diff --git a/Makefile.in b/Makefile.in index 582d2a65c8..0ae957d7b3 100644 --- a/Makefile.in +++ b/Makefile.in @@ -256,6 +256,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/configure b/configure index 40672ec913..758f3502f6 100755 --- a/configure +++ b/configure @@ -797,6 +797,7 @@ MET_ATLAS MET_PROJLIB MET_PROJINC MET_PROJ +MET_CXX_STANDARD OPENMP_CFLAGS am__fastdepCC_FALSE am__fastdepCC_TRUE @@ -933,6 +934,7 @@ CFLAGS LDFLAGS LIBS CPPFLAGS +MET_CXX_STANDARD MET_PROJ MET_PROJINC MET_PROJLIB @@ -1667,6 +1669,9 @@ Some influential environment variables: LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory + MET_CXX_STANDARD + Specify the version of the supported C++ standard. Values may be + 11, 14, or 17. Default value is 17. MET_PROJ Where proj lib and include subdirectories are located. If they are installed in /usr/local, you don't have to specify them. MET_PROJINC Where proj include files are located. Use if the libraries and @@ -5317,6 +5322,26 @@ printf "%s\n" "$ac_cv_prog_c_openmp" >&6; } CPPFLAGS="${CPPFLAGS} ${OPENMP_CFLAGS}" LDFLAGS="${LDFLAGS} ${OPENMP_CFLAGS}" +# +# Look for a specified C++ Standard +# + +# Configure the variable. The help text will appear if the user uses "configure --help". 
+ + + +# Set a default value + +if (test -z "$MET_CXX_STANDARD"); then + MET_CXX_STANDARD=17 + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: c++17 will be used as the C++ standard" >&5 +printf "%s\n" "$as_me: c++17 will be used as the C++ standard" >&6;} +elif (test -n "$MET_CXX_STANDARD"); then + MET_CXX_STANDARD=${MET_CXX_STANDARD} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: MET_CXX_STANDARD will be set" >&5 +printf "%s\n" "$as_me: MET_CXX_STANDARD will be set" >&6;} +fi + # # Look for the Proj library. # @@ -6988,7 +7013,7 @@ fi CPPFLAGS=$CPPFLAGS' -DMET_BASE="\"$(pkgdatadir)\""' # Add -std=c++11 to CXXFLAGS -CXXFLAGS=$CXXFLAGS' -std=c++11' +CXXFLAGS=$CXXFLAGS' -std=c++'$MET_CXX_STANDARD # Define other variables for the makefiles diff --git a/configure.ac b/configure.ac index 2f8159d58c..03436847af 100644 --- a/configure.ac +++ b/configure.ac @@ -15,6 +15,24 @@ AC_OPENMP() CPPFLAGS="${CPPFLAGS} ${OPENMP_CFLAGS}" LDFLAGS="${LDFLAGS} ${OPENMP_CFLAGS}" +# +# Look for a specified C++ Standard +# + +# Configure the variable. The help text will appear if the user uses "configure --help". + +AC_ARG_VAR([MET_CXX_STANDARD], [Specify the version of the supported C++ standard. Values may be 11, 14, or 17. Default value is 17.]) + +# Set a default value + +if (test -z "$MET_CXX_STANDARD"); then + MET_CXX_STANDARD=17 + AC_MSG_NOTICE([c++17 will be used as the C++ standard]) +elif (test -n "$MET_CXX_STANDARD"); then + MET_CXX_STANDARD=${MET_CXX_STANDARD} + AC_MSG_NOTICE([MET_CXX_STANDARD will be set]) +fi + # # Look for the Proj library. 
# @@ -1160,7 +1178,7 @@ AM_CONDITIONAL([ENABLE_DEVELOPMENT], [test -n "$MET_DEVELOPMENT"]) CPPFLAGS=$CPPFLAGS' -DMET_BASE="\"$(pkgdatadir)\""' # Add -std=c++11 to CXXFLAGS -CXXFLAGS=$CXXFLAGS' -std=c++11' +CXXFLAGS=$CXXFLAGS' -std=c++'$MET_CXX_STANDARD # Define other variables for the makefiles diff --git a/data/Makefile.in b/data/Makefile.in index 262e9e43cc..43aab3f3d9 100644 --- a/data/Makefile.in +++ b/data/Makefile.in @@ -260,6 +260,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/climo/Makefile.in b/data/climo/Makefile.in index c6415ac06c..0030bfbd90 100644 --- a/data/climo/Makefile.in +++ b/data/climo/Makefile.in @@ -230,6 +230,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/climo/seeps/Makefile.in b/data/climo/seeps/Makefile.in index 761a867e6b..f40096bb49 100644 --- a/data/climo/seeps/Makefile.in +++ b/data/climo/seeps/Makefile.in @@ -202,6 +202,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/colortables/Makefile.in b/data/colortables/Makefile.in index 82b6b46710..05fcc3df5a 100644 --- a/data/colortables/Makefile.in +++ b/data/colortables/Makefile.in @@ -260,6 +260,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git 
a/data/colortables/NCL_colortables/Makefile.in b/data/colortables/NCL_colortables/Makefile.in index df79826d8d..1d174cffda 100644 --- a/data/colortables/NCL_colortables/Makefile.in +++ b/data/colortables/NCL_colortables/Makefile.in @@ -202,6 +202,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/config/ConfigConstants b/data/config/ConfigConstants index 23210b6c34..15a2a8db4c 100644 --- a/data/config/ConfigConstants +++ b/data/config/ConfigConstants @@ -137,6 +137,10 @@ NONE = 1; COS_LAT = 2; AREA = 3; +// Point weight flag settings +NONE = 1; +SID = 2; + // Duplicate flag settings NONE = 1; UNIQUE = 2; diff --git a/data/config/EnsembleStatConfig_default b/data/config/EnsembleStatConfig_default index 57f1984807..c4b2463f7d 100644 --- a/data/config/EnsembleStatConfig_default +++ b/data/config/EnsembleStatConfig_default @@ -120,6 +120,8 @@ message_type_group_map = [ { key = "ONLYSF"; val = "ADPSFC,SFCSHP"; } ]; +obtype_as_group_val_flag = FALSE; + // // Ensemble bin sizes // May be set separately in each "obs.field" entry @@ -130,7 +132,8 @@ ens_phist_bin_size = 0.05; //////////////////////////////////////////////////////////////////////////////// // -// Climatology data +// Climatology mean data +// May be set separately in the "fcst" and "obs" dictionaries // climo_mean = { @@ -149,12 +152,17 @@ climo_mean = { hour_interval = 6; } +// +// Climatology standard deviation data +// May be set separately in the "fcst" and "obs" dictionaries +// climo_stdev = climo_mean; climo_stdev = { file_name = []; } // +// Climatology distribution settings // May be set separately in each "obs.field" entry // climo_cdf = { @@ -260,8 +268,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = ""; 
-version = "V12.0.0"; +grid_weight_flag = NONE; +point_weight_flag = NONE; + +output_prefix = ""; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/data/config/GenEnsProdConfig_default b/data/config/GenEnsProdConfig_default index e2cb994bba..16a36f9833 100644 --- a/data/config/GenEnsProdConfig_default +++ b/data/config/GenEnsProdConfig_default @@ -13,7 +13,6 @@ model = "FCST"; // // Output description to be written -// May be set separately in each "obs.field" entry // desc = "NA"; @@ -95,7 +94,7 @@ nmep_smooth = { //////////////////////////////////////////////////////////////////////////////// // -// Climatology data +// Climatology mean data // climo_mean = { @@ -114,6 +113,9 @@ climo_mean = { hour_interval = 6; } +// +// Climatology standard deviation data +// climo_stdev = climo_mean; climo_stdev = { file_name = []; diff --git a/data/config/GridStatConfig_default b/data/config/GridStatConfig_default index 4bec5ecf4b..aa4d9517b5 100644 --- a/data/config/GridStatConfig_default +++ b/data/config/GridStatConfig_default @@ -75,7 +75,8 @@ obs = fcst; //////////////////////////////////////////////////////////////////////////////// // -// Climatology data +// Climatology mean data +// May be set separately in the "fcst" and "obs" dictionaries // climo_mean = { @@ -94,12 +95,17 @@ climo_mean = { hour_interval = 6; } +// +// Climatology standard deviation data +// May be set separately in the "fcst" and "obs" dictionaries +// climo_stdev = climo_mean; climo_stdev = { file_name = []; } // +// Climatology distribution settings // May be set separately in each "obs.field" entry // climo_cdf = { @@ -265,8 +271,9 @@ nc_pairs_flag = { //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = ""; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = ""; +version = "V12.0.0"; 
//////////////////////////////////////////////////////////////////////////////// diff --git a/data/config/Makefile.in b/data/config/Makefile.in index e88150a766..6ff5c30139 100644 --- a/data/config/Makefile.in +++ b/data/config/Makefile.in @@ -202,6 +202,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/config/Point2GridConfig_default b/data/config/Point2GridConfig_default index 8361e871c9..3b8625023d 100644 --- a/data/config/Point2GridConfig_default +++ b/data/config/Point2GridConfig_default @@ -22,11 +22,12 @@ obs_window = { //////////////////////////////////////////////////////////////////////////////// - // -// Observation message type +// Point observation filtering options // -message_type = []; +message_type = []; +obs_quality_inc = []; +obs_quality_exc = []; //////////////////////////////////////////////////////////////////////////////// @@ -72,10 +73,6 @@ var_name_map = [ //////////////////////////////////////////////////////////////////////////////// -quality_mark_thresh = 2; - -//////////////////////////////////////////////////////////////////////////////// - tmp_dir = "/tmp"; version = "V12.0.0"; diff --git a/data/config/PointStatConfig_default b/data/config/PointStatConfig_default index d4b277c6a3..95665fe017 100644 --- a/data/config/PointStatConfig_default +++ b/data/config/PointStatConfig_default @@ -115,10 +115,13 @@ message_type_group_map = [ { key = "WATERSF"; val = "SFCSHP"; } ]; +obtype_as_group_val_flag = FALSE; + //////////////////////////////////////////////////////////////////////////////// // -// Climatology data +// Climatology mean data +// May be set separately in the "fcst" and "obs" dictionaries // climo_mean = { @@ -137,12 +140,17 @@ climo_mean = { hour_interval = 6; } +// +// Climatology standard deviation data +// May be set 
separately in the "fcst" and "obs" dictionaries +// climo_stdev = climo_mean; climo_stdev = { file_name = []; } // +// Climatology distribution settings // May be set separately in each "obs.field" entry // climo_cdf = { @@ -299,8 +307,10 @@ output_flag = { //////////////////////////////////////////////////////////////////////////////// -tmp_dir = "/tmp"; -output_prefix = ""; -version = "V12.0.0"; +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = ""; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/data/config/SeriesAnalysisConfig_default b/data/config/SeriesAnalysisConfig_default index 2b0cdfa53e..8f76139011 100644 --- a/data/config/SeriesAnalysisConfig_default +++ b/data/config/SeriesAnalysisConfig_default @@ -61,7 +61,8 @@ obs = fcst; //////////////////////////////////////////////////////////////////////////////// // -// Climatology data +// Climatology mean data +// May be set separately in the "fcst" and "obs" dictionaries // climo_mean = { @@ -80,11 +81,19 @@ climo_mean = { hour_interval = 6; } +// +// Climatology standard deviation data +// May be set separately in the "fcst" and "obs" dictionaries +// climo_stdev = climo_mean; climo_stdev = { file_name = []; } +// +// Climatology distribution settings +// May be set separately in each "obs.field" entry +// climo_cdf = { cdf_bins = 1; center_bins = FALSE; diff --git a/data/map/Makefile.in b/data/map/Makefile.in index a34c9c189f..12e4540ca0 100644 --- a/data/map/Makefile.in +++ b/data/map/Makefile.in @@ -260,6 +260,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/map/admin_by_country/Makefile.in b/data/map/admin_by_country/Makefile.in index 3f36ae61fb..825f9576c6 100644 --- a/data/map/admin_by_country/Makefile.in +++ 
b/data/map/admin_by_country/Makefile.in @@ -202,6 +202,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/poly/HMT_masks/Makefile.in b/data/poly/HMT_masks/Makefile.in index 7284d8de7d..305405cc2a 100644 --- a/data/poly/HMT_masks/Makefile.in +++ b/data/poly/HMT_masks/Makefile.in @@ -202,6 +202,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/poly/Makefile.in b/data/poly/Makefile.in index 36890cca0a..04906070f5 100644 --- a/data/poly/Makefile.in +++ b/data/poly/Makefile.in @@ -260,6 +260,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/poly/NCEP_masks/Makefile.in b/data/poly/NCEP_masks/Makefile.in index acda936d43..4ecea4b0d0 100644 --- a/data/poly/NCEP_masks/Makefile.in +++ b/data/poly/NCEP_masks/Makefile.in @@ -202,6 +202,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/ps/Makefile.in b/data/ps/Makefile.in index c017420485..2677c7fb04 100644 --- a/data/ps/Makefile.in +++ b/data/ps/Makefile.in @@ -202,6 +202,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff 
--git a/data/table_files/Makefile.in b/data/table_files/Makefile.in index 08d91b866e..5f82cc573e 100644 --- a/data/table_files/Makefile.in +++ b/data/table_files/Makefile.in @@ -202,6 +202,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/data/table_files/met_header_columns_V12.0.txt b/data/table_files/met_header_columns_V12.0.txt index 299e6cb4d6..3f4785edf5 100644 --- a/data/table_files/met_header_columns_V12.0.txt +++ b/data/table_files/met_header_columns_V12.0.txt @@ -5,15 +5,15 @@ V12.0 : STAT : FHO : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID V12.0 : STAT : ISC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL TILE_DIM TILE_XLL TILE_YLL NSCALE ISCALE MSE ISC FENERGY2 OENERGY2 BASER FBIAS V12.0 : STAT : MCTC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL (N_CAT) F[0-9]*_O[0-9]* EC_VALUE V12.0 : STAT : MCTS : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_CAT ACC ACC_NCL ACC_NCU ACC_BCL ACC_BCU HK HK_BCL HK_BCU HSS HSS_BCL HSS_BCU GER GER_BCL GER_BCU HSS_EC HSS_EC_BCL HSS_EC_BCU EC_VALUE -V12.0 : STAT : MPR : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV 
OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL INDEX OBS_SID OBS_LAT OBS_LON OBS_LVL OBS_ELV FCST OBS OBS_QC CLIMO_MEAN CLIMO_STDEV CLIMO_CDF -V12.0 : STAT : SEEPS : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL S12 S13 S21 S23 S31 S32 PF1 PF2 PF3 PV1 PV2 PV3 MEAN_FCST MEAN_OBS SEEPS +V12.0 : STAT : MPR : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL INDEX OBS_SID OBS_LAT OBS_LON OBS_LVL OBS_ELV FCST OBS OBS_QC OBS_CLIMO_MEAN OBS_CLIMO_STDEV OBS_CLIMO_CDF FCST_CLIMO_MEAN FCST_CLIMO_STDEV +V12.0 : STAT : SEEPS : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL ODFL ODFH OLFD OLFH OHFD OHFL PF1 PF2 PF3 PV1 PV2 PV3 MEAN_FCST MEAN_OBS SEEPS V12.0 : STAT : SEEPS_MPR : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE OBS_SID OBS_LAT OBS_LON FCST OBS OBS_QC FCST_CAT OBS_CAT P1 P2 T1 T2 SEEPS V12.0 : STAT : NBRCNT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FBS FBS_BCL FBS_BCU FSS FSS_BCL FSS_BCU AFSS AFSS_BCL AFSS_BCU UFSS UFSS_BCL UFSS_BCU F_RATE 
F_RATE_BCL F_RATE_BCU O_RATE O_RATE_BCL O_RATE_BCU V12.0 : STAT : NBRCTC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FY_OY FY_ON FN_OY FN_ON V12.0 : STAT : NBRCTS : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL BASER BASER_NCL BASER_NCU BASER_BCL BASER_BCU FMEAN FMEAN_NCL FMEAN_NCU FMEAN_BCL FMEAN_BCU ACC ACC_NCL ACC_NCU ACC_BCL ACC_BCU FBIAS FBIAS_BCL FBIAS_BCU PODY PODY_NCL PODY_NCU PODY_BCL PODY_BCU PODN PODN_NCL PODN_NCU PODN_BCL PODN_BCU POFD POFD_NCL POFD_NCU POFD_BCL POFD_BCU FAR FAR_NCL FAR_NCU FAR_BCL FAR_BCU CSI CSI_NCL CSI_NCU CSI_BCL CSI_BCU GSS GSS_BCL GSS_BCU HK HK_NCL HK_NCU HK_BCL HK_BCU HSS HSS_BCL HSS_BCU ODDS ODDS_NCL ODDS_NCU ODDS_BCL ODDS_BCU LODDS LODDS_NCL LODDS_NCU LODDS_BCL LODDS_BCU ORSS ORSS_NCL ORSS_NCU ORSS_BCL ORSS_BCU EDS EDS_NCL EDS_NCU EDS_BCL EDS_BCU SEDS SEDS_NCL SEDS_NCU SEDS_BCL SEDS_BCU EDI EDI_NCL EDI_NCU EDI_BCL EDI_BCU SEDI SEDI_NCL SEDI_NCU SEDI_BCL SEDI_BCU BAGSS BAGSS_BCL BAGSS_BCU V12.0 : STAT : GRAD : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FGBAR OGBAR MGBAR EGBAR S1 S1_OG FGOG_RATIO DX DY V12.0 : STAT : DMAP : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FY OY FBIAS BADDELEY HAUSDORFF MED_FO MED_OF MED_MIN MED_MAX MED_MEAN 
FOM_FO FOM_OF FOM_MIN FOM_MAX FOM_MEAN ZHU_FO ZHU_OF ZHU_MIN ZHU_MAX ZHU_MEAN G GBETA BETA_VALUE -V12.0 : STAT : ORANK : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL INDEX OBS_SID OBS_LAT OBS_LON OBS_LVL OBS_ELV OBS PIT RANK N_ENS_VLD (N_ENS) ENS_[0-9]* OBS_QC ENS_MEAN CLIMO_MEAN SPREAD ENS_MEAN_OERR SPREAD_OERR SPREAD_PLUS_OERR CLIMO_STDEV +V12.0 : STAT : ORANK : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL INDEX OBS_SID OBS_LAT OBS_LON OBS_LVL OBS_ELV OBS PIT RANK N_ENS_VLD (N_ENS) ENS_[0-9]* OBS_QC ENS_MEAN OBS_CLIMO_MEAN SPREAD ENS_MEAN_OERR SPREAD_OERR SPREAD_PLUS_OERR OBS_CLIMO_STDEV FCST_CLIMO_MEAN FCST_CLIMO_STDEV V12.0 : STAT : PCT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL (N_THRESH) THRESH_[0-9]* OY_[0-9]* ON_[0-9]* V12.0 : STAT : PJC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL (N_THRESH) THRESH_[0-9]* OY_TP_[0-9]* ON_TP_[0-9]* CALIBRATION_[0-9]* REFINEMENT_[0-9]* LIKELIHOOD_[0-9]* BASER_[0-9]* V12.0 : STAT : PRC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL 
(N_THRESH) THRESH_[0-9]* PODY_[0-9]* POFD_[0-9]* @@ -27,9 +27,9 @@ V12.0 : STAT : RELP : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID V12.0 : STAT : SAL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FABAR OABAR FOABAR FFABAR OOABAR MAE V12.0 : STAT : SL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FBAR OBAR FOBAR FFBAR OOBAR MAE V12.0 : STAT : SSVAR : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_BIN BIN_i BIN_N VAR_MIN VAR_MAX VAR_MEAN FBAR OBAR FOBAR FFBAR OOBAR FBAR_NCL FBAR_NCU FSTDEV FSTDEV_NCL FSTDEV_NCU OBAR_NCL OBAR_NCU OSTDEV OSTDEV_NCL OSTDEV_NCU PR_CORR PR_CORR_NCL PR_CORR_NCU ME ME_NCL ME_NCU ESTDEV ESTDEV_NCL ESTDEV_NCU MBIAS MSE BCMSE RMSE -V12.0 : STAT : VAL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL UFABAR VFABAR UOABAR VOABAR UVFOABAR UVFFABAR UVOOABAR FA_SPEED_BAR OA_SPEED_BAR DIRA_ME DIRA_MAE DIRA_MSE -V12.0 : STAT : VL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL UFBAR VFBAR UOBAR VOBAR UVFOBAR UVFFBAR UVOOBAR F_SPEED_BAR 
O_SPEED_BAR DIR_ME DIR_MAE DIR_MSE -V12.0 : STAT : VCNT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FBAR FBAR_BCL FBAR_BCU OBAR OBAR_BCL OBAR_BCU FS_RMS FS_RMS_BCL FS_RMS_BCU OS_RMS OS_RMS_BCL OS_RMS_BCU MSVE MSVE_BCL MSVE_BCU RMSVE RMSVE_BCL RMSVE_BCU FSTDEV FSTDEV_BCL FSTDEV_BCU OSTDEV OSTDEV_BCL OSTDEV_BCU FDIR FDIR_BCL FDIR_BCU ODIR ODIR_BCL ODIR_BCU FBAR_SPEED FBAR_SPEED_BCL FBAR_SPEED_BCU OBAR_SPEED OBAR_SPEED_BCL OBAR_SPEED_BCU VDIFF_SPEED VDIFF_SPEED_BCL VDIFF_SPEED_BCU VDIFF_DIR VDIFF_DIR_BCL VDIFF_DIR_BCU SPEED_ERR SPEED_ERR_BCL SPEED_ERR_BCU SPEED_ABSERR SPEED_ABSERR_BCL SPEED_ABSERR_BCU DIR_ERR DIR_ERR_BCL DIR_ERR_BCU DIR_ABSERR DIR_ABSERR_BCL DIR_ABSERR_BCU ANOM_CORR ANOM_CORR_NCL ANOM_CORR_NCU ANOM_CORR_BCL ANOM_CORR_BCU ANOM_CORR_UNCNTR ANOM_CORR_UNCNTR_BCL ANOM_CORR_UNCNTR_BCU DIR_ME DIR_ME_BCL DIR_ME_BCU DIR_MAE DIR_MAE_BCL DIR_MAE_BCU DIR_MSE DIR_MSE_BCL DIR_MSE_BCU DIR_RMSE DIR_RMSE_BCL DIR_RMSE_BCU +V12.0 : STAT : VAL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL UFABAR VFABAR UOABAR VOABAR UVFOABAR UVFFABAR UVOOABAR FA_SPEED_BAR OA_SPEED_BAR TOTAL_DIR DIRA_ME DIRA_MAE DIRA_MSE +V12.0 : STAT : VL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL UFBAR VFBAR UOBAR VOBAR UVFOBAR UVFFBAR UVOOBAR F_SPEED_BAR O_SPEED_BAR TOTAL_DIR DIR_ME DIR_MAE DIR_MSE +V12.0 : STAT : VCNT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD 
OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FBAR FBAR_BCL FBAR_BCU OBAR OBAR_BCL OBAR_BCU FS_RMS FS_RMS_BCL FS_RMS_BCU OS_RMS OS_RMS_BCL OS_RMS_BCU MSVE MSVE_BCL MSVE_BCU RMSVE RMSVE_BCL RMSVE_BCU FSTDEV FSTDEV_BCL FSTDEV_BCU OSTDEV OSTDEV_BCL OSTDEV_BCU FDIR FDIR_BCL FDIR_BCU ODIR ODIR_BCL ODIR_BCU FBAR_SPEED FBAR_SPEED_BCL FBAR_SPEED_BCU OBAR_SPEED OBAR_SPEED_BCL OBAR_SPEED_BCU VDIFF_SPEED VDIFF_SPEED_BCL VDIFF_SPEED_BCU VDIFF_DIR VDIFF_DIR_BCL VDIFF_DIR_BCU SPEED_ERR SPEED_ERR_BCL SPEED_ERR_BCU SPEED_ABSERR SPEED_ABSERR_BCL SPEED_ABSERR_BCU DIR_ERR DIR_ERR_BCL DIR_ERR_BCU DIR_ABSERR DIR_ABSERR_BCL DIR_ABSERR_BCU ANOM_CORR ANOM_CORR_NCL ANOM_CORR_NCU ANOM_CORR_BCL ANOM_CORR_BCU ANOM_CORR_UNCNTR ANOM_CORR_UNCNTR_BCL ANOM_CORR_UNCNTR_BCU TOTAL_DIR DIR_ME DIR_ME_BCL DIR_ME_BCU DIR_MAE DIR_MAE_BCL DIR_MAE_BCU DIR_MSE DIR_MSE_BCL DIR_MSE_BCU DIR_RMSE DIR_RMSE_BCL DIR_RMSE_BCU V12.0 : STAT : GENMPR : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL INDEX STORM_ID PROB_LEAD PROB_VAL AGEN_INIT AGEN_FHR AGEN_LAT AGEN_LON AGEN_DLAND BGEN_LAT BGEN_LON BGEN_DLAND GEN_DIST GEN_TDIFF INIT_TDIFF DEV_CAT OPS_CAT V12.0 : STAT : SSIDX : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE FCST_MODEL REF_MODEL N_INIT N_TERM N_VLD SS_INDEX diff --git a/data/tc_data/Makefile.in b/data/tc_data/Makefile.in index 9ef1261884..eee63e1885 100644 --- a/data/tc_data/Makefile.in +++ b/data/tc_data/Makefile.in @@ -202,6 +202,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ 
MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/docs/Users_Guide/appendixA.rst b/docs/Users_Guide/appendixA.rst index 83b10c2c32..384422af1f 100644 --- a/docs/Users_Guide/appendixA.rst +++ b/docs/Users_Guide/appendixA.rst @@ -121,6 +121,11 @@ Q. How can I understand the number of matched pairs? in the configuration file. So all of the 1166 observations are rejected for the same reason. + In addition, running point_stat with at least verbosity level 9 (-v 9) + will result in a log message being printed to explain why each + observation is skipped or retained for each verification task. + This level of detail is intended only for debugging purposes. + Q. What types of NetCDF files can MET read? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -510,7 +515,7 @@ Q. What is an example of using Grid-Stat with regridding and masking turned on? This tells Grid-Stat to do verification on the "observation" grid. Grid-Stat reads the GFS and Stage4 data and then automatically regrids the GFS data to the Stage4 domain using budget interpolation. - Use "FCST" to verify the forecast domain. And use either a named + Use FCST to verify the forecast domain. And use either a named grid or a grid specification string to regrid both the forecast and observation to a common grid. For example, to_grid = "G212"; will regrid both to NCEP Grid 212 before comparing them. @@ -1796,8 +1801,11 @@ Q. What are MET's conventions for latitude, longitude, azimuth and bearing angle .. dropdown:: Answer - MET considers north latitude and east longitude positive. Latitudes - have range from :math:`-90^\circ` to :math:`+90^\circ`. Longitudes have + MET considers north latitude and east longitude positive. 
However, + internally MET considers east longitude negative so users may encounter + DEBUG statements with longitude of a different sign than they provided + (e.g. for observation locations or grid metadata). Latitudes have + range from :math:`-90^\circ` to :math:`+90^\circ`. Longitudes have range from :math:`-180^\circ` to :math:`+180^\circ`. Plane angles such as azimuths and bearing (example: horizontal wind direction) have range :math:`0^\circ` to :math:`360^\circ` and are measured clockwise diff --git a/docs/Users_Guide/appendixC.rst b/docs/Users_Guide/appendixC.rst index 15c3ab5c2d..a6bbb0fe51 100644 --- a/docs/Users_Guide/appendixC.rst +++ b/docs/Users_Guide/appendixC.rst @@ -616,23 +616,23 @@ Anomaly Correlation Coefficient Called "ANOM_CORR" and "ANOM_CORR_UNCNTR" for centered and uncentered versions in CNT output :numref:`table_PS_format_info_CNT` -The anomaly correlation coefficient is equivalent to the Pearson correlation coefficient, except that both the forecasts and observations are first adjusted according to a climatology value. The anomaly is the difference between the individual forecast or observation and the typical situation, as measured by a climatology (**c**) of some variety. It measures the strength of linear association between the forecast anomalies and observed anomalies. The anomaly correlation coefficient is defined as: +The anomaly correlation coefficient is equivalent to the Pearson correlation coefficient, except that both the forecasts and observations are first adjusted by subtracting their corresponding climatology value. The anomaly is the difference between the individual forecast or observation and the typical situation, as measured by a forecast climatology (:math:`c_f`) and observation climatology (:math:`c_o`). It measures the strength of linear association between the forecast anomalies and observed anomalies. The anomaly correlation coefficient is defined as: -.. 
math:: \text{Anomaly Correlation} = \frac{\sum(f_i - c)(o_i - c)}{\sqrt{\sum(f_i - c)^2} \sqrt{\sum(o_i -c)^2}} . +.. math:: \text{Anomaly Correlation} = \frac{\sum(f_i - {c_f}_i)(o_i - {c_o}_i)}{\sqrt{\sum(f_i - {c_f}_i)^2} \sqrt{\sum(o_i - {c_o}_i)^2}} . The centered anomaly correlation coefficient (ANOM_CORR) which includes the mean error is defined as: .. only:: latex - .. math:: \text{ANOM\_CORR } = \frac{ \overline{[(f - c) - \overline{(f - c)}][(a - c) - \overline{(a - c)}]}}{ \sqrt{ \overline{( (f - c) - \overline{(f - c)})^2} \overline{( (a - c) - \overline{(a - c)})^2}}} + .. math:: \text{ANOM\_CORR } = \frac{ \overline{[(f - c_f) - \overline{(f - c_f)}][(o - c_o) - \overline{(o - c_o)}]}}{ \sqrt{ \overline{( (f - c_f) - \overline{(f - c_f)})^2} \overline{( (o - c_o) - \overline{(o - c_o)})^2}}} .. only:: html - .. math:: \text{ANOM_CORR } = \frac{ \overline{[(f - c) - \overline{(f - c)}][(a - c) - \overline{(a - c)}]}}{ \sqrt{ \overline{( (f - c) - \overline{(f - c)})^2} \overline{( (a - c) - \overline{(a - c)})^2}}} + .. math:: \text{ANOM_CORR } = \frac{ \overline{[(f - c_f) - \overline{(f - c_f)}][(o - c_o) - \overline{(o - c_o)}]}}{ \sqrt{ \overline{( (f - c_f) - \overline{(f - c_f)})^2} \overline{( (o - c_o) - \overline{(o - c_o)})^2}}} The uncentered anomaly correlation coefficient (ANOM_CORR_UNCNTR) which does not include the mean errors is defined as: -.. math:: \text{Anomaly Correlation Raw } = \frac{ \overline{(f - c)(a - c)}}{ \sqrt{\overline{(f - c)^2} \overline{(a - c)^2}}} +.. math:: \text{Anomaly Correlation Raw } = \frac{ \overline{(f - c_f)(o - c_o)}}{ \sqrt{\overline{(f - c_f)^2} \overline{(o - c_o)^2}}} Anomaly correlation can range between -1 and 1; a value of 1 indicates perfect correlation and a value of -1 indicates perfect negative correlation. A value of 0 indicates that the forecast and observed anomalies are not correlated. 
@@ -650,56 +650,60 @@ The partial sums can be accumulated over individual cases to produce statistics Scalar L1 and L2 Values ----------------------- -Called "FBAR", "OBAR", "FOBAR", "FFBAR", and "OOBAR" in SL1L2 output :numref:`table_PS_format_info_SL1L2` +Called "FBAR", "OBAR", "FOBAR", "FFBAR", "OOBAR", and "MAE" in SL1L2 output :numref:`table_PS_format_info_SL1L2` These statistics are simply the 1st and 2nd moments of the forecasts, observations and errors: .. math:: - \text{FBAR} = \text{Mean}(f) = \bar{f} = \frac{1}{n} \sum_{i=1}^n f_i + \text{FBAR} = \text{Mean}(f) = \frac{1}{n} \sum_{i=1}^n f_i - \text{OBAR} = \text{Mean}(o) = \bar{o} = \frac{1}{n} \sum_{i=1}^n o_i + \text{OBAR} = \text{Mean}(o) = \frac{1}{n} \sum_{i=1}^n o_i - \text{FOBAR} = \text{Mean}(fo) = \bar{fo} = \frac{1}{n} \sum_{i=1}^n f_i o_i + \text{FOBAR} = \text{Mean}(fo) = \frac{1}{n} \sum_{i=1}^n f_i o_i - \text{FFBAR} = \text{Mean}(f^2) = \bar{f}^2 = \frac{1}{n} \sum_{i=1}^n f_i^2 + \text{FFBAR} = \text{Mean}(f^2) = \frac{1}{n} \sum_{i=1}^n f_i^2 - \text{OOBAR} = \text{Mean}(o^2) = \bar{o}^2 = \frac{1}{n} \sum_{i=1}^n o_i^2 + \text{OOBAR} = \text{Mean}(o^2) = \frac{1}{n} \sum_{i=1}^n o_i^2 + + \text{MAE} = \text{Mean}(|f - o|) = \frac{1}{n} \sum_{i=1}^n |f_i - o_i| Some of the other statistics for continuous forecasts (e.g., RMSE) can be derived from these moments. Scalar Anomaly L1 and L2 Values ------------------------------- -Called "FABAR", "OABAR", "FOABAR", "FFABAR", "OOABAR" in SAL1L2 output :numref:`table_PS_format_info_SAL1L2` +Called "FABAR", "OABAR", "FOABAR", "FFABAR", "OOABAR", and "MAE" in SAL1L2 output :numref:`table_PS_format_info_SAL1L2` -Computation of these statistics requires a climatological value, c. These statistics are the 1st and 2nd moments of the scalar anomalies. The moments are defined as: +Computation of these statistics requires climatological values, where :math:`c_f` is the forecast climatology value and :math:`c_o` is the observation climatology value. 
These statistics are the 1st and 2nd moments of the scalar anomalies. The moments are defined as: .. math:: - \text{FABAR} = \text{Mean}(f - c) = \bar{f - c} = \frac{1}{n} \sum_{i=1}^n (f_i - c) + \text{FABAR} = \text{Mean}(f - c_f) = \frac{1}{n} \sum_{i=1}^n (f_i - {c_f}_i) + + \text{OABAR} = \text{Mean}(o - c_o) = \frac{1}{n} \sum_{i=1}^n (o_i - {c_o}_i) - \text{OABAR} = \text{Mean}(o - c) = \bar{o - c} = \frac{1}{n} \sum_{i=1}^n (o_i - c) + \text{FOABAR} = \text{Mean}[(f - c_f)(o - c_o)] = \frac{1}{n} \sum_{i=1}^n (f_i - {c_f}_i)(o_i - {c_o}_i) - \text{FOABAR} = \text{Mean}[(f - c)(o - c)] = \bar{(f - c)(o - c)} = \frac{1}{n} \sum_{i=1}^n (f_i - c)(o_i - c) + \text{FFABAR} = \text{Mean}[(f - c_f)^2] = \frac{1}{n} \sum_{i=1}^n (f_i - {c_f}_i)^2 - \text{FFABAR} = \text{Mean}[(f - c)^2] = \bar{(f - c)}^2 = \frac{1}{n} \sum_{i=1}^n (f_i - c)^2 + \text{OOABAR} = \text{Mean}[(o - c_o)^2] = \frac{1}{n} \sum_{i=1}^n (o_i - {c_o}_i)^2 - \text{OOABAR} = \text{Mean}[(o - c)^2] = \bar{(o - c)}^2 = \frac{1}{n} \sum_{i=1}^n (o_i - c)^2 + \text{MAE} = \text{Mean}(|(f - c_f) - (o - c_o)|) = \frac{1}{n} \sum_{i=1}^n |(f_i - {c_f}_i) - (o_i - {c_o}_i)| Vector L1 and L2 Values ----------------------- -Called "UFBAR", "VFBAR", "UOBAR", "VOBAR", "UVFOBAR", "UVFFBAR", "UVOOBAR" in VL1L2 output :numref:`table_PS_format_info_VL1L2` +Called "UFBAR", "VFBAR", "UOBAR", "VOBAR", "UVFOBAR", "UVFFBAR", and "UVOOBAR" in VL1L2 output :numref:`table_PS_format_info_VL1L2` -These statistics are the moments for wind vector values, where **u** is the E-W wind component and **v** is the N-S wind component ( :math:`u_f` is the forecast E-W wind component; :math:`u_o` is the observed E-W wind component; :math:`v_f` is the forecast N-S wind component; and :math:`v_o` is the observed N-S wind component). 
The following measures are computed: +These statistics are the moments for wind vector values, where :math:`u` is the E-W wind component and :math:`v` is the N-S wind component ( :math:`u_f` is the forecast E-W wind component; :math:`u_o` is the observed E-W wind component; :math:`v_f` is the forecast N-S wind component; and :math:`v_o` is the observed N-S wind component). The following measures are computed: .. math:: - \text{UFBAR} = \text{Mean}(u_f) = \bar{u}_f = \frac{1}{n} \sum_{i=1}^n u_{fi} + \text{UFBAR} = \text{Mean}(u_f) = \frac{1}{n} \sum_{i=1}^n u_{fi} - \text{VFBAR} = \text{Mean}(v_f) = \bar{v}_f = \frac{1}{n} \sum_{i=1}^n v_{fi} + \text{VFBAR} = \text{Mean}(v_f) = \frac{1}{n} \sum_{i=1}^n v_{fi} - \text{UOBAR} = \text{Mean}(u_o) = \bar{u}_o = \frac{1}{n} \sum_{i=1}^n u_{oi} + \text{UOBAR} = \text{Mean}(u_o) = \frac{1}{n} \sum_{i=1}^n u_{oi} - \text{VOBAR} = \text{Mean}(v_o) = \bar{v}_o = \frac{1}{n} \sum_{i=1}^n v_{oi} + \text{VOBAR} = \text{Mean}(v_o) = \frac{1}{n} \sum_{i=1}^n v_{oi} \text{UVFOBAR} = \text{Mean}(u_f u_o + v_f v_o) = \frac{1}{n} \sum_{i=1}^n (u_{fi} u_{oi} + v_{fi} v_{oi}) @@ -710,25 +714,27 @@ These statistics are the moments for wind vector values, where **u** is the E-W Vector Anomaly L1 and L2 Values ------------------------------- -Called "UFABAR", "VFABAR", "UOABAR", "VOABAR", "UVFOABAR", "UVFFABAR", "UVOOABAR" in VAL1L2 output :numref:`table_PS_format_info_VAL1L2` +Called "UFABAR", "VFABAR", "UOABAR", "VOABAR", "UVFOABAR", "UVFFABAR", and "UVOOABAR" in VAL1L2 output :numref:`table_PS_format_info_VAL1L2` -These statistics require climatological values for the wind vector components, :math:`u_c \text{ and } v_c`. The measures are defined below: +These statistics require climatological values for the wind vector components, where :math:`{u_c}_f` and :math:`{v_c}_f` are the forecast climatology vectors and :math:`{u_c}_o` and :math:`{v_c}_o` are the observation climatology vectors. The measures are defined below: .. 
math:: - \text{UFABAR} = \text{Mean}(u_f - u_c) = \frac{1}{n} \sum_{i=1}^n (u_{fi} - u_c) + \text{UFABAR} = \text{Mean}(u_f - {u_c}_f) = \frac{1}{n} \sum_{i=1}^n ({u_f}_i - {{u_c}_f}_i) - \text{VFBAR} = \text{Mean}(v_f - v_c) = \frac{1}{n} \sum_{i=1}^n (v_{fi} - v_c) + \text{VFABAR} = \text{Mean}(v_f - {v_c}_f) = \frac{1}{n} \sum_{i=1}^n ({v_f}_i - {{v_c}_f}_i) - \text{UOABAR} = \text{Mean}(u_o - u_c) = \frac{1}{n} \sum_{i=1}^n (u_{oi} - u_c) + \text{UOABAR} = \text{Mean}(u_o - {u_c}_o) = \frac{1}{n} \sum_{i=1}^n ({u_o}_i - {{u_c}_o}_i) - \text{VOABAR} = \text{Mean}(v_o - v_c) = \frac{1}{n} \sum_{i=1}^n (v_{oi} - v_c) + \text{VOABAR} = \text{Mean}(v_o - {v_c}_o) = \frac{1}{n} \sum_{i=1}^n ({v_o}_i - {{v_c}_o}_i) - \text{UVFOABAR} &= \text{Mean}[(u_f - u_c)(u_o - u_c) + (v_f - v_c)(v_o - v_c)] \\ - &= \frac{1}{n} \sum_{i=1}^n (u_{fi} - u_c) + (u_{oi} - u_c) + (v_{fi} - v_c)(v_{oi} - v_c) + \text{UVFOABAR} &= \text{Mean}[(u_f - {u_c}_f)(u_o - {u_c}_o) + (v_f - {v_c}_f)(v_o - {v_c}_o)] \\ + &= \frac{1}{n} \sum_{i=1}^n ({u_f}_i - {{u_c}_f}_i)({u_o}_i - {{u_c}_o}_i) + ({v_f}_i - {{v_c}_f}_i)({v_o}_i - {{v_c}_o}_i) - \text{UVFFABAR} = \text{Mean}[(u_f - u_c)^2 + (v_f - v_c)^2] = \frac{1}{n} \sum_{i=1}^n ((u_{fi} - u_c)^2 + (v_{fi} - v_c)^2) + \text{UVFFABAR} &= \text{Mean}[(u_f - {u_c}_f)^2 + (v_f - {v_c}_f)^2] \\ + &= \frac{1}{n} \sum_{i=1}^n (({u_f}_i - {{u_c}_f}_i)^2 + ({v_f}_i - {{v_c}_f}_i)^2) - \text{UVOOABAR} = \text{Mean}[(u_o - u_c)^2 + (v_o - v_c)^2] = \frac{1}{n} \sum_{i=1}^n ((u_{oi} - u_c)^2 + (v_{oi} - v_c)^2) + \text{UVOOABAR} &= \text{Mean}[(u_o - {u_c}_o)^2 + (v_o - {v_c}_o)^2] \\ + &= \frac{1}{n} \sum_{i=1}^n (({u_o}_i - {{u_c}_o}_i)^2 + ({v_o}_i - {{v_c}_o}_i)^2) Gradient Values --------------- diff --git a/docs/Users_Guide/appendixF.rst index dc81f0cd96..17628c7ee5 100644 --- a/docs/Users_Guide/appendixF.rst +++ b/docs/Users_Guide/appendixF.rst @@ -355,7 +355,7 @@ The first argument for the Plot-Data-Plane tool is
the gridded data file to be r 'level': 'Surface', 'units': 'None', 'init': '20050807_000000', 'valid': '20050807_120000', 'lead': '120000', 'accum': '120000' - 'grid': {...} } + 'grid': { ... } } DEBUG 1: Creating postscript file: fcst.ps Special Case for Ensemble-Stat, Series-Analysis, and MTD @@ -368,7 +368,7 @@ The Ensemble-Stat, Series-Analysis, MTD and Gen-Ens-Prod tools all have the abil gen_ens_prod ens1.nc ens2.nc ens3.nc ens4.nc -out ens_prod.nc -config GenEnsProd_config -In this case, a user is passing 4 ensemble members to Gen-Ens-Prod to be evaluated, and each member is in a separate file. If a user wishes to use Python embedding to process the ensemble input files, then the same exact command is used however special modifications inside the GenEnsProd_config file are needed. In the config file dictionary, the user must set the **file_type** entry to either **PYTHON_NUMPY** or **PYTHON_XARRAY** to activate the Python embedding for these tools. Then, in the **name** entry of the config file dictionaries for the forecast or observation data, the user must list the **full path** to the Python script to be run. However, in the Python command, replace the name of the input gridded data file to the Python script with the constant string **MET_PYTHON_INPUT_ARG**. When looping over all of the input files, the MET tools will replace that constant **MET_PYTHON_INPUT_ARG** with the path to the input file currently being processed and optionally, any command line arguments for the Python script. Here is what this looks like in the GenEnsProd_config file for the above example: +In this case, a user is passing 4 ensemble members to Gen-Ens-Prod to be evaluated, and each member is in a separate file. If a user wishes to use Python embedding to process the ensemble input files, then the same exact command is used; however special modifications inside the GenEnsProd_config file are needed. 
In the config file dictionary, the user must set the **file_type** entry to either **PYTHON_NUMPY** or **PYTHON_XARRAY** to activate the Python embedding for these tools. Then, in the **name** entry of the config file dictionaries for the forecast or observation data, the user must list the **full path** to the Python script to be run. However, in the Python command, replace the name of the input gridded data file to the Python script with the constant string **MET_PYTHON_INPUT_ARG**. When looping over all of the input files, the MET tools will replace that constant **MET_PYTHON_INPUT_ARG** with the path to the input file currently being processed and optionally, any command line arguments for the Python script. Here is what this looks like in the GenEnsProd_config file for the above example: .. code-block:: :caption: Gen-Ens-Prod MET_PYTHON_INPUT_ARG Config diff --git a/docs/Users_Guide/config_options.rst b/docs/Users_Guide/config_options.rst index fb6e4f47eb..b85b2d233b 100644 --- a/docs/Users_Guide/config_options.rst +++ b/docs/Users_Guide/config_options.rst @@ -21,86 +21,92 @@ which are dictionaries themselves. The configuration file language supports the following data types: * Dictionary: - + * Grouping of one or more entries enclosed by curly braces {}. * Array: - + * List of one or more entries enclosed by square braces []. - + * Array elements are separated by commas. * String: - + * A character string enclosed by double quotation marks "". - + * Integer: - + * A numeric integer value. - + * Float: - + * A numeric float value. - + * Boolean: - + * A boolean value (TRUE or FALSE). - + * Threshold: - + * A threshold type (<, <=, ==, !-, >=, or >) followed by a numeric value. - + * The threshold type may also be specified using two letter abbreviations (lt, le, eq, ne, ge, gt). - + * Multiple thresholds may be combined by specifying the logic type of AND (&&) or OR (||). 
For example, ">=5&&<=10" defines the numbers between 5 and 10 and "==1||==2" defines numbers exactly equal to 1 or 2. - + * Percentile Thresholds: * A threshold type (<, <=, ==, !=, >=, or >), followed by a percentile - type description (SFP, SOP, SCP, USP, CDP, or FBIAS), followed by a - numeric value, typically between 0 and 100. + type description (SFP, SOP, SFCP, SOCP, USP, FCDP, OCDP, or FBIAS), + followed by a numeric value, typically between 0 and 100. * Note that the two letter threshold type abbreviations (lt, le, eq, ne, ge, gt) are not supported for percentile thresholds. - + * Thresholds may be defined as percentiles of the data being processed in several places: - + * In Point-Stat and Grid-Stat when setting "cat_thresh", "wind_thresh" and "cnt_thresh". - + * In Wavelet-Stat when setting "cat_thresh". - + * In MODE when setting "conv_thresh" and "merge_thresh". - + * In Ensemble-Stat when setting "obs_thresh". - + * When using the "censor_thresh" config option. - + * In the Stat-Analysis "-out_fcst_thresh" and "-out_obs_thresh" job command options. - + * In the Gen-Vx-Mask "-thresh" command line option. - + * The following percentile threshold types are supported: - - * "SFP" for a percentile of the sample forecast values. + + * SFP for a percentile of the sample forecast values. e.g. ">SFP33.3" means greater than the 33.3-rd forecast percentile. - - * "SOP" for a percentile of the sample observation values. + + * SOP for a percentile of the sample observation values. e.g. ">SOP75" means greater than the 75-th observation percentile. - - * "SCP" for a percentile of the sample climatology values. - e.g. ">SCP90" means greater than the 90-th climatology percentile. - - * "USP" for a user-specified percentile threshold. + + * SFCP for a percentile of the sample forecast climatology values. + e.g. ">SFCP90" means greater than the 90-th forecast climatology + percentile. + + * SOCP for a percentile of the sample observation climatology values. + e.g. 
">SOCP90" means greater than the 90-th observation climatology + percentile. For backward compatibility, the "SCP" threshold type + is processed the same as "SOCP". + + * USP for a user-specified percentile threshold. e.g. "5.0 threshold to the observations and then chooses a forecast threshold which results in a frequency bias of 1. The frequency bias can be any float value > 0.0. - - * "CDP" for climatological distribution percentile thresholds. - These thresholds require that the climatological mean and standard - deviation be defined using the climo_mean and climo_stdev config file - options, respectively. The categorical (cat_thresh), conditional - (cnt_thresh), or wind speed (wind_thresh) thresholds are defined - relative to the climatological distribution at each point. Therefore, - the actual numeric threshold applied can change for each point. - e.g. ">CDP50" means greater than the 50-th percentile of the + + * FCDP for forecast climatological distribution percentile thresholds. + These thresholds require that the forecast climatological mean and + standard deviation be defined using the "climo_mean" and "climo_stdev" + config file options, respectively. The categorical (cat_thresh), + conditional (cnt_thresh), or wind speed (wind_thresh) thresholds can + be defined relative to the climatological distribution at each point. + Therefore, the actual numeric threshold applied can change for each point. + e.g. ">FCDP50" means greater than the 50-th percentile of the climatological distribution for each point. - - * When percentile thresholds of type SFP, SOP, SCP, or CDP are requested - for continuous filtering thresholds (cnt_thresh), wind speed thresholds - (wind_thresh), or observation filtering thresholds (obs_thresh in - ensemble_stat), the following special logic is applied. Percentile + + * OCDP for observation climatological distribution percentile thresholds. + The "OCDP" threshold logic matches the "FCDP" logic described above. 
+ However these thresholds are defined using the observation climatological + mean and standard deviation rather than the forecast climatological data. + For backward compatibility, the "CDP" threshold type is processed the + same as "OCDP". + + * When percentile thresholds of type SFP, SOP, SFCP, SOCP, FCDP, or OCDP are + requested for continuous filtering thresholds (cnt_thresh), wind speed + thresholds (wind_thresh), or observation filtering thresholds (obs_thresh + in ensemble_stat), the following special logic is applied. Percentile thresholds of type equality are automatically converted to percentile bins which span the values from 0 to 100. - For example, "==CDP25" is automatically expanded to 4 percentile bins: - >=CDP0&&=CDP25&&=CDP50&&=CDP75&&<=CDP100 - - * When sample percentile thresholds of type SFP, SOP, SCP, or FBIAS are - requested, MET recomputes the actual percentile that the threshold + For example, ==OCDP25 is automatically expanded to 4 percentile bins: + >=OCDP0&&=OCDP25&&=OCDP50&&=OCDP75&&<=OCDP100 + + * When sample percentile thresholds of type SFP, SOP, SFCP, SOCP, or FBIAS + are requested, MET recomputes the actual percentile that the threshold represents. If the requested percentile and actual percentile differ by more than 5%, a warning message is printed. This may occur when the sample size is small or the data values are not truly continuous. - - * When percentile thresholds of type SFP, SOP, SCP, or USP are used, the - actual threshold value is appended to the FCST_THRESH and OBS_THRESH + + * When percentile thresholds of type SFP, SOP, SFCP, SOCP, or USP are used, + the actual threshold value is appended to the FCST_THRESH and OBS_THRESH output columns. For example, if the 90-th percentile of the current set of forecast values is 3.5, then the requested threshold "<=SFP90" is written to the output as "<=SFP90(3.5)". 
- + * When parsing FCST_THRESH and OBS_THRESH columns, the Stat-Analysis tool ignores the actual percentile values listed in parentheses. - + +.. note:: + + Prior to MET version 12.0.0, forecast climatological inputs were not + supported. The observation climatological inputs were used to process + threshold types named SCP and CDP. + + For backward compatibility, the SCP threshold type is processed the same + as SOCP and CDP the same as OCDP. + + Users are encouraged to replace the deprecated SCP and CDP threshold + types with the updated SOCP and OCDP types, respectively. + * Piecewise-Linear Function (currently used only by MODE): - + * A list of (x, y) points enclosed in parenthesis (). - + * The (x, y) points are *NOT* separated by commas. - + * User-defined function of a single variable: - + * Left side is a function name followed by variable name in parenthesis. - + * Right side is an equation which includes basic math functions (+,-,*,/), built-in functions (listed below), or other user-defined functions. - + * Built-in functions include: sin, cos, tan, sind, cosd, tand, asin, acos, atan, asind, acosd, atand, atan2, atan2d, arg, argd, log, exp, log10, exp10, sqrt, abs, min, max, @@ -299,8 +324,10 @@ To run this utility: -e EXISTING_FILE, --existing=EXISTING_FILE Save the text into the named file (optional, default: ../../../data/table_files/ndbc_stations.xml) -NOTE: The downloaded files are written to a subdirectory ndbc_temp_data which -can be deleted once the final output file is created. +.. note:: + + The downloaded files are written to a subdirectory ndbc_temp_data which + can be deleted once the final output file is created. MET_BASE -------- @@ -324,14 +351,14 @@ values and/or define observation bias corrections. When processing point and gridded observations, Ensemble-Stat searches the table to find the entry defining the observation error information. The table consists of 15 columns and includes a header row defining each column. 
The -special string "ALL" is interpreted as a wildcard in these files. The first 6 +special string ALL is interpreted as a wildcard in these files. The first 6 columns (OBS_VAR, MESSAGE_TYPE, PB_REPORT_TYPE, IN_REPORT_TYPE, INSTRUMENT_TYPE, and STATION_ID) may be set to a comma-separated list of strings to be matched. In addition, the strings in the OBS_VAR column are interpreted as regular expressions when searching for a match. For example, setting the OBS_VAR column to 'APCP_[0-9]+' would match observations for both APCP_03 and APCP_24. The -HGT_RANGE, VAL_RANGE, and PRS_RANGE columns should either be set to "ALL" or -"BEG,END" where BEG and END specify the range of values to be used. The +HGT_RANGE, VAL_RANGE, and PRS_RANGE columns should either be set to ALL or +BEG,END where BEG and END specify the range of values to be used. The INST_BIAS_SCALE and INST_BIAS_OFFSET columns define instrument bias adjustments which are applied to the observation values. The DIST_TYPE and DIST_PARM columns define the distribution from which random perturbations should be drawn @@ -339,7 +366,7 @@ and applied to the ensemble member values. See the obs_error description below for details on the supported error distributions. The last two columns, MIN and MAX, define the bounds for the valid range of the bias-corrected observation values and randomly perturbed ensemble member values. Values less than MIN are -reset to the mimimum value and values greater than MAX are reset to the maximum +reset to the minimum value and values greater than MAX are reset to the maximum value. A value of NA indicates that the variable is unbounded. MET_GRIB_TABLES @@ -357,7 +384,7 @@ At runtime, the MET tools read default GRIB tables from the installed *share/met/table_files* directory, and their file formats are described below: GRIB1 table files begin with "grib1" prefix and end with a ".txt" suffix. -The first line of the file must contain "GRIB1". +The first line of the file must contain GRIB1. 
The following lines consist of 4 integers followed by 3 strings: | Column 1: GRIB code (e.g. 11 for temperature) @@ -374,10 +401,10 @@ References: | `Office Note 388 GRIB1 `_ | `A Guide to the Code Form FM 92-IX Ext. GRIB Edition 1 `_ -| +| GRIB2 table files begin with "grib2" prefix and end with a ".txt" suffix. -The first line of the file must contain "GRIB2". +The first line of the file must contain GRIB2. The following lines consist of 8 integers followed by 3 strings. | Column 1: Section 0 Discipline @@ -391,7 +418,7 @@ The following lines consist of 8 integers followed by 3 strings. | Column 9: variable name | Column 10: variable description | Column 11: units -| +| References: @@ -475,7 +502,7 @@ parallelization: * :code:`grid_ens_prod` * :code:`mode` -**Thread Binding** +**Thread Binding** It is normally beneficial to bind threads to particular cores, sometimes called *affinitization*. There are a few reasons for this, but at the very least it @@ -591,7 +618,7 @@ writing of NetCDF files within MET significantly. output_precision ---------------- - + The "output_precision" entry in ConfigConstants defines the precision (number of significant decimal places) to be written to the ASCII output files. Setting this option in the config file of one of the tools will @@ -605,7 +632,7 @@ override the default value set in ConfigConstants. tmp_dir ------- - + The "tmp_dir" entry in ConfigConstants defines the directory for the temporary files. The directory must exist and be writable. The environment variable MET_TMP_DIR overrides the default value at the configuration file. @@ -633,16 +660,35 @@ used. .. 
code-block:: none - mesage_type_group_map = [ + message_type_group_map = [ { key = "SURFACE"; val = "ADPSFC,SFCSHP,MSONET"; }, { key = "ANYAIR"; val = "AIRCAR,AIRCFT"; }, { key = "ANYSFC"; val = "ADPSFC,SFCSHP,ADPUPA,PROFLR,MSONET"; }, { key = "ONLYSF"; val = "ADPSFC,SFCSHP"; } ]; +obtype_as_group_val_flag +------------------------ + +The "obtype_as_group_val_flag" entry is a boolean that controls how the +OBTYPE header column is populated for message type groups defined in +"message_type_group_map". If set to TRUE and when writing matched pair +line types (MPR, SEEPS_MPR, and ORANK), write OBTYPE as the group map +*value*, i.e. the input message type for each individual observation. +If set to FALSE (default) and for all other line types, write OBTYPE +as the group map key, i.e. the name of the message type group. + +For example, if FALSE, write the OBTYPE column in the MPR line type +as the "ANYAIR" message type group name. If TRUE, write OBTYPE as "AIRCAR" +or "AIRCFT", based on the input message type of each point observation. + +.. code-block:: none + + obtyp_as_group_val_flag = FALSE; + message_type_map ---------------- - + The "message_type_map" entry is an array of dictionaries, each containing a "key" string and "val" string. This defines a mapping of input strings to output message types. This mapping is applied in ASCII2NC when @@ -666,7 +712,7 @@ types. model ----- - + The "model" entry specifies a name for the model being verified. This name is written to the MODEL column of the ASCII output generated. If you're verifying multiple models, you should choose descriptive model names (no @@ -679,7 +725,7 @@ e.g. model = "GFS"; desc ---- - + The "desc" entry specifies a user-specified description for each verification task. This string is written to the DESC column of the ASCII output generated. It may be set separately in each "obs.field" verification task @@ -709,10 +755,10 @@ the configuration file obtype value is written. obtype = "ANALYS"; .. 
_regrid: - + regrid ------ - + The "regrid" entry is a dictionary containing information about how to handle input gridded data files. The "regrid" entry specifies regridding logic using the following entries: @@ -720,17 +766,17 @@ using the following entries: * The "to_grid" entry may be set to NONE, FCST, OBS, a named grid, the path to a gridded data file defining the grid, or an explicit grid specification string. - + * to_grid = NONE; To disable regridding. - + * to_grid = FCST; To regrid observations to the forecast grid. - + * to_grid = OBS; To regrid forecasts to the observation grid. - + * to_grid = "G218"; To regrid both to a named grid. - + * to_grid = "path"; To regrid both to a grid defined by a file. - + * to_grid = "spec"; To define a grid specification string, as described in :ref:`appendixB`. @@ -741,42 +787,53 @@ using the following entries: write bad data for the current point. * The "method" entry defines the regridding method to be used. - + * Valid regridding methods: - + * MIN for the minimum value - + * MAX for the maximum value - + * MEDIAN for the median value - + * UW_MEAN for the unweighted average value - + * DW_MEAN for the distance-weighted average value (weight = distance^-2) - + * AW_MEAN for an area-weighted mean when regridding from high to low resolution grids (width = 1) - + * LS_FIT for a least-squares fit - + * BILIN for bilinear interpolation (width = 2) - + * NEAREST for the nearest grid point (width = 1) - + * BUDGET for the mass-conserving budget interpolation - + + * The budget interpolation method is often used for precipitation + in order to roughly conserve global averages. However it is + computationally intensive and relatively slow. To compute the + interpolated value for each point of the target grid, a higher + resolution 5x5 mesh with 0.2 grid box spacing is centered on + the point and bilinear interpolation is performed for each + of those 25 lat/lon locations. 
The budget interpolation value + is computed as the average of those 25 bilinear interpolation + values, assuming enough valid data is present to meet the + "vld_thresh" threshold. + * FORCE to compare gridded data directly with no interpolation as long as the grid x and y dimensions match. - + * UPPER_LEFT for the upper left grid point (width = 1) - + * UPPER_RIGHT for the upper right grid point (width = 1) - + * LOWER_RIGHT for the lower right grid point (width = 1) - + * LOWER_LEFT for the lower left grid point (width = 1) - + * MAXGAUSS to compute the maximum value in the neighborhood and apply a Gaussian smoother to the result @@ -786,7 +843,7 @@ using the following entries: - width = 4; To regrid using a 4x4 box or circle with diameter 4. * The "shape" entry defines the shape of the neighborhood. - Valid values are "SQUARE" or "CIRCLE" + Valid values are SQUARE or CIRCLE * The "gaussian_dx" entry specifies a delta distance for Gaussian smoothing. The default is 81.271. Ignored if not Gaussian method. @@ -804,7 +861,7 @@ using the following entries: regridding step. The conversion operation is applied first, followed by the censoring operation. Note that these operations are limited in scope. They are only applied if defined within the regrid dictionary itself. - Settings defined at higher levels of config file context are not applied. + Settings defined at higher levels of config file context are not applied. .. code-block:: none @@ -823,7 +880,7 @@ using the following entries: fcst ---- - + The "fcst" entry is a dictionary containing information about the field(s) to be verified. This dictionary may include the following entries: @@ -999,16 +1056,16 @@ to be verified. This dictionary may include the following entries: thresholds to specify which matched pairs should be included in the statistics. These options apply to the Point-Stat and Grid-Stat tools. They are parsed seperately for each "obs.field" array entry. 
- The "mpr_column" strings specify MPR column names ("FCST", "OBS", - "CLIMO_MEAN", "CLIMO_STDEV", or "CLIMO_CDF"), differences of columns - ("FCST-OBS"), or the absolute value of those differences ("ABS(FCST-OBS)"). + The "mpr_column" strings specify MPR column names (FCST, OBS, + CLIMO_MEAN, CLIMO_STDEV, or CLIMO_CDF), differences of columns + (FCST-OBS), or the absolute value of those differences (ABS(FCST-OBS)). The number of "mpr_thresh" thresholds must match the number of "mpr_column" entries, and the n-th threshold is applied to the n-th column. Any matched pairs which do not meet any of the specified thresholds are excluded from the analysis. For example, the following settings exclude matched pairs where the observation value differs from the forecast or climatological mean values by more than 10: - + .. code-block:: none mpr_column = [ "ABS(OBS-FCST)", "ABS(OBS-CLIMO_MEAN)" ]; @@ -1106,103 +1163,103 @@ File-format specific settings for the "field" entry: * `GRIB1 Product Definition Section `_ * `GRIB2 Product Definition Section `_ - + * The "level" entry specifies a level type and value: - + * ANNN for accumulation interval NNN - + * ZNNN for vertical level NNN - + * ZNNN-NNN for a range of vertical levels - + * PNNN for pressure level NNN in hPa - + * PNNN-NNN for a range of pressure levels in hPa - + * LNNN for a generic level type - + * RNNN for a specific GRIB record number - + * The "GRIB_lvl_typ" entry is an integer specifying the level type. - + * The "GRIB_lvl_val1" and "GRIB_lvl_val2" entries are floats specifying the first and second level values. - + * The "GRIB_ens" entry is a string specifying NCEP's usage of the extended PDS for ensembles. Set to "hi_res_ctl", "low_res_ctl", "+n", or "-n", for the n-th ensemble member. - - * The "GRIB1_ptv" entry is an integer specifying the GRIB1 parameter + + * The GRIB1_ptv entry is an integer specifying the GRIB1 parameter table version number. 
- - * The "GRIB1_code" entry is an integer specifying the GRIB1 code (wgrib + + * The GRIB1_code entry is an integer specifying the GRIB1 code (wgrib kpds5 value). - - * The "GRIB1_center" is an integer specifying the originating center. - - * The "GRIB1_subcenter" is an integer specifying the originating + + * The GRIB1_center is an integer specifying the originating center. + + * The GRIB1_subcenter is an integer specifying the originating subcenter. - - * The "GRIB1_tri" is an integer specifying the time range indicator. - - * The "GRIB2_mtab" is an integer specifying the master table number. - - * The "GRIB2_ltab" is an integer specifying the local table number. - - * The "GRIB2_disc" is an integer specifying the GRIB2 discipline code. - - * The "GRIB2_parm_cat" is an integer specifying the parameter category + + * The GRIB1_tri is an integer specifying the time range indicator. + + * The GRIB2_mtab is an integer specifying the master table number. + + * The GRIB2_ltab is an integer specifying the local table number. + + * The GRIB2_disc is an integer specifying the GRIB2 discipline code. + + * The GRIB2_parm_cat is an integer specifying the parameter category code. - - * The "GRIB2_parm" is an integer specifying the parameter code. - - * The "GRIB2_pdt" is an integer specifying the product definition + + * The GRIB2_parm is an integer specifying the parameter code. + + * The GRIB2_pdt is an integer specifying the product definition template (Table 4.0). - - * The "GRIB2_process" is an integer specifying the generating process + + * The GRIB2_process is an integer specifying the generating process (Table 4.3). - - * The "GRIB2_cntr" is an integer specifying the originating center. - - * The "GRIB2_ens_type" is an integer specifying the ensemble type + + * The GRIB2_cntr is an integer specifying the originating center. + + * The GRIB2_ens_type is an integer specifying the ensemble type (Table 4.6). 
- - * The "GRIB2_der_type" is an integer specifying the derived product + + * The GRIB2_der_type is an integer specifying the derived product type (Table 4.7). - - * The "GRIB2_stat_type" is an integer specifying the statistical + + * The GRIB2_stat_type is an integer specifying the statistical processing type (Table 4.10). - * The "GRIB2_perc_val" is an integer specifying the requested percentile + * The GRIB2_perc_val is an integer specifying the requested percentile value (0 to 100) to be used. This applies only to GRIB2 product definition templates 4.6 and 4.10. - * The "GRIB2_aerosol_type" is an integer specifying the aerosol type - (Table 4.233). This applies only to GRIB2 product defintion templates + * The GRIB2_aerosol_type is an integer specifying the aerosol type + (Table 4.233). This applies only to GRIB2 product definition templates 4.46 and 4.48. - * The "GRIB2_aerosol_interval_type" is an integer specifying the aerosol - size interval (Table 4.91). This applies only to GRIB2 product defintion + * The GRIB2_aerosol_interval_type is an integer specifying the aerosol + size interval (Table 4.91). This applies only to GRIB2 product definition templates 4.46 and 4.48. - * The "GRIB2_aerosol_size_lower" and "GRIB2_aerosol_size_upper" are doubles + * The GRIB2_aerosol_size_lower and "GRIB2_aerosol_size_upper" are doubles specifying the endpoints of the aerosol size interval. These applies only to GRIB2 product defintion templates 4.46 and 4.48. - * The "GRIB2_ipdtmpl_index" and "GRIB2_ipdtmpl_val" entries are arrays + * The GRIB2_ipdtmpl_index and GRIB2_ipdtmpl_val entries are arrays of integers which specify the product description template values to be used. The indices are 0-based. 
For example, use the following to request a GRIB2 record whose 9-th and 27-th product description template values are 1 and 2, respectively: GRIB2_ipdtmpl_index=[8, 26]; GRIB2_ipdtmpl_val=[1, 2]; - + * NetCDF (from MET tools, CF-compliant, p_interp, and wrf_interp): - + * The "name" entry specifies the NetCDF variable name. - + * The "level" entry specifies the dimensions to be used: - + * (i,...,j,*,*) for a single field, where i,...,j specifies fixed dimension values and *,* specifies the two dimensions for the gridded field. @ specifies the vertical level value or time value @@ -1233,10 +1290,10 @@ File-format specific settings for the "field" entry: ]; * Python (using PYTHON_NUMPY or PYTHON_XARRAY): - + * The Python interface for MET is described in Appendix F of the MET User's Guide. - + * Two methods for specifying the Python command and input file name are supported. For tools which read a single gridded forecast and/or observation file, both options work. However, only the second option @@ -1244,13 +1301,13 @@ File-format specific settings for the "field" entry: as Ensemble-Stat, Series-Analysis, and MTD. Option 1: - + * On the command line, replace the path to the input gridded data file with the constant string PYTHON_NUMPY or PYTHON_XARRAY. - + * Specify the configuration "name" entry as the Python command to be executed to read the data. - + * The "level" entry is not required for Python. For example: @@ -1265,14 +1322,14 @@ File-format specific settings for the "field" entry: * On the command line, leave the path to the input gridded data as is. - + * Set the configuration "file_type" entry to the constant PYTHON_NUMPY or PYTHON_XARRAY. - + * Specify the configuration "name" entry as the Python command to be executed to read the data, but replace the input gridded data file with the constant MET_PYTHON_INPUT_ARG. - + * The "level" entry is not required for Python. 
For example: @@ -1299,7 +1356,7 @@ File-format specific settings for the "field" entry: init_time = "20120619_12"; valid_time = "20120620_00"; lead_time = "12"; - + field = [ { name = "APCP"; @@ -1415,16 +1472,16 @@ or that filtering by station ID may also be accomplished using the "mask.sid" option. However, when using the "sid_inc" option, statistics are reported separately for each masking region. - + * The "sid_exc" entry is an array of station ID groups indicating which station ID's should be excluded from the verification task. - + * Each element in the "sid_inc" and "sid_exc" arrays is either the name of a single station ID or the full path to a station ID group file name. A station ID group file consists of a name for the group followed by a list of station ID's. All of the station ID's indicated will be concatenated into one long list of station ID's to be included or excluded. - + * As with "message_type" above, the "sid_inc" and "sid_exc" settings can be placed in the in the "field" array element to control which station ID's are included or excluded for each verification task. @@ -1435,10 +1492,13 @@ or climo_mean ---------- - + The "climo_mean" dictionary specifies climatology mean data to be read by the -Grid-Stat, Point-Stat, Ensemble-Stat, and Series-Analysis tools. It consists -of several entires defining the climatology file names and fields to be used. +Grid-Stat, Point-Stat, Ensemble-Stat, and Series-Analysis tools. It can be +set inside the "fcst" and "obs" dictionaries to specify separate forecast and +observation climatology data or once at the top-level configuration file +context to use the same data for both. It consists of several entries defining +the climatology file names and fields to be used. * The "file_names" entry specifies one or more file names containing the gridded climatology data to be used. @@ -1455,7 +1515,7 @@ of several entires defining the climatology file names and fields to be used. 
* The "time_interp_method" entry specifies how the climatology data should be interpolated in time to the forecast valid time: - + * NEAREST for data closest in time * UW_MEAN for average of data before and after * DW_MEAN for linear interpolation in time of data before and after @@ -1469,22 +1529,25 @@ of several entires defining the climatology file names and fields to be used. with 6 and 12 being common choices. Use "NA" if the timing of the climatology data should not be checked. -* The "day_interval" and "hour_interval" entries replace the deprecated - entries "match_month", "match_day", and "time_step". +.. note:: + + As of MET version 11.0.0, the "day_interval" and "hour_interval" entries + replace the "match_month", "match_day", and "time_step" entries, which are + now deprecated. .. code-block:: none climo_mean = { - + file_name = [ "/path/to/climatological/mean/files" ]; field = []; - + regrid = { method = NEAREST; width = 1; vld_thresh = 0.5; } - + time_interp_method = DW_MEAN; day_interval = 31; hour_interval = 6; @@ -1492,22 +1555,25 @@ of several entires defining the climatology file names and fields to be used. climo_stdev ----------- - + The "climo_stdev" dictionary specifies climatology standard deviation data to be read by the Grid-Stat, Point-Stat, Ensemble-Stat, and Series-Analysis -tools. The "climo_mean" and "climo_stdev" data define the climatological -distribution for each grid point, assuming normality. These climatological -distributions are used in two ways: +tools. It can be set inside the "fcst" and "obs" dictionaries to specify +separate forecast and observation climatology data or once at the top-level +configuration file context to use the same data for both. The "climo_mean" and +"climo_stdev" data define the climatological distribution for each grid point, +assuming normality. 
These climatological distributions are used in two ways: (1) - To define climatological distribution percentile (CDP) thresholds which - can be used as categorical (cat_thresh), continuous (cnt_thresh), or wind - speed (wind_thresh) thresholds. + To define climatological distribution percentiles thresholds (FCDP and + OCDP) which can be used as categorical (cat_thresh), continuous (cnt_thresh), + or wind speed (wind_thresh) thresholds. (2) To subset matched pairs into climatological bins based on where the - observation value falls within the climatological distribution. See the - "climo_cdf" dictionary. + observation value falls within the observation climatological distribution. + See the "climo_cdf" dictionary. Note that only the observation climatology + data is used for this purpose, not the forecast climatology data. This dictionary is identical to the "climo_mean" dictionary described above but points to files containing climatological standard deviation values @@ -1521,14 +1587,36 @@ over the "climo_mean" setting and then updating the "file_name" entry. file_name = [ "/path/to/climatological/standard/deviation/files" ]; } +Prior to MET version 12.0.0, forecast climatological inputs were not supported. +If the "climo_mean" and "climo_stdev" dictionaries are defined at the top-level +configuration file context, the same data is used for both the forecast and +observation climatologies. To specify separate forecast and observation +climatologies, define "climo_mean" and "climo_stdev" inside the "fcst" and "obs" +dictionaries, as shown below. + +.. code-block:: none + + fcst = { + field = [ ... ]; + climo_mean = { ... }; + climo_stdev = { ... }; + } + + obs = { + field = [ ... ]; + climo_mean = { ... }; + climo_stdev = { ... 
}; + } + climo_cdf --------- - -The "climo_cdf" dictionary specifies how the the climatological mean -("climo_mean") and standard deviation ("climo_stdev") data are used to + +The "climo_cdf" dictionary specifies how the the observation climatological +mean ("climo_mean") and standard deviation ("climo_stdev") data are used to evaluate model performance relative to where the observation value falls -within the climatological distribution. This dictionary consists of the -following entries: +within the observation climatological distribution. It can be set inside the +"obs" dictionary or at the top-level configuration file context. This +dictionary consists of the following entries: (1) The "cdf_bins" entry defines the climatological bins either as an integer @@ -1542,11 +1630,11 @@ following entries: (4) The "direct_prob" entry may be set to TRUE or FALSE. -MET uses the climatological mean and standard deviation to construct a normal -PDF at each observation location. The total area under the PDF is 1, and the -climatological CDF value is computed as the area of the PDF to the left of -the observation value. Since the CDF is a value between 0 and 1, the CDF -bins must span that same range. +MET uses the observation climatological mean and standard deviation to +construct a normal PDF at each observation location. The total area under the +PDF is 1, and the climatological CDF value is computed as the area of the PDF +to the left of the observation value. Since the CDF is a value between 0 and 1, +the CDF bins must span that same range. When "cdf_bins" is set to an array of floats, they explicitly define the climatological bins. The array must begin with 0.0 and end with 1.0. @@ -1590,20 +1678,21 @@ all pairs into a single climatological bin. 
climate_data ------------ - -When specifying climatology data for probability forecasts, either supply a -probabilistic "climo_mean" field or non-probabilistic "climo_mean" and -"climo_stdev" fields from which a normal approximation of the climatological -probabilities should be derived. -When "climo_mean" is set to a probability field with a range of [0, 1] and -"climo_stdev" is unset, the MET tools use the "climo_mean" probability values -directly to compute Brier Skill Score (BSS). +When specifying observation climatology data to evaluate probability +forecasts, either supply a probabilistic observation "climo_mean" field or +non-probabilistic "climo_mean" and "climo_stdev" fields from which a normal +approximation of the observation climatological probabilities should be +derived. -When "climo_mean" and "climo_stdev" are both set to non-probability fields, -the MET tools use the mean, standard deviation, and observation event -threshold to derive a normal approximation of the climatological -probabilities. +When the observation "climo_mean" is set to a probability field with a range +of [0, 1] and "climo_stdev" is unset, the MET tools use the "climo_mean" +probability values directly to compute Brier Skill Score (BSS). + +When the observation "climo_mean" and "climo_stdev" are both set to +non-probability fields, the MET tools use the mean, standard deviation, and +observation event threshold to derive a normal approximation of the +observation climatological probabilities. The "direct_prob" option controls the derivation logic. When "direct_prob" is true, the climatological probability is computed directly from the @@ -1626,19 +1715,39 @@ The default setting is >=0.1&&<=0.85. seeps_p1_thresh = >=0.1&&<=0.85; +seeps_grid_climo_name +--------------------- + +The "seeps_grid_climo_name" option sets the location and the filename of the SEEPS climo file for the gridded data. +The default setting is the empty string. It should be configured by the user. 
It can be overridden by the environment variable, MET_SEEPS_GRID_CLIMO_NAME. + +.. code-block:: none + + seeps_grid_climo_name = ""; + +seeps_point_climo_name +---------------------- + +The "seeps_point_climo_name" option controls the location and the filename of the SEEPS climo file for the point data. +The default setting is the empty string which does not override the default location and name. It can be overridden by the environment variable, MET_SEEPS_POINT_CLIMO_NAME. + +.. code-block:: none + + seeps_point_climo_name = ""; + mask_missing_flag ----------------- The "mask_missing_flag" entry specifies how missing data should be handled in the Wavelet-Stat and MODE tools: - * "NONE" to perform no masking of missing data - - * "FCST" to mask the forecast field with missing observation data - - * "OBS" to mask the observation field with missing forecast data - - * "BOTH" to mask both fields with missing data from the other + * NONE to perform no masking of missing data + + * FCST to mask the forecast field with missing observation data + + * OBS to mask the observation field with missing forecast data + + * BOTH to mask both fields with missing data from the other .. code-block:: none @@ -1665,8 +1774,8 @@ Point-Stat and Ensemble-Stat, the reference time is the forecast valid time. .. _config_options-mask: mask ---- - +---- + The "mask" entry is a dictionary that specifies the verification masking regions to be used when computing statistics. Each mask defines a geographic extent, and any matched pairs falling inside that area will be @@ -1680,16 +1789,23 @@ in the following ways: three digit grid number. Supplying a value of "FULL" indicates that the verification should be performed over the entire grid on which the data resides. - See: `ON388 - TABLE B, GRID IDENTIFICATION (PDS Octet 7), MASTER LIST OF NCEP STORAGE GRIDS, GRIB Edition 1 (FM92) `_. 
+ See: `ON388 - TABLE B, GRID IDENTIFICATION (PDS Octet 7), MASTER LIST OF NCEP STORAGE GRIDS, GRIB Edition 1 (FM92) `_. The "grid" entry can be the gridded data file defining grid. * The "poly" entry contains a comma-separated list of files that define verification masking regions. These masking regions may be specified in - two ways: in an ASCII file containing lat/lon points defining the mask polygon, - or using a gridded data file such as the NetCDF output of the Gen-Vx-Mask tool. - Some details for each of these options are described below: + three ways: + + 1. An ASCII polyline file containing lat/lon points defining the mask polygon. + 2. The NetCDF output of the Gen-Vx-Mask tool. + 3. Any gridded data file followed by a configuration string describing the + data to be read and an optional threshold to be applied to that data. + + These three options are described below: - * If providing an ASCII file containing the lat/lon points defining the mask + * Option 1 - ASCII polyline file: + + If providing an ASCII file containing the lat/lon points defining the mask polygon, the file must contain a name for the region followed by the latitude (degrees north) and longitude (degrees east) for each vertex of the polygon. The values are separated by whitespace (e.g. spaces or newlines), and the @@ -1717,27 +1833,51 @@ in the following ways: observation point falls within the polygon defined is done in x/y grid space. - * The NetCDF output of the gen_vx_mask tool. Please see :numref:`masking` + .. code-block:: none + + mask = { poly = [ "share/met/poly/CONUS.poly" ]; } + + * Option 2 - Gen-Vx-Mask output: + + The NetCDF output of the gen_vx_mask tool. Please see :numref:`masking` for more details. - * Any gridded data file that MET can read may be used to define a + .. 
code-block:: none + + mask = { poly = [ "/path/to/gen_vx_mask_output.nc" ]; } + + * Option 3 - Any gridded data file: + + Any gridded data file that MET can read may be used to define a verification masking region. Users must specify a description of the field to be used from the input file and, optionally, may specify a threshold to be applied to that field. Once this threshold is applied, any grid point where the resulting field is 0, the mask is turned off. Any grid point where it is non-zero, the mask is turned on. - For example, "sample.grib {name = \"TMP\"; level = \"Z2\";} >273" - -* The "sid" entry is an array of strings which define groups of - observation station ID's over which to compute statistics. Each entry - in the array is either a filename of a comma-separated list. - - * For a filename, the strings are whitespace-separated. The first - string is the mask "name" and the remaining strings are the station + + .. code-block:: none + + mask = { poly = [ "/path/to/sample.grib {name = \"TMP\"; level = \"Z2\";} >273" ]; } + + .. note:: + + The syntax for the Option 3 is complicated since it includes quotes + embedded within another quoted string. Any such embedded quotes must + be escaped using a preceeding backslash character. + +* The "sid" entry is an array of strings which define groups of observation station + ID's over which to compute statistics. Each station ID string can be followed by an + optional numeric weight enclosed in parenethesis and used by the "point_weight_flag" + configuration option. Each entry in the "sid" "array is either a filename or a + comma-separated list. + + * For an ASCII filename, the strings contained within it are whitespace-separated. + The first string is the mask "name" and the remaining strings are the station ID's to be used. * For a comma-separated list, optionally use a colon to specify a name. - For "MY_LIST:SID1,SID2", name = MY_LIST and values = SID1 and SID2. 
+ For "MY_LIST:SID1(WGT1),SID2(WGT2)", name = MY_LIST which consists of + two station ID's (SID1 and SID2) and optional numeric weights (WGT1 and WGT2). * For a comma-separated list of length one with no name specified, the mask "name" and value are both set to the single station ID string. For "SID1", name = SID1 and value = SID1. @@ -1747,6 +1887,7 @@ in the following ways: For "SID1,SID2", name = MASK_SID and values = SID1 and SID2. * The "name" of the station ID mask is written to the VX_MASK column of the MET output files. + * The "llpnt" entry is either a single dictionary or an array of dictionaries. Each dictionary contains three entries, the "name" for the masking region, "lat_thresh", and "lon_thresh". The latitude and @@ -1811,11 +1952,11 @@ bootstrap confidence intervals. The interval variable indicates what method should be used for computing bootstrap confidence intervals: * The "interval" entry specifies the confidence interval method: - - * "BCA" for the BCa (bias-corrected percentile) interval method is + + * BCA for the BCa (bias-corrected percentile) interval method is highly accurate but computationally intensive. - - * "PCTILE" uses the percentile method which is somewhat less accurate + + * PCTILE uses the percentile method which is somewhat less accurate but more efficient. * The "rep_prop" entry specifies a proportion between 0 and 1 to define @@ -1847,7 +1988,7 @@ should be used for computing bootstrap confidence intervals: documentation of the `GNU Scientific Library `_ for a listing of the random number generators available for use. - + * The "seed" entry may be set to a specific value to make the computation of bootstrap confidence intervals fully repeatable. When left empty the random number generator seed is chosen automatically which will lead @@ -1876,12 +2017,12 @@ This dictionary may include the following entries: * The "field" entry specifies to which field(s) the interpolation method should be applied. 
This does not apply when doing point verification with the Point-Stat or Ensemble-Stat tools: - - * "FCST" to interpolate/smooth the forecast field. - - * "OBS" to interpolate/smooth the observation field. - - * "BOTH" to interpolate/smooth both the forecast and the observation. + + * FCST to interpolate/smooth the forecast field. + + * OBS to interpolate/smooth the observation field. + + * BOTH to interpolate/smooth both the forecast and the observation. * The "vld_thresh" entry specifies a number between 0 and 1. When performing interpolation over some neighborhood of points the ratio of @@ -1915,38 +2056,38 @@ This dictionary may include the following entries: * The "method" entry is an array of interpolation procedures to be applied to the points in the box: - + * MIN for the minimum value - + * MAX for the maximum value - + * MEDIAN for the median value - + * UW_MEAN for the unweighted average value - + * DW_MEAN for the distance-weighted average value where weight = distance^-2 * LS_FIT for a least-squares fit - + * BILIN for bilinear interpolation (width = 2) - + * NEAREST for the nearest grid point (width = 1) - + * BEST for the value closest to the observation - + * UPPER_LEFT for the upper left grid point (width = 1) * UPPER_RIGHT for the upper right grid point (width = 1) - + * LOWER_RIGHT for the lower right grid point (width = 1) - + * LOWER_LEFT for the lower left grid point (width = 1) * GAUSSIAN for the Gaussian kernel * MAXGAUSS for the maximum value followed by a Gaussian smoother - + * GEOG_MATCH for the nearest grid point where the land/sea mask and geography criteria are satisfied @@ -1978,7 +2119,7 @@ This dictionary may include the following entries: land_mask --------- - + The "land_mask" dictionary defines the land/sea mask field used when verifying at the surface. The "flag" entry enables/disables this logic. 
When enabled, the "message_type_group_map" dictionary must contain entries @@ -2006,7 +2147,7 @@ The "land_mask.flag" entry may be set separately in each "obs.field" entry. topo_mask --------- - + The "topo_mask" dictionary defines the model topography field used when verifying at the surface. The flag entry enables/disables this logic. When enabled, the "message_type_group_map" dictionary must contain an entry @@ -2036,7 +2177,7 @@ The "topo_mask.flag" entry may be set separately in each "obs.field" entry. hira ---- - + The "hira" entry is a dictionary that is very similar to the "interp" and "nbrhd" entries. It specifies information for applying the High Resolution Assessment (HiRA) verification logic in Point-Stat. HiRA is analogous to @@ -2068,7 +2209,7 @@ This dictionary may include the following entries: output line and used for computing probabilistic statistics. * The "shape" entry defines the shape of the neighborhood. - Valid values are "SQUARE" or "CIRCLE" + Valid values are SQUARE or CIRCLE * The "prob_cat_thresh" entry defines the thresholds which define ensemble probabilities from which to compute the ranked probability score output. @@ -2089,16 +2230,16 @@ This dictionary may include the following entries: output_flag ----------- - + The "output_flag" entry is a dictionary that specifies what verification methods should be applied to the input data. Options exist for each output line type from the MET tools. Each line type may be set to one of: -* "NONE" to skip the corresponding verification method - -* "STAT" to write the verification output only to the ".stat" output file - -* "BOTH" to write to the ".stat" output file as well the optional +* NONE to skip the corresponding verification method + +* STAT to write the verification output only to the ".stat" output file + +* BOTH to write to the ".stat" output file as well the optional "_type.txt" file, a more readable ASCII file sorted by line type. .. 
code-block:: none @@ -2171,7 +2312,7 @@ netcdf output will be generated. nc_pairs_var_name ----------------- - + The "nc_pairs_var_name" entry specifies a string for each verification task in Grid-Stat. This string is parsed from each "obs.field" dictionary entry and is used to construct variable names for the NetCDF matched pairs output @@ -2184,14 +2325,14 @@ For example: | nc_pairs_var_name = "TMP"; | - + .. code-block:: none nc_pairs_var_name = ""; nc_pairs_var_suffix ------------------- - + The "nc_pairs_var_suffix" entry is similar to the "nc_pairs_var_name" entry described above. It is also parsed from each "obs.field" dictionary entry. However, it defines a suffix to be appended to the output variable name. @@ -2205,8 +2346,10 @@ For example: | nc_pairs_var_suffix = "FREEZING"; (for the freezing level height) | -NOTE: This option was previously named "nc_pairs_var_str", which is -now deprecated. +.. note:: + + Prior to MET version 9.0.0, this option was named "nc_pairs_var_str", + which is now deprecated. .. code-block:: none @@ -2214,7 +2357,7 @@ now deprecated. ps_plot_flag ------------ - + The "ps_plot_flag" entry is a boolean value for Wavelet-Stat and MODE indicating whether a PostScript plot should be generated summarizing the verification. @@ -2225,28 +2368,74 @@ the verification. grid_weight_flag ---------------- - + The "grid_weight_flag" specifies how grid weighting should be applied -during the computation of continuous statistics and partial sums. It is -meant to account for grid box area distortion and is often applied to global -Lat/Lon grids. It is only applied for grid-to-grid verification in Grid-Stat -and Ensemble-Stat and is not applied for grid-to-point verification. +during the computation of contingency tables (CTC, MCTC, PCT, and +NBRCTC), partial sums (SL1L2, SAL1L2, VL1L2, and VAL1L2), and statistics +(CNT, CTS, MCTS, PSTD, PRC, PJC, ECLV, NBRCNT, and NBRCTS). 
+It is meant to account for grid box area distortion and is often applied +to global Lat/Lon grids. It is only applied for grid-to-grid verification +in Grid-Stat and Ensemble-Stat and is not applied for grid-to-point +verification, which is controlled by the "point_weight_flag" option. +It can only be defined once at the highest level of config file context +and applies to all verification tasks for that run. + Three grid weighting options are currently supported: -* "NONE" to disable grid weighting using a constant weight (default). - -* "COS_LAT" to define the weight as the cosine of the grid point latitude. +* NONE to disable grid weighting using a constant weight of 1.0 (default). + +* COS_LAT to define the weight as the cosine of the grid point latitude. This an approximation for grid box area used by NCEP and WMO. - -* "AREA" to define the weight as the true area of the grid box (km^2). -The weights are ultimately computed as the weight at each grid point divided -by the sum of the weights for the current masking region. +* AREA to define the weight as the true area of the grid box (km^2). + +If requested in the config file, the raw grid weights can be written to +the NetCDF output from Grid-Stat and Ensemble-Stat. + +When computing partial sums and continuous statistics, the weights are +first normalized by dividing by the sum of the weights for the current +masking region. When computing contingency tables and deriving statistics, +each contingency table cell contains the sum of the weights of the matching +grid points rather than the integer count of those grid points. Statistics +are derived using these sums of weights rather than the raw counts. + +When no grid weighting is requested (**NONE**), contingency tables are +populated using a default constant weight of 1.0 and the corresponding cells +are written to the output as integer counts for consistency with earlier +versions of MET. + +.. 
note:: + + The FHO line type is not compatible with grid weighting. If requested + with grid weighting enabled, Grid-Stat prints a warning message and + automatically disables the FHO line type. Users are advised to request the + CTC line type instead. .. code-block:: none grid_weight_flag = NONE; +point_weight_flag +----------------- + +The "point_weight_flag" is similar to the "grid_weight_flag", described above, +but applies to grid-to-point verification in Point-Stat and Ensemble-Stat. +It is not applied for grid-to-grid verification which is controlled by the +"grid_weight_flag" option. It can only be defined once at the highest level +of config file context and applies to all verification tasks for that run. + +While only one point weighting option is currently supported, additional +methods are planned for future versions: + +* NONE to disable point weighting using a constant weight of 1.0 (default). + +* SID to use the weights defined by the station ID masking configuration option, + "mask.sid". + +.. code-block:: none + + point_weight_flag = NONE; + hss_ec_value ------------ @@ -2283,9 +2472,9 @@ duplicate_flag The "duplicate_flag" entry specifies how to handle duplicate point observations in Point-Stat and Ensemble-Stat: -* "NONE" to use all point observations (legacy behavior) - -* "UNIQUE" only use a single observation if two or more observations +* NONE to use all point observations (legacy behavior) + +* UNIQUE only use a single observation if two or more observations match. Matching observations are determined if they contain identical latitude, longitude, level, elevation, and time information. They may contain different observation values or station IDs @@ -2307,23 +2496,23 @@ observations that appear at a single location (lat,lon,level,elev) in Point-Stat and Ensemble-Stat. 
Eight techniques are currently supported: -* "NONE" to use all point observations (legacy behavior) - -* "NEAREST" use only the observation that has the valid +* NONE to use all point observations (legacy behavior) + +* NEAREST use only the observation that has the valid time closest to the forecast valid time - -* "MIN" use only the observation that has the lowest value - -* "MAX" use only the observation that has the highest value - -* "UW_MEAN" compute an unweighted mean of the observations - -* "DW_MEAN" compute a weighted mean of the observations based + +* MIN use only the observation that has the lowest value + +* MAX use only the observation that has the highest value + +* UW_MEAN compute an unweighted mean of the observations + +* DW_MEAN compute a weighted mean of the observations based on the time of the observation - -* "MEDIAN" use the median observation - -* "PERC" use the Nth percentile observation where N = obs_perc_value + +* MEDIAN use the median observation + +* PERC use the Nth percentile observation where N = obs_perc_value The reporting mechanism for this feature can be activated by specifying a verbosity level of three or higher. The report will show information @@ -2337,14 +2526,14 @@ in those cases. obs_perc_value -------------- - + Percentile value to use when obs_summary = PERC .. code-block:: none obs_perc_value = 50; - + obs_quality_inc --------------- @@ -2360,7 +2549,7 @@ Note "obs_quality_inc" replaces the older option "obs_quality". obs_quality_inc = [ "1", "2", "3", "9" ]; - + obs_quality_exc --------------- @@ -2375,7 +2564,7 @@ an array of strings, even if the values themselves are numeric. obs_quality_exc = [ "1", "2", "3", "9" ]; - + met_data_dir ------------ @@ -2565,7 +2754,7 @@ entries. 
This dictionary may include the following entries: censor_val = []; ens_thresh = 1.0; vld_thresh = 1.0; - + field = [ { name = "APCP"; @@ -2626,37 +2815,37 @@ combination of the categorical threshold (cat_thresh), neighborhood width ensemble_flag ^^^^^^^^^^^^^ - + The "ensemble_flag" entry is a dictionary of boolean value indicating which ensemble products should be generated: * "latlon" for a grid of the Latitude and Longitude fields * "mean" for the simple ensemble mean - + * "stdev" for the ensemble standard deviation - + * "minus" for the mean minus one standard deviation - + * "plus" for the mean plus one standard deviation - + * "min" for the ensemble minimum - + * "max" for the ensemble maximum - + * "range" for the range of ensemble values - + * "vld_count" for the number of valid ensemble members - + * "frequency" for the ensemble relative frequency meeting a threshold - + * "nep" for the neighborhood ensemble probability - + * "nmep" for the neighborhood maximum ensemble probability - + * "rank" to write the rank for the gridded observation field to separate NetCDF output file. - + * "weight" to write the grid weights specified in grid_weight_flag to the rank NetCDF output file. @@ -2678,7 +2867,7 @@ which ensemble products should be generated: rank = TRUE; weight = FALSE; } - + EnsembleStatConfig_default -------------------------- @@ -2711,7 +2900,7 @@ data is provided, the climo_cdf thresholds will be used instead. ens_ssvar_bin_size = 1; ens_phist_bin_size = 0.05; prob_cat_thresh = []; - + field = [ { name = "APCP"; @@ -2796,7 +2985,7 @@ CHISQUARED distributions are defined by a single parameter. The GAMMA, UNIFORM, and BETA distributions are defined by two parameters. See the `GNU Scientific Library Reference Manual `_ for more information on these distributions. 
- + The inst_bias_scale and inst_bias_offset entries specify bias scale and offset values that should be applied to observation values prior to @@ -3084,92 +3273,92 @@ Floating-point max/min options: Setting limits on various floating-point attributes. One may specify these as integers (i.e., without a decimal point), if desired. The following pairs of options indicate minimum and maximum values for each MODE attribute that can be described as a floating- -point number. Please refer to "The MODE Tool" section on attributes in the +point number. Please refer to :ref:`mode-attributes` in the MET User's Guide for a description of these attributes. .. code-block:: none // centroid_x_min = 0.0; // centroid_x_max = 0.0; - + // centroid_y_min = 0.0; // centroid_y_max = 0.0; - + // centroid_lat_min = 0.0; // centroid_lat_max = 0.0; - + // centroid_lon_min = 0.0; // centroid_lon_max = 0.0; - + // axis_ang_min = 0.0; // axis_ang_max = 0.0; - + // length_min = 0.0; // length_max = 0.0; - + // width_min = 0.0; // width_max = 0.0; - + // aspect_ratio_min = 0.0; // aspect_ratio_max = 0.0; - + // curvature_min = 0.0; // curvature_max = 0.0; - + // curvature_x_min = 0.0; // curvature_x_max = 0.0; - + // curvature_y_min = 0.0; // curvature_y_max = 0.0; - + // complexity_min = 0.0; // complexity_max = 0.0; - + // intensity_10_min = 0.0; // intensity_10_max = 0.0; - + // intensity_25_min = 0.0; // intensity_25_max = 0.0; // intensity_50_min = 0.0; // intensity_50_max = 0.0; - + // intensity_75_min = 0.0; // intensity_75_max = 0.0; - + // intensity_90_min = 0.0; // intensity_90_max = 0.0; - + // intensity_user_min = 0.0; // intensity_user_max = 0.0; - + // intensity_sum_min = 0.0; // intensity_sum_max = 0.0; - + // centroid_dist_min = 0.0; // centroid_dist_max = 0.0; - + // boundary_dist_min = 0.0; // boundary_dist_max = 0.0; - + // convex_hull_dist_min = 0.0; // convex_hull_dist_max = 0.0; - + // angle_diff_min = 0.0; // angle_diff_max = 0.0; - + // area_ratio_min = 0.0; // 
area_ratio_max = 0.0; - + // intersection_over_area_min = 0.0; // intersection_over_area_max = 0.0; - + // complexity_ratio_min = 0.0; // complexity_ratio_max = 0.0; - + // percentile_intensity_ratio_min = 0.0; // percentile_intensity_ratio_max = 0.0; - + // interest_min = 0.0; // interest_max = 0.0; @@ -3250,15 +3439,15 @@ The object definition settings for MODE are contained within the "fcst" and merge_thresh = [ >=1.0, >=2.0, >=3.0 ]; * The "merge_flag" entry specifies the merging methods to be applied: - - * "NONE" for no merging - - * "THRESH" for the double-threshold merging method. Merge objects + + * NONE for no merging + + * THRESH for the double-threshold merging method. Merge objects that would be part of the same object at the lower threshold. - - * "ENGINE" for the fuzzy logic approach comparing the field to itself - - * "BOTH" for both the double-threshold and engine merging methods + + * ENGINE for the fuzzy logic approach comparing the field to itself + + * BOTH for both the double-threshold and engine merging methods .. code-block:: none @@ -3267,7 +3456,7 @@ The object definition settings for MODE are contained within the "fcst" and name = "APCP"; level = "A03"; } - + censor_thresh = []; censor_val = []; conv_radius = 60.0/grid_res; in grid squares @@ -3297,15 +3486,15 @@ match_flag The "match_flag" entry specifies the matching method to be applied: -* "NONE" for no matching between forecast and observation objects - -* "MERGE_BOTH" for matching allowing additional merging in both fields. +* NONE for no matching between forecast and observation objects + +* MERGE_BOTH for matching allowing additional merging in both fields. If two objects in one field match the same object in the other field, those two objects are merged. 
- -* "MERGE_FCST" for matching allowing only additional forecast merging - -* "NO_MERGE" for matching with no additional merging in either field + +* MERGE_FCST for matching allowing only additional forecast merging + +* NO_MERGE for matching with no additional merging in either field .. code-block:: none @@ -3325,7 +3514,7 @@ skip unreasonable object comparisons. weight ^^^^^^ - + The weight variables control how much weight is assigned to each pairwise attribute when computing a total interest value for object pairs. The weights need not sum to any particular value but must be non-negative. When the @@ -3359,23 +3548,23 @@ mathematical functions. .. code-block:: none interest_function = { - + centroid_dist = ( ( 0.0, 1.0 ) ( 60.0/grid_res, 1.0 ) ( 600.0/grid_res, 0.0 ) ); - + boundary_dist = ( ( 0.0, 1.0 ) ( 400.0/grid_res, 0.0 ) ); - + convex_hull_dist = ( ( 0.0, 1.0 ) ( 400.0/grid_res, 0.0 ) ); - + angle_diff = ( ( 0.0, 1.0 ) ( 30.0, 1.0 ) @@ -3388,24 +3577,24 @@ mathematical functions. ( corner, 1.0 ) ( 1.0, 1.0 ) ); - + area_ratio = ratio_if; - + int_area_ratio = ( ( 0.00, 0.00 ) ( 0.10, 0.50 ) ( 0.25, 1.00 ) ( 1.00, 1.00 ) ); - + complexity_ratio = ratio_if; - + inten_perc_ratio = ratio_if; } total_interest_thresh ^^^^^^^^^^^^^^^^^^^^^ - + The total_interest_thresh variable should be set between 0 and 1. This threshold is applied to the total interest values computed for each pair of objects and is used in determining matches. @@ -3454,7 +3643,7 @@ lines in the grid. ct_stats_flag ^^^^^^^^^^^^^ - + The ct_stats_flag can be set to TRUE or FALSE to produce additional output, in the form of contingency table counts and statistics. 
@@ -3484,16 +3673,16 @@ The PB2NC tool filters out observations from PREPBUFR or BUFR files using the following criteria: (1) by message type: supply a list of PREPBUFR message types to retain - + (2) by station id: supply a list of observation stations to retain - + (3) by valid time: supply the beginning and ending time offset values in the obs_window entry described above. (4) by location: use the "mask" entry described below to supply either an NCEP masking grid, a masking lat/lon polygon or a file to a mask lat/lon polygon - + (5) by elevation: supply min/max elevation values (6) by report type: supply a list of report types to retain using @@ -3501,15 +3690,15 @@ following criteria: (7) by instrument type: supply a list of instrument type to retain - + (8) by vertical level: supply beg/end vertical levels using the level_range entry described below - + (9) by variable type: supply a list of observation variable types to retain using the obs_bufr_var entry described below - + (10) by quality mark: supply a quality mark threshold - + (11) Flag to retain values for all quality marks, or just the first quality mark (highest): use the event_stack_flag described below @@ -3517,24 +3706,24 @@ following criteria: retain. 0 - Surface level (mass reports only) - + 1 - Mandatory level (upper-air profile reports) - + 2 - Significant temperature level (upper-air profile reports) - + 2 - Significant temperature and winds-by-pressure level (future combined mass and wind upper-air reports) - + 3 - Winds-by-pressure level (upper-air profile reports) - + 4 - Winds-by-height level (upper-air profile reports) - + 5 - Tropopause level (upper-air profile reports) - + 6 - Reports on a single level (e.g., aircraft, satellite-wind, surface wind, precipitable water retrievals, etc.) 
- + 7 - Auxiliary levels generated via interpolation from spanning levels (upper-air profile reports) @@ -3545,14 +3734,14 @@ In the PB2NC tool, the "message_type" entry is an array of message types to be retained. An empty list indicates that all should be retained. | List of valid message types: -| ADPUPA AIRCAR AIRCFT ADPSFC ERS1DA GOESND GPSIPW -| MSONET PROFLR QKSWND RASSDA SATEMP SATWND SFCBOG -| SFCSHP SPSSMI SYNDAT VADWND +| "ADPUPA", "AIRCAR", "AIRCFT", "ADPSFC", "ERS1DA", "GOESND", "GPSIPW", +| "MSONET", "PROFLR", "QKSWND", "RASSDA", "SATEMP", +| "SATWND", "SFCBOG", "SFCSHP", "SPSSMI", "SYNDAT", "VADWND" For example: | message_type[] = [ "ADPUPA", "AIRCAR" ]; | -| +| `Current Table A Entries in PREPBUFR mnemonic table `_ @@ -3662,12 +3851,12 @@ categories should be retained: | 1 = Mandatory level (upper-air profile reports) -| 2 = Significant temperature level (upper-air profile reports) +| 2 = Significant temperature level (upper-air profile reports) | 2 = Significant temperature and winds-by-pressure level (future combined mass -| and wind upper-air reports) +| and wind upper-air reports) -| 3 = Winds-by-pressure level (upper-air profile reports) +| 3 = Winds-by-pressure level (upper-air profile reports) | 4 = Winds-by-height level (upper-air profile reports) @@ -3679,7 +3868,7 @@ categories should be retained: | 7 = Auxiliary levels generated via interpolation from spanning levels | (upper-air profile reports) -| +| An empty list indicates that all should be retained. @@ -3728,7 +3917,7 @@ obs_prepbufr_map Default mapping for PREPBUFR. Replace input BUFR variable names with GRIB abbreviations in the output. This default map is appended to obs_bufr_map. This should not typically be overridden. This default mapping provides -backward-compatibility for earlier versions of MET which wrote GRIB +backward compatibility for earlier versions of MET which wrote GRIB abbreviations to the output. .. 
code-block:: none @@ -3750,7 +3939,7 @@ abbreviations to the output. quality_mark_thresh ^^^^^^^^^^^^^^^^^^^ - + The "quality_mark_thresh" entry specifies the maximum quality mark value to be retained. Observations with a quality mark LESS THAN OR EQUAL TO this threshold will be retained, while observations with a quality mark @@ -3765,7 +3954,7 @@ See `Code table for observation quality markers `_ * Valid combinations of the two are listed below: - - * "HAAR" for Haar wavelet (member = 2) - - * "HAAR_CNTR" for Centered-Haar wavelet (member = 2) - * "DAUB" for Daubechies wavelet (member = 4, 6, 8, 10, 12, 14, 16, + * HAAR for Haar wavelet (member = 2) + + * HAAR_CNTR for Centered-Haar wavelet (member = 2) + + * DAUB for Daubechies wavelet (member = 4, 6, 8, 10, 12, 14, 16, 18, 20) - - * "DAUB_CNTR" for Centered-Daubechies wavelet (member = 4, 6, 8, 10, + + * DAUB_CNTR for Centered-Daubechies wavelet (member = 4, 6, 8, 10, 12, 14, 16, 18, 20) - - * "BSPLINE" for Bspline wavelet (member = 103, 105, 202, 204, 206, + + * BSPLINE for Bspline wavelet (member = 103, 105, 202, 204, 206, 208, 301, 303, 305, 307, 309) - * "BSPLINE_CNTR" for Centered-Bspline wavelet (member = 103, 105, 202, + * BSPLINE_CNTR for Centered-Bspline wavelet (member = 103, 105, 202, 204, 206, 208, 301, 303, 305, 307, 309) .. code-block:: none diff --git a/docs/Users_Guide/config_options_tc.rst b/docs/Users_Guide/config_options_tc.rst index 1aa1e31f7a..cd3a69e96d 100644 --- a/docs/Users_Guide/config_options_tc.rst +++ b/docs/Users_Guide/config_options_tc.rst @@ -36,7 +36,7 @@ basin Specify a comma-separated list of basins to be used. Expected format is a 2-letter basin identifier. An empty list indicates that all should be used. -| Valid basins: WP, IO, SH, CP, EP, AL, SL +| Valid basins: "WP", "IO", "SH", "CP", "EP", "AL", "SL" | For example: @@ -235,12 +235,12 @@ Specify whether special processing should be performed for interpolated model names ending in 'I' (e.g. AHWI). 
Search for corresponding tracks whose model name ends in '2' (e.g. AHW2) and apply the following logic: -* "NONE" to do nothing. +* NONE to do nothing. -* "FILL" to create a copy of '2' track and rename it as 'I' only when the +* FILL to create a copy of '2' track and rename it as 'I' only when the 'I' track does not already exist. -* "REPLACE" to create a copy of the '2' track and rename it as 'I' in all +* REPLACE to create a copy of the '2' track and rename it as 'I' in all cases, replacing any 'I' tracks that may already exist. .. code-block:: none @@ -394,16 +394,16 @@ replaced with "val". This map can be used to modify basin names to make them consistent across the ATCF input files. Many global modeling centers use ATCF basin identifiers based on region -(e.g., 'SP' for South Pacific Ocean, etc.), however the best track data +(e.g., "SP" for South Pacific Ocean, etc.), however the best track data provided by the Joint Typhoon Warning Center (JTWC) use just one basin -identifier 'SH' for all of the Southern Hemisphere basins. Additionally, +identifier "SH" for all of the Southern Hemisphere basins. Additionally, some modeling centers may report basin identifiers separately for the Bay -of Bengal (BB) and Arabian Sea (AB) whereas JTWC uses 'IO'. +of Bengal (BB) and Arabian Sea (AB) whereas JTWC uses "IO". The basin mapping allows MET to map the basin identifiers to the expected values without having to modify your data. For example, the first entry -in the list below indicates that any data entries for 'SI' will be matched -as if they were 'SH'. In this manner, all verification results for the +in the list below indicates that any data entries for "SI" will be matched +as if they were "SH". In this manner, all verification results for the Southern Hemisphere basins will be reported together as one basin. An empty list indicates that no basin mapping should be used. 
Use this if @@ -854,11 +854,11 @@ Where "job_name" is set to one of the following: specified using the "-line_type" and "-column" arguments. For TCStat, the "-column" argument may be set to: - * "TRACK" for track, along-track, and cross-track errors. - * "WIND" for all wind radius errors. - * "TI" for track and maximum wind intensity errors. - * "AC" for along-track and cross-track errors. - * "XY" for x-track and y-track errors. + * TRACK for track, along-track, and cross-track errors. + * WIND for all wind radius errors. + * TI for track and maximum wind intensity errors. + * AC for along-track and cross-track errors. + * XY for x-track and y-track errors. * "col" for a specific column name. * "col1-col2" for a difference of two columns. * "ABS(col or col1-col2)" for the absolute value. diff --git a/docs/Users_Guide/ensemble-stat.rst b/docs/Users_Guide/ensemble-stat.rst index dd221dd1d2..1680164d70 100644 --- a/docs/Users_Guide/ensemble-stat.rst +++ b/docs/Users_Guide/ensemble-stat.rst @@ -50,7 +50,7 @@ The relative position (RELP) is a count of the number of times each ensemble mem The ranked probability score (RPS) is included in the Ranked Probability Score (RPS) line type. It is the mean of the Brier scores computed from ensemble probabilities derived for each probability category threshold (prob_cat_thresh) specified in the configuration file. The continuous ranked probability score (CRPS) is the average the distance between the forecast (ensemble) cumulative distribution function and the observation cumulative distribution function. It is an analog of the Brier score, but for continuous forecast and observation fields. The CRPS statistic is computed using two methods: assuming a normal distribution defined by the ensemble mean and spread (:ref:`Gneiting et al., 2004 `) and using the empirical ensemble distribution (:ref:`Hersbach, 2000 `). 
The CRPS statistic using the empirical ensemble distribution can be adjusted (bias corrected) by subtracting 1/(2*m) times the mean absolute difference of the ensemble members, where m is the ensemble size. This is reported as a separate statistic called CRPS_EMP_FAIR. The empirical CRPS and its fair version are included in the Ensemble Continuous Statistics (ECNT) line type, along with other statistics quantifying the ensemble spread and ensemble mean skill. -The Ensemble-Stat tool can derive ensemble relative frequencies and verify them as probability forecasts all in the same run. Note however that these simple ensemble relative frequencies are not actually calibrated probability forecasts. If probabilistic line types are requested (output_flag), this logic is applied to each pair of fields listed in the forecast (fcst) and observation (obs) dictionaries of the configuration file. Each probability category threshold (prob_cat_thresh) listed for the forecast field is applied to the input ensemble members to derive a relative frequency forecast. The probability category threshold (prob_cat_thresh) parsed from the corresponding observation entry is applied to the (gridded or point) observations to determine whether or not the event actually occurred. The paired ensemble relative freqencies and observation events are used to populate an Nx2 probabilistic contingency table. The dimension of that table is determined by the probability PCT threshold (prob_pct_thresh) configuration file option parsed from the forecast dictionary. All probabilistic output types requested are derived from the this Nx2 table and written to the ascii output files. Note that the FCST_VAR name header column is automatically reset as "PROB({FCST_VAR}{THRESH})" where {FCST_VAR} is the current field being evaluated and {THRESH} is the threshold that was applied. +The Ensemble-Stat tool can derive ensemble relative frequencies and verify them as probability forecasts all in the same run. 
Note however that these simple ensemble relative frequencies are not actually calibrated probability forecasts. If probabilistic line types are requested (output_flag), this logic is applied to each pair of fields listed in the forecast (fcst) and observation (obs) dictionaries of the configuration file. Each probability category threshold (prob_cat_thresh) listed for the forecast field is applied to the input ensemble members to derive a relative frequency forecast. The probability category threshold (prob_cat_thresh) parsed from the corresponding observation entry is applied to the (gridded or point) observations to determine whether or not the event actually occurred. The paired ensemble relative frequencies and observation events are used to populate an Nx2 probabilistic contingency table. The dimension of that table is determined by the probability PCT threshold (prob_pct_thresh) configuration file option parsed from the forecast dictionary. All probabilistic output types requested are derived from this Nx2 table and written to the ascii output files. Note that the FCST_VAR name header column is automatically reset as "PROB({FCST_VAR}{THRESH})" where {FCST_VAR} is the current field being evaluated and {THRESH} is the threshold that was applied. Note that if no probability category thresholds (prob_cat_thresh) are defined, but climatological mean and standard deviation data is provided along with climatological bins, climatological distribution percentile thresholds are automatically derived and used to compute probabilistic outputs. @@ -160,34 +160,48 @@ ____________________ .. code-block:: none - model = "FCST"; - desc = "NA"; - obtype = "ANALYS"; - regrid = { ... } - climo_mean = { ... } - climo_stdev = { ... } - climo_cdf = { ... 
} - obs_window = { beg = -5400; end = 5400; } - mask = { grid = [ "FULL" ]; poly = []; sid = []; } - ci_alpha = [ 0.05 ]; - interp = { field = BOTH; vld_thresh = 1.0; shape = SQUARE; - type = [ { method = NEAREST; width = 1; } ]; } - eclv_points = []; - sid_inc = []; - sid_exc = []; - duplicate_flag = NONE; + model = "FCST"; + desc = "NA"; + obtype = "ANALYS"; + regrid = { ... } + climo_mean = { ... } + climo_stdev = { ... } + climo_cdf = { ... } + obs_window = { beg = -5400; end = 5400; } + mask = { grid = [ "FULL" ]; poly = []; sid = []; } + ci_alpha = [ 0.05 ]; + interp = { field = BOTH; vld_thresh = 1.0; shape = SQUARE; + type = [ { method = NEAREST; width = 1; } ]; } + eclv_points = []; + sid_inc = []; + sid_exc = []; + duplicate_flag = NONE; obs_quality_inc = []; obs_quality_exc = []; - obs_summary = NONE; - obs_perc_value = 50; + obs_summary = NONE; + obs_perc_value = 50; message_type_group_map = [...]; - output_prefix = ""; - version = "VN.N"; + obtype_as_group_val_flag = FALSE; + grid_weight_flag = NONE; + point_weight_flag = NONE; + output_prefix = ""; + version = "VN.N"; The configuration options listed above are common to many MET tools and are described in :numref:`config_options`. -Note that the **HIRA** interpolation method is only supported in Ensemble-Stat. +.. note:: + + The **HIRA** interpolation method is only supported in Ensemble-Stat. + +.. note:: + + The "grid_weight_flag" and "point_weight_flag" options described in + :numref:`config_options` define how matched pairs are weighted for + grid-to-grid and grid-to-point verification in Ensemble-Stat. These + weights currently only apply to the computation of probabilistic + outputs (PCT, PSTD, PJC, and PRC) but no other Ensemble-Stat output + line types. 
_____________________ @@ -856,30 +870,36 @@ The format of the STAT and ASCII output of the Ensemble-Stat tool are described * - 37 - ENS_i - Value of the ith ensemble member (repeated) - * - Last-7 + * - Last-9 - OBS_QC - Quality control string for the observation - * - Last-6 + * - Last-8 - ENS_MEAN - The unperturbed ensemble mean value - * - Last-5 - - CLIMO_MEAN - - Climatological mean value (named CLIMO prior to met-10.0.0) - * - Last-4 + * - Last-7 + - OBS_CLIMO_MEAN + - Observation climatological mean value (named CLIMO_MEAN prior to met-12.0.0) + * - Last-6 - SPREAD - The spread (standard deviation) of the unperturbed ensemble member values - * - Last-3 + * - Last-5 - ENS_MEAN _OERR - The PERTURBED ensemble mean (e.g. with Observation Error). - * - Last-2 + * - Last-4 - SPREAD_OERR - The spread (standard deviation) of the PERTURBED ensemble member values (e.g. with Observation Error). - * - Last-1 + * - Last-3 - SPREAD_PLUS_OERR - The square root of the sum of the unperturbed ensemble variance and the observation error variance. + * - Last-2 + - OBS_CLIMO_STDEV + - Observation climatological standard deviation value (named CLIMO_STDEV prior to met-12.0.0) + * - Last-1 + - FCST_CLIMO_MEAN + - Forecast climatological mean value * - Last - - CLIMO_STDEV - - Climatological standard deviation value + - FCST_CLIMO_STDEV + - Forecast climatological standard deviation value .. 
role:: raw-html(raw) :format: html diff --git a/docs/Users_Guide/figure/installation_dir.png b/docs/Users_Guide/figure/installation_dir.png deleted file mode 100644 index 40f1b48231..0000000000 Binary files a/docs/Users_Guide/figure/installation_dir.png and /dev/null differ diff --git a/docs/Users_Guide/figure/installation_dir_after.png b/docs/Users_Guide/figure/installation_dir_after.png deleted file mode 100644 index e87a14e595..0000000000 Binary files a/docs/Users_Guide/figure/installation_dir_after.png and /dev/null differ diff --git a/docs/Users_Guide/gen-ens-prod.rst b/docs/Users_Guide/gen-ens-prod.rst index 57da0849cb..7f6d68c6c7 100644 --- a/docs/Users_Guide/gen-ens-prod.rst +++ b/docs/Users_Guide/gen-ens-prod.rst @@ -30,7 +30,9 @@ The Gen-Ens-Prod tool writes the gridded relative frequencies, NEP, and NMEP fie Climatology Data ---------------- -The ensemble relative frequencies derived by Gen-Ens-Prod are computed by applying threshold(s) to the input ensemble member data. Those thresholds can be simple and remain constant over the entire domain (e.g. >0) or can be defined relative to the climatological distribution at each grid point (e.g. >CDP90, for exceeding the 90-th percentile of climatology). When using climatological distribution percentile (CDP) thresholds, the climatological mean and standard deviation must be provided in the configuration file. +The ensemble relative frequencies derived by Gen-Ens-Prod are computed by applying threshold(s) to the input ensemble member data. Those thresholds can be simple and remain constant over the entire domain (e.g. >0) or can be defined relative to the climatological distribution at each grid point (e.g. >OCDP90, for exceeding the 90-th percentile of the observation climatology data provided). + +To use climatological distribution percentile thresholds, users must specify the climatological mean ("climo_mean") and standard deviation ("climo_stdev") entries in the configuration file. 
With forecast climatology inputs, use forecast climatological distribution percentile thresholds (e.g. >FCDP90). With observation climatology inputs, use observation climatological distribution percentile thresholds instead (e.g. >OCDP90). However, Gen-Ens-Prod cannot actually determine the input climatology data source and both "FCDP" and "OCDP" threshold types will work. Practical Information ===================== @@ -295,7 +297,7 @@ The **ensemble_flag** specifies which derived ensemble fields should be calculat 13. Climatology mean (**climo_mean**) and standard deviation (**climo_stdev**) data regridded to the model domain -14. Climatological Distribution Percentile field for each CDP threshold specified +14. Climatological Distribution Percentile field for each FCDP or OCDP threshold specified gen_ens_prod Output ------------------- diff --git a/docs/Users_Guide/grid-diag.rst index f2fd55e78c..59bbb38616 100644 --- a/docs/Users_Guide/grid-diag.rst +++ b/docs/Users_Guide/grid-diag.rst @@ -27,7 +27,9 @@ The following sections describe the usage statement, required arguments, and opt [-v level] [-compress level] - NOTE: The "-data" option can be used once to read all fields from each input file or once for each field to be processed. +.. note:: + + The "-data" option can be used once to read all fields from each input file or once for each field to be processed. grid_diag has required arguments and can accept several optional arguments.
This follows the method described in :ref:`North et al, 2022 `, which uses the TRMM 3B42 v7 gridded satellite product for the climatological values and interpolates the forecast and observed products onto this grid for evaluation. A 24-hour TRMM climatology (valid at 00 UTC) constructed from data over the time period 1998-2015 is supplied with the release. Expansion of the capability to other fields will occur as well vetted examples and funding allow. +The gridded climatology required to compute SEEPS is not distributed as part of the code release and can be downloaded from `Zenodo `. The path to the file needs to be specified using MET_SEEPS_GRID_CLIMO_NAME. + Fourier Decomposition --------------------- @@ -239,31 +241,32 @@ __________________________ .. code-block:: none - model = "FCST"; - desc = "NA"; - obtype = "ANALYS"; - fcst = { ... } - obs = { ... } - regrid = { ... } - climo_mean = { ... } - climo_stdev = { ... } - climo_cdf = { ... } - mask = { grid = [ "FULL" ]; poly = []; } - ci_alpha = [ 0.05 ]; - boot = { interval = PCTILE; rep_prop = 1.0; n_rep = 1000; - rng = "mt19937"; seed = ""; } - interp = { field = BOTH; vld_thresh = 1.0; shape = SQUARE; - type = [ { method = NEAREST; width = 1; } ]; } - censor_thresh = []; - censor_val = []; - mpr_column = []; - mpr_thresh = []; - eclv_points = 0.05; - hss_ec_value = NA; - rank_corr_flag = TRUE; - tmp_dir = "/tmp"; - output_prefix = ""; - version = "VN.N"; + model = "FCST"; + desc = "NA"; + obtype = "ANALYS"; + fcst = { ... } + obs = { ... } + regrid = { ... } + climo_mean = { ... } + climo_stdev = { ... } + climo_cdf = { ... 
} + mask = { grid = [ "FULL" ]; poly = []; } + ci_alpha = [ 0.05 ]; + boot = { interval = PCTILE; rep_prop = 1.0; n_rep = 1000; + rng = "mt19937"; seed = ""; } + interp = { field = BOTH; vld_thresh = 1.0; shape = SQUARE; + type = [ { method = NEAREST; width = 1; } ]; } + censor_thresh = []; + censor_val = []; + mpr_column = []; + mpr_thresh = []; + eclv_points = 0.05; + hss_ec_value = NA; + rank_corr_flag = TRUE; + grid_weight_flag = NONE; + tmp_dir = "/tmp"; + output_prefix = ""; + version = "VN.N"; The configuration options listed above are common to multiple MET tools and are described in :numref:`config_options`. @@ -428,7 +431,7 @@ The **output_flag** array controls the type of output that the Grid-Stat tool ge Note that the first two line types are easily derived from one another. The user is free to choose which measure is most desired. The output line types are described in more detail in :numref:`grid_stat-output`. -The SEEPS climo file is not distributed with MET tools because of the file size. It should be configured by using the environment variable, MET_SEEPS_GRID_CLIMO_NAME. +The SEEPS climo file is not distributed with MET tools because of the file size. It should be configured by using the configuration file (seeps_grid_climo_name). It can be overridden by the environment variable, MET_SEEPS_GRID_CLIMO_NAME. _____________________ @@ -451,7 +454,7 @@ _____________________ The **nc_pairs_flag** entry may either be set to a boolean value or a dictionary specifying which fields should be written. Setting it to TRUE indicates the output NetCDF matched pairs file should be created with all available output fields, while setting all to FALSE disables its creation. This is done regardless of if **output_flag** dictionary indicates any statistics should be computed. 
The **latlon, raw**, and **diff** entries control the creation of output variables for the latitude and longitude, the forecast and observed fields after they have been modified by any user-defined regridding, censoring, and conversion, and the forecast minus observation difference fields, respectively. The **climo, weight**, and **nbrhd** entries control the creation of output variables for the climatological mean and standard deviation fields, the grid area weights applied, and the fractional coverage fields computed for neighborhood verification methods. Setting these entries to TRUE indicates that they should be written, while setting them to FALSE disables their creation. -Setting the **climo_cdp** entry to TRUE enables the creation of an output variable for each climatological distribution percentile (CDP) threshold requested in the configuration file. Note that enabling **nbrhd** output may lead to very large output files. The **gradient** entry controls the creation of output variables for the FCST and OBS gradients in the grid-x and grid-y directions. The **distance_map** entry controls the creation of output variables for the FCST and OBS distance maps for each categorical threshold. The **apply_mask** entry controls whether to create the FCST, OBS, and DIFF output variables for all defined masking regions. Setting this to TRUE will create the FCST, OBS, and DIFF output variables for all defined masking regions. Setting this to FALSE will create the FCST, OBS, and DIFF output variables for only the FULL verification domain. +Setting the **climo_cdp** entry to TRUE enables the creation of an output variable for each climatological distribution percentile (FCDP or OCDP) threshold requested in the configuration file. Note that enabling **nbrhd** output may lead to very large output files. The **gradient** entry controls the creation of output variables for the FCST and OBS gradients in the grid-x and grid-y directions. 
The **distance_map** entry controls the creation of output variables for the FCST and OBS distance maps for each categorical threshold. The **apply_mask** entry controls whether to create the FCST, OBS, and DIFF output variables for all defined masking regions. Setting this to TRUE will create the FCST, OBS, and DIFF output variables for all defined masking regions. Setting this to FALSE will create the FCST, OBS, and DIFF output variables for only the FULL verification domain. ______________________ diff --git a/docs/Users_Guide/installation.rst b/docs/Users_Guide/installation.rst index 4540d341b4..5e5d1469db 100644 --- a/docs/Users_Guide/installation.rst +++ b/docs/Users_Guide/installation.rst @@ -162,18 +162,34 @@ Environment Variables to Run Script Before running the compilation script, there are five environment variables that are required: **TEST_BASE**, **COMPILER**, **MET_SUBDIR**, **MET_TARBALL**, and **USE_MODULES**. + If compiling support for Python embedding, the script will need the following additional environment variables: **MET_PYTHON**, **MET_PYTHON_CC**, and -**MET_PYTHON_LD**. All of these environment variables are discussed -in further detail in the Environment Variable Descriptions section below. -An easy way to set these environment variables is in an environment -configuration file (for example, **install_met_env.**). An -example environment configuration file to start from (**install_met_env.generic_gnu**), -as well as environment configuration files used on HPCs at NCAR and NOAA, +**MET_PYTHON_LD**. + +All of these environment variables are discussed in further detail in the +Environment Variable Descriptions section below. An easy way to set these +environment variables is in an environment configuration file +(for example, **install_met_env.**). 
An example environment +configuration file to start with (**install_met_env.generic_gnu**), +as well as the environment configuration files used on HPCs at NCAR and NOAA, can be found in the `MET GitHub repository `_ in the `scripts/installation/config `_ directory. +.. note:: Starting with MET-12.0.0, C++17 is the default C++ standard for MET due to the requirements of its dependent libraries. However, MET itself only makes use of C++11 features. + + The ATLAS library (conditionally required for MET, if support for + unstructured grids is desired) + `versions 0.33.0 `_ + and later requires compiler support for the C++17 standard. + + At this time, users with systems that do not yet support the C++17 + standard, can still compile MET with an older C++ standard, using an + older version of ATLAS, by adding the MET_CXX_STANDARD variable to + the environment configuration file as described in the **OPTIONAL** + section below. + Environment Variable Descriptions --------------------------------- @@ -198,7 +214,7 @@ Environment Variable Descriptions subdirectory will be installed and is often set equivalent to **TEST_BASE** (e.g. ${TEST_BASE}). - **MET_TARBALL** – Format is *v12.0.0tar.gz*. This is the name of the downloaded MET tarball. + **MET_TARBALL** – Format is *v12.0.0.tar.gz*. This is the name of the downloaded MET tarball. **USE_MODULES** – Format is *TRUE* or *FALSE*. Set to FALSE if using a machine that does not use modulefiles; set to TRUE if using a machine that does use modulefiles. 
For more information on @@ -218,6 +234,7 @@ Environment Variable Descriptions following environment variables if using the Intel compilers: | For non-oneAPI Intel compilers: + | | export FC=ifort | export F77=ifort | export F90=ifort @@ -226,6 +243,7 @@ Environment Variable Descriptions | For oneAPI Intel compilers: + | | export FC=ifx | export F77=ifx | export F90=ifx @@ -239,7 +257,6 @@ Environment Variable Descriptions configuration file, and users with a oneAPI Intel compiler should use the install_met_env.generic_intel_oneapi configuration file. - .. dropdown:: REQUIRED, IF COMPILING PYTHON EMBEDDING **MET_PYTHON** – Format is */usr/local/python3*. @@ -247,7 +264,7 @@ Environment Variable Descriptions containing the bin, include, lib, and share directories for Python. **MET_PYTHON_CC** - Format is -I followed by the directory containing - the Python include files (ex. -I/usr/local/python3/include/python3.10). + the Python include files (e.g. -I/usr/local/python3/include/python3.10). This information may be obtained by running :code:`python3-config --cflags`; however, this command can, on certain systems, @@ -257,7 +274,7 @@ Environment Variable Descriptions the Python library files then a space, then -l followed by the necessary Python libraries to link to - (ex. -L/usr/local/python3/lib/\\ -lpython3.10\\ + (e.g. -L/usr/local/python3/lib/\\ -lpython3.10\\ -lpthread\\ -ldl\\ -lutil\\ -lm). The backslashes are necessary in the example shown because of the spaces, which will be @@ -285,13 +302,17 @@ Environment Variable Descriptions without a specified value of cores to use. The automated MET testing scripts in the Docker environment have been successful with a value of - 5 (ex. export MAKE_ARGS=”-j 5”). + 5 (e.g. export MAKE_ARGS=”-j 5”). + + **export MET_CXX_STANDARD** - Specify the version of the supported + C++ standard. Values may be 11, 14, or 17. The default value is 17. + (e.g. 
export MET_CXX_STANDARD=11) External Library Handling in compile_MET_all.sh ----------------------------------------------- -.. dropdown:: IF THE USER WANTS TO HAVE THE COMPILATION SCRIPT DOWNLOAD THE LIBRARY DEPENDENCIES +.. dropdown:: IF THE USER WANTS TO HAVE THE COMPILATION SCRIPT COMPILE THE LIBRARY DEPENDENCIES The **compile_MET_all.sh** script will compile and install MET and its :ref:`required_external_libraries_to_build_MET`, if needed. @@ -395,7 +416,21 @@ particular system’s needs, MET is ready for installation. The screenshot below contents of the installation directory followed by the tar_files subdirectory at this step on the machine ‘hera’. -.. image:: figure/installation_dir.png +.. code-block:: ini + + /contrib/met/12.0.0$ ls + compile_MET_all.sh install_met_env.hera tar_files + + /contrib/met/12.0.0$ ls tar_files + HDF-EOS2.16v1.00.tar.Z eckit-1.24.4.tar.gz netcdf-4.7.4.tar.gz + HDF4.2r3.tar.gz freetype-2.11.0.tar.gz netcdf-cxx4-4.3.1.tar.gz + atlas-0.30.0.tar.gz g2clib-1.6.4.tar.gz pixman-0.40.0.tar.gz + atlas-0.35.0.tar.gz gsl-1.11.tar.gz proj-7.1.0.tar.gz + bufr_v11.6.0.tar.gz gsl-2.7.1.tar.gz sqlite-autoconf-3430100.tar.gz + cairo-1.16.0.tar.xz hdf5-1.12.2.tar.gz tiff-4.6.0.tar.gz + ecbuild-3.5.0.tar.gz jasper-2.0.25.tar.gz zlib-1.2.11.tar.gz + ecbuild-3.7.0.tar.gz jpegsrc.v9e.tar.gz + eckit-1.20.2.tar.gz libpng-1.6.37.tar.gz Simply enter the following into the terminal to execute the script: @@ -405,14 +440,17 @@ Simply enter the following into the terminal to execute the script: The screenshot below shows the contents of the installation directory after installation: -.. image:: figure/installation_dir_after.png +.. code-block:: ini + + /contrib/met/12.0.0$ ls + MET-12.0.0 bin compile_MET_all.sh external_libs install_met_env.hera share tar_files To confirm that MET was installed successfully, run the following command from the installation directory to check for errors in the test file: .. 
code-block:: ini - - grep -i error MET12.0.0/met.make_test.log - + + grep -i error MET-12.0.0/met.make_test.log + If no errors are returned, the installation was successful. Due to the highly variable nature of hardware systems, users may encounter issues during the installation process that result in MET not being installed. If this occurs please @@ -459,7 +497,6 @@ version. If a different version is required, select the correct version from the dropdown option. Follow Docker’s instructions for a successful installation. - Loading the Latest Docker Image of MET -------------------------------------- @@ -477,7 +514,6 @@ version number will result in an error due to Docker’s behavior of attempting to retrieve an image with the “latest” tag, which MET no longer uses. - Running the Docker version of MET --------------------------------- @@ -498,7 +534,7 @@ the same way the latest image of MET was pulled: .. code-block:: ini docker run -it --rm dtcenter/met:12.0.0 /bin/bash - + If the usage MET via Docker images was successful, it is highly recommended to move on to using the METplus wrappers of the tools, which have their own @@ -548,7 +584,6 @@ to make the container: singularity build met-12.0.0.sif docker://dtcenter/met:12.0.0 - Running the MET Container ------------------------- @@ -569,7 +604,7 @@ be used otherwise the instance will continue to run in the background: .. code-block:: ini - singularity instance stop /path/to/container/met-12.0.0.sif met-12.0.0 + singularity instance stop /path/to/container/met-12.0.0.sif met-12.0.0 Now that MET is successfully installed, it is highly recommended to next install the METplus wrappers to take full advantage of @@ -579,5 +614,3 @@ Users can also proceed to the and run through the examples that only utilize the MET processes (METplus wrapper applications and commands will not work unless METplus wrappers are also installed). 
- - diff --git a/docs/Users_Guide/masking.rst b/docs/Users_Guide/masking.rst index 0d705ac06e..5dd8fe72d8 100644 --- a/docs/Users_Guide/masking.rst +++ b/docs/Users_Guide/masking.rst @@ -178,4 +178,4 @@ In this example, the Gen-Vx-Mask tool will read the ASCII Lat/Lon file named **C Feature-Relative Methods ======================== -This section contains a description of several methods that may be used to perform feature-relative (or event -based) evaluation. The methodology pertains to examining the environment surrounding a particular feature or event such as a tropical, extra-tropical cyclone, convective cell, snow-band, etc. Several approaches are available for these types of investigations including applying masking described above (e.g. circle or box) or using the "FORCE" interpolation method in the regrid configuration option (see :numref:`config_options`). These methods generally require additional scripting, including potentially storm-track identification, outside of MET to be paired with the features of the MET tools. METplus may be used to execute this type of analysis. Please refer to the `METplus User's Guide `_. +This section contains a description of several methods that may be used to perform feature-relative (or event -based) evaluation. The methodology pertains to examining the environment surrounding a particular feature or event such as a tropical, extra-tropical cyclone, convective cell, snow-band, etc. Several approaches are available for these types of investigations including applying masking described above (e.g. circle or box) or using the FORCE interpolation method in the regrid configuration option (see :numref:`config_options`). These methods generally require additional scripting, including potentially storm-track identification, outside of MET to be paired with the features of the MET tools. METplus may be used to execute this type of analysis. Please refer to the `METplus User's Guide `_. 
diff --git a/docs/Users_Guide/mode-td.rst b/docs/Users_Guide/mode-td.rst index c8c07c1117..f7dd558a7c 100644 --- a/docs/Users_Guide/mode-td.rst +++ b/docs/Users_Guide/mode-td.rst @@ -217,7 +217,9 @@ Required Arguments for mtd Optional Arguments for mtd ^^^^^^^^^^^^^^^^^^^^^^^^^^ -4. **-single file\_list** may be used instead of **-fcst** and **-obs** to define objects in a single field. +4. **-single file\_list** command line option may be used instead of the **-fcst** and **-obs** command line options to define objects in a single field. + +.. note:: When the **-single** command line option is used, data specified in the **fcst** configuration file entry is read from those input files. 5. **-log file** gives the name of a file where a log of this MTD run will be written. All output that appears on the screen during a MTD run will be duplicated in the log file. diff --git a/docs/Users_Guide/mode.rst b/docs/Users_Guide/mode.rst index 491b452002..2dc4bc3e96 100644 --- a/docs/Users_Guide/mode.rst +++ b/docs/Users_Guide/mode.rst @@ -57,6 +57,7 @@ An example of the steps involved in resolving objects is shown in :numref:`mode- Example of an application of the MODE object identification process to a model precipitation field. +.. _mode-attributes: Attributes ---------- @@ -913,7 +914,7 @@ The contents of the columns in this ASCII file are summarized in :numref:`MODE_o * - 50 - AREA_RATIO - The forecast object area divided by the observation object area (unitless) :raw-html:`
` - **NOTE:** Prior to met-10.0.0, defined as the lesser of the two object areas divided by the greater of the two + **NOTE:** Prior to MET version 10.0.0, the AREA_RATIO was defined as the lesser of the two object areas divided by the greater of the two. * - 51 - INTERSECTION :raw-html:`
` \_AREA - Intersection area of two objects (in grid squares) diff --git a/docs/Users_Guide/overview.rst b/docs/Users_Guide/overview.rst index 37cf16b404..1e0d362bb4 100644 --- a/docs/Users_Guide/overview.rst +++ b/docs/Users_Guide/overview.rst @@ -62,7 +62,7 @@ The Grid-Diag tool produces multivariate probability density functions (PDFs) th The Wavelet-Stat tool decomposes two-dimensional forecasts and observations according to the Intensity-Scale verification technique described by :ref:`Casati et al. (2004) `. There are many types of spatial verification approaches and the Intensity-Scale technique belongs to the scale-decomposition (or scale-separation) verification approaches. The spatial scale components are obtained by applying a wavelet transformation to the forecast and observation fields. The resulting scale-decomposition measures error, bias and skill of the forecast on each spatial scale. Information is provided on the scale dependency of the error and skill, on the no-skill to skill transition scale, and on the ability of the forecast to reproduce the observed scale structure. The Wavelet-Stat tool is primarily used for precipitation fields. However, the tool can be applied to other variables, such as cloud fraction. -Results from the statistical analysis stage are output in ASCII, NetCDF and Postscript formats. The Point-Stat, Grid-Stat, Wavelet-Stat, and Ensemble-Stat tools create STAT (statistics) files which are tabular ASCII files ending with a ".stat" suffix. The STAT output files consist of multiple line types, each containing a different set of related statistics. The columns preceeding the LINE_TYPE column are common to all lines. However, the number and contents of the remaining columns vary by line type. +Results from the statistical analysis stage are output in ASCII, NetCDF and Postscript formats. 
The Point-Stat, Grid-Stat, Wavelet-Stat, and Ensemble-Stat tools create STAT (statistics) files which are tabular ASCII files ending with a ".stat" suffix. The STAT output files consist of multiple line types, each containing a different set of related statistics. The columns preceding the LINE_TYPE column are common to all lines. However, the number and contents of the remaining columns vary by line type. The Stat-Analysis and MODE-Analysis tools aggregate the output statistics from the previous steps across multiple cases. The Stat-Analysis tool reads the STAT output of Point-Stat, Grid-Stat, Ensemble-Stat, and Wavelet-Stat and can be used to filter the STAT data and produce aggregated continuous and categorical statistics. Stat-Analysis also reads matched pair data (i.e. MPR line type) via python embedding. The MODE-Analysis tool reads the ASCII output of the MODE tool and can be used to produce summary information about object location, size, and intensity (as well as other object characteristics) across one or more cases. diff --git a/docs/Users_Guide/point-stat.rst b/docs/Users_Guide/point-stat.rst index 6c32537146..41e154ac8c 100644 --- a/docs/Users_Guide/point-stat.rst +++ b/docs/Users_Guide/point-stat.rst @@ -23,7 +23,7 @@ Interpolation/Matching Methods This section provides information about the various methods available in MET to match gridded model output to point observations. Matching in the vertical and horizontal are completed separately using different methods. -In the vertical, if forecasts and observations are at the same vertical level, then they are paired as-is. If any discrepancy exists between the vertical levels, then the forecasts are interpolated to the level of the observation. The vertical interpolation is done in the natural log of pressure coordinates, except for specific humidity, which is interpolated using the natural log of specific humidity in the natural log of pressure coordinates. 
Vertical interpolation for heights above ground are done linear in height coordinates. When forecasts are for the surface, no interpolation is done. They are matched to observations with message types that are mapped to **SURFACE** in the **message_type_group_map** configuration option. By default, the surface message types include ADPSFC, SFCSHP, and MSONET. The regular expression is applied to the message type list at the message_type_group_map. The derived message types from the time summary ("ADPSFC_MIN_hhmmss" and "ADPSFC_MAX_hhmmss") are accepted as "ADPSFC". +In the vertical, if forecasts and observations are at the same vertical level, then they are paired as-is. If any discrepancy exists between the vertical levels, then the forecasts are interpolated to the level of the observation. The vertical interpolation is done in the natural log of pressure coordinates, except for specific humidity, which is interpolated using the natural log of specific humidity in the natural log of pressure coordinates. Vertical interpolation for heights above ground are done linear in height coordinates. When forecasts are for the surface, no interpolation is done. They are matched to observations with message types that are mapped to "SURFACE" in the **message_type_group_map** configuration option. By default, the surface message types include ADPSFC, SFCSHP, and MSONET. The regular expression is applied to the message type list at the message_type_group_map. The derived message types from the time summary ("ADPSFC_MIN_hhmmss" and "ADPSFC_MAX_hhmmss") are accepted as "ADPSFC". To match forecasts and observations in the horizontal plane, the user can select from a number of methods described below. Many of these methods require the user to define the width of the forecast grid W, around each observation point P, that should be considered. In addition, the user can select the interpolation shape, either a SQUARE or a CIRCLE. 
For example, a square of width 2 defines the 2 x 2 set of grid points enclosing P, or simply the 4 grid points closest to P. A square of width of 3 defines a 3 x 3 square consisting of 9 grid points centered on the grid point closest to P. :numref:`point_stat_fig1` provides illustration. The point P denotes the observation location where the interpolated value is calculated. The interpolation width W, shown is five. @@ -264,7 +264,7 @@ Practical Information The Point-Stat tool is used to perform verification of a gridded model field using point observations. The gridded model field to be verified must be in one of the supported file formats. The point observations must be formatted as the NetCDF output of the point reformatting tools described in :numref:`reformat_point`. The Point-Stat tool provides the capability of interpolating the gridded forecast data to the observation points using a variety of methods as described in :numref:`matching-methods`. The Point-Stat tool computes a number of continuous statistics on the matched pair data as well as discrete statistics once the matched pair data have been thresholded. -If no matched pairs are found for a particular verification task, a report listing counts for reasons why the observations were not used is written to the log output at the default verbosity level of 2. If matched pairs are found, this report is written at verbosity level 3. Inspecting these rejection reason counts is the first step in determining why Point-Stat found no matched pairs. The order of the log messages matches the order in which the processing logic is applied. Start from the last log message and work your way up, considering each of the non-zero rejection reason counts. +If no matched pairs are found for a particular verification task, a report listing counts for reasons why the observations were not used is written to the log output at the default verbosity level of 2. 
If matched pairs are found, this report is written at verbosity level 3. Inspecting these rejection reason counts is the first step in determining why Point-Stat found no matched pairs. The order of the log messages matches the order in which the processing logic is applied. Start from the last log message and work your way up, considering each of the non-zero rejection reason counts. Verbosity level 9 prints a very detailed explanation about why each observation is used or skipped for each verification task. point_stat Usage ---------------- @@ -362,6 +362,8 @@ ________________________ obs_summary = NONE; obs_perc_value = 50; message_type_group_map = [...]; + obtype_as_group_val_flag = FALSE; + point_weight_flag = NONE; tmp_dir = "/tmp"; output_prefix = ""; version = "VN.N"; @@ -501,7 +503,7 @@ Note that writing out matched pair data (MPR lines) for a large number of cases If all line types corresponding to a particular verification method are set to NONE, the computation of those statistics will be skipped in the code and thus make the Point-Stat tool run more efficiently. For example, if FHO, CTC, and CTS are all set to NONE, the Point-Stat tool will skip the categorical verification step. -The default SEEPS climo file exists at MET_BASE/climo/seeps/PPT24_seepsweights.nc. It can be overridden by using the environment variable, MET_SEEPS_POINT_CLIMO_NAME. +The default SEEPS climo file exists at MET_BASE/climo/seeps/PPT24_seepsweights.nc. It is configurable by using the configuration file (seeps_point_climo_name). It can be overridden by the environment variable, MET_SEEPS_POINT_CLIMO_NAME. .. _point_stat-output: @@ -1204,7 +1206,7 @@ The first set of header columns are common to all of the output files generated - Mean(o²) * - 31 - MAE - - Mean Absolute Error + - Mean(\|f-o\|) .. 
_table_PS_format_info_SAL1L2: @@ -1223,25 +1225,25 @@ The first set of header columns are common to all of the output files generated - Scalar Anomaly L1L2 line type * - 25 - TOTAL - - Total number of matched triplets of forecast (f), observation (o), and climatological value (c) + - Total number of matched pairs of forecast (f), observation (o), forecast climatology (cf), and observation climatology (co) * - 26 - FABAR - - Mean(f-c) + - Mean(f-cf) * - 27 - OABAR - - Mean(o-c) + - Mean(o-co) * - 28 - FOABAR - - Mean((f-c)*(o-c)) + - Mean((f-cf)*(o-co)) * - 29 - FFABAR - - Mean((f-c)²) + - Mean((f-cf)²) * - 30 - OOABAR - - Mean((o-c)²) + - Mean((o-co)²) * - 31 - MAE - - Mean Absolute Error + - Mean(\|(f-cf)-(o-co)\|) .. _table_PS_format_info_VL1L2: @@ -1289,12 +1291,15 @@ The first set of header columns are common to all of the output files generated - O_SPEED_BAR - Mean observed wind speed * - 35 + - TOTAL_DIR + - Total number of matched pairs for which both the forecast and observation wind directions are well-defined (i.e. 
non-zero vectors) + * - 36 - DIR_ME - Mean wind direction difference, from -180 to 180 degrees - * - 36 + * - 37 - DIR_MAE - Mean absolute wind direction difference - * - 37 + * - 38 - DIR_MSE - Mean squared wind direction difference @@ -1315,28 +1320,28 @@ The first set of header columns are common to all of the output files generated - Vector Anomaly L1L2 line type * - 25 - TOTAL - - Total number of matched triplets of forecast winds (uf, vf), observation winds (uo, vo), and climatological winds (uc, vc) + - Total number of matched pairs of forecast winds (uf, vf), observation winds (uo, vo), forecast climatology winds (ucf, vcf), and observation climatology winds (uco, vco) * - 26 - UFABAR - - Mean(uf-uc) + - Mean(uf-ucf) * - 27 - VFABAR - - Mean(vf-vc) + - Mean(vf-vcf) * - 28 - UOABAR - - Mean(uo-uc) + - Mean(uo-uco) * - 29 - VOABAR - - Mean(vo-vc) + - Mean(vo-vco) * - 30 - UVFOABAR - - Mean((uf-uc)*(uo-uc)+(vf-vc)*(vo-vc)) + - Mean((uf-ucf)*(uo-uco)+(vf-vcf)*(vo-vco)) * - 31 - UVFFABAR - - Mean((uf-uc)²+(vf-vc)²) + - Mean((uf-ucf)²+(vf-vcf)²) * - 32 - UVOOABAR - - Mean((uo-uc)²+(vo-vc)²) + - Mean((uo-uco)²+(vo-vco)²) * - 33 - FA_SPEED_BAR - Mean forecast wind speed anomaly @@ -1344,12 +1349,15 @@ The first set of header columns are common to all of the output files generated - OA_SPEED_BAR - Mean observed wind speed anomaly * - 35 + - TOTAL_DIR + - Total number of matched pairs for which the forecast, observation, forecast climatology, and observation climatology wind directions are well-defined (i.e. non-zero vectors) + * - 36 - DIRA_ME - Mean wind direction anomaly difference, from -180 to 180 degrees - * - 36 + * - 37 - DIRA_MAE - Mean absolute wind direction anomaly difference - * - 37 + * - 38 - DIRA_MSE - Mean squared wind direction anomaly difference @@ -1431,16 +1439,19 @@ The first set of header columns are common to all of the output files generated * - 85-87 - ANOM_CORR_UNCNTR, :raw-html:`
` ANOM_CORR_UNCNTR_BCL, :raw-html:`
` ANOM_CORR_UNCNTR_BCU - Uncentered vector Anomaly Correlation excluding mean error including bootstrap upper and lower confidence limits - * - 88-90 + * - 88 + - TOTAL_DIR + - Total number of matched pairs for which both the forecast and observation wind directions are well-defined (i.e. non-zero vectors) + * - 89-91 - DIR_ME, :raw-html:`
` DIR_ME_BCL, :raw-html:`
` DIR_ME_BCU - Mean direction difference, from -180 to 180 degrees, including bootstrap upper and lower confidence limits - * - 91-93 + * - 92-94 - DIR_MAE, :raw-html:`
` DIR_MAE_BCL, :raw-html:`
` DIR_MAE_BCU - Mean absolute direction difference including bootstrap upper and lower confidence limits - * - 94-96 + * - 95-97 - DIR_MSE, :raw-html:`
` DIR_MSE_BCL, :raw-html:`
` DIR_MSE_BCU - Mean squared direction difference including bootstrap upper and lower confidence limits - * - 97-99 + * - 98-100 - DIR_RMSE, :raw-html:`
` DIR_RMSE_BCL, :raw-html:`
` DIR_RMSE_BCU - Root mean squared direction difference including bootstrap upper and lower confidence limits @@ -1490,14 +1501,20 @@ The first set of header columns are common to all of the output files generated - OBS_QC - Quality control flag for observation * - 35 - - CLIMO_MEAN - - Climatological mean value + - OBS_CLIMO_MEAN + - Observation climatological mean value (named CLIMO_MEAN prior to met-12.0.0) * - 36 - - CLIMO_STDEV - - Climatological standard deviation value + - OBS_CLIMO_STDEV + - Observation climatological standard deviation value (named CLIMO_STDEV prior to met-12.0.0) * - 37 - - CLIMO_CDF - - Climatological cumulative distribution function value + - OBS_CLIMO_CDF + - Observation climatological cumulative distribution function value (named CLIMO_CDF prior to met-12.0.0) + * - 38 + - FCST_CLIMO_MEAN + - Forecast climatological mean value + * - 39 + - FCST_CLIMO_STDEV + - Forecast climatological standard deviation value .. _table_PS_format_info_SEEPS_MPR: @@ -1534,10 +1551,10 @@ The first set of header columns are common to all of the output files generated - Quality control flag for observation * - 31 - FCST_CAT - - Forecast category to 3 by 3 matrix + - Forecast category (dry, light, or heavy) * - 32 - OBS_CAT - - Observationtegory to 3 by 3 matrix + - Observation category (dry, light, or heavy) * - 33 - P1 - Climo-derived probability value for this station (dry) @@ -1546,10 +1563,10 @@ The first set of header columns are common to all of the output files generated - Climo-derived probability value for this station (dry + light) * - 35 - T1 - - Threshold 1 for p1 + - Threshold 1 for P1 (dry) * - 36 - T2 - - Threshold 2 for p2 + - Threshold 2 for P2 (dry + light) * - 37 - SEEPS - SEEPS (Stable Equitable Error in Probability Space) score @@ -1574,41 +1591,41 @@ The first set of header columns are common to all of the output files generated - TOTAL - Total number of SEEPS matched pairs * - 26 - - S12 - - Counts multiplied by the weights for 
FCST_CAT 1 and OBS_CAT 2 + - ODFL + - Counts multiplied by the weights for the observation dry, forecast light category * - 27 - - S13 - - Counts multiplied by the weights for FCST_CAT 1 and OBS_CAT 3 + - ODFH + - Counts multiplied by the weights for the observation dry, forecast heavy category * - 28 - - S21 - - Counts multiplied by the weights for FCST_CAT 2 and OBS_CAT 1 + - OLFD + - Counts multiplied by the weights for the observation light, forecast dry category * - 29 - - S23 - - Counts multiplied by the weights for FCST_CAT 2 and OBS_CAT 3 + - OLFH + - Counts multiplied by the weights for the observation light, forecast heavy category * - 30 - - S31 - - Counts multiplied by the weights for FCST_CAT 3 and OBS_CAT 1 + - OHFD + - Counts multiplied by the weights for the observation heavy, forecast dry category * - 31 - - S32 - - Counts multiplied by the weights for FCST_CAT 3 and OBS_CAT 2 + - OHFL + - Counts multiplied by the weights for the observation heavy, forecast light category * - 32 - PF1 - - marginal probabilities of the forecast values (FCST_CAT 1) + - Marginal probabilities of the forecast dry (FCST_CAT 0) * - 33 - PF2 - - marginal probabilities of the forecast values (FCST_CAT 2) + - Marginal probabilities of the forecast light (FCST_CAT 1) * - 34 - PF3 - - marginal probabilities of the forecast values (FCST_CAT 3) + - Marginal probabilities of the forecast heavy (FCST_CAT 2) * - 35 - PV1 - - marginal probabilities of the observed values (OBS_CAT 1) + - Marginal probabilities of the observed dry (OBS_CAT 0) * - 36 - PV2 - - marginal probabilities of the observed values (OBS_CAT 2) + - Marginal probabilities of the observed light (OBS_CAT 1) * - 37 - PV3 - - marginal probabilities of the observed values (OBS_CAT 3) + - Marginal probabilities of the observed heavy (OBS_CAT 2) * - 38 - SEEPS - Averaged SEEPS (Stable Equitable Error in Probability Space) score diff --git a/docs/Users_Guide/reformat_grid.rst b/docs/Users_Guide/reformat_grid.rst index 
afee78a7c6..d1bc065073 100644 --- a/docs/Users_Guide/reformat_grid.rst +++ b/docs/Users_Guide/reformat_grid.rst @@ -38,6 +38,7 @@ The usage statement for the Pcp-Combine tool is shown below: out_file [-field string] [-name list] + [-input_thresh n] [-vld_thresh n] [-log file] [-v level] @@ -79,13 +80,15 @@ Optional Arguments for pcp_combine 4. The **-name list** option is a comma-separated list of output variable names which override the default choices. If specified, the number of names must match the number of variables to be written to the output file. -5. The **-vld_thresh n** option overrides the default required ratio of valid data for at each grid point for an output value to be written. The default is 1.0. +5. The **-input_thresh n** option overrides the default required ratio of valid input files. This option does not apply to the -subtract command where exactly two valid inputs are required. The default is 1.0. -6. The **-log file** option directs output and errors to the specified log file. All messages will be written to that file as well as standard out and error. Thus, users can save the messages without having to redirect the output on the command line. The default behavior is no log file. +6. The **-vld_thresh n** option overrides the default required ratio of valid data at each grid point for an output value to be written. The default is 1.0. -7. The **-v level** option indicates the desired level of verbosity. The contents of "level" will override the default setting of 2. Setting the verbosity to 0 will make the tool run with no log messages, while increasing the verbosity above 1 will increase the amount of logging. +7. The **-log file** option directs output and errors to the specified log file. All messages will be written to that file as well as standard out and error. Thus, users can save the messages without having to redirect the output on the command line. The default behavior is no log file. -8. 
The **-compress level** option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression. +8. The **-v level** option indicates the desired level of verbosity. The contents of "level" will override the default setting of 2. Setting the verbosity to 0 will make the tool run with no log messages, while increasing the verbosity above 1 will increase the amount of logging. + +9. The **-compress level** option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression. Required Arguments for the pcp_combine Sum Command ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/Users_Guide/reformat_point.rst b/docs/Users_Guide/reformat_point.rst index ec09fa3f83..9be547fad1 100644 --- a/docs/Users_Guide/reformat_point.rst +++ b/docs/Users_Guide/reformat_point.rst @@ -41,51 +41,39 @@ pb2nc has both required and optional arguments. Required Arguments for pb2nc ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -1. -The **prepbufr_file** argument is the input PrepBUFR file to be processed. +1. The **prepbufr_file** argument is the input PrepBUFR file to be processed. -2. -The **netcdf_file** argument is the output NetCDF file to be written. +2. The **netcdf_file** argument is the output NetCDF file to be written. -3. -The **config_file** argument is the configuration file to be used. 
The contents of the configuration file are discussed below. +3. The **config_file** argument is the configuration file to be used. The contents of the configuration file are discussed below. Optional Arguments for pb2nc ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -1. -The **-pbfile prepbufr_file** option is used to pass additional input PrepBUFR files. +1. The **-pbfile prepbufr_file** option is used to pass additional input PrepBUFR files. -2. -The **-valid_beg** time option in YYYYMMDD[_HH[MMSS]] format sets the beginning of the retention time window. +2. The **-valid_beg** time option in YYYYMMDD[_HH[MMSS]] format sets the beginning of the retention time window. -3. -The **-valid_end** time option in YYYYMMDD[_HH[MMSS]] format sets the end of the retention time window. +3. The **-valid_end** time option in YYYYMMDD[_HH[MMSS]] format sets the end of the retention time window. -4. -The **-nmsg num_messages** option may be used for testing purposes. This argument indicates that only the first "num_messages" PrepBUFR messages should be processed rather than the whole file. This option is provided to speed up testing because running the PB2NC tool can take a few minutes for each file. Most users will not need this option. +4. The **-nmsg num_messages** option may be used for testing purposes. This argument indicates that only the first "num_messages" PrepBUFR messages should be processed rather than the whole file. This option is provided to speed up testing because running the PB2NC tool can take a few minutes for each file. Most users will not need this option. -5. -The **-dump path** option may be used to dump the entire contents of the PrepBUFR file to several ASCII files written to the directory specified by "path". The user may use this option to view a human-readable version of the input PrepBUFR file, although writing the contents to ASCII files can be slow. +5. 
The **-dump path** option may be used to dump the entire contents of the PrepBUFR file to several ASCII files written to the directory specified by "path". The user may use this option to view a human-readable version of the input PrepBUFR file, although writing the contents to ASCII files can be slow. -6. -The **-index** option shows the available variables with valid data from the BUFR input. It collects the available variable list from BUFR input and checks the existence of valid data and directs the variable names with valid data to the screen. The NetCDF output won't be generated. +6. The **-index** option shows the available variables with valid data from the BUFR input. It collects the available variable list from BUFR input and checks the existence of valid data and directs the variable names with valid data to the screen. The NetCDF output won't be generated. -7. -The **-log** file option directs output and errors to the specified log file. All messages will be written to that file as well as standard out and error. Thus, users can save the messages without having to redirect the output on the command line. The default behavior is no log file. +7. The **-log** file option directs output and errors to the specified log file. All messages will be written to that file as well as standard out and error. Thus, users can save the messages without having to redirect the output on the command line. The default behavior is no log file. -8. -The **-v level** option indicates the desired level of verbosity. The value of "level" will override the default setting of 2. Setting the verbosity to 0 will make the tool run with no log messages, while increasing the verbosity above 1 will increase the amount of logging. +8. The **-v level** option indicates the desired level of verbosity. The value of "level" will override the default setting of 2. 
Setting the verbosity to 0 will make the tool run with no log messages, while increasing the verbosity above 1 will increase the amount of logging. -9. -The **-compress level** option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression. +9. The **-compress level** option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression. An example of the pb2nc calling sequence is shown below: .. code-block:: none - - pb2nc sample_pb.blk \ - sample_pb.nc \ - PB2NCConfig + + pb2nc sample_pb.blk \ + sample_pb.nc \ + PB2NCConfig In this example, the PB2NC tool will process the input **sample_pb.blk** file applying the configuration specified in the **PB2NCConfig** file and write the output to a file named **sample_pb.nc**. @@ -101,11 +89,11 @@ Note that environment variables may be used when editing configuration files, as ____________________ .. code-block:: none - - obs_window = { beg = -5400; end = 5400; } - mask = { grid = ""; poly = ""; } - tmp_dir = "/tmp"; - version = "VN.N"; + + obs_window = { beg = -5400; end = 5400; } + mask = { grid = ""; poly = ""; } + tmp_dir = "/tmp"; + version = "VN.N"; The configuration options listed above are common to many MET tools and are described in :numref:`config_options`. 
The use of temporary files in PB2NC is described in :numref:`Contributor's Guide Section %s `. @@ -113,47 +101,45 @@ The use of temporary files in PB2NC is described in :numref:`Contributor's Guide _____________________ .. code-block:: none - - message_type = []; + + message_type = []; Each PrepBUFR message is tagged with one of eighteen message types as listed in the :numref:`config_options` file. The **message_type** refers to the type of observation from which the observation value (or 'report') was derived. The user may specify a comma-separated list of message types to be retained. Providing an empty list indicates that all message types should be retained. _____________________ -.. code-block:: none +.. code-block:: none - message_type_map = [ { key = "AIRCAR"; val = "AIRCAR_PROFILES"; } ]; + message_type_map = [ { key = "AIRCAR"; val = "AIRCAR_PROFILES"; } ]; The **message_type_map** entry is an array of dictionaries, each containing a **key** string and **val** string. This defines a mapping of input PrepBUFR message types to output message types. This provides a method for renaming input PrepBUFR message types. _____________________ .. code-block:: none - - message_type_group_map = [ - { key = "SURFACE"; val = "ADPSFC,SFCSHP,MSONET"; }, - { key = "ANYAIR"; val = "AIRCAR,AIRCFT"; }, - { key = "ANYSFC"; val = "ADPSFC,SFCSHP,ADPUPA,PROFLR,MSONET"; }, - { key = "ONLYSF"; val = "ADPSFC,SFCSHP"; } - ]; + message_type_group_map = [ + { key = "SURFACE"; val = "ADPSFC,SFCSHP,MSONET"; }, + { key = "ANYAIR"; val = "AIRCAR,AIRCFT"; }, + { key = "ANYSFC"; val = "ADPSFC,SFCSHP,ADPUPA,PROFLR,MSONET"; }, + { key = "ONLYSF"; val = "ADPSFC,SFCSHP"; } + ]; The **message_type_group_map** entry is an array of dictionaries, each containing a **key** string and **val** string. This defines a mapping of message type group names to a comma-separated list of values. This map is defined in the config files for PB2NC, Point-Stat, or Ensemble-Stat. 
Modify this map to define sets of message types that should be processed together as a group. The **SURFACE** entry must be present to define message types for which surface verification logic should be applied. _____________________ .. code-block:: none - - station_id = []; + + station_id = []; Each PrepBUFR message has a station identification string associated with it. The user may specify a comma-separated list of station IDs to be retained. Providing an empty list indicates that messages from all station IDs will be retained. It can be a file name containing a list of stations. _____________________ .. code-block:: none - - elevation_range = { beg = -1000; end = 100000; } + elevation_range = { beg = -1000; end = 100000; } The **beg** and **end** variables are used to stratify the elevation (in meters) of the observations to be retained. The range shown above is set to -1000 to 100000 meters, which essentially retains every observation. @@ -161,11 +147,10 @@ _____________________ .. code-block:: none - pb_report_type = []; - in_report_type = []; - instrument_type = []; + pb_report_type = []; + in_report_type = []; + instrument_type = []; - The **pb_report_type, in_report_type**, and **instrument_type** variables are used to specify comma-separated lists of PrepBUFR report types, input report types, and instrument types to be retained, respectively. If left empty, all PrepBUFR report types, input report types, and instrument types will be retained. See the following for more details: `Code table for PrepBUFR report types used by Regional NAM GSI analyses. `_ @@ -175,19 +160,16 @@ The **pb_report_type, in_report_type**, and **instrument_type** variables are us _____________________ .. code-block:: none - - level_range = { beg = 1; end = 255; } - level_category = []; + level_range = { beg = 1; end = 255; } + level_category = []; The **beg** and **end** variables are used to stratify the model level of observations to be retained. The range shown above is 1 to 255. 
- The **level_category** variable is used to specify a comma-separated list of PrepBUFR data level categories to retain. An empty string indicates that all level categories should be retained. Accepted values and their meanings are described in :numref:`table_reformat-point_pb2nc_level_category`. See the following for more details: `PrepBUFR mnemonic table. `_ - .. _table_reformat-point_pb2nc_level_category: .. list-table:: Values for the level_category option. @@ -212,17 +194,15 @@ The **level_category** variable is used to specify a comma-separated list of Pre - Reports on a single level * - 7 - Auxiliary levels generated via interpolation from spanning levels - + _____________________ .. code-block:: none - - obs_bufr_var = [ 'QOB', 'TOB', 'ZOB', 'UOB', 'VOB' ]; + obs_bufr_var = [ 'QOB', 'TOB', 'ZOB', 'UOB', 'VOB' ]; Each PrepBUFR message will likely contain multiple observation variables. The **obs_bufr_var** variable is used to specify which observation variables should be retained or derived. The observation variable names are retrieved from the BUFR table embedded within the file. Users can run PB2NC with the **-index** command line argument to list out the variable names present in the file, and those names can be listed in this setting. If the list is empty, all BUFR variables present in the file are retained. This setting replaces the deprecated **obs_grib_code**. - The example **obs_bufr_var** setting above retains observations of QOB, TOB, ZOB, UOB, and VOB for specific humidity, temperature, height, and the u and v components of winds. Observations of those types are reported at the corresponding POB pressure level. In addition, PB2NC can derive several other variables from these observations. 
By convention, all observations that are derivable are named with a **D_** prefix: • **D_DPT** for dew point (from POB and QOB) @@ -243,39 +223,37 @@ The example **obs_bufr_var** setting above retains observations of QOB, TOB, ZOB • **D_MLCAPE** for mixed layer convective available potential energy (from POB, QOB, and TOB) - In BUFR, lower quality mark values indicate higher quality observations. The quality marks for derived observations are computed as the maximum of the quality marks for its components. For example, **D_DPT** derived from **POB** with quality mark 1 and **QOB** with quality mark 2 is assigned a quality mark value of 2. **D_PBL**, **D_CAPE**, and **D_MLCAPE** are derived using data from multiple vertical levels. Their quality marks are computed as the maximum of their components over all vertical levels. _____________________ .. code-block:: none - - obs_bufr_map = [ - { key = 'POB'; val = 'PRES'; }, - { key = 'QOB'; val = 'SPFH'; }, - { key = 'TOB'; val = 'TMP'; }, - { key = 'ZOB'; val = 'HGT'; }, - { key = 'UOB'; val = 'UGRD'; }, - { key = 'VOB'; val = 'VGRD'; }, - { key = 'D_DPT'; val = 'DPT'; }, - { key = 'D_WDIR'; val = 'WDIR'; }, - { key = 'D_WIND'; val = 'WIND'; }, - { key = 'D_RH'; val = 'RH'; }, - { key = 'D_MIXR'; val = 'MIXR'; }, - { key = 'D_PRMSL'; val = 'PRMSL'; }, - { key = 'D_PBL'; val = 'PBL'; }, - { key = 'D_CAPE'; val = 'CAPE'; } - { key = 'D_MLCAPE'; val = 'MLCAPE'; } - ]; + obs_bufr_map = [ + { key = 'POB'; val = 'PRES'; }, + { key = 'QOB'; val = 'SPFH'; }, + { key = 'TOB'; val = 'TMP'; }, + { key = 'ZOB'; val = 'HGT'; }, + { key = 'UOB'; val = 'UGRD'; }, + { key = 'VOB'; val = 'VGRD'; }, + { key = 'D_DPT'; val = 'DPT'; }, + { key = 'D_WDIR'; val = 'WDIR'; }, + { key = 'D_WIND'; val = 'WIND'; }, + { key = 'D_RH'; val = 'RH'; }, + { key = 'D_MIXR'; val = 'MIXR'; }, + { key = 'D_PRMSL'; val = 'PRMSL'; }, + { key = 'D_PBL'; val = 'PBL'; }, + { key = 'D_CAPE'; val = 'CAPE'; } + { key = 'D_MLCAPE'; val = 'MLCAPE'; } + ]; The BUFR 
variable names are not shared with other forecast data. This map is used to convert the BUFR name to the common name, like GRIB2. It allows to share the configuration for forecast data with PB2NC observation data. If there is no mapping, the BUFR variable name will be saved to output NetCDF file. _____________________ .. code-block:: none - - quality_mark_thresh = 2; + + quality_mark_thresh = 2; Each observation has a quality mark value associated with it. The **quality_mark_thresh** is used to stratify out which quality marks will be retained. The value shown above indicates that only observations with quality marks less than or equal to 2 will be retained. @@ -283,51 +261,45 @@ Each observation has a quality mark value associated with it. The **quality_mark _____________________ .. code-block:: none - - event_stack_flag = TOP; + event_stack_flag = TOP; A PrepBUFR message may contain duplicate observations with different quality mark values. The **event_stack_flag** indicates whether to use the observations at the top of the event stack (observation values have had more quality control processing applied) or the bottom of the event stack (observation values have had no quality control processing applied). The flag value of **TOP** listed above indicates the observations with the most amount of quality control processing should be used, the **BOTTOM** option uses the data closest to raw values. _____________________ .. 
code-block:: none - - time_summary = { - flag = FALSE; - raw_data = FALSE; - beg = "000000"; - end = "235959"; - step = 300; - width = 600; - // width = { beg = -300; end = 300; } - grib_code = []; - obs_var = [ "TMP", "WDIR", "RH" ]; - type = [ "min", "max", "range", "mean", "stdev", "median", "p80" ]; - vld_freq = 0; - vld_thresh = 0.0; - } + time_summary = { + flag = FALSE; + raw_data = FALSE; + beg = "000000"; + end = "235959"; + step = 300; + width = 600; + // width = { beg = -300; end = 300; } + grib_code = []; + obs_var = [ "TMP", "WDIR", "RH" ]; + type = [ "min", "max", "range", "mean", "stdev", "median", "p80" ]; + vld_freq = 0; + vld_thresh = 0.0; + } The **time_summary** dictionary enables additional processing for observations with high temporal resolution. The **flag** entry toggles the **time_summary** on (**TRUE**) and off (**FALSE**). If the **raw_data** flag is set to TRUE, then both the individual observation values and the derived time summary value will be written to the output. If FALSE, only the summary values are written. Observations may be summarized across the user specified time period defined by the **beg** and **end** entries in HHMMSS format. The **step** entry defines the time between intervals in seconds. The **width** entry specifies the summary interval in seconds. It may either be set as an integer number of seconds for a centered time interval or a dictionary with beginning and ending time offsets in seconds. - This example listed above does a 10-minute time summary (width = 600;) every 5 minutes (step = 300;) throughout the day (beg = "000000"; end = 235959";). The first interval will be from 23:55:00 the previous day through 00:04:59 of the current day. The second interval will be from 0:00:00 through 00:09:59. And so on. - The two **width** settings listed above are equivalent. Both define a centered 10-minute time interval. Use the **beg** and **end** entries to define uncentered time intervals. 
The following example requests observations for one hour prior: .. code-block:: none - - width = { beg = -3600; end = 0; } + width = { beg = -3600; end = 0; } -The summaries will only be calculated for the observations specified in the **grib_code** or **obs_var** entries. The **grib_code** entry is an array of integers while the **obs_var** entries is an array of strings. The supported summaries are **min** (minimum), **max** (maximum), **range, mean, stdev** (standard deviation), **median** and **p##** (percentile, with the desired percentile value specified in place of ##). If multiple summaries are selected in a single run, a string indicating the summary method applied will be appended to the output message type. +The summaries will only be calculated for the observations specified in the **grib_code** or **obs_var** entries. The **grib_code** entry is an array of integers while the **obs_var** entries is an array of strings. The supported summaries are **min** (minimum), **max** (maximum), **range, mean, stdev** (standard deviation), **median** and **p##** (percentile, with the desired percentile value specified in place of ##). If multiple summaries are selected in a single run, a string indicating the summary method applied will be appended to the output message type. The **vld_freq** and **vld_thresh** entries specify the required ratio of valid data for an output time summary value to be computed. This option is only applied when these entries are set to non-zero values. The **vld_freq** entry specifies the expected frequency of observations in seconds. The width of the time window is divided by this frequency to compute the expected number of observations for the time window. The actual number of valid observations is divided by the expected number to compute the ratio of valid data. An output time summary value will only be written if that ratio is greater than or equal to the **vld_thresh** entry. 
Detailed information about which observations are excluded is provided at debug level 4. - The quality mark for time summaries is always reported by PB2NC as bad data. Time summaries are computed by several MET point pre-processing tools using common library code. While BUFR quality marks are integers, the quality flags for other point data formats (MADIS NetCDF, for example) are stored as strings. MET does not currently contain logic to determine which quality flag strings are better or worse. Note however that any point observation whose quality mark does not meet the **quality_mark_thresh** criteria is not used in the computation of time summaries. .. _pb2nc output: @@ -358,14 +330,14 @@ Each NetCDF file generated by the PB2NC tool contains the dimensions and variabl * - nobs_qty - Number of unique quality control strings (variable) * - obs_var_num - - Number of unique observation variable types (variable) + - Number of unique observation variable types (variable) .. _table_reformat-point_pb2nc_output_vars: .. list-table:: NetCDF variables in pb2nc output :widths: auto :header-rows: 2 - + * - pb2nc NetCDF VARIABLES - - @@ -436,7 +408,6 @@ Each NetCDF file generated by the PB2NC tool contains the dimensions and variabl - obs_var_num, mxstr3 - Lookup table containing a description string for the unique observation variable names in obs_var. - ASCII2NC Tool ============= @@ -454,7 +425,7 @@ While initial versions of the ASCII2NC tool only supported a simple 11 column AS • `AirNow DailyData_v2, AirNow HourlyData, and AirNow HourlyAQObs formats `_. See the :ref:`MET_AIRNOW_STATIONS` environment variable. -• `National Data Buoy (NDBC) Standard Meteorlogical Data format `_. See the :ref:`MET_NDBC_STATIONS` environment variable. +• `National Data Buoy (NDBC) Standard Meteorological Data format `_. See the :ref:`MET_NDBC_STATIONS` environment variable. • `International Soil Moisture Network (ISMN) Data format `_. 
@@ -518,7 +489,7 @@ ascii2nc Usage Once the ASCII point observations have been formatted as expected, the ASCII file is ready to be processed by the ASCII2NC tool. The usage statement for ASCII2NC tool is shown below: .. code-block:: none - + Usage: ascii2nc ascii_file1 [ascii_file2 ... ascii_filen] netcdf_file @@ -568,9 +539,9 @@ Optional Arguments for ascii2nc An example of the ascii2nc calling sequence is shown below: .. code-block:: none - - ascii2nc sample_ascii_obs.txt \ - sample_ascii_obs.nc + + ascii2nc sample_ascii_obs.txt \ + sample_ascii_obs.nc In this example, the ASCII2NC tool will reformat the input **sample_ascii_obs.txt file** into NetCDF format and write the output to a file named **sample_ascii_obs.nc**. @@ -587,7 +558,7 @@ _____________________ .. code-block:: none - version = "VN.N"; + version = "VN.N"; The configuration options listed above are common to many MET tools and are described in :numref:`config_options`. @@ -595,31 +566,28 @@ _____________________ .. code-block:: none - time_summary = { ... } - + time_summary = { ... } The **time_summary** feature was implemented to allow additional processing of observations with high temporal resolution, such as SURFRAD data every 5 minutes. This option is described in :numref:`pb2nc configuration file`. _____________________ .. 
code-block:: none - - message_type_map = [ - { key = "FM-12 SYNOP"; val = "ADPSFC"; }, - { key = "FM-13 SHIP"; val = "SFCSHP"; }, - { key = "FM-15 METAR"; val = "ADPSFC"; }, - { key = "FM-18 BUOY"; val = "SFCSHP"; }, - { key = "FM-281 QSCAT"; val = "ASCATW"; }, - { key = "FM-32 PILOT"; val = "ADPUPA"; }, - { key = "FM-35 TEMP"; val = "ADPUPA"; }, - { key = "FM-88 SATOB"; val = "SATWND"; }, - { key = "FM-97 ACARS"; val = "AIRCFT"; } - ]; + message_type_map = [ + { key = "FM-12 SYNOP"; val = "ADPSFC"; }, + { key = "FM-13 SHIP"; val = "SFCSHP"; }, + { key = "FM-15 METAR"; val = "ADPSFC"; }, + { key = "FM-18 BUOY"; val = "SFCSHP"; }, + { key = "FM-281 QSCAT"; val = "ASCATW"; }, + { key = "FM-32 PILOT"; val = "ADPUPA"; }, + { key = "FM-35 TEMP"; val = "ADPUPA"; }, + { key = "FM-88 SATOB"; val = "SATWND"; }, + { key = "FM-97 ACARS"; val = "AIRCFT"; } + ]; This entry is an array of dictionaries, each containing a **key** string and **val** string which define a mapping of input strings to output message types. This mapping is currently only applied when converting input little_r report types to output message types. - ascii2nc Output --------------- @@ -627,21 +595,18 @@ The NetCDF output of the ASCII2NC tool is structured in the same way as the outp "obs_vid" variable is replaced with "obs_gc" when the GRIB code is given instead of the variable names. In this case, the global variable "use_var_id" does not exist or set to false (use_var_id = "false" ;). Three variables (obs_var, obs_units, and obs_desc) related with variable names are not added. - MADIS2NC Tool ============= - This section describes how to run the MADIS2NC tool. The MADIS2NC tool is used to reformat `Meteorological Assimilation Data Ingest System (MADIS) `_ point observations into the NetCDF format expected by the MET statistics tools. An optional configuration file controls the processing of the point observations. 
The MADIS2NC tool supports many of the MADIS data types, as listed in the usage statement below. Support for additional MADIS data types may be added in the future based on user feedback. - madis2nc Usage -------------- The usage statement for the MADIS2NC tool is shown below: .. code-block:: none - + Usage: madis2nc madis_file [madis_file2 ... madis_filen] out_file @@ -658,81 +623,61 @@ The usage statement for the MADIS2NC tool is shown below: [-v level] [-compress level] - madis2nc has required arguments and can also take optional ones. - Required Arguments for madis2nc ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 1. The **madis_file** argument is one or more input MADIS point observation files to be processed. - 2. The **out_file** argument is the NetCDF output file to be written. - 3. The argument **-type str** is a type of MADIS observations (metar, raob, profiler, maritime, mesonet or acarsProfiles). - Optional Arguments for madis2nc ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 4. The **-config file** option specifies the configuration file to generate summaries of the fields in the ASCII files. - 5. The **-qc_dd list** option specifies a comma-separated list of QC flag values to be accepted(Z,C,S,V,X,Q,K,G,B). - 6. The **-lvl_dim list** option specifies a comma-separated list of vertical level dimensions to be processed. - 7. To specify the exact records to be processed, the **-rec_beg n** specifies the index of the first MADIS record to process and **-rec_end n** specifies the index of the last MADIS record to process. Both are zero-based. - 8. The **-mask_grid string** option specifies a named grid or a gridded data file for filtering the point observations spatially. - 9. The **-mask_poly file** option defines a polyline masking file for filtering the point observations spatially. - 10. The **-mask_sid file|list** option is a station ID masking file or a comma-separated list of station ID's for filtering the point observations spatially. 
See the description of the "sid" entry in :numref:`config_options`. - 11. The **-log file** option directs output and errors to the specified log file. All messages will be written to that file as well as standard out and error. Thus, users can save the messages without having to redirect the output on the command line. The default behavior is no log file. - 12. The **-v level** option indicates the desired level of verbosity. The value of "level" will override the default setting of 2. Setting the verbosity to 0 will make the tool run with no log messages, while increasing the verbosity will increase the amount of logging. - 13. The **-compress level** option specifies the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression. - An example of the madis2nc calling sequence is shown below: .. code-block:: none - - madis2nc sample_madis_obs.nc \ - sample_madis_obs_met.nc -log madis.log -v 3 + madis2nc sample_madis_obs.nc \ + sample_madis_obs_met.nc -log madis.log -v 3 In this example, the MADIS2NC tool will reformat the input sample_madis_obs.nc file into NetCDF format and write the output to a file named sample_madis_obs_met.nc. Warnings and error messages will be written to the madis.log file, and the verbosity level of logging is three. - madis2nc Configuration File --------------------------- - The default configuration file for the MADIS2NC tool named **Madis2NcConfig_default** can be found in the installed *share/met/config* directory. It is recommended that users make a copy of this file prior to modifying its contents. - The MADIS2NC configuration file is optional and only necessary when defining time summaries. The contents of the default MADIS2NC configuration file are described below. _____________________ .. 
code-block:: none - version = "VN.N"; - + version = "VN.N"; The configuration options listed above are common to many MET tools and are described in :numref:`config_options`. @@ -740,12 +685,10 @@ _____________________ .. code-block:: none - time_summary = { ... } - + time_summary = { ... } The **time_summary** dictionary is described in :numref:`pb2nc configuration file`. - madis2nc Output --------------- @@ -753,14 +696,11 @@ The NetCDF output of the MADIS2NC tool is structured in the same way as the outp "obs_vid" variable is replaced with "obs_gc" when the GRIB code is given instead of the variable names. In this case, the global variable "use_var_id" does not exist or set to false (use_var_id = "false" ;). Three variables (obs_var, obs_units, and obs_desc) related with variable names are not added. - LIDAR2NC Tool ============= - The LIDAR2NC tool creates a NetCDF point observation file from a CALIPSO HDF data file. Not all of the data present in the CALIPSO file is reproduced in the output, however. Instead, the output focuses mostly on information about clouds (as opposed to aerosols) as seen by the satellite along its ground track. - lidar2nc Usage -------------- @@ -775,19 +715,16 @@ The usage statement for LIDAR2NC tool is shown below: [-v level] [-compress level] - -Unlike most of the MET tools, lidar2nc does not use a config file. Currently, the options needed to run lidar2nc are not complex enough to require one. +Unlike most of the MET tools, lidar2nc does not use a config file. Currently, the options needed to run lidar2nc are not complex enough to require one. Required Arguments for lidar2nc ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 1. The **lidar_file** argument is the input HDF lidar data file to be processed. Currently, CALIPSO files are supported but support for additional file types will be added in future releases. - 2. The **out_file** argument is the NetCDF output file to be written. 
- Optional Arguments for lidar2nc ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -802,13 +739,10 @@ lidar2nc Output Each observation type in the lidar2nc output is assigned a GRIB code. These are outlined in :numref:`lidar2nc_grib_code_table`. GRIB codes were assigned to these fields arbitrarily, with GRIB codes in the 600s denoting individual bit fields taken from the feature classification flag field in the CALIPSO file. - We will not give a detailed description of each CALIPSO data product that lidar2nc reads. Users should refer to existing CALIPSO documentation for this information. We will, however, give some explanation of how the cloud layer base and top information is encoded in the lidar2nc NetCDF output file. - **Layer_Base** gives the elevation in meters above ground level of the cloud base for each cloud level at each observation location. Similarly, **Layer_Top** gives the elevation of the top of each cloud layer. Note that if there are multiple cloud layers at a particular location, then there will be more than one base (or top) given for that location. For convenience, **Min_Base** and **Max_Top** give, respectively, the base elevation for the bottom cloud layer, and the top elevation for the top cloud layer. For these data types, there will be only one value per observation location regardless of how many cloud layers there are at that location. - .. _lidar2nc_grib_code_table: .. list-table:: lidar2nc GRIB codes and their meaning, units, and abbreviations @@ -868,21 +802,18 @@ We will not give a detailed description of each CALIPSO data product that lidar2 - NA - Horizontal_Averaging - IODA2NC Tool ============ - This section describes the IODA2NC tool which is used to reformat IODA (Interface for Observation Data Access) point observations from the `Joint Center for Satellite Data Assimilation (JCSDA) `_ into the NetCDF format expected by the MET statistics tools. An optional configuration file controls the processing of the point observations. 
The IODA2NC tool reads NetCDF point observation files created by the `IODA Converters `_. Support for interfacing with data from IODA may be added in the future based on user feedback. - ioda2nc Usage ------------- The usage statement for the IODA2NC tool is shown below: .. code-block:: none - + Usage: ioda2nc ioda_file netcdf_file @@ -928,12 +859,11 @@ An example of the ioda2nc calling sequence is shown below: .. code-block:: none - ioda2nc \ - ioda.NC001007.2020031012.nc ioda2nc.2020031012.nc \ - -config IODA2NCConfig -v 3 -lg run_ioda2nc.log - -In this example, the IODA2NC tool will reformat the data in the input ioda.NC001007.2020031012.nc file and write the output to a file named ioda2nc.2020031012.nc. The data to be processed is specified by IODA2NCConfig, log messages will be written to the ioda2nc.log file, and the verbosity level is three. + ioda2nc \ + ioda.NC001007.2020031012.nc ioda2nc.2020031012.nc \ + -config IODA2NCConfig -v 3 -lg run_ioda2nc.log +In this example, the IODA2NC tool will reformat the data in the input ioda.NC001007.2020031012.nc file and write the output to a file named ioda2nc.2020031012.nc. The data to be processed is specified by IODA2NCConfig, log messages will be written to the ioda2nc.log file, and the verbosity level is three. ioda2nc Configuration File -------------------------- @@ -946,10 +876,10 @@ _____________________ .. code-block:: none - obs_window = { beg = -5400; end = 5400; } - mask = { grid = ""; poly = ""; } - tmp_dir = "/tmp"; - version = "VN.N"; + obs_window = { beg = -5400; end = 5400; } + mask = { grid = ""; poly = ""; } + tmp_dir = "/tmp"; + version = "VN.N"; The configuration options listed above are common to many MET tools and are described in :numref:`config_options`. @@ -957,15 +887,15 @@ _____________________ .. code-block:: none - message_type = []; - message_type_group_map = []; - message_type_map = []; - station_id = []; - elevation_range = { ... }; - level_range = { ... 
}; - obs_var = []; - quality_mark_thresh = 0; - time_summary = { ... } + message_type = []; + message_type_group_map = []; + message_type_map = []; + station_id = []; + elevation_range = { ... }; + level_range = { ... }; + obs_var = []; + quality_mark_thresh = 0; + time_summary = { ... } The configuration options listed above are supported by other point observation pre-processing tools and are described in :numref:`pb2nc configuration file`. @@ -973,22 +903,22 @@ _____________________ .. code-block:: none - obs_name_map = []; + obs_name_map = []; This entry is an array of dictionaries, each containing a **key** string and **val** string which define a mapping of input IODA variable names to output variable names. The default IODA map, obs_var_map, is appended to this map. _____________________ .. code-block:: none - - metadata_map = [ - { key = "message_type"; val = "msg_type,station_ob"; }, - { key = "station_id"; val = "station_id,report_identifier"; }, - { key = "pressure"; val = "air_pressure,pressure"; }, - { key = "height"; val = "height,height_above_mean_sea_level"; }, - { key = "elevation"; val = "elevation,station_elevation"; }, - { key = "nlocs"; val = "Location"; } - ]; + + metadata_map = [ + { key = "message_type"; val = "msg_type,station_ob"; }, + { key = "station_id"; val = "station_id,report_identifier"; }, + { key = "pressure"; val = "air_pressure,pressure"; }, + { key = "height"; val = "height,height_above_mean_sea_level"; }, + { key = "elevation"; val = "elevation,station_elevation"; }, + { key = "nlocs"; val = "Location"; } + ]; This entry is an array of dictionaries, each containing a **key** string and **val** string which define a mapping of metadata for IODA data files. The "nlocs" is for the dimension name of the locations. The following key can be added: "nstring", "latitude" and "longitude". @@ -996,11 +926,11 @@ The "nlocs" is for the dimension name of the locations. The following key can be _____________________ .. 
code-block:: none - - obs_to_qc_map = [ - { key = "wind_from_direction"; val = "eastward_wind,northward_wind"; }, - { key = "wind_speed"; val = "eastward_wind,northward_wind"; } - ]; + + obs_to_qc_map = [ + { key = "wind_from_direction"; val = "eastward_wind,northward_wind"; }, + { key = "wind_speed"; val = "eastward_wind,northward_wind"; } + ]; This entry is an array of dictionaries, each containing a **key** string and **val** string which define a mapping of QC variable name for IODA data files. @@ -1008,17 +938,15 @@ _____________________ .. code-block:: none - missing_thresh = [ <=-1e9, >=1e9, ==-9999 ]; + missing_thresh = [ <=-1e9, >=1e9, ==-9999 ]; The **missing_thresh** option is an array of thresholds. Any data values which meet any of these thresholds are interpreted as being bad, or missing, data. - ioda2nc Output -------------- The NetCDF output of the IODA2NC tool is structured in the same way as the output of the PB2NC tool described in :numref:`pb2nc output`. - Point2Grid Tool =============== @@ -1030,15 +958,15 @@ point2grid Usage The usage statement for the Point2Grid tool is shown below: .. code-block:: none - + Usage: point2grid input_filename to_grid output_filename -field string [-config file] - [-qc flags] - [-adp adp_file_name] + [-goes_qc flags] + [-adp adp_filename] [-method type] [-gaussian_dx n] [-gaussian_radius n] @@ -1049,7 +977,6 @@ The usage statement for the Point2Grid tool is shown below: [-v level] [-compress level] - Required Arguments for point2grid ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1063,21 +990,20 @@ The MET point observation NetCDF file name as **input_filename** argument is equ 4. The **-field** string argument is a string that defines the data to be regridded. It may be used multiple times. 
If **-adp** option is given (for AOD data from GOES16/17), the name consists with the variable name from the input data file and the variable name from ADP data file (for example, "AOD_Smoke" or "AOD_Dust": getting AOD variable from the input data and applying smoke or dust variable from ADP data file). - Optional Arguments for point2grid ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 5. The **-config** file option is the configuration file to be used. -6. The **-qc** flags option specifies a comma-separated list of quality control (QC) flags, for example "0,1". This should only be applied if grid_mapping is set to "goes_imager_projection" and the QC variable exists. +6. The **-goes_qc** flags option specifies a comma-separated list of quality control (QC) flags, for example "0,1". Only used if grid_mapping is set to "goes_imager_projection" and the QC variable exists. Note that the older **-qc** option name is also supported. -7. The **-adp adp_file_name** option provides an additional Aerosol Detection Product (ADP) information on aerosols, dust, and smoke. This option is ignored if the requested variable is not AOD ("AOD_Dust" or "AOD_Smoke") from GOES16/17. The gridded data is filtered by the presence of dust/smoke. If -qc options are given, it's applied to QC of dust/smoke, too (First filtering with AOD QC values and the second filtering with dust/smoke QC values). +7. The **-adp adp_filename** option provides an additional Aerosol Detection Product (ADP) information on aerosols, dust, and smoke. This option is ignored if the requested variable is not AOD ("AOD_Dust" or "AOD_Smoke") from GOES16/17. The gridded data is filtered by the presence of dust/smoke. If -goes_qc options are given, it's applied to QC of dust/smoke, too (First filtering with AOD QC values and the second filtering with dust/smoke QC values). 8. The **-method type** option specifies the regridding method. The default method is UW_MEAN. -9. 
The **-gaussian_dx n** option defines the distance interval for Gaussian smoothing. The default is 81.271 km. Ignored if the method is not GAUSSIAN or MAXGAUSS. +9. The **-gaussian_dx n** option specifies the distance interval for Gaussian smoothing. The default is 81.271 km. Only used if the method is GAUSSIAN or MAXGAUSS. -10. The **-gaussian_radius** n option defines the radius of influence for Gaussian interpolation. The default is 120. Ignored if the method is not GAUSSIAN or MAXGAUSS. +10. The **-gaussian_radius** n option specifies the radius of influence for Gaussian interpolation. The default is 120. Only used if the method is GAUSSIAN or MAXGAUSS. 11. The **-prob_cat_thresh string** option sets the threshold to compute the probability of occurrence. The default is set to disabled. This option is relevant when calculating practically perfect forecasts. @@ -1092,30 +1018,43 @@ Optional Arguments for point2grid 16. The **-compress level** option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression. Only 4 interpolation methods are applied to the field variables; MIN/MAX/MEDIAN/UW_MEAN. The GAUSSIAN method is applied to the probability variable only. Unlike regrad_data_plane, MAX method is applied to the file variable and Gaussian method to the probability variable with the MAXGAUSS method. If the probability variable is not requested, MAXGAUSS method is the same as MAX method. - + For the GOES-16 and GOES-17 data, the computing lat/long is time consuming. The computed coordinate (lat/long) is saved to a temporary NetCDF file, as described in :numref:`Contributor's Guide Section %s `. 
The computing lat/long step can be skipped if the coordinate file is given through the environment variable MET_GEOSTATIONARY_DATA. The grid mapping to the target grid is saved to MET_TMP_DIR to save the execution time. Once this file is created, the MET_GEOSTATIONARY_DATA is ignored. The grid mapping file should be deleted manually in order to apply a new MET_GEOSTATIONARY_DATA environment variable or to re-generate the grid mapping file. An example of call point2grid to process GOES-16 AOD data is shown below: + +The grid name or the grid definition can be given with the -field option when the grid information is missing from the input NetCDF file for the latitude_longitude projection. The latitude and longitude variable names should be defined by the user, and the grid information from the set_attr_grid is ignored in this case except nx and ny. + .. code-block:: none point2grid \ - OR_ABI-L2-AODC-M3_G16_s20181341702215_e20181341704588_c20181341711418.nc \ - G212 \ - regrid_data_plane_GOES-16_AOD_TO_G212.nc \ - -field 'name="AOD"; level="(*,*)";' \ - -qc 0,1,2 - -method MAX -v 1 + iceh.2018-01-03.c00.tlat_tlon.nc \ + G231 \ + point2grid_cice_to_G231.nc \ + -config Point2GridConfig_tlat_tlon \ + -field 'name="hi_d"; level="(0,*,*)"; set_attr_grid="latlon 1440 1080 -79.80672 60.28144 0.04 0.04";' \ + -v 1 +.. code-block:: none -When processing GOES-16 data, the **-qc** option may also be used to specify the acceptable quality control flag values. The example above regrids the GOES-16 AOD values to NCEP Grid number 212 (which QC flags are high, medium, and low), writing to the output the maximum AOD value falling inside each grid box. 
+ point2grid \ + OR_ABI-L2-AODC-M3_G16_s20181341702215_e20181341704588_c20181341711418.nc \ + G212 \ + regrid_data_plane_GOES-16_AOD_TO_G212.nc \ + -field 'name="AOD"; level="(*,*)";' \ + -goes_qc 0,1,2 + -method MAX -v 1 + + +When processing GOES-16 data, the **-goes_qc** option may also be used to specify the acceptable quality control flag values. The example above regrids the GOES-16 AOD values to NCEP Grid number 212 (which QC flags are high, medium, and low), writing to the output the maximum AOD value falling inside each grid box. Listed below is an example of processing the same set of observations but using Python embedding instead: .. code-block:: none - - point2grid \ - 'PYTHON_NUMPY=MET_BASE/python/examples/read_met_point_obs.py ascii2nc_edr_hourly.20130827.nc' \ - G212 python_gridded_ascii_python.nc -config Point2GridConfig_edr \ - -field 'name="200"; level="*"; valid_time="20130827_205959";' -method MAX -v 1 + + point2grid \ + 'PYTHON_NUMPY=MET_BASE/python/examples/read_met_point_obs.py ascii2nc_edr_hourly.20130827.nc' \ + G212 python_gridded_ascii_python.nc -config Point2GridConfig_edr \ + -field 'name="200"; level="*"; valid_time="20130827_205959";' -method MAX -v 1 Please refer to :numref:`Appendix F, Section %s ` for more details about Python embedding in MET. @@ -1124,25 +1063,18 @@ point2grid Output The point2grid tool will output a gridded NetCDF file containing the following: - 1. Latitude - 2. Longitude - 3. The variable specified in the -field string regridded to the grid defined in the **to_grid** argument. - 4. The count field which represents the number of point observations that were included calculating the value of the variable at that grid cell. - 5. The mask field which is a binary field representing the presence or lack thereof of point observations at that grid cell. 
A value of "1" indicates that there was at least one point observation within the bounds of that grid cell and a value of "0" indicates the lack of point observations at that grid cell. - 6. The probability field which is the probability of the event defined by the **-prob_cat_thresh** command line option. The output variable name includes the threshold used to define the probability. Ranges from 0 to 1. - 7. The probability mask field which is a binary field that represents whether or not there is probability data at that grid point. Can be either "0" or "1" with "0" meaning the probability value does not exist and a value of "1" meaning that the probability value does exist. For MET observation input and CF complaint NetCDF input with 2D time variable: The latest observation time within the target grid is saved as the observation time. If the "valid_time" is configured at the configuration file, the valid_time from the configuration file is saved into the output file. @@ -1150,36 +1082,31 @@ For MET observation input and CF complaint NetCDF input with 2D time variable: T point2grid Configuration File ----------------------------- - The default configuration file for the point2grid tool named **Point2GridConfig_default** can be found in the installed *share/met/config* directory. It is recommended that users make a copy of this file prior to modifying its contents. -The point2grid configuration file is optional and only necessary when defining the variable name instead of GRIB code or filtering by time. The contents of the default MADIS2NC configuration file are described below. +The point2grid configuration file is optional and only necessary when defining the variable name instead of GRIB code or filtering by time. The contents of the default point2grid configuration file are described below. _____________________ .. 
code-block:: none - version = "VN.N"; - + obs_window = { beg = -5400; end = 5400; } + message_type = []; + obs_quality_inc = []; + obs_quality_exc = []; + version = "VN.N"; The configuration options listed above are common to many MET tools and are described in :numref:`config_options`. _____________________ .. code-block:: none - + valid_time = "YYYYMMDD_HHMMSS"; -This entry is a string to override the obseration time into the output and to filter observation data by time. +This entry is a string to override the observation time into the output and to filter observation data by time. -.. code-block:: none - - obs_window = { - beg = -5400; - end = 5400; - } - -The configuration option listed above is common to many MET tools and are described in :numref:`config_options`. +_____________________ .. code-block:: none @@ -1189,11 +1116,14 @@ The configuration option listed above is common to many MET tools and are descri { key = "7"; val = "HGT"; }, // GRIB: Geopotential height { key = "11"; val = "TMP"; }, // GRIB: Temperature { key = "15"; val = "TMAX"; }, // GRIB: Max Temperature - ... + ... + { key = "lat_vname"; val = "NLAT"; }, // NetCDF latitude variable name + { key = "lon_vname"; val = "NLON"; }, // NetCDF longitude variable name + ... ] - -This entry is an array of dictionaries, each containing a **GRIB code** string and mathcing **variable name** string which define a mapping of GRIB code to the output variable names. +This entry is an array of dictionaries, each containing a **GRIB code** string and matching **variable name** string which define a mapping of GRIB code to the output variable names. +The latitude and longitude variables for NetCDF input can be overridden by the configurations. There are two special keys, **lat_vname** and **lon_vname**, which are applied to the NetCDF input, not to a GRIB code.
Point NetCDF to ASCII Python Utility ==================================== diff --git a/docs/Users_Guide/release-notes.rst b/docs/Users_Guide/release-notes.rst index c8f2fb0f2c..19e635afbe 100644 --- a/docs/Users_Guide/release-notes.rst +++ b/docs/Users_Guide/release-notes.rst @@ -9,6 +9,76 @@ When applicable, release notes are followed by the GitHub issue number which des enhancement, or new feature (`MET GitHub issues `_). Important issues are listed **in bold** for emphasis. +MET Version 12.0.0-beta6 Release Notes (20241018) +------------------------------------------------- + + .. dropdown:: Repository, build, and test + + * Update METbaseimage to use newer versions of Atlas and ecKit (`METbaseimage#27 `_). + * MET: Enhance the MET testing framework to provide a mechanism for expected failure (`METplus-Internal#23 `_). + * Fix the SonarQube findings for MET version 12.0.0 (`#2673 `_). + * Enhance the `unit.py` MET testing script to allow for expected failures (`#2937 `_). + * Modify configure.ac to define C++17 as the default compilation standard (`#2948 `_). + + .. dropdown:: Bugfixes + + * Bugfix: Fix Point2Grid processing of GFS Ocean data input (`#2936 `_). + * **Bugfix: Fix contingency table statistic bugs in the CTS and NBRCTS line types for BAGSS, SEDI CI's, ORSS, and ORSS CI's** (`#2958 `_). + * Bugfix: Fix the grid dimensions used for `point2grid_cice_set_attr_grid` unit test (`#2968 `_). + * Bugfix: Fix MTD to run on any MET-supported grid projection (`#2979 `_). + + .. dropdown:: Enhancements + + * **Enhance Series-Analysis to read its own output and incrementally update output statistics over time** (`#1371 `_). + * Enhance the `set_attr_grid` processing logic to support input files lacking a grid definition (`#1729 `_). + * **Add support for new point_weight_flag to the Point-Stat and Ensemble-Stat tools** (`#2279 `_). + * Allow observation anomaly replacement in Anomaly Correlation Coefficient (ACC) calculation (`#2308 `_). 
+ * Enhance Point2Grid to filter quality control strings with config file options (`#2880 `_). + * Refine SEEPS processing logic and output naming conventions (`#2882 `_). + * **Enhance MET to calculate weighted contingency table counts and statistics** (`#2887 `_). + * Enhance the OBTYPE header column for MPR and ORANK line types (`#2893 `_). + * **Enhance MET to support separate climatology datasets for both the forecast and observation inputs** (`#2924 `_). + * Refine PB2NC warning messages about changing Bufr center times (`#2938 `_). + + .. dropdown:: Documentation + + * Remove the double-quotes around keywords (`#2023 `_). + * Documentation: Provide instructions for compiling MET with the C++11 standard (`#2949 `_). + +MET Version 12.0.0-beta5 Release Notes (20240710) +------------------------------------------------- + + .. dropdown:: Repository, build, and test + + * Reimplement and enhance the Perl-based (unit.pl) unit test control script in Python (`#2717 `_). + * Update compilation script and configuration files as needed for supported platforms (`#2753 `_). + * Update tag used for the release checksum action (`#2929 `_). + + .. dropdown:: Bugfixes + + * Bugfix (METbaseimage): Fix the environment to correct the ncdump runtime linker error (`METbaseimage#24 `_). + * Bugfix: Fix the Grid-Stat configuration file to support the MET_SEEPS_GRID_CLIMO_NAME option (`#2601 `_). + * **Bugfix: Fix TC-RMW to correct the tangential and radial wind computations** (`#2841 `_). + * Bugfix: Fix Ensemble-Stat's handling of climo data when verifying ensemble-derived probabilities (`#2856 `_). + * **Bugfix: Fix Point2Grid's handling of the -qc option for ADP input files** (`#2867 `_). + * Bugfix: Fix Stat-Analysis errors for jobs using the -dump_row option and the -line_type option with VCNT, RPS, DMAP, or SSIDX (`#2888 `_). + * Bugfix: Fix inconsistent handling of point observation valid times processed through Python embedding (`#2897 `_). + + .. 
dropdown:: Enhancements + + * **Add new wind direction verification statistics for RMSE, Bias, and MAE** (`#2395 `_). + * Document UGRID configuration options added to Point-Stat and Grid-Stat (`#2748 `_). + * Refine Point-Stat Warning message about fcst/obs level mismatch (`#2795 `_). + * **Add new -ugrid_config command line option for unstructured grid inputs to Grid-Stat and Point-Stat** (`#2842 `_). + * Enhance Point2Grid to support modified quality control settings for smoke/dust AOD data in GOES-16/17 as of April 16, 2024 (`#2853 `_). + * **Enhance Point2Grid to support a wider variety of input tripolar datasets** (`#2857 `_). + * Test NOAA Unstructured grids in MET-12.0.0 (`#2860 `_). + * Enhance Ensemble-Stat and Gen-Ens-Prod to omit warning messages for the MISSING keyword (`#2870 `_). + * Add new Python functionality to convert MET NetCDF observation data to a Pandas DataFrame (`#2781 `_). + * Enhance PCP-Combine to allow missing data (`#2883 `_). + * Enhance TC-Stat to support the -set_hdr job command option (`#2911 `_). + * Refine ERROR messages written by PB2NC (`#2912 `_). MET Version 12.0.0-beta4 Release Notes (20240417) ------------------------------------------------- diff --git a/docs/Users_Guide/series-analysis.rst b/docs/Users_Guide/series-analysis.rst index 0be681585f..ed9f5578ab 100644 --- a/docs/Users_Guide/series-analysis.rst +++ b/docs/Users_Guide/series-analysis.rst @@ -33,6 +33,7 @@ The usage statement for the Series-Analysis tool is shown below: -fcst file_1 ... file_n | fcst_file_list -obs file_1 ... file_n | obs_file_list [-both file_1 ... file_n | both_file_list] + [-aggr file] [-paired] -out file -config file @@ -58,13 +59,17 @@ Optional Arguments for series_analysis 5. To set both the forecast and observations to the same set of files, use the optional -both file_1 ... file_n | both_file_list option to the same set of files.
This is useful when reading the NetCDF matched pair output of the Grid-Stat tool which contains both forecast and observation data. -6. The -paired option indicates that the -fcst and -obs file lists are already paired, meaning there is a one-to-one correspondence between the files in those lists. This option affects how missing data is handled. When -paired is not used, missing or incomplete files result in a runtime error with no output file being created. When -paired is used, missing or incomplete files result in a warning with output being created using the available data. +6. The -aggr option specifies the path to an existing Series-Analysis output file. When computing statistics for the input forecast and observation data, Series-Analysis aggregates the partial sums (SL1L2, SAL1L2 line types) and contingency table counts (CTC, MCTC, and PCT line types) with data provided in the aggregate file. This option enables Series-Analysis to run iteratively and update existing partial sums, counts, and statistics with new data. -7. The -log file outputs log messages to the specified file. +.. note:: When the -aggr option is used, only statistics that are derivable from partial sums and contingency table counts can be requested. Runtimes are generally much slower when aggregating data since it requires many additional NetCDF variables containing the scalar partial sums and contingency table counts to be read and written. -8. The -v level overrides the default level of logging (2). +7. The -paired option indicates that the -fcst and -obs file lists are already paired, meaning there is a one-to-one correspondence between the files in those lists. This option affects how missing data is handled. When -paired is not used, missing or incomplete files result in a runtime error with no output file being created. When -paired is used, missing or incomplete files result in a warning with output being created using the available data. -9. 
The -compress level option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression. +8. The -log file outputs log messages to the specified file. + +9. The -v level overrides the default level of logging (2). + +10. The -compress level option indicates the desired level of compression (deflate level) for NetCDF variables. The valid level is between 0 and 9. The value of "level" will override the default setting of 0 from the configuration file or the environment variable MET_NC_COMPRESS. Setting the compression level to 0 will make no compression for the NetCDF output. Lower number is for fast compression and higher number is for better compression. An example of the series_analysis calling sequence is shown below: @@ -179,3 +184,5 @@ The output_stats array controls the type of output that the Series-Analysis tool 11. PJC for Joint and Conditional factorization for Probabilistic forecasts (See :numref:`table_PS_format_info_PJC`) 12. PRC for Receiver Operating Characteristic for Probabilistic forecasts (See :numref:`table_PS_format_info_PRC`) + +.. note:: When the -aggr option is used, all partial sum and contingency table count columns are required to aggregate statistics across multiple runs. To facilitate this, the output_stats entries for the CTC, SL1L2, SAL1L2, and PCT line types can be set to "ALL" to indicate that all available columns for those line types should be written.
diff --git a/docs/Users_Guide/stat-analysis.rst b/docs/Users_Guide/stat-analysis.rst index 92672edc26..0b87586d09 100644 --- a/docs/Users_Guide/stat-analysis.rst +++ b/docs/Users_Guide/stat-analysis.rst @@ -324,7 +324,7 @@ The configuration file for the Stat-Analysis tool is optional. Users may find it Most of the user-specified parameters listed in the Stat-Analysis configuration file are used to filter the ASCII statistical output from the MET statistics tools down to a desired subset of lines over which statistics are to be computed. Only output that meets all of the parameters specified in the Stat-Analysis configuration file will be retained. -The Stat-Analysis tool actually performs a two step process when reading input data. First, it stores the filtering information defined top section of the configuration file. It applies that filtering criteria when reading the input STAT data and writes the filtered data out to a temporary file, as described in :numref:`Contributor's Guide Section %s `. Second, each job defined in the **jobs** entry reads data from that temporary file and performs the task defined for the job. After all jobs have run, the Stat-Analysis tool deletes the temporary file. +The Stat-Analysis tool actually performs a two step process when reading input data. First, it stores the filtering information in the defined top section of the configuration file. It applies that filtering criteria when reading the input STAT data and writes the filtered data out to a temporary file, as described in :numref:`Contributor's Guide Section %s `. Second, each job defined in the **jobs** entry reads data from that temporary file and performs the task defined for the job. After all jobs have run, the Stat-Analysis tool deletes the temporary file. This two step process enables the Stat-Analysis tool to run more efficiently when many jobs are defined in the configuration file. 
If only operating on a small subset of the input data, the common filtering criteria can be applied once rather than re-applying it for each job. In general, filtering criteria common to all tasks defined in the **jobs** entry should be moved to the top section of the configuration file. diff --git a/docs/Users_Guide/tc-diag.rst b/docs/Users_Guide/tc-diag.rst index 4f1d4630ee..edcafa62dd 100644 --- a/docs/Users_Guide/tc-diag.rst +++ b/docs/Users_Guide/tc-diag.rst @@ -15,7 +15,7 @@ Originally developed for the Statistical Hurricane Intensity Prediction Scheme ( TC-Diag is run once for each initialization time to produce diagnostics for each user-specified combination of TC tracks and model fields. The user provides track data (such as one or more ATCF a-deck track files), along with track filtering criteria as needed, to select one or more tracks to be processed. The user also provides gridded model data from which diagnostics should be computed. Gridded data can be provided for multiple concurrent storms, multiple models, and/or multiple domains (i.e. parent and nest) in a single run. -TC-Diag first determines the list of valid times that appear in any one of the tracks. For each valid time, it processes all track points for that time. For each track point, it reads the gridded model fields requested in the configuration file and transforms the gridded data to a range-azimuth cylindrical coordinates grid. For each domain, it writes the range-azimuth data to a temporary NetCDF file, as described in :numref:`Contributor's Guide Section %s `. +TC-Diag first determines the list of valid times that appear in any one of the tracks. For each valid time, it processes all track points for that time. For each track point, it reads the gridded model fields requested in the configuration file and transforms the gridded data to a range-azimuth cylindrical coordinates grid, as described for the TC-RMW tool in :numref:`tc-rmw`. 
For each domain, it writes the range-azimuth data to a temporary NetCDF file, as described in :numref:`Contributor's Guide Section %s `. Once the input data have been processed into the temporary NetCDF files, TC-Diag then calls one or more Python diagnostics scripts, as specified in the configuration file, to compute tropical cyclone diagnostic values. The computed diagnostics values are retrieved from the Python script and stored in memory. diff --git a/docs/Users_Guide/tc-pairs.rst b/docs/Users_Guide/tc-pairs.rst index c7a56a05ff..cc1c7dc2cd 100644 --- a/docs/Users_Guide/tc-pairs.rst +++ b/docs/Users_Guide/tc-pairs.rst @@ -211,7 +211,7 @@ The **consensus** array allows users to derive consensus forecasts from any numb - The **members** field is a comma-separated array of model ID strings which define the members of the consensus. - The **required** field is a comma-separated array of true/false values associated with each consensus member. If a member is designated as true, that member must be present in order for the consensus to be generated. If a member is false, the consensus will be generated regardless of whether or not the member is present. The required array can either be empty or have the same length as the members array. If empty, it defaults to all false. - The **min_req** field is the number of members required in order for the consensus to be computed. The **required** and **min_req** field options are applied at each forecast lead time. If any member of the consensus has a non-valid position or intensity value, the consensus for that valid time will not be generated. -- Tropical cyclone diagnostics, if provided on the command line, are included in the computation of consensus tracks. The consensus diagnostics are computed as the mean of the diagnostics for the members.
The **diag_required** and **min_diag_req** entries apply the same logic described above, but to the computation of each consensus diagnostic value rather than the consensus track location and intensity. If **diag_required** is missing or an empty list, it defaults to all false. If **min_diag_req** is missing, it default to 0. +- Tropical cyclone diagnostics, if provided on the command line, are included in the computation of consensus tracks. The consensus diagnostics are computed as the mean of the diagnostics for the members. The **diag_required** and **min_diag_req** entries apply the same logic described above, but to the computation of each consensus diagnostic value rather than the consensus track location and intensity. If **diag_required** is missing or an empty list, it defaults to all false. If **min_diag_req** is missing, it defaults to 0. - The **write_members** field is a boolean that indicates whether or not to write track output for the individual consensus members. If set to true, standard output will show up for all members. If set to false, output for the consensus members is excluded from the output, even if they are used to define other consensus tracks in the configuration file. Users should take care to avoid filtering out track data for the consensus members with the **model** field, described above. Either set **model** to an empty list to process all input track data or include all of the consensus members in the **model** list. Use the **write_members** field, not the **model** field, to suppress track output for consensus members. 
diff --git a/docs/Users_Guide/tc-rmw.rst b/docs/Users_Guide/tc-rmw.rst index 82628c087c..5f226cc76a 100644 --- a/docs/Users_Guide/tc-rmw.rst +++ b/docs/Users_Guide/tc-rmw.rst @@ -7,7 +7,7 @@ TC-RMW Tool Introduction ============ -The TC-RMW tool regrids tropical cyclone model data onto a moving range-azimuth grid centered on points along the storm track provided in ATCF format, most likely the adeck generated from the file. The radial grid spacing may be set as a factor of the radius of maximum winds (RMW). If wind fields are specified in the configuration file the radial and tangential wind components will be computed. Any regridding method available in MET can be used to interpolate data on the model output grid to the specified range-azimuth grid. The regridding will be done separately on each vertical level. The model data files must coincide with track points in a user provided ATCF formatted track file. +The TC-RMW tool regrids tropical cyclone model data onto a moving range-azimuth grid centered on points along the storm track provided in ATCF format, most likely the adeck generated from the file. The radial grid spacing can be defined in kilometers or as a factor of the radius of maximum winds (RMW). The azimuthal grid spacing is defined in degrees clockwise from due east. If wind vector fields are specified in the configuration file, the radial and tangential wind components will be computed. Any regridding method available in MET can be used to interpolate data on the model output grid to the specified range-azimuth grid. The regridding will be done separately on each vertical level. The model data files must coincide with track points in a user provided ATCF formatted track file. 
Practical Information ===================== diff --git a/docs/Users_Guide/tc-stat.rst b/docs/Users_Guide/tc-stat.rst index bdcbc35327..3cddda63f2 100644 --- a/docs/Users_Guide/tc-stat.rst +++ b/docs/Users_Guide/tc-stat.rst @@ -400,6 +400,8 @@ The output generated from the TC-Stat tool contains statistics produced by the a This job command finds and filters TCST lines down to those meeting the criteria selected by the filter's options. The filtered TCST lines are written to a file specified by the **-dump_row** option. The TCST output from this job follows the TCST output description in :numref:`tc-dland` and :numref:`tc-pairs`. + The "-set_hdr" job command option can be used to override any of the output header strings (e.g. "-set_hdr DESC EVENT_EQUAL" sets the output DESC column to "EVENT_EQUAL"). + **Job: Summary** This job produces summary statistics for the column name specified by the **-column** option. The output of the summary job consists of three rows: @@ -475,6 +477,18 @@ Users may also specify the **-out_alpha** option to define the alpha value for t Users may also specify the **-out_stat** option to write the contingency table counts and statistics (for the CTC and CTS output line types) to an output STAT file. Information about the RIRW timing information and filtering criteria are written to the STAT header columns while the contingency table counts and/or statistics are written to the CTC and/or CTS output columns. +When using the "-out_stat" option to create a .stat output file and stratifying results using one or more "-by" job command options, those columns may be referenced in the "-set_hdr" option. + +.. code-block:: none + + -job rirw -line_type TCMPR -by CYCLONE -out_stat ctc.stat -set_hdr DESC CYCLONE + +When using multiple "-by" options, use "CASE" to reference the full case information string. + +.. 
code-block:: none + + -job rirw -line_type TCMPR -by CYCLONE,LEAD -out_stat ctc.stat -set_hdr DESC CASE + **Job: PROBRIRW** The PROBRIRW job produces probabilistic contingency table counts and statistics defined by placing forecast probabilities and BEST track rapid intensification events into an Nx2 contingency table. Users may specify several job command options to configure the behavior of this job: diff --git a/docs/conf.py b/docs/conf.py index 3a135308ab..659a095b96 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,11 +20,11 @@ project = 'MET' author = 'UCAR/NCAR, NOAA, CSU/CIRA, and CU/CIRES' author_list = 'Prestopnik, J., H. Soh, L. Goodrich, B. Brown, R. Bullock, J. Halley Gotway, K. Newman, J. Opatz, T. Jensen' -version = '12.0.0-beta4' +version = '12.0.0-beta6' verinfo = version release = f'{version}' release_year = '2024' -release_date = f'{release_year}-04-17' +release_date = f'{release_year}-10-18' copyright = f'{release_year}, {author}' # -- General configuration --------------------------------------------------- diff --git a/internal/scripts/docker/Dockerfile b/internal/scripts/docker/Dockerfile index c5daf58492..c7a2ca5bdd 100644 --- a/internal/scripts/docker/Dockerfile +++ b/internal/scripts/docker/Dockerfile @@ -1,5 +1,5 @@ ARG MET_BASE_REPO=met-base -ARG MET_BASE_TAG=v3.2 +ARG MET_BASE_TAG=v3.3 FROM dtcenter/${MET_BASE_REPO}:${MET_BASE_TAG} MAINTAINER John Halley Gotway diff --git a/internal/scripts/docker/Dockerfile.copy b/internal/scripts/docker/Dockerfile.copy index 4d3d9476cd..51ae55ec3a 100644 --- a/internal/scripts/docker/Dockerfile.copy +++ b/internal/scripts/docker/Dockerfile.copy @@ -1,5 +1,5 @@ ARG MET_BASE_REPO=met-base-unit-test -ARG MET_BASE_TAG=v3.2 +ARG MET_BASE_TAG=v3.3 FROM dtcenter/${MET_BASE_REPO}:${MET_BASE_TAG} MAINTAINER John Halley Gotway diff --git a/internal/scripts/docker/Dockerfile.sonarqube b/internal/scripts/docker/Dockerfile.sonarqube index 75dc3c952c..7a6a7114f5 100644 --- 
a/internal/scripts/docker/Dockerfile.sonarqube +++ b/internal/scripts/docker/Dockerfile.sonarqube @@ -1,5 +1,5 @@ ARG MET_BASE_REPO=met-base -ARG MET_BASE_TAG=v3.2 +ARG MET_BASE_TAG=v3.3 FROM dtcenter/${MET_BASE_REPO}:${MET_BASE_TAG} MAINTAINER John Halley Gotway @@ -9,7 +9,7 @@ MAINTAINER John Halley Gotway # SonarQube static code analysis on the specified branch or tag. # https://docs.sonarqube.org/latest/analysis/scan/sonarscanner/ # -ARG SONAR_SCANNER_VERSION=5.0.1.3006 +ARG SONAR_SCANNER_VERSION=6.1.0.4477 ARG SONAR_HOST_URL ARG SONAR_TOKEN ARG SOURCE_BRANCH diff --git a/internal/scripts/docker/build_met_sonarqube.sh b/internal/scripts/docker/build_met_sonarqube.sh index 2eea05654e..78163b5379 100755 --- a/internal/scripts/docker/build_met_sonarqube.sh +++ b/internal/scripts/docker/build_met_sonarqube.sh @@ -100,6 +100,7 @@ time_command ./configure \ BUFRLIB_NAME=${BUFRLIB_NAME} \ GRIB2CLIB_NAME=${GRIB2CLIB_NAME} \ --enable-all \ + MET_CXX_STANDARD=11 \ CPPFLAGS="-I/usr/local/include -I/usr/local/include/freetype2 -I/usr/local/include/cairo" \ LIBS="-ltirpc" diff --git a/internal/scripts/environment/development.seneca b/internal/scripts/environment/development.seneca index 7007241362..4f15f6ff73 100644 --- a/internal/scripts/environment/development.seneca +++ b/internal/scripts/environment/development.seneca @@ -46,7 +46,7 @@ export MET_TEST_INPUT=${MET_PROJ_DIR}/MET_test_data/unit_test export MET_FONT_DIR=${MET_TEST_INPUT}/fonts # Define Rscript to use a version with the ncdf4 package 1.17 or later -export MET_TEST_RSCRIPT=/nrit/ral/R-4.3.1/bin/Rscript +export MET_TEST_RSCRIPT=/nrit/ral/R-4.4.0/bin/Rscript # Define runtime Python version export MET_TEST_MET_PYTHON_EXE=${MET_PYTHON_BIN_EXE} diff --git a/internal/scripts/installation/compile_MET_all.sh b/internal/scripts/installation/compile_MET_all.sh index 6524aa8563..2497e582a8 100755 --- a/internal/scripts/installation/compile_MET_all.sh +++ b/internal/scripts/installation/compile_MET_all.sh @@ -640,7 
+640,11 @@ if [ $COMPILE_ECKIT -eq 1 ]; then # Need to obtain ecbuild before installing eckit - vrs="3.5.0" + if [[ ! -z ${MET_CXX_STANDARD} && ${MET_CXX_STANDARD} -le 14 ]]; then + vrs="3.5.0" + elif [[ -z ${MET_CXX_STANDARD} ]]; then + vrs="3.7.0" + fi echo echo "Compiling ECBUILD at `date`" @@ -652,9 +656,13 @@ if [ $COMPILE_ECKIT -eq 1 ]; then run_cmd "mkdir build; cd build" run_cmd "cmake ../ -DCMAKE_INSTALL_PREFIX=${LIB_DIR} > $(pwd)/ecbuild.cmake.log 2>&1" run_cmd "make ${MAKE_ARGS} install > $(pwd)/ecbuild.make_install.log 2>&1" - - vrs="1.20.2" + if [[ ! -z ${MET_CXX_STANDARD} && ${MET_CXX_STANDARD} -le 14 ]]; then + vrs="1.20.2" + elif [[ -z ${MET_CXX_STANDARD} ]]; then + vrs="1.24.4" + fi + echo echo "Compiling ECKIT at `date`" mkdir -p ${LIB_DIR}/eckit @@ -671,7 +679,11 @@ fi # Compile ATLAS if [ $COMPILE_ATLAS -eq 1 ]; then - vrs="0.30.0" + if [[ ! -z ${MET_CXX_STANDARD} && ${MET_CXX_STANDARD} -le 14 ]]; then + vrs="0.30.0" + elif [[ -z ${MET_CXX_STANDARD} ]]; then + vrs="0.35.0" + fi echo echo "Compiling ATLAS at `date`" @@ -862,9 +874,11 @@ if [ -z ${MET_PROJ} ]; then export MET_PROJ=${LIB_DIR} fi -export MET_PYTHON_BIN_EXE=${MET_PYTHON_BIN_EXE:=${MET_PYTHON}/bin/python3} -export MET_PYTHON_LD -export MET_PYTHON_CC +if [[ ! -z ${MET_PYTHON_CC} || ! 
-z ${MET_PYTHON_LD} ]]; then + export MET_PYTHON_BIN_EXE=${MET_PYTHON_BIN_EXE:=${MET_PYTHON}/bin/python3} + export MET_PYTHON_LD + export MET_PYTHON_CC +fi # add flags to user-defined LDFLAGS for MacOS if [[ $machine != "Mac" ]]; then diff --git a/internal/scripts/installation/compile_MET_all.wcoss_beta5.sh b/internal/scripts/installation/compile_MET_all.wcoss_beta5.sh new file mode 100755 index 0000000000..5065643c84 --- /dev/null +++ b/internal/scripts/installation/compile_MET_all.wcoss_beta5.sh @@ -0,0 +1,959 @@ +#!/bin/bash +# +# Compile and install MET +# (Model Evaluation Tools) +#================================================ +# +# This compile_MET_all.sh script expects certain environment +# variables to be set: +# TEST_BASE, COMPILER (or COMPILER_FAMILY and COMPILER_VERSION), +# MET_SUBDIR, MET_TARBALL, and USE_MODULES. +# +# If compiling support for Python embedding, users will need to +# set MET_PYTHON, MET_PYTHON_BIN_EXE, MET_PYTHON_CC, and MET_PYTHON_LD. +# Users can directly set the python module to be loaded by setting +# either PYTHON_MODULE or by setting PYTHON_NAME and PYTHON_VERSION: +# - PYTHON_MODULE (only used if USE_MODULES=TRUE) - format is the name +# of the Python module to load followed by an underscore and then the +# version number (e.g. python_3.10.4, The script will then run "module +# load python/3.10.4") +# - PYTHON_NAME = python (or e.g. python3, etc.) +# - PYTHON_VERSION = 3.10.4 +# +# For a description of these and other variables, visit the MET +# downloads page under "Sample Script For Compiling External +# Libraries And MET": +# https://dtcenter.org/community-code/model-evaluation-tools-met/download +# +# An easy way to set these necessary environment variables is +# in an environment configuration file (for example, +# install_met_env.). 
This script and example +# environment config files for various machines can be found in +# the MET GitHub repository in the scripts/installation directory: +# https://github.com/dtcenter/MET +# +# USAGE: compile_MET_all.sh install_met_env. +# +# The compile_MET_all.sh script will compile and install MET and its +# external library dependencies, if needed, including: +# PROJ (with dependency SQLITE >= 3.11), GSL, BUFRLIB, +# GRIB2C (with dependencies Z, PNG, JASPER, JPEG), HDF5, NETCDF (C and CXX), +# HDF4 (optional for MODIS-Regrid and lidar2nc), HDFEOS (optional for +# MODIS-Regrid and lidar2nc), FREETYPE (optional for MODE Graphics), +# and CAIRO (optional for MODE Graphics). +# +# If these libraries have already been installed and don't need to be +# reinstalled or if you are compiling on a machine that uses modulefiles +# and you'd like to make use of the existing dependent libraries on +# your machine, there are more environment variables that you will +# need to set to let MET know where the library and header files are. +# Please supply values for the following environment variables +# in the input environment configuration file (install_met_env.: +# MET_GRIB2CLIB, MET_GRIB2CINC, GRIB2CLIB_NAME, MET_BUFRLIB, BUFRLIB_NAME, +# MET_HDF5, MET_NETCDF, MET_PROJ, MET_GSL, LIB_JASPER, LIB_LIBPNG, LIB_Z, +# LIB_JPEG, SQLITE_INCLUDE_DIR, SQLITE_LIB_DIR, TIFF_INCLUDE_DIR, TIFF_LIB_DIR. +# +# The optional libraries ecKit and atlas offer support for unstructured +# grids. The optional libraries HDF4, HDFEOS, FREETYPE, and CAIRO are +# used for the following, not widely used tools, MODIS-Regrid, +# lidar2nc, and MODE Graphics. To enable building of these libraries, +# set the compile flags for the library (e.g. COMPILE_ECKIT, COMPILE_ATLAS, +# COMPILE_HDF, COMPILE_HDFEOS) to any value in the environment config +# file. 
If these libraries have already been installed and don't need +# to be reinstalled, please supply values for the following environment +# variables in the input environment configuration file +# (install_met_env.): MET_ECKIT, MET_ATLAS, MET_HDF, +# MET_HDFEOS, MET_FREETYPEINC, MET_FREETYPELIB, MET_CAIROINC, +# MET_CAIROLIB. +# +# Users can speed up the compilation of MET and its dependent libraries +# by adding the following to their environment configuration file: +# export MAKE_ARGS=-j # +# replacing the # with the number of cores to use (integer) or simply +# specifying: +# export MAKE_ARGS=-j +# with no integer argument to start as many processes in parallel as +# possible. +#================================================ + +# print command, run it, then error and exit if non-zero value is returned +function run_cmd { + echo $* + eval "$@" + ret=$? + if [ $ret != 0 ]; then + echo "ERROR: Command returned with non-zero ($ret) status: $*" + exit $ret + fi +} + +if [ -z $1 ]; then + echo + echo "No environment configuration file provided (e.g. install_met_env.). Starting compilation with current environment." +else + if [ ! -f "$1" ]; then + echo "The file \"$1\" does not exist!" + exit 1 + fi + + source $1 +fi + +echo +echo "TEST_BASE = ${TEST_BASE? "ERROR: TEST_BASE must be set"}" +echo "MET_SUBDIR = ${MET_SUBDIR? "ERROR: MET_SUBDIR must be set"}" +echo "MET_TARBALL = ${MET_TARBALL? "ERROR: MET_TARBALL must be set"}" +echo "USE_MODULES = ${USE_MODULES? 
"ERROR: USE_MODULES must be set to TRUE if using modules or FALSE otherwise"}" +if [[ -z "$COMPILER" ]] && [[ -z "$COMPILER_FAMILY" && -z "$COMPILER_VERSION" ]]; then + echo "ERROR: COMPILER or COMPILER_FAMILY and COMPILER_VERSION must be set" + exit 1 +fi +echo ${MAKE_ARGS:+MAKE_ARGS = $MAKE_ARGS} + + +LIB_DIR=${TEST_BASE}/external_libs +MET_DIR=${MET_SUBDIR} + +if [ -z "${BIN_DIR_PATH}" ]; then + if [ -z "${MET_INSTALL_DIR}" ]; then + BIN_DIR_PATH=${TEST_BASE}/bin + else + BIN_DIR_PATH=${MET_INSTALL_DIR}/bin + fi +fi + +if [ -z "${MET_INSTALL_DIR}" ]; then + MET_INSTALL_DIR=${MET_DIR} +else + LIB_DIR=${MET_INSTALL_DIR} +fi + +TAR_DIR=${TEST_BASE}/tar_files +MET_TARBALL=${TAR_DIR}/${MET_TARBALL} + +# Create directory for libraries +mkdir -p ${LIB_DIR} + +# Check that tar files exist +if [ ! -e $TAR_DIR ]; then + echo "TAR File directory doesn't exist: ${TAR_DIR}" + exit 1 +fi + +# If MET_PYTHON_LIB is not set in the environment file, set it to the +# lib directory so it can be used to install MET with Python Embedding +# support +if [[ -z "$MET_PYTHON_LIB" ]]; then + MET_PYTHON_LIB=${MET_PYTHON}/lib +fi + + +# Print library linker path +echo "LD_LIBRARY_PATH = ${LD_LIBRARY_PATH}" + +# if LIB_Z is not set in the environment file, set it to the +# lib directory so it can be used to install HDF5 with zlib support +if [[ -z "$LIB_Z" ]]; then + LIB_Z=${LIB_DIR}/lib +fi + +# if TIFF is not defined in the environment file, enable its compilation +if [[ -z ${TIFF_INCLUDE_DIR} ]] && [[ -z ${TIFF_LIB_DIR} ]]; then + COMPILE_TIFF=1 +else + COMPILE_TIFF=0 +fi + +# if SQLITE is not defined in the environment file, enable its compilation +if [[ -z ${SQLITE_INCLUDE_DIR} ]] && [[ -z ${SQLITE_LIB_DIR} ]]; then + COMPILE_SQLITE=1 +else + COMPILE_SQLITE=0 +fi + +# Constants +if [[ -z ${MET_GRIB2CLIB} ]] && [[ -z ${MET_GRIB2C} ]]; then + COMPILE_ZLIB=1 + COMPILE_LIBPNG=1 + COMPILE_JASPER=1 + COMPILE_JPEG=1 + COMPILE_G2CLIB=1 +else + COMPILE_ZLIB=0 + COMPILE_LIBPNG=0 + COMPILE_JASPER=0
+ COMPILE_JPEG=0 + COMPILE_G2CLIB=0 +fi + +if [ -z ${MET_BUFRLIB} ]; then COMPILE_BUFRLIB=1; else COMPILE_BUFRLIB=0; fi + +if [ -z ${MET_NETCDF} ]; then COMPILE_NETCDF=1; else COMPILE_NETCDF=0; fi + +if [ -z ${MET_PROJ} ]; then COMPILE_PROJ=1; else COMPILE_PROJ=0; fi + +if [ -z ${MET_GSL} ]; then COMPILE_GSL=1; else COMPILE_GSL=0; fi + +# Only set COMPILE_ECKIT and COMPILE_ATLAS if you want to compile and enable support for unstructured grids +if [ ! -z "${COMPILE_ECKIT}" ]; then COMPILE_ECKIT=1; else COMPILE_ECKIT=0; fi +if [ ! -z "${COMPILE_ATLAS}" ]; then COMPILE_ATLAS=1; else COMPILE_ATLAS=0; fi + +if [[ -z ${MET_ECKIT} ]] && [[ -z ${MET_ATLAS} ]]; then + if [[ $COMPILE_ECKIT -eq 1 && $COMPILE_ATLAS -eq 1 ]]; then + export MET_ECKIT=${LIB_DIR} + export MET_ATLAS=${LIB_DIR} + fi +else + # Only set COMPILE_ECKIT and COMPILE_ATLAS to 1 if you have already compiled ECKIT and ATLAS, + # have set MET_ECKIT and MET_ATLAS in your configuration file, and want to enable + # unstructured grids + COMPILE_ECKIT=0 + COMPILE_ATLAS=0 +fi + +# Only set COMPILE_HDF and COMPILE_HDFEOS if you want to compile and enable MODIS-Regrid (not widely used) +if [ ! -z "${COMPILE_HDF}" ]; then COMPILE_HDF=1; else COMPILE_HDF=0; fi +if [ ! -z "${COMPILE_HDFEOS}" ]; then COMPILE_HDFEOS=1; else COMPILE_HDFEOS=0; fi + +if [[ -z ${MET_HDF} ]] && [[ -z ${MET_HDFEOS} ]]; then + if [[ $COMPILE_HDF -eq 1 && $COMPILE_HDFEOS -eq 1 ]]; then + export MET_HDF=${LIB_DIR} + export MET_HDFEOS=${LIB_DIR} + fi +else + # Only set COMPILE_HDF and COMPILE_HDFEOS to 1 if you have already compiled HDF4 and HDFEOS, + # have set MET_HDF and MET_HDFEOS in your configuration file, and want to enable + # MODIS-Regrid (not widely used) + COMPILE_HDF=0 + COMPILE_HDFEOS=0 +fi + +# Only set COMPILE_FREETYPE and COMPILE_CAIRO if you want to compile and enable MODE Graphics (not widely used) +if [ ! -z "${COMPILE_FREETYPE}" ]; then COMPILE_FREETYPE=1; else COMPILE_FREETYPE=0; fi +if [ !
-z "${COMPILE_CAIRO}" ]; then COMPILE_CAIRO=1; else COMPILE_CAIRO=0; fi + + +if [[ ! -z ${MET_FREETYPE} ]]; then + echo "ERROR: MET_FREETYPEINC and MET_FREETYPELIB must be set instead of MET_FREETYPE" + exit 1 +fi + +if [[ ! -z ${MET_CAIRO} ]]; then + echo "ERROR: MET_CAIROINC and MET_CAIROLIB must be set instead of MET_CAIRO" + exit 1 +fi + +if [[ -z ${MET_FREETYPEINC} && -z ${MET_FREETYPELIB} && -z ${MET_CAIROINC} && -z ${MET_CAIROLIB} ]]; then + if [[ $COMPILE_CAIRO -eq 1 && $COMPILE_FREETYPE -eq 1 ]]; then + export MET_CAIROINC=${LIB_DIR}/include/cairo + export MET_CAIROLIB=${LIB_DIR}/lib + export MET_FREETYPEINC=${LIB_DIR}/include/freetype2 + export MET_FREETYPELIB=${LIB_DIR}/lib + fi +else + # Only set COMPILE_FREETYPE and COMPILE_CAIRO to 1 if you have compiled FREETYPE and CAIRO, + # have set MET_FREETYPEINC, MET_FREETYPELIB, MET_CAIROINC, and MET_CAIROLIB in your + # configuration file, and want to enable MODE Graphics (not widely used) + COMPILE_FREETYPE=0 + COMPILE_CAIRO=0 +fi + +COMPILE_MET=1 + +# skip compilation of MET if SKIP_MET is set +if [ ! -z "${SKIP_MET}" ]; then COMPILE_MET=0; fi + +# skip compilation of external libraries if SKIP_LIBS is set +if [ ! -z "${SKIP_LIBS}" ]; then + COMPILE_PROJ=0 + COMPILE_GSL=0 + COMPILE_BUFRLIB=0 + COMPILE_ZLIB=0 + COMPILE_LIBPNG=0 + COMPILE_JASPER=0 + COMPILE_JPEG=0 + COMPILE_G2CLIB=0 + COMPILE_ECKIT=0 + COMPILE_ATLAS=0 + COMPILE_HDF=0 + COMPILE_HDFEOS=0 + COMPILE_NETCDF=0 + COMPILE_FREETYPE=0 + COMPILE_CAIRO=0 +fi + +if [ -z ${BIN_DIR_PATH} ]; then + BIN_DIR_PATH=${TEST_BASE}/bin +fi + +if [ -z ${USE_MET_TAR_FILE} ]; then + export USE_MET_TAR_FILE=TRUE +fi + +echo +echo "Compiling libraries into: ${LIB_DIR}" + +if [ ! -e ${LIB_DIR}/include ]; then + mkdir ${LIB_DIR}/include +fi + +if [ ! 
-e ${LIB_DIR}/lib ]; then + mkdir ${LIB_DIR}/lib +fi + +# Load compiler version +if [ -z ${COMPILER_FAMILY} ]; then + COMPILER_FAMILY=` echo $COMPILER | cut -d'_' -f1` +fi + +# Check for "oneapi" in compiler family name +#if echo ${COMPILER_FAMILY} | grep -E "^intel"; then +if [[ ${COMPILER_FAMILY} == *intel* ]]; then + COMPILER_FAMILY_SUFFIX=` echo $COMPILER_FAMILY | cut -d'-' -f2` +fi + +if [ -z ${COMPILER_VERSION} ]; then + COMPILER_VERSION=`echo $COMPILER | cut -d'_' -f2` +fi + +echo "COMPILER = $COMPILER" +echo "COMPILER_FAMILY = $COMPILER_FAMILY" +echo "COMPILER_FAMILY_SUFFIX = $COMPILER_FAMILY_SUFFIX" +echo "COMPILER_VERSION = $COMPILER_VERSION" +COMPILER_MAJOR_VERSION=`echo $COMPILER_VERSION | cut -d'.' -f1` +COMPILER_MINOR_VERSION=`echo $COMPILER_VERSION | cut -d'.' -f2` + +echo +echo "USE_MODULES = ${USE_MODULES}" +echo + +if [ ${USE_MODULES} = "TRUE" ]; then + echo "module load ${COMPILER_FAMILY}/${COMPILER_VERSION}" + echo ${COMPILER_FAMILY}/${COMPILER_VERSION} + + module load ${COMPILER_FAMILY}/${COMPILER_VERSION} + if [ ${COMPILER_FAMILY} = "PrgEnv-intel" ]; then + module load craype + module switch craype craype-sandybridge + fi +fi + +# After loading the compiler module, strip any extra +# characters off of "gnu" (e.g. 
"gnu9") +if [[ ${COMPILER_FAMILY} == *gnu* ]]; then + export COMPILER_FAMILY="gnu" +fi + +if [ ${COMPILER_FAMILY} = "gnu" ]; then + if [ -z ${CC} ]; then export CC=`which gcc`; fi + if [ -z ${CXX} ]; then export CXX=`which g++`; fi + if [ -z ${FC} ]; then export FC=`which gfortran`; fi + if [ -z ${F77} ]; then export F77=`which gfortran`; fi + if [ -z ${F90} ]; then export F90=`which gfortran`; fi +elif [ ${COMPILER_FAMILY} = "pgi" ]; then + if [ -z ${CC} ]; then export CC=`which pgcc`; fi + if [ -z ${CXX} ]; then export CXX=`which pgc++`; fi + if [ -z ${FC} ]; then export FC=`which pgf90`; fi + if [ -z ${F77} ]; then export F77=`which pgf90`; fi + if [ -z ${F90} ]; then export F90=`which pgf90`; fi +elif [[ ${COMPILER_FAMILY} == *intel* && ${CC} == "icc" ]] || \ + [[ ${COMPILER_FAMILY} == "ics" ]] || \ + [[ ${COMPILER_FAMILY} == "ips" ]] || \ + [[ ${COMPILER_FAMILY} == "intel-classic" ]] || \ + [[ ${COMPILER_FAMILY} == "PrgEnv-intel" ]]; then + if [ -z ${CC} ]; then export CC=`which icc`; fi + if [ -z ${CXX} ]; then export CXX=`which icpc`; fi + if [ -z ${FC} ]; then export FC=`which ifort`; fi + if [ -z ${F77} ]; then export F77=`which ifort`; fi + if [ -z ${F90} ]; then export F90=`which ifort`; fi +elif [[ ${COMPILER_FAMILY} == *intel* ]] && [[ ${CC} == *icx* ]]; then + export CXX=`which icpx` + export FC=`which ifx` + export F77=`which ifx` + export F90=`which ifx` +elif [[ ${COMPILER_FAMILY_SUFFIX} == oneapi ]]; then + export CC=`which icx` + export CXX=`which icpx` + export FC=`which ifx` + export F77=`which ifx` + export F90=`which ifx` +else + echo "ERROR: \${COMPILER} must start with gnu, intel, ics, ips, intel-classic, PrgEnv-intel, or pgi" + exit +fi + +echo "export CC=${CC}" +echo "export CXX=${CXX}" +echo "export FC=${FC}" +echo "export F77=${F77}" +echo "export F90=${F90}" +echo + +# Figure out what kind of OS is being used +unameOut="$(uname -s)" +case "${unameOut}" in + Linux*) machine=Linux;; + Darwin*) machine=Mac;; + CYGWIN*) machine=Cygwin;; + 
MINGW*) machine=MinGw;; + *) machine="UNKNOWN:${unameOut}" +esac + +# change sed command and extension for dynamic library files +if [[ $machine == "Mac" ]]; then + sed_inline="sed -i ''" +else + sed_inline="sed -i''" +fi + +if [[ "$(uname -m)" == "arm64" ]]; then + dynamic_lib_ext="dylib" +else + dynamic_lib_ext="so" +fi + +# Load Python module + +if [ ${USE_MODULES} = "TRUE" ]; then + if [ ! -z ${PYTHON_MODULE} ]; then + PYTHON_NAME=`echo $PYTHON_MODULE | cut -d'_' -f1` + PYTHON_VERSION_NUM=`echo $PYTHON_MODULE | cut -d'_' -f2` + echo "module load ${PYTHON_NAME}/${PYTHON_VERSION_NUM}" + echo ${PYTHON_NAME}/${PYTHON_VERSION_NUM} + module load ${PYTHON_NAME}/${PYTHON_VERSION_NUM} + # Allow the user to specify the name and version of the module to load + elif [[ ! -z ${PYTHON_NAME} && ! -z ${PYTHON_VERSION_NUM} ]]; then + echo "module load ${PYTHON_NAME}/${PYTHON_VERSION_NUM}" + echo ${PYTHON_NAME}/${PYTHON_VERSION_NUM} + module load ${PYTHON_NAME}/${PYTHON_VERSION_NUM} + fi +fi + +if [[ ${MET_PYTHON}/bin/python3 ]]; then + echo "Using python version: " + ${MET_PYTHON}/bin/python3 --version +fi + +# Compile Proj +if [ $COMPILE_PROJ -eq 1 ]; then + + + if [ $COMPILE_TIFF -eq 1 ]; then + echo + echo "Compiling TIFF at `date`" + mkdir -p ${LIB_DIR}/tiff + rm -rf ${LIB_DIR}/tiff/tiff* + tar -xzf ${TAR_DIR}/tiff*.tar.gz -C ${LIB_DIR}/tiff + cd ${LIB_DIR}/tiff/tiff* + echo "cd `pwd`" + run_cmd "./configure --prefix=${LIB_DIR} > $(pwd)/tiff.configure.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/tiff.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/tiff.make_install.log 2>&1" + export TIFF_INCLUDE_DIR=${LIB_DIR}/include + export TIFF_LIB_DIR=${LIB_DIR}/lib + fi + + if [ $COMPILE_SQLITE -eq 1 ]; then + echo + echo "Compiling SQLITE at `date`" + mkdir -p ${LIB_DIR}/sqlite + rm -rf ${LIB_DIR}/sqlite/sqlite* + tar -xf ${TAR_DIR}/sqlite*.tar.gz -C ${LIB_DIR}/sqlite > /dev/null 2>&1 + cd ${LIB_DIR}/sqlite/sqlite* + echo "cd `pwd`" + run_cmd "./configure 
--enable-shared --prefix=${LIB_DIR} > $(pwd)/sqlite.configure.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/sqlite.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/sqlite.make_install.log 2>&1" + export SQLITE_INCLUDE_DIR=${LIB_DIR}/include + export SQLITE_LIB_DIR=${LIB_DIR}/lib + fi + + vrs="7.1.0" + + echo + echo "Compiling PROJ_${vrs} at `date`" + echo "cmake version `cmake --version`" + mkdir -p ${LIB_DIR}/proj + rm -rf ${LIB_DIR}/proj/proj* + tar -xf ${TAR_DIR}/proj-${vrs}.tar.gz -C ${LIB_DIR}/proj + cd ${LIB_DIR}/proj/proj* + echo "cd `pwd`" + export PATH=${LIB_DIR}/bin:${PATH} + run_cmd "mkdir build; cd build" + + tiff_arg="" + # add tiff library and include arguments if necessary + if [[ ! -z "$TIFF_LIB_DIR" ]]; then + tiff_arg+="-DTIFF_LIBRARY_RELEASE=${TIFF_LIB_DIR}/libtiff.${dynamic_lib_ext}" + fi + if [[ ! -z "$TIFF_INCLUDE_DIR" ]]; then + tiff_arg+=" -DTIFF_INCLUDE_DIR=${TIFF_INCLUDE_DIR}" + fi + + cmd="cmake -DCMAKE_INSTALL_PREFIX=${LIB_DIR} -DSQLITE3_INCLUDE_DIR=${SQLITE_INCLUDE_DIR} -DSQLITE3_LIBRARY=${SQLITE_LIB_DIR}/libsqlite3.${dynamic_lib_ext} ${tiff_arg} .. > $(pwd)/proj.cmake.log 2>&1" + run_cmd ${cmd} + run_cmd "cmake --build . > $(pwd)/proj.cmake_build.log 2>&1" + run_cmd "cmake --build . 
--target install > $(pwd)/proj.cmake_install.log 2>&1" + +fi + +# Compile GSL +if [ $COMPILE_GSL -eq 1 ]; then + + if [ ${COMPILER_FAMILY} = "pgi" ]; then + vrs="1.11" + else + vrs="2.7.1" + fi + + echo + echo "Compiling GSL_${vrs} at `date`" + mkdir -p ${LIB_DIR}/gsl + rm -rf ${LIB_DIR}/gsl/gsl* + tar -xf ${TAR_DIR}/gsl-${vrs}.tar.gz -C ${LIB_DIR}/gsl + cd ${LIB_DIR}/gsl/gsl* + echo "cd `pwd`" + run_cmd "./configure --prefix=${LIB_DIR} > $(pwd)/gsl.configure.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/gsl.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/gsl.make_install.log 2>&1" +fi + +# Compile BUFRLIB +if [ $COMPILE_BUFRLIB -eq 1 ]; then + + vrs="v11.6.0" + + echo + echo "Compiling bufr_${vrs} at `date`" + mkdir -p ${LIB_DIR}/bufrlib + rm -rf ${LIB_DIR}/bufrlib/NCEPLIBS-bufr-bufr_${vrs} + tar -xf ${TAR_DIR}/bufr_${vrs}.tar.gz -C ${LIB_DIR}/bufrlib + export SOURCE_DIR=${LIB_DIR}/bufrlib/NCEPLIBS-bufr-bufr_${vrs} + cd $SOURCE_DIR + echo "cd `pwd`" + run_cmd "mkdir build" + export BUILD_DIR=${SOURCE_DIR}/build + run_cmd "cmake -H${SOURCE_DIR} -B${BUILD_DIR} -DCMAKE_INSTALL_PREFIX=${LIB_DIR} -DCMAKE_BUILD_TYPE=Debug > $(pwd)/bufr.cmake.log 2>&1" + run_cmd "cd ${BUILD_DIR}" + run_cmd "make ${MAKE_ARGS} > $(pwd)/bufr.make.log 2>&1" + run_cmd "ctest > $(pwd)/bufr.ctest.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/bufr.make_install.log 2>&1" +fi + + +# Compile ZLIB +if [ $COMPILE_ZLIB -eq 1 ]; then + echo + echo "Compiling ZLIB at `date`" + mkdir -p ${LIB_DIR}/zlib + rm -rf ${LIB_DIR}/zlib/zlib* + tar -xzf ${TAR_DIR}/zlib*.tar.gz -C ${LIB_DIR}/zlib + cd ${LIB_DIR}/zlib/zlib* + echo "cd `pwd`" + run_cmd "./configure --prefix=${LIB_DIR} > $(pwd)/zlib.configure.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/zlib.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/zlib.make_install.log 2>&1" + + # GPM: why is this removed? Could we add a comment to + # describe why this is needed? 
+ run_cmd "rm ${LIB_DIR}/lib/libz.a" +fi + +# Compile LIBPNG +if [[ $COMPILE_LIBPNG -eq 1 && $HOST != ys* ]]; then + echo + echo "Compiling LIBPNG at `date`" + mkdir -p ${LIB_DIR}/libpng + rm -rf ${LIB_DIR}/libpng/libpng* + tar -xzf ${TAR_DIR}/libpng*.tar.gz -C ${LIB_DIR}/libpng + cd ${LIB_DIR}/libpng/libpng* + echo "cd `pwd`" + run_cmd "./configure --prefix=${LIB_DIR} LDFLAGS=-L${LIB_DIR}/lib CPPFLAGS=-I${LIB_DIR}/include > $(pwd)/libpng.configure.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/libpng.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/libpng.make_install.log 2>&1" +fi + + +# Compile JASPER +if [ $COMPILE_JASPER -eq 1 ]; then + + vrs="2.0.25" + + echo + echo "Compiling JASPER at `date`" + mkdir -p ${LIB_DIR}/jasper + rm -rf ${LIB_DIR}/jasper/jasper* + tar -xf ${TAR_DIR}/jasper-${vrs}.tar.gz -C ${LIB_DIR}/jasper + cd ${LIB_DIR}/jasper/jasper-version-${vrs} + export CPPFLAGS="-I${LIB_DIR}/include" + export SOURCE_DIR=${LIB_DIR}/jasper/jasper-version-${vrs} + echo "cd `pwd`" + export BUILD_DIR=${LIB_DIR}/jasper/jasper-version-${vrs}/build + run_cmd "cmake -G \"Unix Makefiles\" -H${SOURCE_DIR} -B${BUILD_DIR} -DCMAKE_INSTALL_PREFIX=${LIB_DIR} -DJAS_ENABLE_DOC=false > $(pwd)/jasper.cmake.log 2>&1" + run_cmd "cd ${BUILD_DIR}" + run_cmd "make clean all > $(pwd)/jasper.make.log 2>&1" + # Commented out due to “which: no opj2_compress in …” error, which causes one of four tests to fail + # This is a known problem, so skipping tests for now: https://github.com/AAROC/CODE-RADE/issues/36#issuecomment-359744351 + #run_cmd "make ${MAKE_ARGS} test > $(pwd)/jasper.make_test.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/jasper.make_install.log 2>&1" +fi + +# Compile JPEG +if [ $COMPILE_JPEG -eq 1 ]; then + + vrs="9e" + + echo + echo "Compiling JPEG at `date`" + mkdir -p ${LIB_DIR}/jpeg + rm -rf ${LIB_DIR}/jpeg/jpeg* + tar -xf ${TAR_DIR}/jpegsrc.v${vrs}.tar.gz -C ${LIB_DIR}/jpeg + cd ${LIB_DIR}/jpeg/jpeg-${vrs} + echo "cd `pwd`" + run_cmd 
"./configure --prefix=${LIB_DIR} LDFLAGS=-L${LIB_DIR}/lib CPPFLAGS=-I${LIB_DIR}/include > $(pwd)/libjpeg.configure.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/libjpeg.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/libjpeg.make_install.log 2>&1" +fi + + +# Compile G2CLIB +if [ $COMPILE_G2CLIB -eq 1 ]; then + + vrs="1.6.4" + + echo + echo "Compiling G2CLIB at `date`" + mkdir -p ${LIB_DIR}/g2clib + rm -rf ${LIB_DIR}/g2clib/NCEP* + tar -xf ${TAR_DIR}/g2clib-${vrs}.tar.gz -C ${LIB_DIR}/g2clib + cd ${LIB_DIR}/g2clib/NCEP* + echo "cd `pwd`" + run_cmd "mkdir build; cd build" + run_cmd "cmake -DCMAKE_INSTALL_PREFIX=${LIB_DIR} -DCMAKE_PREFIX_PATH=${LIB_DIR} .. > $(pwd)/g2c.cmake.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/g2c.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} test > $(pwd)/g2c.make_test.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/g2c.make_install.log 2>&1" +fi + +# Compile ECKIT +if [ $COMPILE_ECKIT -eq 1 ]; then + + # Need to obtain ecbuild before installing eckit + + vrs="3.5.0" + + echo + echo "Compiling ECBUILD at `date`" + mkdir -p ${LIB_DIR}/ecbuild + rm -rf ${LIB_DIR}/ecbuild/ecbuild* + tar -xf ${TAR_DIR}/ecbuild-${vrs}.tar.gz -C ${LIB_DIR}/ecbuild + cd ${LIB_DIR}/ecbuild/ecbuild* + echo "cd `pwd`" + run_cmd "mkdir build; cd build" + run_cmd "cmake ../ -DCMAKE_INSTALL_PREFIX=${LIB_DIR} > $(pwd)/ecbuild.cmake.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/ecbuild.make_install.log 2>&1" + + vrs="1.20.2" + + echo + echo "Compiling ECKIT at `date`" + mkdir -p ${LIB_DIR}/eckit + rm -rf ${LIB_DIR}/eckit/eckit* + tar -xf ${TAR_DIR}/eckit-${vrs}.tar.gz -C ${LIB_DIR}/eckit + cd ${LIB_DIR}/eckit/eckit* + echo "cd `pwd`" + run_cmd "mkdir build; cd build" + run_cmd "cmake ../ -DCMAKE_INSTALL_PREFIX=${LIB_DIR} -DCMAKE_PREFIX_PATH=${LIB_DIR} > $(pwd)/eckit.cmake.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/eckit.make_install.log 2>&1" + +fi + +# Compile ATLAS +if [ $COMPILE_ATLAS -eq 1 ]; then + + vrs="0.30.0" + + echo 
+ echo "Compiling ATLAS at `date`" + mkdir -p ${LIB_DIR}/atlas + rm -rf ${LIB_DIR}/atlas/atlas* + tar -xf ${TAR_DIR}/atlas-${vrs}.tar.gz -C ${LIB_DIR}/atlas + cd ${LIB_DIR}/atlas/atlas* + echo "cd `pwd`" + run_cmd "mkdir build; cd build" + run_cmd "cmake ../ -DCMAKE_INSTALL_PREFIX=${LIB_DIR} -DCMAKE_PREFIX_PATH=${LIB_DIR} > $(pwd)/atlas.cmake.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/atlas.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/atlas.make_install.log 2>&1" + +fi + +# Compile HDF +# Depends on jpeg +# Edit 'mfhdf/hdiff/Makefile' as follows: +# From: LIBS = -ljpeg -lz +# To: LIBS = -ljpeg -lz -lm +if [ $COMPILE_HDF -eq 1 ]; then + echo + echo "Compiling HDF at `date`" + mkdir -p ${LIB_DIR}/hdf + rm -rf ${LIB_DIR}/hdf/HDF* + tar -xf ${TAR_DIR}/HDF4.2*.tar.gz -C ${LIB_DIR}/hdf + cd ${LIB_DIR}/hdf/HDF* + echo "cd `pwd`" + run_cmd "./configure --prefix=${LIB_DIR} --disable-netcdf --with-jpeg=${LIB_DIR} --with-zlib=${LIB_DIR} CPPFLAGS=-I/usr/include/tirpc LIBS='-lm -ltirpc' > $(pwd)/hdf4.configure.log 2>&1" + if [[ ${COMPILER_MAJOR_VERSION} -ge 10 ]]; then + cat hdf/src/Makefile | \ + sed 's/FFLAGS = -O2/FFLAGS = -w -fallow-argument-mismatch -O2/g' \ + > Makefile_new + elif [[ ${COMPILER_MAJOR_VERSION} -lt 10 ]]; then + cat hdf/src/Makefile | \ + sed 's/FFLAGS = -O2/FFLAGS = -w -Wno-argument-mismatch -O2/g' \ + > Makefile_new + fi + mv Makefile_new hdf/src/Makefile + run_cmd "make ${MAKE_ARGS} > $(pwd)/hdf4.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/hdf4.make_install.log 2>&1" +fi + +# Compile HDFEOS +# Depends on HDF +if [ $COMPILE_HDFEOS -eq 1 ]; then + echo + echo "Compiling HDFEOS at `date`" + mkdir -p ${LIB_DIR}/hdfeos + rm -rf ${LIB_DIR}/hdfeos/HDF-EOS* + tar -xzf ${TAR_DIR}/HDF-EOS*.tar.* -C ${LIB_DIR}/hdfeos + cd ${LIB_DIR}/hdfeos/hdfeos + echo "cd `pwd`" + run_cmd "./configure --prefix=${LIB_DIR} --with-hdf4=${LIB_DIR} --with-jpeg=${LIB_DIR} > $(pwd)/hdf-eos.configure.log 2>&1" + run_cmd "make ${MAKE_ARGS} > 
$(pwd)/hdf-eos.make.log 2>&1"
+  run_cmd "make ${MAKE_ARGS} install > $(pwd)/hdf-eos.make_install.log 2>&1"
+
+  cp include/*.h ${LIB_DIR}/include/
+fi
+
+# Compile NetCDF
+if [ $COMPILE_NETCDF -eq 1 ]; then
+
+  echo
+  echo "Compiling HDF5 at `date`"
+  mkdir -p ${LIB_DIR}/hdf5
+  rm -rf ${LIB_DIR}/hdf5/hdf5*
+  tar -xzf ${TAR_DIR}/hdf5*.tar.gz -C ${LIB_DIR}/hdf5
+  cd ${LIB_DIR}/hdf5/hdf5*
+  echo "cd `pwd`"
+  run_cmd "./configure --prefix=${LIB_DIR} --with-zlib=${LIB_Z} CFLAGS=-fPIC CXXFLAGS=-fPIC FFLAGS=-fPIC LDFLAGS=-L${LIB_DIR}/lib:${LIB_Z} CPPFLAGS=-I${LIB_DIR}/include > $(pwd)/hdf5.configure.log 2>&1"
+  run_cmd "make ${MAKE_ARGS} install > $(pwd)/hdf5.make_install.log 2>&1"
+
+  echo
+  echo "Compiling NetCDF-C at `date`"
+  mkdir -p ${LIB_DIR}/netcdf
+  rm -rf ${LIB_DIR}/netcdf/netcdf*
+  tar -xzf ${TAR_DIR}/netcdf-4*.tar.gz -C ${LIB_DIR}/netcdf > /dev/null 2>&1 || unzip ${TAR_DIR}/netcdf-4*.zip -d ${LIB_DIR}/netcdf
+  cd ${LIB_DIR}/netcdf/netcdf-*
+  export FC=''
+  export F90=''
+  echo "cd `pwd`"
+  run_cmd "./configure --prefix=${LIB_DIR} CFLAGS=-fPIC CXXFLAGS=-fPIC LDFLAGS=-L${LIB_DIR}/lib CPPFLAGS=-I${LIB_DIR}/include > $(pwd)/netcdf-c.configure.log 2>&1"
+  run_cmd "make ${MAKE_ARGS} install > $(pwd)/netcdf-c.make_install.log 2>&1"
+
+  echo
+  echo "Compiling NetCDF-CXX at `date`"
+  tar -xzf ${TAR_DIR}/netcdf-cxx*.tar.gz -C ${LIB_DIR}/netcdf
+  cd ${LIB_DIR}/netcdf/netcdf-cxx*
+  echo "cd `pwd`"
+  configure_lib_args=""
+  if [[ $machine == "Mac" ]]; then
+    configure_lib_args="-lnetcdf -lhdf5_hl -lhdf5 -lz"
+  fi
+  run_cmd "./configure --prefix=${LIB_DIR} LDFLAGS=-L${LIB_DIR}/lib CPPFLAGS=-I${LIB_DIR}/include LIBS=\"${LIBS} ${configure_lib_args}\" > $(pwd)/netcdf-cxx.configure.log 2>&1"
+  run_cmd "make ${MAKE_ARGS} install > $(pwd)/netcdf-cxx.make_install.log 2>&1"
+fi
+
+# Compile FREETYPE
+if [ $COMPILE_FREETYPE -eq 1 ]; then
+  echo
+  echo "Compiling FREETYPE at `date`"
+  mkdir -p ${LIB_DIR}/freetype
+  rm -rf ${LIB_DIR}/freetype/freetype*
+  tar -xzf 
${TAR_DIR}/freetype*.tar.gz -C ${LIB_DIR}/freetype + cd ${LIB_DIR}/freetype/freetype* + echo "cd `pwd`" + run_cmd "./configure --prefix=${LIB_DIR} --with-png=yes > $(pwd)/freetype.configure.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/freetype.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/freetype.make_install.log 2>&1" +fi + + +# Compile CAIRO +if [ $COMPILE_CAIRO -eq 1 ]; then + + # If on Cray, compile PIXMAN + if [ ${COMPILER_FAMILY} = "PrgEnv-intel" ]; then + echo + echo "Compiling pixman at `date`" + mkdir -p ${LIB_DIR}/pixman + rm -rf ${LIB_DIR}/pixman/pixman* + tar -xzf ${TAR_DIR}/pixman*.tar.gz -C ${LIB_DIR}/pixman + cd ${LIB_DIR}/pixman/pixman* + echo "cd `pwd`" + run_cmd "./configure --prefix=${LIB_DIR} > $(pwd)/pixman.configure.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/pixman.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/pixman.make_install.log 2>&1" + fi + + echo + echo "Compiling CAIRO at `date`" + mkdir -p ${LIB_DIR}/cairo + rm -rf ${LIB_DIR}/cairo/cairo* + tar -xf ${TAR_DIR}/cairo*.tar* -C ${LIB_DIR}/cairo + cd ${LIB_DIR}/cairo/cairo* + export PKG_CONFIG=`which pkg-config` + if [ ${COMPILER_FAMILY} = "PrgEnv-intel" ]; then + export PKG_CONFIG_PATH=${LIB_DIR}/lib/pkgconfig/ + fi + echo "cd `pwd`" + run_cmd "./configure --prefix=${LIB_DIR} ax_cv_c_float_words_bigendian=no LDFLAGS=-L${LIB_DIR}/lib CPPFLAGS=-I${LIB_DIR}/include > $(pwd)/cairo.configure.log 2>&1" + run_cmd "make ${MAKE_ARGS} > $(pwd)/cairo.make.log 2>&1" + run_cmd "make ${MAKE_ARGS} install > $(pwd)/cairo.make_install.log 2>&1" +fi + +# Compile MET +if [ $COMPILE_MET -eq 0 ]; then + echo Skipping MET compilation + echo "Finished compiling at `date`" + exit 0 +fi + +echo +echo "Compiling MET at `date`" +# If using source from a tar file remove everything and unpack the tar file +# FALSE = compiling from github repo and we don't want to overwrite the files +if [ ${USE_MET_TAR_FILE} = "TRUE" ]; then + rm -rf ${MET_DIR}/MET* + tar -xzf ${MET_TARBALL} -C 
${MET_DIR} +fi +cd ${MET_DIR}/MET* + +echo "Modifying configure" +cat configure | \ + sed 's/C11/C17/g' | \ + sed 's/c11/c17/g' | \ + sed 's/cxx11/cxx17/g' | \ + sed 's/c++11/c++17/g' | \ + sed 's/gnu11/gnu17/g' \ + > configure_new + +mv configure_new configure +chmod 755 configure + +if [ -z ${MET_BUFRLIB} ]; then + export MET_BUFRLIB=${LIB_DIR}/lib + export BUFRLIB_NAME=-lbufr_4 +fi + +if [ -z ${MET_GRIB2CLIB} ]; then + export MET_GRIB2CLIB=${LIB_DIR}/lib + export MET_GRIB2CINC=${LIB_DIR}/include + export LIB_JASPER=${LIB_DIR}/lib + export LIB_LIBPNG=${LIB_DIR}/lib + export LIB_Z=${LIB_DIR}/lib + export GRIB2CLIB_NAME=-lg2c +fi + +if [ -z ${MET_NETCDF} ]; then + export MET_NETCDF=${LIB_DIR} + export MET_HDF5=${LIB_DIR} +fi + +if [ -z ${MET_GSL} ]; then + export MET_GSL=${LIB_DIR} +fi + +if [ -z ${MET_PROJ} ]; then + export MET_PROJ=${LIB_DIR} +fi + +export MET_PYTHON_BIN_EXE=${MET_PYTHON_BIN_EXE:=${MET_PYTHON}/bin/python3} +export MET_PYTHON_LD +export MET_PYTHON_CC + +# add flags to user-defined LDFLAGS for MacOS +if [[ $machine != "Mac" ]]; then + LDFLAGS="${LDFLAGS} -Wl,--disable-new-dtags" +fi + +# https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html +# ${parameter:+word} +# If parameter is null or unset, nothing is substituted, otherwise the expansion of word is substituted. 
+ +# add LIB_DIR/lib and LIB_DIR/lib64 to rpath and -L +LDFLAGS="${LDFLAGS} -Wl,-rpath,${LIB_DIR}/lib -L${LIB_DIR}/lib -Wl,-rpath,${LIB_DIR}/lib64 -L${LIB_DIR}/lib64" + +# if variables are set, add /lib to rpath and -L +for x in $MET_CAIRO $MET_FREETYPE $MET_GSL $MET_HDF $MET_HDF5 $MET_NETCDF; do + arg="${x:+-Wl,-rpath,$x/lib -L$x/lib}" + if [[ "$LDFLAGS" != *"$arg"* ]]; then + LDFLAGS+=" $arg" + fi +done + +# if variables are set, add /lib64 to rpath and -L +for x in $MET_ATLAS $MET_BUFR $MET_ECKIT $MET_GRIB2C $MET_PROJ $LIB_JASPER; do + arg="${x:+-Wl,-rpath,$x/lib64 -L$x/lib64}" + if [[ "$LDFLAGS" != *"$arg"* ]]; then + LDFLAGS+=" $arg" + fi +done + +# if variables are set, add to rpath and -L +for x in $MET_ATLASLIB $MET_BUFRLIB $MET_CAIROLIB $MET_ECKITLIB $MET_FREETYPELIB $MET_GRIB2CLIB $MET_GSLLIB $MET_HDF5LIB $MET_HDFLIB $MET_NETCDFLIB $MET_PROJLIB $MET_PYTHON_LIB $LIB_JASPER $LIB_LIBPNG $LIB_Z $ADDTL_DIR; do + arg="${x:+-Wl,-rpath,$x -L$x}" + if [[ "$LDFLAGS" != *"$arg"* ]]; then + LDFLAGS+=" $arg" + fi +done + +export LDFLAGS + +export LIBS="${LIBS} -lhdf5_hl -lhdf5 -lz" +export MET_FONT_DIR=${TEST_BASE}/fonts + + +echo "MET Configuration settings..." +printenv | egrep "^MET_" | sed -r 's/^/export /g' +echo "LDFLAGS = ${LDFLAGS}" +export OPT_ARGS='' +if [[ $COMPILER_FAMILY == "pgi" ]]; then + export OPT_ARGS="${OPT_ARGS} FFLAGS=-lpgf90" +fi + +configure_cmd="./configure --prefix=${MET_INSTALL_DIR} --bindir=${BIN_DIR_PATH}" +configure_cmd="${configure_cmd} BUFRLIB_NAME=${BUFRLIB_NAME}" +configure_cmd="${configure_cmd} GRIB2CLIB_NAME=${GRIB2CLIB_NAME} --enable-grib2" +if [[ ! -z ${MET_FREETYPEINC} && ! -z ${MET_FREETYPELIB} && \ + ! -z ${MET_CAIROINC} && ! -z ${MET_CAIROLIB} ]]; then + configure_cmd="${configure_cmd} --enable-mode_graphics" +fi + +if [[ ! -z $MET_ECKIT && ! -z $MET_ATLAS ]]; then + configure_cmd="${configure_cmd} --enable-ugrid" +fi + +if [[ ! -z $MET_HDF && ! 
-z $MET_HDFEOS ]]; then + configure_cmd="${configure_cmd} --enable-modis --enable-lidar2nc" +fi + +if [[ ! -z ${MET_PYTHON_CC} || ! -z ${MET_PYTHON_LD} ]]; then + configure_cmd="${configure_cmd} --enable-python" +fi + +configure_cmd="${configure_cmd} ${OPT_ARGS}" + +echo "cd `pwd`" +run_cmd "${configure_cmd} > $(pwd)/configure.log 2>&1" +run_cmd "make ${MAKE_ARGS} > $(pwd)/met.make.log 2>&1" +run_cmd "make install > $(pwd)/met.make_install.log 2>&1" +run_cmd "make test > $(pwd)/met.make_test.log 2>&1" + +echo "Finished compiling at `date`" diff --git a/internal/scripts/installation/config/install_met_env.cactus b/internal/scripts/installation/config/install_met_env.cactus new file mode 100644 index 0000000000..63ac85192a --- /dev/null +++ b/internal/scripts/installation/config/install_met_env.cactus @@ -0,0 +1,50 @@ +module reset +module use /apps/dev/modulefiles/ +module load ve/evs/2.0 +module use /apps/ops/para/libs/modulefiles/compiler/intel/19.1.3.304 +module load netcdf/4.7.4 +module load hdf5/1.10.6 +module load bufr/11.5.0 +module load zlib/1.2.11 +module load jasper/2.0.25 +module load libpng/1.6.37 +module load gsl/2.7 +module load g2c/1.6.4 +module load proj/7.1.0 +module use /apps/dev/modulefiles +module load fckit/0.11.0 +module load atlas/0.35.0 +module load eckit/1.24.4 + +export FC=ifort +export F77=ifort +export F90=ifort +export CC=icc +export CXX=icpc +export TEST_BASE=/lfs/h2/users/julie.prestopnik/12.0.0-beta5 +export LIB_DIR=${TEST_BASE}/external_libs +export COMPILER=intel_19.1.3.304 +export MET_SUBDIR=${TEST_BASE} +export MET_TARBALL=v12.0.0-beta5.tar.gz +export USE_MODULES=TRUE +export ADDTL_DIR=/apps/spack/gettext/0.21/intel/19.1.3.304/at2kdo4edvuhyzrt5g6zhwrdb7bdui4s/lib64/ +export PYTHON_MODULE=python_3.10.4 +export MET_PYTHON=/apps/spack/python/3.10.4/intel/19.1.3.304/xqft4d45h4dp4xnbz2ue3nbxv65i6bgp/ +export MET_PYTHON_LIB=/apps/spack/python/3.10.4/intel/19.1.3.304/xqft4d45h4dp4xnbz2ue3nbxv65i6bgp/lib64 +export 
MET_PYTHON_CC=-I/apps/spack/python/3.10.4/intel/19.1.3.304/xqft4d45h4dp4xnbz2ue3nbxv65i6bgp/include/python3.10 +export MET_PYTHON_LD=-L/apps/spack/python/3.10.4/intel/19.1.3.304/xqft4d45h4dp4xnbz2ue3nbxv65i6bgp/lib64\ -lpython3.10\ -lintl\ -lcrypt\ -ldl\ -lutil\ -lm\ -lm +export MET_NETCDF=/apps/prod/hpc-stack/intel-19.1.3.304/netcdf/4.7.4 +export MET_HDF5=/apps/prod/hpc-stack/intel-19.1.3.304/hdf5/1.10.6 +export MET_BUFRLIB=/apps/ops/prod/libs/intel/19.1.3.304/bufr/11.5.0/lib64 +export MET_GRIB2CLIB=/apps/ops/prod/libs/intel/19.1.3.304/g2c/1.6.4/lib64 +export MET_GRIB2CINC=/apps/ops/prod/libs/intel/19.1.3.304/g2c/1.6.4/include +export MET_GSL=/apps/spack/gsl/2.7/intel/19.1.3.304/xks7dxbowrdxhjck5zxc4rompopocevb +export MET_PROJ=/apps/spack/proj/7.1.0/intel/19.1.3.304/cjbmc7tacv5qcfatslqmcrzo5kb4raaq +export MET_ATLAS=/apps/dev/atlas/install-0.35.0 +export MET_ECKIT=/apps/dev/eckit/install-1.24.4 +export BUFRLIB_NAME=-lbufr_4 +export GRIB2CLIB_NAME=-lg2c +export LIB_JASPER=/apps/spack/jasper/2.0.25/intel/19.1.3.304/sjib74krrorkyczqpqah4tvewmlnqdx4/lib64 +export LIB_LIBPNG=/apps/spack/libpng/1.6.37/intel/19.1.3.304/4ohkronuhlyherusoszzrmur5ewvlwzh/lib +export LIB_Z=/apps/spack/zlib/1.2.11/intel/19.1.3.304/hjotqkckeoyt6j6tibalwzrlfljcjtdh/lib +export MAKE_ARGS=-j diff --git a/internal/scripts/installation/config/install_met_env.wcoss2 b/internal/scripts/installation/config/install_met_env.wcoss2 index 86b73e0064..68e7ef2872 100644 --- a/internal/scripts/installation/config/install_met_env.wcoss2 +++ b/internal/scripts/installation/config/install_met_env.wcoss2 @@ -1,8 +1,7 @@ module reset -module use /apps/ops/para/libs/modulefiles/compiler/intel/19.1.3.304 -export HPC_OPT=/apps/ops/para/libs module use /apps/dev/modulefiles/ module load ve/evs/2.0 +module use /apps/ops/para/libs/modulefiles/compiler/intel/19.1.3.304 module load netcdf/4.7.4 module load hdf5/1.10.6 module load bufr/11.6.0 @@ -12,22 +11,22 @@ module load libpng/1.6.37 module load gsl/2.7 module load 
g2c/1.6.4 module load proj/7.1.0 -module use /apps/dev/lmodules/intel/19.1.3.304/ -module load atlas/0.30.0 -module load eckit/1.20.2 +module use /apps/dev/modulefiles +module load fckit/0.11.0 +module load atlas/0.35.0 +module load eckit/1.24.4 export FC=ifort export F77=ifort export F90=ifort export CC=icc export CXX=icpc -#export TEST_BASE=/apps/ops/para/libs/intel/19.1.3.304/met/12.0.0-beta3 export TEST_BASE=$(pwd) export LIB_DIR=${TEST_BASE}/external_libs export BIN_DIR_PATH=${TEST_BASE}/bin export COMPILER=intel_19.1.3.304 export MET_SUBDIR=${TEST_BASE} -export MET_TARBALL=v12.0.0-beta3.tar.gz +export MET_TARBALL=v12.0.0-beta5.tar.gz export USE_MODULES=TRUE export ADDTL_DIR=/apps/spack/gettext/0.21/intel/19.1.3.304/at2kdo4edvuhyzrt5g6zhwrdb7bdui4s/lib64 export PYTHON_MODULE=python_3.10.4 @@ -45,10 +44,10 @@ export MET_GRIB2CLIB=${g2c_ROOT}/lib64 export MET_GRIB2CINC=${G2C_INC} export MET_GSL=/apps/spack/gsl/2.7/intel/19.1.3.304/xks7dxbowrdxhjck5zxc4rompopocevb export MET_PROJ=/apps/spack/proj/7.1.0/intel/19.1.3.304/cjbmc7tacv5qcfatslqmcrzo5kb4raaq -export MET_ATLASLIB=/apps/dev/intel-19.1.3.304/cray-mpich-8.1.9/atlas/0.30.0/lib64 -export MET_ATLASINC=/apps/dev/intel-19.1.3.304/cray-mpich-8.1.9/atlas/0.30.0/include/atlas -export MET_ECKITLIB=/apps/dev/intel-19.1.3.304/cray-mpich-8.1.9/eckit/1.20.2/lib64 -export MET_ECKITINC=/apps/dev/intel-19.1.3.304/cray-mpich-8.1.9/eckit/1.20.2/include/eckit +export MET_ATLASLIB=/apps/dev/atlas/install-0.35.0/lib64 +export MET_ATLASINC=/apps/dev/atlas/install-0.35.0/include/atlas +export MET_ECKITLIB=/apps/dev/eckit/install-1.24.4/lib64 +export MET_ECKITINC=/apps/dev/eckit/install-1.24.4/include/eckit export BUFRLIB_NAME=-lbufr_4 export GRIB2CLIB_NAME=-lg2c # JY export LIB_JASPER=/apps/spack/jasper/2.0.25/intel/19.1.3.304/sjib74krrorkyczqpqah4tvewmlnqdx4/lib64 @@ -58,4 +57,3 @@ export LIB_LIBPNG=${LIBPNG_LIBDIR} # JY export LIB_Z=/apps/spack/zlib/1.2.11/intel/19.1.3.304/hjotqkckeoyt6j6tibalwzrlfljcjtdh/lib export 
LIB_Z=${ZLIB_LIBDIR} export MAKE_ARGS=-j -export CXXFLAGS="-std=c++11" diff --git a/internal/scripts/sonarqube/run_sonarqube.sh b/internal/scripts/sonarqube/run_sonarqube.sh index ac439b8a11..9b3557581a 100755 --- a/internal/scripts/sonarqube/run_sonarqube.sh +++ b/internal/scripts/sonarqube/run_sonarqube.sh @@ -109,8 +109,9 @@ run_command "git checkout ${1}" # Otherwise, the SonarQube logic does not work. export MET_DEVELOPMENT=true -# Run the configure script -run_command "./configure --prefix=`pwd` --enable-all" +# Run the configure script. +# Specify the C++ standard to limit the scope of the findings. +run_command "./configure --prefix=`pwd` --enable-all MET_CXX_STANDARD=11" # Define the version string SONAR_PROJECT_VERSION=$(grep "^version" docs/conf.py | cut -d'=' -f2 | tr -d "\'\" ") diff --git a/internal/test_unit/R_test/comp_dir.R b/internal/test_unit/R_test/comp_dir.R index a4453459c3..c94d8bae38 100644 --- a/internal/test_unit/R_test/comp_dir.R +++ b/internal/test_unit/R_test/comp_dir.R @@ -51,9 +51,9 @@ strDir1 = gsub("/$", "", listArgs[1]); strDir2 = gsub("/$", "", listArgs[2]); # build a list of files in each stat folder -listTest1 = system(paste("find", strDir1, "| egrep '\\.stat$|\\.txt$|\\.tcst|\\.nc$|\\.out$|\\.ps$|\\.png$' | sort"), intern=T); +listTest1 = system(paste("find", strDir1, "| egrep '\\.stat$|\\.txt$|\\.tcst|\\.nc$|\\.out$|\\.ps$|\\.png$|\\.dat$' | sort"), intern=T); listTest1Files = gsub(paste(strDir1, "/", sep=""), "", listTest1); -listTest2 = system(paste("find", strDir2, "| egrep '\\.stat$|\\.txt$|\\.tcst|\\.nc$|\\.out$|\\.ps$|\\.png$' | sort"), intern=T); +listTest2 = system(paste("find", strDir2, "| egrep '\\.stat$|\\.txt$|\\.tcst|\\.nc$|\\.out$|\\.ps$|\\.png$|\\.dat$' | sort"), intern=T); listTest2Files = gsub(paste(strDir2, "/", sep=""), "", listTest2); if( 1 <= verb ){ cat("dir1:", strDir1, "contains", length(listTest1Files), "files\n"); @@ -103,10 +103,11 @@ for(strFile in listTest1Files[ listTest1Files %in% listTest2Files 
]){ compareNc(strFile1, strFile2, verb, strict, file_size_delta, compare_nc_var); } - # if the files are PostScript, PNG, or end in .out, compare accordingly + # if the files are PostScript, PNG, or end in .out or .dat, compare accordingly else if( TRUE == grepl("\\.out$", strFile1, perl=T) || TRUE == grepl("\\.ps$", strFile1, perl=T) || - TRUE == grepl("\\.png$", strFile1, perl=T) ){ + TRUE == grepl("\\.png$", strFile1, perl=T) || + TRUE == grepl("\\.dat$", strFile1, perl=T) ){ if( 1 <= verb ){ cat("file1: ", strFile1, "\nfile2: ", strFile2, "\n", sep=""); } compareDiff(strFile1, strFile2, verb); } diff --git a/internal/test_unit/bin/unit_test.sh b/internal/test_unit/bin/unit_test.sh index 0f0493720f..e3bc29e181 100755 --- a/internal/test_unit/bin/unit_test.sh +++ b/internal/test_unit/bin/unit_test.sh @@ -24,14 +24,14 @@ if [[ -z "${MET_TEST_MET_PYTHON_EXE}" ]] ; then export MET_TEST_MET_PYTHON_EXE=/usr/local/python3/bin/python3 fi -PERL_UNIT_OPTS="" +UNIT_OPTS="" for arg in $@; do - [ $arg == "-memchk" -o $arg == "memchk" ] && PERL_UNIT_OPTS="$PERL_UNIT_OPTS -memchk" - [ $arg == "-callchk" -o $arg == "callchk" ] && PERL_UNIT_OPTS="$PERL_UNIT_OPTS -callchk" + [ $arg == "-memchk" -o $arg == "memchk" ] && UNIT_OPTS="$UNIT_OPTS -memchk" + [ $arg == "-callchk" -o $arg == "callchk" ] && UNIT_OPTS="$UNIT_OPTS -callchk" done # Unit test script -PERL_UNIT=${MET_TEST_BASE}/perl/unit.pl +UNIT=${MET_TEST_BASE}/python/unit.py # Unit test XML UNIT_XML="unit_ascii2nc.xml \ @@ -86,6 +86,7 @@ UNIT_XML="unit_ascii2nc.xml \ unit_climatology_1.0deg.xml \ unit_climatology_1.5deg.xml \ unit_climatology_2.5deg.xml \ + unit_climatology_mixed.xml \ unit_grib_tables.xml \ unit_grid_weight.xml \ unit_netcdf.xml \ @@ -107,15 +108,15 @@ UNIT_XML="${UNIT_XML} unit_ugrid.xml" for CUR_XML in ${UNIT_XML}; do echo - echo "CALLING: ${PERL_UNIT} $PERL_UNIT_OPTS ${MET_TEST_BASE}/xml/${CUR_XML}" + echo "CALLING: ${UNIT} $UNIT_OPTS ${MET_TEST_BASE}/xml/${CUR_XML}" echo - ${PERL_UNIT} $PERL_UNIT_OPTS 
${MET_TEST_BASE}/xml/${CUR_XML} + ${UNIT} $UNIT_OPTS ${MET_TEST_BASE}/xml/${CUR_XML} RET_VAL=$? # Fail on non-zero return status if [ ${RET_VAL} != 0 ]; then echo - echo "ERROR: ${PERL_UNIT} ${CUR_XML} failed." + echo "ERROR: ${UNIT} ${CUR_XML} failed." echo echo "*** UNIT TESTS FAILED ***" echo diff --git a/internal/test_unit/config/EnsembleStatConfig b/internal/test_unit/config/EnsembleStatConfig index 80157bed0c..59661a5fe2 100644 --- a/internal/test_unit/config/EnsembleStatConfig +++ b/internal/test_unit/config/EnsembleStatConfig @@ -226,8 +226,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; +grid_weight_flag = NONE; +point_weight_flag = NONE; + +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/EnsembleStatConfig_MASK_SID b/internal/test_unit/config/EnsembleStatConfig_MASK_SID index 96128cb148..28bd9899b3 100644 --- a/internal/test_unit/config/EnsembleStatConfig_MASK_SID +++ b/internal/test_unit/config/EnsembleStatConfig_MASK_SID @@ -218,8 +218,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; +grid_weight_flag = NONE; +point_weight_flag = NONE; + +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/EnsembleStatConfig_climo b/internal/test_unit/config/EnsembleStatConfig_climo index 633ab775f9..89abf95dd8 100644 --- a/internal/test_unit/config/EnsembleStatConfig_climo +++ b/internal/test_unit/config/EnsembleStatConfig_climo @@ -71,6 +71,7 @@ fcst = { field = [ { name = "TMP"; level = "Z2"; message_type = [ "ADPSFC" ]; }, + { name = "TMP"; level = "Z2"; 
message_type = [ "ADPSFC" ]; prob_cat_thresh = [ >275, >280, >285 ]; }, { name = "TMP"; level = "P850"; message_type = [ "ADPUPA" ]; } ]; } @@ -247,8 +248,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; +grid_weight_flag = NONE; +point_weight_flag = NONE; + +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/EnsembleStatConfig_grid_weight b/internal/test_unit/config/EnsembleStatConfig_grid_weight index 9915c3fa37..67da4d825f 100644 --- a/internal/test_unit/config/EnsembleStatConfig_grid_weight +++ b/internal/test_unit/config/EnsembleStatConfig_grid_weight @@ -15,7 +15,7 @@ model = "FCST"; // Output description to be written // May be set separately in each "obs.field" entry // -desc = "NA"; +desc = "${DESC}"; // // Output observation type to be written @@ -62,7 +62,7 @@ prob_pct_thresh = [ ==0.25 ]; nc_var_str = ""; eclv_points = 0.05; -tmp_field = [ { name = "TMP"; level = [ "Z2" ]; } ]; +tmp_field = [ { name = "TMP"; level = [ "Z2" ]; prob_cat_thresh = [ <=273, >273 ]; } ]; // // Forecast and observation fields to be verified @@ -139,6 +139,11 @@ climo_mean = { hour_interval = 6; } +climo_stdev = climo_mean; +climo_stdev = { + file_name = [ "${CLIMO_STDEV_FILE}" ]; +} + //////////////////////////////////////////////////////////////////////////////// // @@ -200,11 +205,11 @@ output_flag = { orank = NONE; ssvar = STAT; relp = STAT; - pct = NONE; - pstd = NONE; - pjc = NONE; - prc = NONE; - eclv = NONE; + pct = STAT; + pstd = STAT; + pjc = STAT; + prc = STAT; + eclv = STAT; } //////////////////////////////////////////////////////////////////////////////// @@ -235,8 +240,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = ${GRID_WEIGHT}; 
-output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; +grid_weight_flag = ${GRID_WEIGHT}; +point_weight_flag = NONE; + +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/EnsembleStatConfig_one_cdf_bin b/internal/test_unit/config/EnsembleStatConfig_one_cdf_bin index 013763c74d..3828b386e5 100644 --- a/internal/test_unit/config/EnsembleStatConfig_one_cdf_bin +++ b/internal/test_unit/config/EnsembleStatConfig_one_cdf_bin @@ -232,8 +232,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; +grid_weight_flag = NONE; +point_weight_flag = NONE; + +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/EnsembleStatConfig_point_weight b/internal/test_unit/config/EnsembleStatConfig_point_weight new file mode 100644 index 0000000000..2e9ad158b8 --- /dev/null +++ b/internal/test_unit/config/EnsembleStatConfig_point_weight @@ -0,0 +1,214 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Ensemble-Stat configuration file. +// +// For additional information, please see the MET User's Guide. 
+// +//////////////////////////////////////////////////////////////////////////////// + +// +// Output model name to be written +// +model = "FCST"; + +// +// Output description to be written +// May be set separately in each "obs.field" entry +// +desc = "${DESC}"; + +// +// Output observation type to be written +// +obtype = "ANALYS"; + +//////////////////////////////////////////////////////////////////////////////// + +// +// Verification grid +// +regrid = { + to_grid = NONE; + method = NEAREST; + width = 1; + vld_thresh = 0.5; + shape = SQUARE; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// IDs for ensemble members +// Only set if processing a single file with all ensembles +// +ens_member_ids = []; +control_id = ""; + +// +// May be set separately in each "fcst.field" and "obs.field" entry +// +censor_thresh = []; +censor_val = []; +prob_cat_thresh = []; + +// +// May be set separately in each "fcst.field" entry +// +prob_pct_thresh = [ ==0.25 ]; + +// +// May be set separately in each "obs.field" entry +// +nc_var_str = ""; +eclv_points = 0.05; + +// +// Forecast and observation fields to be verified +// +fcst = { + ens_thresh = 0.75; + vld_thresh = 1.0; + message_type = [ "ADPSFC" ]; + obs_quality_inc = []; + obs_quality_exc = []; + + field = [ + { name = "APCP"; level = "A24"; prob_cat_thresh = [ >=2.54 ]; } + ]; +} +obs = fcst; + +//////////////////////////////////////////////////////////////////////////////// + +// +// Point observation filtering options +// May be set separately in each "obs.field" entry +// +message_type = []; +sid_inc = []; +sid_exc = []; +obs_thresh = [ NA ]; +obs_quality_inc = []; +obs_quality_exc = []; +duplicate_flag = NONE; +obs_summary = NONE; +obs_perc_value = 50; +skip_const = FALSE; + +// +// Observation error options +// Set dist_type to NONE to use the observation error table instead +// May be set separately in each "obs.field" entry +// +obs_error = { + flag = FALSE; + 
dist_type = NONE; + dist_parm = []; + inst_bias_scale = 1.0; + inst_bias_offset = 0.0; + min = NA; + max = NA; +} + +// +// Ensemble bin sizes +// May be set separately in each "obs.field" entry +// +ens_ssvar_bin_size = 1.0; +ens_phist_bin_size = 0.05; + +//////////////////////////////////////////////////////////////////////////////// + +// +// Point observation time window +// +obs_window = { + beg = -5400; + end = 5400; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Verification masking regions +// +mask = { + grid = []; + poly = []; + sid = [ "${CONFIG_DIR}/SID_CONUS_ADPSFC_ELEV.txt" ]; + llpnt = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Confidence interval settings +// +ci_alpha = [ 0.05 ]; + +//////////////////////////////////////////////////////////////////////////////// + +// +// Interpolation methods +// +interp = { + field = BOTH; + vld_thresh = 1.0; + shape = SQUARE; + + type = [ + { + method = NEAREST; + width = 1; + } + ]; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Statistical output types +// May be set separately in each "obs.field" entry +// +output_flag = { + ecnt = NONE; + rps = NONE; + rhist = NONE; + phist = NONE; + orank = NONE; + ssvar = NONE; + relp = NONE; + pct = STAT; + pstd = STAT; + pjc = STAT; + prc = STAT; + eclv = STAT; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Gridded verification output types +// May be set separately in each "obs.field" entry +// +nc_orank_flag = FALSE; + +//////////////////////////////////////////////////////////////////////////////// + +// +// Random number generator +// +rng = { + type = "mt19937"; + seed = "1"; +} + +//////////////////////////////////////////////////////////////////////////////// + +grid_weight_flag = NONE; +point_weight_flag = ${POINT_WEIGHT}; + +output_prefix = 
"${OUTPUT_PREFIX}"; +version = "V12.0.0"; + +//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/EnsembleStatConfig_python b/internal/test_unit/config/EnsembleStatConfig_python index 4de2045664..1b32091705 100644 --- a/internal/test_unit/config/EnsembleStatConfig_python +++ b/internal/test_unit/config/EnsembleStatConfig_python @@ -119,6 +119,8 @@ message_type_group_map = [ { key = "ONLYSF"; val = "ADPSFC,SFCSHP"; } ]; +obtype_as_group_val_flag = FALSE; + // // Ensemble bin sizes // May be set separately in each "obs.field" entry @@ -222,8 +224,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; +grid_weight_flag = NONE; +point_weight_flag = NONE; + +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/EnsembleStatConfig_qty_inc_exc b/internal/test_unit/config/EnsembleStatConfig_qty_inc_exc index 0881ff3b8d..0a0bf9b074 100644 --- a/internal/test_unit/config/EnsembleStatConfig_qty_inc_exc +++ b/internal/test_unit/config/EnsembleStatConfig_qty_inc_exc @@ -235,8 +235,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; +grid_weight_flag = NONE; +point_weight_flag = NONE; + +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/EnsembleStatConfig_rps_climo_bin_prob b/internal/test_unit/config/EnsembleStatConfig_rps_climo_bin_prob index af4354bfea..ea1023dc36 100644 --- a/internal/test_unit/config/EnsembleStatConfig_rps_climo_bin_prob +++ b/internal/test_unit/config/EnsembleStatConfig_rps_climo_bin_prob @@ 
-216,8 +216,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; +grid_weight_flag = NONE; +point_weight_flag = NONE; + +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/EnsembleStatConfig_single_file_grib b/internal/test_unit/config/EnsembleStatConfig_single_file_grib index 20220f82b5..ba1ec381a2 100644 --- a/internal/test_unit/config/EnsembleStatConfig_single_file_grib +++ b/internal/test_unit/config/EnsembleStatConfig_single_file_grib @@ -131,6 +131,8 @@ message_type_group_map = [ { key = "ONLYSF"; val = "ADPSFC,SFCSHP"; } ]; +obtype_as_group_val_flag = FALSE; + // // Ensemble bin sizes // May be set separately in each "obs.field" entry @@ -271,8 +273,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; +grid_weight_flag = NONE; +point_weight_flag = NONE; + +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/EnsembleStatConfig_single_file_nc b/internal/test_unit/config/EnsembleStatConfig_single_file_nc index b3faf78d95..f9d18a2129 100644 --- a/internal/test_unit/config/EnsembleStatConfig_single_file_nc +++ b/internal/test_unit/config/EnsembleStatConfig_single_file_nc @@ -137,6 +137,8 @@ message_type_group_map = [ { key = "ONLYSF"; val = "ADPSFC,SFCSHP"; } ]; +obtype_as_group_val_flag = FALSE; + // // Ensemble bin sizes // May be set separately in each "obs.field" entry @@ -277,8 +279,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; 
+grid_weight_flag = NONE; +point_weight_flag = NONE; + +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GenEnsProdConfig b/internal/test_unit/config/GenEnsProdConfig index 9a565c2fb4..9841006614 100644 --- a/internal/test_unit/config/GenEnsProdConfig +++ b/internal/test_unit/config/GenEnsProdConfig @@ -13,7 +13,6 @@ model = "FCST"; // // Output description to be written -// May be set separately in each "obs.field" entry // desc = "NA"; @@ -60,17 +59,17 @@ ens = { { name = "UGRD"; level = [ "Z10" ]; - cat_thresh = [ CDP75 ]; + cat_thresh = [ >OCDP75 ]; }, { name = "WIND"; level = [ "Z10" ]; - cat_thresh = [ >=CDP25&&<=CDP75 ]; + cat_thresh = [ >=OCDP25&&<=OCDP75 ]; } ]; } diff --git a/internal/test_unit/config/GenEnsProdConfig_climo_anom_ens_member_id b/internal/test_unit/config/GenEnsProdConfig_climo_anom_ens_member_id index adebdb2528..440b528326 100644 --- a/internal/test_unit/config/GenEnsProdConfig_climo_anom_ens_member_id +++ b/internal/test_unit/config/GenEnsProdConfig_climo_anom_ens_member_id @@ -13,7 +13,6 @@ model = "CFSv2"; // // Output description to be written -// May be set separately in each "obs.field" entry // desc = "NA"; diff --git a/internal/test_unit/config/GenEnsProdConfig_normalize b/internal/test_unit/config/GenEnsProdConfig_normalize index b23708ab46..192c75cb5b 100644 --- a/internal/test_unit/config/GenEnsProdConfig_normalize +++ b/internal/test_unit/config/GenEnsProdConfig_normalize @@ -13,7 +13,6 @@ model = "FCST"; // // Output description to be written -// May be set separately in each "obs.field" entry // desc = "NA"; diff --git a/internal/test_unit/config/GenEnsProdConfig_single_file_grib b/internal/test_unit/config/GenEnsProdConfig_single_file_grib index b1f2bb3315..82f31da619 100644 --- a/internal/test_unit/config/GenEnsProdConfig_single_file_grib +++ 
b/internal/test_unit/config/GenEnsProdConfig_single_file_grib @@ -13,7 +13,6 @@ model = "GEFS"; // // Output description to be written -// May be set separately in each "obs.field" entry // desc = "NA"; diff --git a/internal/test_unit/config/GenEnsProdConfig_single_file_nc b/internal/test_unit/config/GenEnsProdConfig_single_file_nc index 2b4be6e12b..9d84b2bcbc 100644 --- a/internal/test_unit/config/GenEnsProdConfig_single_file_nc +++ b/internal/test_unit/config/GenEnsProdConfig_single_file_nc @@ -13,7 +13,6 @@ model = "CFSv2"; // // Output description to be written -// May be set separately in each "obs.field" entry // desc = "NA"; diff --git a/internal/test_unit/config/GridStatConfig_APCP_regrid b/internal/test_unit/config/GridStatConfig_APCP_regrid index 74bbba5b4d..aea9adeaef 100644 --- a/internal/test_unit/config/GridStatConfig_APCP_regrid +++ b/internal/test_unit/config/GridStatConfig_APCP_regrid @@ -208,8 +208,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_GRIB_lvl_typ_val b/internal/test_unit/config/GridStatConfig_GRIB_lvl_typ_val index 64381023a8..ecce9ddfdf 100644 --- a/internal/test_unit/config/GridStatConfig_GRIB_lvl_typ_val +++ b/internal/test_unit/config/GridStatConfig_GRIB_lvl_typ_val @@ -305,8 +305,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "GRIB_lvl_typ_val"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "GRIB_lvl_typ_val"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git 
a/internal/test_unit/config/GridStatConfig_GRIB_set_attr b/internal/test_unit/config/GridStatConfig_GRIB_set_attr index cb2f86984c..36ab040a33 100644 --- a/internal/test_unit/config/GridStatConfig_GRIB_set_attr +++ b/internal/test_unit/config/GridStatConfig_GRIB_set_attr @@ -237,8 +237,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "GRIB_set_attr"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "GRIB_set_attr"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_GTG_latlon b/internal/test_unit/config/GridStatConfig_GTG_latlon index 90be6f2172..77edd743db 100644 --- a/internal/test_unit/config/GridStatConfig_GTG_latlon +++ b/internal/test_unit/config/GridStatConfig_GTG_latlon @@ -216,8 +216,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_GTG_lc b/internal/test_unit/config/GridStatConfig_GTG_lc index ddd8321fdd..0385bfda9e 100644 --- a/internal/test_unit/config/GridStatConfig_GTG_lc +++ b/internal/test_unit/config/GridStatConfig_GTG_lc @@ -216,8 +216,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git 
a/internal/test_unit/config/GridStatConfig_SEEPS b/internal/test_unit/config/GridStatConfig_SEEPS index 8a23c76c45..808e5a10f3 100644 --- a/internal/test_unit/config/GridStatConfig_SEEPS +++ b/internal/test_unit/config/GridStatConfig_SEEPS @@ -210,12 +210,14 @@ nc_pairs_flag = { // Threshold for SEEPS p1 (Probability of being dry) seeps_p1_thresh = ${SEEPS_P1_THRESH}; +seeps_grid_climo_name = "${SEEPS_GRID_CLIMO_NAME}"; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_WRF_pres b/internal/test_unit/config/GridStatConfig_WRF_pres index 10d46e8f94..98851dfa7b 100644 --- a/internal/test_unit/config/GridStatConfig_WRF_pres +++ b/internal/test_unit/config/GridStatConfig_WRF_pres @@ -281,8 +281,9 @@ ugrid_coordinates_file = ""; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_apply_mask b/internal/test_unit/config/GridStatConfig_apply_mask index 77af6251c0..041cae792d 100644 --- a/internal/test_unit/config/GridStatConfig_apply_mask +++ b/internal/test_unit/config/GridStatConfig_apply_mask @@ -217,8 +217,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = 
"V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_climo_FCST_NCEP_1.0DEG_OBS_WMO_1.5DEG b/internal/test_unit/config/GridStatConfig_climo_FCST_NCEP_1.0DEG_OBS_WMO_1.5DEG new file mode 100644 index 0000000000..8eab6dccb0 --- /dev/null +++ b/internal/test_unit/config/GridStatConfig_climo_FCST_NCEP_1.0DEG_OBS_WMO_1.5DEG @@ -0,0 +1,292 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Grid-Stat configuration file. +// +// For additional information, please see the MET User's Guide. +// +//////////////////////////////////////////////////////////////////////////////// + +// +// Output model name to be written +// +model = "GFS"; + +// +// Output description to be written +// May be set separately in each "obs.field" entry +// +desc = "NA"; + +// +// Output observation type to be written +// +obtype = "GFSANL"; + +//////////////////////////////////////////////////////////////////////////////// + +// +// Verification grid +// +regrid = { + to_grid = "${OBS_CLIMO_DIR}/mslp_mean.grib"; + method = BILIN; + width = 2; + vld_thresh = 0.5; + shape = SQUARE; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// May be set separately in each "field" entry +// +censor_thresh = []; +censor_val = []; +mpr_column = []; +mpr_thresh = []; +cat_thresh = []; +cnt_thresh = [ NA ]; +cnt_logic = UNION; +wind_thresh = [ NA ]; +wind_logic = UNION; +eclv_points = 0.05; +nc_pairs_var_name = ""; +nc_pairs_var_suffix = ""; +hss_ec_value = NA; +rank_corr_flag = FALSE; + +// +// Forecast and observation fields to be verified +// + +field_list = [ + { name = "TMP"; level = [ "P500" ]; }, + { name = "UGRD"; level = [ "P500" ]; }, + { name = "VGRD"; level = [ "P500" ]; }, + { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >FCDP75, >OCDP75 ]; }, + { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >FCDP75, >OCDP75 ]; }, + 
{ name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >FCDP75, >OCDP75 ]; } +]; + +fcst = { + + field = field_list; + + climo_mean = { + field = field_list; + file_name = [ "${FCST_CLIMO_DIR}/cmean_1d.19590410" ]; + + regrid = { + method = BILIN; + width = 2; + vld_thresh = 0.5; + shape = SQUARE; + } + + time_interp_method = DW_MEAN; + day_interval = 1; + hour_interval = 6; + }; + + climo_stdev = climo_mean; + climo_stdev = { + file_name = [ "${FCST_CLIMO_DIR}/cstdv_1d.19590410" ]; + }; + +} + +obs = { + + field = field_list; + + climo_mean = { + field = field_list; + file_name = [ "${OBS_CLIMO_DIR}/t500hPa_mean.grib", + "${OBS_CLIMO_DIR}/t850hPa_mean.grib", + "${OBS_CLIMO_DIR}/u500hPa_mean.grib", + "${OBS_CLIMO_DIR}/u850hPa_mean.grib", + "${OBS_CLIMO_DIR}/v500hPa_mean.grib", + "${OBS_CLIMO_DIR}/v850hPa_mean.grib" ]; + regrid = { + method = BILIN; + width = 2; + vld_thresh = 0.5; + shape = SQUARE; + } + + time_interp_method = DW_MEAN; + day_interval = 1; + hour_interval = 12; + }; + + climo_stdev = climo_mean; + climo_stdev = { + file_name = [ "${OBS_CLIMO_DIR}/t850hPa_stdev.grib", + "${OBS_CLIMO_DIR}/u850hPa_stdev.grib", + "${OBS_CLIMO_DIR}/v850hPa_stdev.grib" ]; + }; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// May be set separately in each "obs.field" entry +// +climo_cdf = { + cdf_bins = 1; + center_bins = TRUE; + write_bins = FALSE; + direct_prob = FALSE; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Verification masking regions +// +mask = { + grid = [ "FULL" ]; + poly = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Confidence interval settings +// +ci_alpha = [ 0.05 ]; + +boot = { + interval = PCTILE; + rep_prop = 1.0; + n_rep = 0; + rng = "mt19937"; + seed = ""; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Data smoothing methods +// +interp = { + field 
= BOTH; + vld_thresh = 1.0; + shape = SQUARE; + + type = [ + { + method = NEAREST; + width = 1; + } + ]; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Neighborhood methods +// +nbrhd = { + width = [ 1 ]; + cov_thresh = [ >=0.5 ]; + vld_thresh = 1.0; + shape = SQUARE; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Fourier decomposition +// +fourier = { + wave_1d_beg = []; + wave_1d_end = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Gradient statistics +// May be set separately in each "obs.field" entry +// +gradient = { + dx = []; + dy = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Distance Map statistics +// May be set separately in each "obs.field" entry +// +distance_map = { + baddeley_p = 2; + baddeley_max_dist = NA; + fom_alpha = 0.1; + zhu_weight = 0.5; + beta_value(n) = n * n / 2.0; +} + +//////////////////////////////////////////////////////////////////////////////// + +// +// Statistical output types +// +output_flag = { + fho = NONE; + ctc = NONE; + cts = NONE; + mctc = NONE; + mcts = NONE; + cnt = STAT; + sl1l2 = STAT; + sal1l2 = STAT; + vl1l2 = STAT; + val1l2 = STAT; + vcnt = STAT; + pct = NONE; + pstd = NONE; + pjc = NONE; + prc = NONE; + eclv = NONE; + nbrctc = NONE; + nbrcts = NONE; + nbrcnt = NONE; + grad = NONE; + dmap = NONE; + seeps = NONE; +} + +// +// NetCDF matched pairs output file +// +nc_pairs_flag = { + latlon = TRUE; + raw = TRUE; + diff = TRUE; + climo = TRUE; + climo_cdp = TRUE; + weight = TRUE; + nbrhd = FALSE; + fourier = FALSE; + gradient = FALSE; + distance_map = FALSE; + apply_mask = FALSE; +} + +//////////////////////////////////////////////////////////////////////////////// +// Threshold for SEEPS p1 (Probability of being dry) + +seeps_p1_thresh = NA; + 
+//////////////////////////////////////////////////////////////////////////////// + +grid_weight_flag = COS_LAT; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; + +//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_climo_WMO b/internal/test_unit/config/GridStatConfig_climo_WMO index 58f184cde0..5e37820412 100644 --- a/internal/test_unit/config/GridStatConfig_climo_WMO +++ b/internal/test_unit/config/GridStatConfig_climo_WMO @@ -64,25 +64,25 @@ fcst = { { name = "UGRD"; level = [ "P500" ]; }, { name = "VGRD"; level = [ "P500" ]; }, { name = "WIND"; level = [ "P500" ]; }, - { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; } + { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; } ]; } obs = { - cnt_thresh = [ NA, =CDP50, ==CDP25 ]; + cnt_thresh = [ NA, =OCDP50, ==OCDP25 ]; field = [ { name = "TMP"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "UGRD"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "VGRD"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "WIND"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, - { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; } + { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + 
{ name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; } ]; } @@ -278,8 +278,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = COS_LAT; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_climo_prob b/internal/test_unit/config/GridStatConfig_climo_prob index 3f00164d86..a1d35ed9e2 100644 --- a/internal/test_unit/config/GridStatConfig_climo_prob +++ b/internal/test_unit/config/GridStatConfig_climo_prob @@ -288,8 +288,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = COS_LAT; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_climo_wrap_year b/internal/test_unit/config/GridStatConfig_climo_wrap_year index beacc6825d..d0a6296ac8 100644 --- a/internal/test_unit/config/GridStatConfig_climo_wrap_year +++ b/internal/test_unit/config/GridStatConfig_climo_wrap_year @@ -259,8 +259,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git 
a/internal/test_unit/config/GridStatConfig_fbias_perc_thresh b/internal/test_unit/config/GridStatConfig_fbias_perc_thresh index 9d9d544afb..cb8c1ba195 100644 --- a/internal/test_unit/config/GridStatConfig_fbias_perc_thresh +++ b/internal/test_unit/config/GridStatConfig_fbias_perc_thresh @@ -203,8 +203,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_fourier b/internal/test_unit/config/GridStatConfig_fourier index 3ec891b8dd..c1ae46bb99 100644 --- a/internal/test_unit/config/GridStatConfig_fourier +++ b/internal/test_unit/config/GridStatConfig_fourier @@ -243,8 +243,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_gen_ens_prod b/internal/test_unit/config/GridStatConfig_gen_ens_prod index 8893c2d5ff..653f178a52 100644 --- a/internal/test_unit/config/GridStatConfig_gen_ens_prod +++ b/internal/test_unit/config/GridStatConfig_gen_ens_prod @@ -66,9 +66,9 @@ fcst = { prob = TRUE; field = [ - { name = "UGRD_Z10_ENS_FREQ_ltCDP25"; }, - { name = "UGRD_Z10_ENS_NEP_ltCDP25_NBRHD25"; }, - { name = "UGRD_Z10_ENS_NMEP_ltCDP25_NBRHD25_GAUSSIAN1"; } + { name = "UGRD_Z10_ENS_FREQ_ltOCDP25"; }, + { name = "UGRD_Z10_ENS_NEP_ltOCDP25_NBRHD25"; }, + { name = "UGRD_Z10_ENS_NMEP_ltOCDP25_NBRHD25_GAUSSIAN1"; } ]; } @@ -76,7 +76,7 @@ obs = { name = 
"UGRD"; level = "Z10"; - cat_thresh = 245, >255 ]; } ]; // // Forecast and observation fields to be verified @@ -179,11 +179,11 @@ distance_map = { // Statistical output types // output_flag = { - fho = NONE; - ctc = NONE; - cts = NONE; - mctc = NONE; - mcts = NONE; + fho = NONE; + ctc = STAT; + cts = STAT; + mctc = STAT; + mcts = STAT; cnt = STAT; sl1l2 = STAT; sal1l2 = STAT; @@ -228,8 +228,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = ${GRID_WEIGHT}; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_interp_shape b/internal/test_unit/config/GridStatConfig_interp_shape index ad250e9802..ac3179cf94 100644 --- a/internal/test_unit/config/GridStatConfig_interp_shape +++ b/internal/test_unit/config/GridStatConfig_interp_shape @@ -211,8 +211,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_mpr_thresh b/internal/test_unit/config/GridStatConfig_mpr_thresh index da1edb7870..54ea06965f 100644 --- a/internal/test_unit/config/GridStatConfig_mpr_thresh +++ b/internal/test_unit/config/GridStatConfig_mpr_thresh @@ -80,15 +80,15 @@ fcst = { nc_pairs_var_suffix = desc; }, { - mpr_column = [ "ABS(OBS-CLIMO_MEAN)" ]; + mpr_column = [ "ABS(OBS-OBS_CLIMO_MEAN)" ]; mpr_thresh = [ <=5 ]; desc = "ABS_OBS_CLIMO_MEAN_DIFF"; nc_pairs_var_suffix = desc; }, { - mpr_column = [ "CLIMO_CDF" ]; + mpr_column = 
[ "OBS_CLIMO_CDF" ]; mpr_thresh = [ >=0.25&&<=0.75 ]; - desc = "CLIMO_CDF_IQR"; + desc = "OBS_CLIMO_CDF_IQR"; nc_pairs_var_suffix = desc; } ]; @@ -276,8 +276,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = COS_LAT; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_no_leap b/internal/test_unit/config/GridStatConfig_no_leap index 32d05b0862..f22c8e3008 100644 --- a/internal/test_unit/config/GridStatConfig_no_leap +++ b/internal/test_unit/config/GridStatConfig_no_leap @@ -217,8 +217,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_prob_as_scalar b/internal/test_unit/config/GridStatConfig_prob_as_scalar index 0d5bdd25cb..2b8b91a0c4 100644 --- a/internal/test_unit/config/GridStatConfig_prob_as_scalar +++ b/internal/test_unit/config/GridStatConfig_prob_as_scalar @@ -238,8 +238,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_python b/internal/test_unit/config/GridStatConfig_python index 7ca801748e..14021bd14d 100644 
--- a/internal/test_unit/config/GridStatConfig_python +++ b/internal/test_unit/config/GridStatConfig_python @@ -218,8 +218,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "python"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "python"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_python_mixed b/internal/test_unit/config/GridStatConfig_python_mixed index 9a39bc9533..b4c9feff07 100644 --- a/internal/test_unit/config/GridStatConfig_python_mixed +++ b/internal/test_unit/config/GridStatConfig_python_mixed @@ -222,8 +222,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "python_mixed"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "python_mixed"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_rtma b/internal/test_unit/config/GridStatConfig_rtma index ddef797d2e..fea47945db 100644 --- a/internal/test_unit/config/GridStatConfig_rtma +++ b/internal/test_unit/config/GridStatConfig_rtma @@ -218,8 +218,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_rtma_perc_thresh b/internal/test_unit/config/GridStatConfig_rtma_perc_thresh index 44ed5f564f..f468a9d1ba 100644 --- a/internal/test_unit/config/GridStatConfig_rtma_perc_thresh 
+++ b/internal/test_unit/config/GridStatConfig_rtma_perc_thresh @@ -221,8 +221,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_st4 b/internal/test_unit/config/GridStatConfig_st4 index 71d165e98a..1a55fc2cca 100644 --- a/internal/test_unit/config/GridStatConfig_st4 +++ b/internal/test_unit/config/GridStatConfig_st4 @@ -222,8 +222,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_st4_censor b/internal/test_unit/config/GridStatConfig_st4_censor index e1e9adb1df..bfc90cad92 100644 --- a/internal/test_unit/config/GridStatConfig_st4_censor +++ b/internal/test_unit/config/GridStatConfig_st4_censor @@ -231,8 +231,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_ugrid_mpas b/internal/test_unit/config/GridStatConfig_ugrid_mpas index d54b25de53..e61b0c2611 100644 --- a/internal/test_unit/config/GridStatConfig_ugrid_mpas +++ 
b/internal/test_unit/config/GridStatConfig_ugrid_mpas @@ -255,8 +255,9 @@ ugrid_coordinates_file = "${MET_TEST_INPUT}/ugrid_data/mpas/static.40962_reduced //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/GridStatConfig_ugrid_mpas_diag b/internal/test_unit/config/GridStatConfig_ugrid_mpas_diag index 9e479d0302..0655fd1aa9 100644 --- a/internal/test_unit/config/GridStatConfig_ugrid_mpas_diag +++ b/internal/test_unit/config/GridStatConfig_ugrid_mpas_diag @@ -256,8 +256,9 @@ ugrid_coordinates_file = "${MET_TEST_INPUT}/ugrid_data/mpas/static.40962_reduced //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/Point2GridConfig_SST b/internal/test_unit/config/Point2GridConfig_SST new file mode 100644 index 0000000000..d9a658780c --- /dev/null +++ b/internal/test_unit/config/Point2GridConfig_SST @@ -0,0 +1,7 @@ +file_type = NETCDF_NCCF; + +var_name_map = [ + { key = "lat_vname"; val = "yh"; }, + { key = "lon_vname"; val = "xh"; } +]; + diff --git a/internal/test_unit/config/Point2GridConfig_WINDS b/internal/test_unit/config/Point2GridConfig_WINDS new file mode 100644 index 0000000000..d39221adbe --- /dev/null +++ b/internal/test_unit/config/Point2GridConfig_WINDS @@ -0,0 +1,2 @@ +obs_quality_inc = [ "2" ]; +obs_quality_exc = [ "1" ]; diff --git a/internal/test_unit/config/Point2GridConfig_lat_lon 
b/internal/test_unit/config/Point2GridConfig_lat_lon new file mode 100644 index 0000000000..80eee8fef6 --- /dev/null +++ b/internal/test_unit/config/Point2GridConfig_lat_lon @@ -0,0 +1,4 @@ +var_name_map = [ + { key = "lat_vname"; val = "NLAT"; }, + { key = "lon_vname"; val = "NLON"; } +]; diff --git a/internal/test_unit/config/Point2GridConfig_tlat_tlon b/internal/test_unit/config/Point2GridConfig_tlat_tlon new file mode 100644 index 0000000000..66f2f854b4 --- /dev/null +++ b/internal/test_unit/config/Point2GridConfig_tlat_tlon @@ -0,0 +1,6 @@ +file_type = NETCDF_NCCF; + +var_name_map = [ + { key = "lat_vname"; val = "TLAT"; }, + { key = "lon_vname"; val = "TLON"; } +]; diff --git a/internal/test_unit/config/Point2GridConfig_valid_time b/internal/test_unit/config/Point2GridConfig_valid_time index 748da58b4f..a2a1a69a0a 100644 --- a/internal/test_unit/config/Point2GridConfig_valid_time +++ b/internal/test_unit/config/Point2GridConfig_valid_time @@ -22,11 +22,12 @@ valid_time = "20201022_173000"; //////////////////////////////////////////////////////////////////////////////// - // -// Observation message type +// Point observation filtering options // -//message_type = []; +message_type = []; +obs_quality_inc = []; +obs_quality_exc = []; //////////////////////////////////////////////////////////////////////////////// @@ -72,10 +73,6 @@ valid_time = "20201022_173000"; //////////////////////////////////////////////////////////////////////////////// -//quality_mark_thresh = 2; - -//////////////////////////////////////////////////////////////////////////////// - tmp_dir = "/tmp"; version = "V12.0.0"; diff --git a/internal/test_unit/config/PointStatConfig_APCP b/internal/test_unit/config/PointStatConfig_APCP index a6a6f06d64..ca381a89d4 100644 --- a/internal/test_unit/config/PointStatConfig_APCP +++ b/internal/test_unit/config/PointStatConfig_APCP @@ -127,21 +127,19 @@ output_flag = { rps = NONE; eclv = BOTH; mpr = NONE; - seeps = ${SEEPS_FLAG}; - seeps_mpr = 
${SEEPS_FLAG}; + seeps = NONE; + seeps_mpr = NONE; } -//////////////////////////////////////////////////////////////////////////////// -// Threshold for SEEPS p1 (Probability of being dry) - -seeps_p1_thresh = ${SEEPS_P1_THRESH}; - //////////////////////////////////////////////////////////////////////////////// duplicate_flag = NONE; rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_APCP_HIRA b/internal/test_unit/config/PointStatConfig_APCP_HIRA index 3dc823a50b..4e965bd716 100644 --- a/internal/test_unit/config/PointStatConfig_APCP_HIRA +++ b/internal/test_unit/config/PointStatConfig_APCP_HIRA @@ -142,8 +142,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_GTG_latlon b/internal/test_unit/config/PointStatConfig_GTG_latlon index 17aba08c1f..ae84d237fd 100644 --- a/internal/test_unit/config/PointStatConfig_GTG_latlon +++ b/internal/test_unit/config/PointStatConfig_GTG_latlon @@ -161,8 +161,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_GTG_lc 
b/internal/test_unit/config/PointStatConfig_GTG_lc index d45c47f618..198a40dc2f 100644 --- a/internal/test_unit/config/PointStatConfig_GTG_lc +++ b/internal/test_unit/config/PointStatConfig_GTG_lc @@ -169,8 +169,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_INTERP_OPTS b/internal/test_unit/config/PointStatConfig_INTERP_OPTS index 43eb1f0708..e0e75a5b9b 100644 --- a/internal/test_unit/config/PointStatConfig_INTERP_OPTS +++ b/internal/test_unit/config/PointStatConfig_INTERP_OPTS @@ -152,8 +152,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_LAND_TOPO_MASK b/internal/test_unit/config/PointStatConfig_LAND_TOPO_MASK index 48a04ed227..49cbf781dd 100644 --- a/internal/test_unit/config/PointStatConfig_LAND_TOPO_MASK +++ b/internal/test_unit/config/PointStatConfig_LAND_TOPO_MASK @@ -81,6 +81,8 @@ message_type_group_map = [ { key = "WATERSF"; val = "SFCSHP"; } ]; +obtype_as_group_val_flag = FALSE; + //////////////////////////////////////////////////////////////////////////////// climo_mean = fcst; @@ -192,8 +194,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; 
//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_MASK_SID b/internal/test_unit/config/PointStatConfig_MASK_SID index a77a8f9008..253f7320a1 100644 --- a/internal/test_unit/config/PointStatConfig_MASK_SID +++ b/internal/test_unit/config/PointStatConfig_MASK_SID @@ -147,8 +147,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_MPR_OBTYPE b/internal/test_unit/config/PointStatConfig_MPR_OBTYPE new file mode 100644 index 0000000000..6aa68e9842 --- /dev/null +++ b/internal/test_unit/config/PointStatConfig_MPR_OBTYPE @@ -0,0 +1,166 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Point-Stat configuration file. +// +// For additional information, please see the MET User's Guide. +// +//////////////////////////////////////////////////////////////////////////////// + +model = "FCST"; + +// +// Output description to be written +// May be set separately in each "obs.field" entry +// +desc = "NA"; + +//////////////////////////////////////////////////////////////////////////////// + +regrid = { + to_grid = NONE; + method = NEAREST; + width = 1; +} + +//////////////////////////////////////////////////////////////////////////////// + +obs_window = { + beg = ${BEG_DS}; + end = ${END_DS}; +} + +//////////////////////////////////////////////////////////////////////////////// + +mpr_column = []; +mpr_thresh = []; +cnt_thresh = [ NA ]; +cnt_logic = UNION; +wind_thresh = [ NA ]; +wind_logic = UNION; +eclv_points = 0.05; + +// +// Mapping of message type group name to comma-separated list of values. 
+// +message_type_group_map = [ + { key = "SURFACE"; val = "ADPSFC,SFCSHP,MSONET"; }, + { key = "ANYAIR"; val = "AIRCAR,AIRCFT"; }, + { key = "ANYSFC"; val = "ADPSFC,SFCSHP,ADPUPA,PROFLR,MSONET"; }, + { key = "ONLYSF"; val = "ADPSFC,SFCSHP"; }, + { key = "LANDSF"; val = "ADPSFC,MSONET"; }, + { key = "WATERSF"; val = "SFCSHP"; } +]; + +obtype_as_group_val_flag = TRUE; + +fcst = { + sid_inc = []; + sid_exc = []; + obs_quality_inc = []; + obs_quality_exc = []; + + message_type = [ "SURFACE" ]; + + field = [ + { name = "TMP"; level = "Z2"; } + ]; + +} +obs = fcst; + +//////////////////////////////////////////////////////////////////////////////// + +climo_mean = fcst; +climo_mean = { + file_name = [ ${CLIMO_FILE} ]; +} + +//////////////////////////////////////////////////////////////////////////////// + +mask = { + grid = [ "FULL" ]; + poly = []; + sid = []; + llpnt = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +ci_alpha = [ 0.05 ]; + +boot = { + interval = PCTILE; + rep_prop = 1.0; + n_rep = 200; + rng = "mt19937"; + seed = "1"; +} + +//////////////////////////////////////////////////////////////////////////////// + +interp = { + vld_thresh = 1.0; + + type = [ + { + method = NEAREST; + width = 1; + } + ]; +} + +//////////////////////////////////////////////////////////////////////////////// + +hira = { + flag = TRUE; + width = [ 3 ]; + vld_thresh = 1.0; + cov_thresh = [ ==0.25 ]; + shape = SQUARE; + prob_cat_thresh = [ >273 ]; +} + +//////////////////////////////////////////////////////////////////////////////// + +output_flag = { + fho = NONE; + ctc = NONE; + cts = NONE; + mctc = NONE; + mcts = NONE; + cnt = STAT; + sl1l2 = STAT; + sal1l2 = STAT; + vl1l2 = NONE; + val1l2 = NONE; + vcnt = NONE; + pct = STAT; + pstd = NONE; + pjc = NONE; + prc = NONE; + ecnt = STAT; + rps = NONE; + orank = STAT; + eclv = NONE; + mpr = STAT; + seeps = NONE; + seeps_mpr = NONE; +} + 
+//////////////////////////////////////////////////////////////////////////////// +// Threshold for SEEPS p1 (Probability of being dry) + +seeps_p1_thresh = NA; + +//////////////////////////////////////////////////////////////////////////////// + +duplicate_flag = NONE; +rank_corr_flag = TRUE; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; + +//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_PHYS b/internal/test_unit/config/PointStatConfig_PHYS index 3c13262a71..6f3308f785 100644 --- a/internal/test_unit/config/PointStatConfig_PHYS +++ b/internal/test_unit/config/PointStatConfig_PHYS @@ -148,8 +148,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_PHYS_pint b/internal/test_unit/config/PointStatConfig_PHYS_pint index e246c570b3..f98f2e4102 100644 --- a/internal/test_unit/config/PointStatConfig_PHYS_pint +++ b/internal/test_unit/config/PointStatConfig_PHYS_pint @@ -143,8 +143,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_SEEPS b/internal/test_unit/config/PointStatConfig_SEEPS new file mode 100644 index 0000000000..cb7244311f --- /dev/null +++ b/internal/test_unit/config/PointStatConfig_SEEPS @@ -0,0 +1,152 @@ 
+//////////////////////////////////////////////////////////////////////////////// +// +// Point-Stat configuration file. +// +// For additional information, please see the MET User's Guide. +// Copied from PointStatConfig_APCP +// +//////////////////////////////////////////////////////////////////////////////// + +model = "FCST"; + +// +// Output description to be written +// May be set separately in each "obs.field" entry +// +desc = "NA"; + +//////////////////////////////////////////////////////////////////////////////// + +regrid = { + to_grid = NONE; + method = NEAREST; + width = 1; +} + +//////////////////////////////////////////////////////////////////////////////// + +obs_window = { + beg = ${BEG_DS}; + end = ${END_DS}; +} + +//////////////////////////////////////////////////////////////////////////////// + +mpr_column = []; +mpr_thresh = []; +cnt_thresh = [ NA ]; +cnt_logic = UNION; +wind_thresh = [ NA ]; +wind_logic = UNION; +eclv_points = 0.05; + +cat_thresh = [ >0.254, >0.635, >1.270, >2.540 ]; +message_type = "ADPSFC"; + +fcst = { + sid_inc = []; + sid_exc = []; + obs_quality_inc = []; + obs_quality_exc = []; + + field = [ + { + name = "${FCST_FIELD_NAME}"; + level = "${FCST_FIELD_LEVEL}"; + } + ]; + +} +obs = ${OBS_DICT}; + +//////////////////////////////////////////////////////////////////////////////// + +mask = { + grid = [ "FULL" ]; + poly = []; + sid = []; + llpnt = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +ci_alpha = [ 0.05 ]; + +boot = { + interval = PCTILE; + rep_prop = 1.0; + n_rep = 0; + rng = "mt19937"; + seed = "1"; +} + +//////////////////////////////////////////////////////////////////////////////// + +interp = { + vld_thresh = 1.0; + + type = [ + { method = MEDIAN; width = 3; }, + { method = NEAREST; width = 1; }, + { method = DW_MEAN; width = [ 3, 5 ]; }, + { method = LS_FIT; width = [ 3, 5 ]; }, + { method = BILIN; width = 2; } + ]; +} + 
+//////////////////////////////////////////////////////////////////////////////// + +hira = { + flag = FALSE; + width = [ 2, 3, 4, 5 ]; + vld_thresh = 1.0; + cov_thresh = [ ==0.25 ]; + shape = SQUARE; + prob_cat_thresh = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +output_flag = { + fho = BOTH; + ctc = BOTH; + cts = BOTH; + mctc = NONE; + mcts = NONE; + cnt = BOTH; + sl1l2 = BOTH; + sal1l2 = NONE; + vl1l2 = NONE; + val1l2 = NONE; + vcnt = NONE; + pct = NONE; + pstd = NONE; + pjc = NONE; + prc = NONE; + ecnt = NONE; + orank = NONE; + rps = NONE; + eclv = BOTH; + mpr = NONE; + seeps = BOTH; + seeps_mpr = BOTH; +} + +//////////////////////////////////////////////////////////////////////////////// +// Threshold for SEEPS p1 (Probability of being dry) + +seeps_p1_thresh = ${SEEPS_P1_THRESH}; +seeps_point_climo_name = "${SEEPS_POINT_CLIMO_NAME}"; + +//////////////////////////////////////////////////////////////////////////////// + +duplicate_flag = NONE; +rank_corr_flag = FALSE; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; + +//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_WINDS b/internal/test_unit/config/PointStatConfig_WINDS index 1c87273e89..4dac7fe51e 100644 --- a/internal/test_unit/config/PointStatConfig_WINDS +++ b/internal/test_unit/config/PointStatConfig_WINDS @@ -52,6 +52,8 @@ message_type_group_map = [ { key = "USERSF"; val = "ADPSFC,SFCSHP"; } ]; +obtype_as_group_val_flag = FALSE; + fcst = { sid_inc = []; sid_exc = []; @@ -163,8 +165,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; 
//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_aeronet b/internal/test_unit/config/PointStatConfig_aeronet index fef6c9bb42..387a3c54a2 100644 --- a/internal/test_unit/config/PointStatConfig_aeronet +++ b/internal/test_unit/config/PointStatConfig_aeronet @@ -212,8 +212,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_airnow b/internal/test_unit/config/PointStatConfig_airnow index 4223455902..4e7cfe79ba 100644 --- a/internal/test_unit/config/PointStatConfig_airnow +++ b/internal/test_unit/config/PointStatConfig_airnow @@ -92,6 +92,8 @@ message_type_group_map = [ { key = "WATERSF"; val = "SFCSHP"; } ]; +obtype_as_group_val_flag = FALSE; + //////////////////////////////////////////////////////////////////////////////// // @@ -240,8 +242,10 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// -tmp_dir = "/tmp"; -output_prefix = ""; -version = "V12.0.0"; +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = ""; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_climo b/internal/test_unit/config/PointStatConfig_climo index d7f89b3cb0..d87ff87ca5 100644 --- a/internal/test_unit/config/PointStatConfig_climo +++ b/internal/test_unit/config/PointStatConfig_climo @@ -285,8 +285,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; 
+output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_climo_WMO b/internal/test_unit/config/PointStatConfig_climo_WMO index 1ddc8deb1e..c34dc0989e 100644 --- a/internal/test_unit/config/PointStatConfig_climo_WMO +++ b/internal/test_unit/config/PointStatConfig_climo_WMO @@ -55,25 +55,25 @@ fcst = { { name = "UGRD"; level = [ "P500" ]; }, { name = "VGRD"; level = [ "P500" ]; }, { name = "WIND"; level = [ "P500" ]; }, - { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; } + { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; } ]; } obs = { - cnt_thresh = [ NA, =CDP50, ==CDP25 ]; + cnt_thresh = [ NA, =OCDP50, ==OCDP25 ]; field = [ { name = "TMP"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "UGRD"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "VGRD"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "WIND"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, - { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; } + { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; 
}, + { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; } ]; } @@ -230,8 +230,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_climo_prob b/internal/test_unit/config/PointStatConfig_climo_prob index 364c1bfe73..b69812fae5 100644 --- a/internal/test_unit/config/PointStatConfig_climo_prob +++ b/internal/test_unit/config/PointStatConfig_climo_prob @@ -232,8 +232,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_dup b/internal/test_unit/config/PointStatConfig_dup index 8fad74b0e5..ff42a31eab 100644 --- a/internal/test_unit/config/PointStatConfig_dup +++ b/internal/test_unit/config/PointStatConfig_dup @@ -165,8 +165,11 @@ seeps_p1_thresh = NA; duplicate_flag = ${DUPLICATE_FLAG}; rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_mpr_thresh b/internal/test_unit/config/PointStatConfig_mpr_thresh index 5740d4907a..6b62a8df4f 100644 --- a/internal/test_unit/config/PointStatConfig_mpr_thresh +++ b/internal/test_unit/config/PointStatConfig_mpr_thresh @@ -69,14 +69,14 @@ fcst = { desc = 
"ABS_OBS_FCST_DIFF"; }, { - mpr_column = [ "ABS(OBS-CLIMO_MEAN)" ]; + mpr_column = [ "ABS(OBS-OBS_CLIMO_MEAN)" ]; mpr_thresh = [ <=5 ]; desc = "ABS_OBS_CLIMO_MEAN_DIFF"; }, { - mpr_column = [ "CLIMO_CDF" ]; + mpr_column = [ "OBS_CLIMO_CDF" ]; mpr_thresh = [ >=0.25&&<=0.75 ]; - desc = "CLIMO_CDF_IQR"; + desc = "OBS_CLIMO_CDF_IQR"; } ]; } @@ -224,8 +224,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_obs_summary b/internal/test_unit/config/PointStatConfig_obs_summary index a931ba6b21..dc5b55a67e 100644 --- a/internal/test_unit/config/PointStatConfig_obs_summary +++ b/internal/test_unit/config/PointStatConfig_obs_summary @@ -153,8 +153,11 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_obs_summary_all b/internal/test_unit/config/PointStatConfig_obs_summary_all index 384e4ada19..e5bc4dcd7a 100644 --- a/internal/test_unit/config/PointStatConfig_obs_summary_all +++ b/internal/test_unit/config/PointStatConfig_obs_summary_all @@ -222,8 +222,11 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; 
+version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_point_weight b/internal/test_unit/config/PointStatConfig_point_weight new file mode 100644 index 0000000000..4f9528e26e --- /dev/null +++ b/internal/test_unit/config/PointStatConfig_point_weight @@ -0,0 +1,145 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Point-Stat configuration file. +// +// For additional information, please see the MET User's Guide. +// +//////////////////////////////////////////////////////////////////////////////// + +model = "SREF"; + +// +// Output description to be written +// May be set separately in each "obs.field" entry +// +desc = "${DESC}"; + +//////////////////////////////////////////////////////////////////////////////// + +regrid = { + to_grid = NONE; + method = NEAREST; + width = 1; +} + +//////////////////////////////////////////////////////////////////////////////// + +obs_window = { + beg = -1800; + end = 1800; +} + +//////////////////////////////////////////////////////////////////////////////// + +mpr_column = []; +mpr_thresh = []; +cnt_thresh = [ NA ]; +cnt_logic = UNION; +wind_thresh = [ NA ]; +wind_logic = UNION; +eclv_points = 0.05; + +fcst = { + sid_inc = []; + sid_exc = []; + obs_quality_inc = []; + obs_quality_exc = []; + + field = [ + { name = "TMP"; level = "P850"; message_type = "ADPUPA"; cat_thresh = [ >273, >283 ]; }, + { name = "UGRD"; level = "P850"; message_type = "ADPUPA"; cat_thresh = [ >5 ]; }, + { name = "VGRD"; level = "P850"; message_type = "ADPUPA"; cat_thresh = [ >5 ]; } + ]; + +} +obs = fcst; + +//////////////////////////////////////////////////////////////////////////////// + +mask = { + grid = []; + poly = []; + sid = [ "${CONFIG_DIR}/SID_CONUS_ADPUPA_ELEV.txt" ]; + llpnt = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +ci_alpha = [ 0.05 ]; + +boot = { 
+ interval = PCTILE; + rep_prop = 1.0; + n_rep = 200; + rng = "mt19937"; + seed = "1"; +} + +//////////////////////////////////////////////////////////////////////////////// + +interp = { + vld_thresh = 1.0; + + type = [ + { + method = NEAREST; + width = 1; + } + ]; +} + +//////////////////////////////////////////////////////////////////////////////// + +hira = { + flag = FALSE; + width = [ 2, 3, 4, 5 ]; + vld_thresh = 1.0; + cov_thresh = [ ==0.25 ]; + shape = SQUARE; + prob_cat_thresh = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +output_flag = { + fho = NONE; + ctc = STAT; + cts = STAT; + mctc = STAT; + mcts = STAT; + cnt = STAT; + sl1l2 = STAT; + sal1l2 = NONE; + vl1l2 = STAT; + val1l2 = NONE; + vcnt = STAT; + pct = NONE; + pstd = NONE; + pjc = NONE; + prc = NONE; + ecnt = NONE; + orank = NONE; + rps = NONE; + eclv = STAT; + mpr = STAT; + seeps = NONE; + seeps_mpr = NONE; +} + +//////////////////////////////////////////////////////////////////////////////// +// Threshold for SEEPS p1 (Probability of being dry) + +seeps_p1_thresh = NA; + +//////////////////////////////////////////////////////////////////////////////// + +duplicate_flag = NONE; +rank_corr_flag = TRUE; + +point_weight_flag = ${POINT_WEIGHT}; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; + +//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_prob b/internal/test_unit/config/PointStatConfig_prob index ab8067a79e..e4149c7eff 100644 --- a/internal/test_unit/config/PointStatConfig_prob +++ b/internal/test_unit/config/PointStatConfig_prob @@ -150,8 +150,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; 
//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_prob_point_weight b/internal/test_unit/config/PointStatConfig_prob_point_weight new file mode 100644 index 0000000000..f083cb8807 --- /dev/null +++ b/internal/test_unit/config/PointStatConfig_prob_point_weight @@ -0,0 +1,151 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Point-Stat configuration file. +// +// For additional information, please see the MET User's Guide. +// +//////////////////////////////////////////////////////////////////////////////// + +model = "SREF"; + +// +// Output description to be written +// May be set separately in each "obs.field" entry +// +desc = "${DESC}"; + +//////////////////////////////////////////////////////////////////////////////// + +regrid = { + to_grid = NONE; + method = NEAREST; + width = 1; +} + +//////////////////////////////////////////////////////////////////////////////// + +obs_window = { + beg = -1800; + end = 1800; +} + +//////////////////////////////////////////////////////////////////////////////// + +mpr_column = []; +mpr_thresh = []; +cnt_thresh = [ NA ]; +cnt_logic = UNION; +wind_thresh = [ NA ]; +wind_logic = UNION; +eclv_points = 0.05; + +fcst = { + sid_inc = []; + sid_exc = []; + obs_quality_inc = []; + obs_quality_exc = []; + + field = [ + { name = "PROB"; level = "P850"; + prob = { name = "TMP"; thresh_hi = 273; }; + cat_thresh = ==0.25; } + ]; + +} +obs = { + message_type = "ADPUPA"; + + field = [ + { name = "TMP"; level = "P850"; cat_thresh = <273; } + ]; +} + +//////////////////////////////////////////////////////////////////////////////// + +mask = { + grid = [ "FULL" ]; + poly = []; + sid = [ "${CONFIG_DIR}/SID_CONUS_ADPUPA_ELEV.txt" ]; + llpnt = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +ci_alpha = [ 0.05 ]; + +boot = { + interval = PCTILE; + rep_prop = 1.0; + n_rep = 
200; + rng = "mt19937"; + seed = "1"; +} + +//////////////////////////////////////////////////////////////////////////////// + +interp = { + vld_thresh = 1.0; + + type = [ + { + method = NEAREST; + width = 1; + } + ]; +} + +//////////////////////////////////////////////////////////////////////////////// + +hira = { + flag = FALSE; + width = [ 2, 3, 4, 5 ]; + vld_thresh = 1.0; + cov_thresh = [ ==0.25 ]; + shape = SQUARE; + prob_cat_thresh = []; +} + +//////////////////////////////////////////////////////////////////////////////// + +output_flag = { + fho = NONE; + ctc = NONE; + cts = NONE; + mctc = NONE; + mcts = NONE; + cnt = NONE; + sl1l2 = NONE; + sal1l2 = NONE; + vl1l2 = NONE; + val1l2 = NONE; + vcnt = NONE; + pct = STAT; + pstd = STAT; + pjc = STAT; + prc = STAT; + ecnt = NONE; + orank = NONE; + rps = NONE; + eclv = STAT; + mpr = STAT; + seeps = NONE; + seeps_mpr = NONE; +} + +//////////////////////////////////////////////////////////////////////////////// +// Threshold for SEEPS p1 (Probability of being dry) + +seeps_p1_thresh = NA; + +//////////////////////////////////////////////////////////////////////////////// + +duplicate_flag = NONE; +rank_corr_flag = TRUE; + +point_weight_flag = ${POINT_WEIGHT}; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; + +//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_python b/internal/test_unit/config/PointStatConfig_python index 5116179451..3f56423d96 100644 --- a/internal/test_unit/config/PointStatConfig_python +++ b/internal/test_unit/config/PointStatConfig_python @@ -219,9 +219,12 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = ""; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = ""; +version = "V12.0.0"; 
//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_qty_inc_exc b/internal/test_unit/config/PointStatConfig_qty_inc_exc index 1eab2b184c..2345684e7a 100644 --- a/internal/test_unit/config/PointStatConfig_qty_inc_exc +++ b/internal/test_unit/config/PointStatConfig_qty_inc_exc @@ -209,8 +209,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_sid_inc_exc b/internal/test_unit/config/PointStatConfig_sid_inc_exc index a586064f53..9ab93b7417 100644 --- a/internal/test_unit/config/PointStatConfig_sid_inc_exc +++ b/internal/test_unit/config/PointStatConfig_sid_inc_exc @@ -156,8 +156,11 @@ seeps_p1_thresh = NA; duplicate_flag = NONE; obs_summary = NEAREST; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_ugrid_mpas_diag b/internal/test_unit/config/PointStatConfig_ugrid_mpas_diag index 886c6bb301..3c1a4054a6 100644 --- a/internal/test_unit/config/PointStatConfig_ugrid_mpas_diag +++ b/internal/test_unit/config/PointStatConfig_ugrid_mpas_diag @@ -160,8 +160,11 @@ ugrid_coordinates_file = "${MET_TEST_INPUT}/ugrid_data/mpas/static.40962_reduced duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = 
"V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_ugrid_mpas_out b/internal/test_unit/config/PointStatConfig_ugrid_mpas_out index c16deb0f7d..dcb7afe613 100644 --- a/internal/test_unit/config/PointStatConfig_ugrid_mpas_out +++ b/internal/test_unit/config/PointStatConfig_ugrid_mpas_out @@ -160,8 +160,11 @@ ugrid_coordinates_file = "${MET_TEST_INPUT}/ugrid_data/mpas/static.40962_reduced duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/PointStatConfig_ugrid_no_dataset b/internal/test_unit/config/PointStatConfig_ugrid_no_dataset index 9c7c7f5b3a..2cc3296b6e 100644 --- a/internal/test_unit/config/PointStatConfig_ugrid_no_dataset +++ b/internal/test_unit/config/PointStatConfig_ugrid_no_dataset @@ -160,8 +160,11 @@ ugrid_coordinates_file = "${MET_TEST_INPUT}/ugrid_data/mpas/static.40962_reduced duplicate_flag = NONE; rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = "${OUTPUT_PREFIX}"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${OUTPUT_PREFIX}"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/SID_CONUS_ADPSFC_ELEV.txt b/internal/test_unit/config/SID_CONUS_ADPSFC_ELEV.txt new file mode 100644 index 0000000000..03da3d6060 --- /dev/null +++ b/internal/test_unit/config/SID_CONUS_ADPSFC_ELEV.txt @@ -0,0 +1,2109 @@ +SID_CONUS_ADPSFC_ELEV +1L2(47.07002) +3CLO3(2.57002) +AAT(1492.32002) +AATC1(318.82002) +ABCC1(1419.32002) +ABEW1(182.57002) +ABMC1(1426.07002) +ABNW1(281.32002) +ABRC1(1455.82002) +ABTM8(1348.82002) +ACLN2(1838.32002) 
+ACMC1(43.07002) +ACRI1(1771.07002) +ACRU1(2216.82002) +ACV(72.57002) +ADRC1(307.07002) +AFHA3(1553.32002) +AFMA3(1278.57002) +AFRA3(901.07002) +AFRC1(2086.07002) +AFRI1(1386.32002) +AFSC1(1455.82002) +AFSN2(2029.82002) +AGDO3(672.82002) +AGKO3(1514.82002) +AGPC1(2767.82002) +AGTQ1(910.82002) +AGWC1(638.57002) +AIDC1(0.57002) +AJGW1(193.32002) +AJO(306.57002) +AKEC1(1041.07002) +ALDC1(553.57002) +ALFO3(1569.32002) +ALKN2(1922.32002) +ALLN2(2004.57002) +ALMA3(563.07002) +ALPC1(671.32002) +ALRN2(1626.57002) +ALW(501.57002) +AMBC1(2004.07002) +ANAC1(106.82002) +ANDC1(494.82002) +ANEC1(654.07002) +ANIC1(0.070023) +ANRO3(1916.07002) +ANSO3(1711.57002) +APC(73.82002) +APDN2(926.07002) +APIC1(1453.57002) +APKC1(1460.07002) +APLC1(941.07002) +APSW1(457.32002) +APYC1(1361.82002) +ARAO3(55.07002) +ARAU1(1446.82002) +ARAW1(675.82002) +ARFO3(1248.07002) +ARGC1(384.82002) +ARKI1(1354.82002) +ASFA3(1807.57002) +ASHW1(944.82002) +ASMC1(354.32002) +ASRC1(655.07002) +AST(11.82002) +ASYU1(2478.82002) +ATAI1(2067.57002) +ATFO3(1461.07002) +ATHC1(1339.82002) +ATLC1(254.07002) +ATLI1(2289.32002) +ATNC1(1160.57002) +ATRC1(1088.57002) +ATSC1(1496.57002) +AUBW1(95.82002) +AURO3(67.07002) +AVLC1(1727.57002) +AVNC1(0.57002) +AVX(23.57002) +AZRQ1(1107.82002) +BAB(104.07002) +BABC1(1253.57002) +BADM8(1806.57002) +BADU1(1144.32002) +BAEI1(2204.57002) +BAFO3(1320.57002) +BAGA3(1147.82002) +BAJC1(600.82002) +BALO3(1239.32002) +BAMC1(456.57002) +BANM8(1254.32002) +BANO3(34.57002) +BARQ1(964.07002) +BARQ2(1466.57002) +BASC1(1137.82002) +BASI1(2203.57002) +BASQ1(829.32002) +BATN2(1573.32002) +BBBC1(1706.57002) +BBFO3(1239.57002) +BBRA3(1741.82002) +BBYC1(3.57002) +BCDO3(1545.82002) +BCDW1(145.07002) +BCFO3(1313.82002) +BCNC1(1598.32002) +BCRQ1(1161.32002) +BCYC1(1381.82002) +BDDA3(1294.07002) +BDFO3(319.82002) +BDMC1(1065.07002) +BDOC1(1834.82002) +BDYC1(733.32002) +BEAM8(2414.82002) +BEBC1(133.32002) +BEDO3(1542.32002) +BEFM8(1915.07002) +BENC1(242.07002) +BERN2(2213.82002) +BETC1(2246.57002) 
+BEUO3(1317.32002) +BEWO3(1178.07002) +BFL(183.07002) +BFRI1(1574.57002) +BFYI1(1079.32002) +BGBC1(981.32002) +BGCO3(1128.07002) +BGFO3(1380.57002) +BGRM8(1342.32002) +BGVC1(744.07002) +BHAU1(1401.07002) +BHNC1(270.32002) +BIEQ1(903.82002) +BIGN2(2094.82002) +BIH(1884.57002) +BIIC1(795.07002) +BIRQ2(1121.82002) +BISM8(1311.07002) +BKBW1(135.82002) +BKCU1(1996.82002) +BKE(1193.57002) +BKFN2(2023.82002) +BKFO3(612.32002) +BKGC1(1070.32002) +BKLC1(1314.82002) +BKRC1(2257.82002) +BKRN2(2023.82002) +BKSI1(1887.32002) +BKVN2(763.07002) +BLAO3(1066.32002) +BLBC1(80.32002) +BLBO3(551.07002) +BLCC1(2231.32002) +BLCN2(1699.57002) +BLDQ1(1185.07002) +BLEN2(1710.07002) +BLH(131.57002) +BLI(61.07002) +BLKM8(1868.32002) +BLKO3(1159.07002) +BLMC1(2163.07002) +BLOM8(2219.07002) +BLPO3(1740.57002) +BLRC1(284.07002) +BLSC1(1622.32002) +BLTM8(1135.32002) +BLTQ1(952.57002) +BLU(1381.82002) +BLUN2(1525.82002) +BMEC1(864.07002) +BMFW1(1183.82002) +BMOC1(1081.07002) +BMTC1(534.82002) +BMUC1(1538.57002) +BNCC1(186.57002) +BNCN2(1573.57002) +BNDC1(284.07002) +BNDO3(1178.07002) +BNDW1(615.07002) +BNFO3(421.32002) +BNGC1(211.57002) +BNKC1(1359.82002) +BNO(1369.82002) +BNRI1(2203.57002) +BNTC1(1001.07002) +BNVC1(422.32002) +BOCC1(2054.82002) +BOFO3(1183.07002) +BOGC1(1867.07002) +BOI(970.57002) +BOKO3(110.57002) +BOLC1(1569.82002) +BONO3(566.07002) +BOOM8(1316.82002) +BORO3(1760.32002) +BOUN2(609.57002) +BOWC1(1768.32002) +BPAW1(1158.32002) +BPFC1(1638.32002) +BPKC1(1700.32002) +BPKN2(2249.82002) +BPMC1(1700.32002) +BPNC1(1378.82002) +BPOC1(2480.07002) +BQFO3(1635.57002) +BRAQ2(1663.07002) +BRBI1(1671.57002) +BRCN2(2287.57002) +BRCQ1(2131.82002) +BRDC1(335.82002) +BRHC1(711.32002) +BRLM8(2082.57002) +BRLN2(1719.82002) +BRMO3(1136.32002) +BRNQ1(820.32002) +BROC1(846.82002) +BROQ1(1142.57002) +BRRC1(643.32002) +BRRI1(1818.57002) +BRRM8(2132.82002) +BRUO3(331.82002) +BRWC1(85.57002) +BSCM8(1981.57002) +BSCN2(1305.07002) +BSDI1(1268.82002) +BSNC1(1460.07002) +BSPM8(1931.32002) +BSTU1(1783.57002) 
+BTFO3(1261.82002) +BTM(1918.32002) +BTRC1(1249.82002) +BTRI1(1875.57002) +BTTC1(54.32002) +BTYO3(1454.82002) +BUBC1(186.57002) +BUCO3(1200.57002) +BUCW1(409.32002) +BUFC1(1499.32002) +BUFN2(1468.32002) +BUFO3(856.07002) +BUGC1(1181.07002) +BULI1(1830.82002) +BULO3(890.32002) +BULQ1(1326.82002) +BULQ2(1297.07002) +BUMC1(1001.07002) +BUO(1001.07002) +BUPC1(1314.82002) +BUR(400.32002) +BUSW1(695.82002) +BVAQ1(852.82002) +BVDC1(1053.82002) +BVLQ1(1408.32002) +BVRC1(167.32002) +BVRO3(1345.32002) +BWFO3(1433.32002) +BWSO3(1555.82002) +BWSQ1(2318.82002) +BYCU1(2325.07002) +BYFO3(1374.57002) +BYI(1307.82002) +BZRC1(941.57002) +CAEC1(379.32002) +CAFO3(1428.07002) +CAHC1(611.32002) +CALU1(1709.82002) +CAMI1(1163.32002) +CANO3(129.07002) +CAPC1(352.57002) +CAPN2(1838.32002) +CASN2(1044.32002) +CATN2(1799.82002) +CAWN2(615.57002) +CAZC1(779.32002) +CBFI1(2468.07002) +CBRA3(206.57002) +CBUC1(1715.57002) +CCCC1(794.57002) +CCCQ1(1301.57002) +CCDI1(1551.82002) +CCEC1(804.07002) +CCEN2(637.82002) +CCHC1(402.07002) +CCR(86.07002) +CCRN2(1826.32002) +CCRO3(180.57002) +CCUN2(1877.57002) +CCZC1(123.32002) +CDAW1(717.32002) +CDCU1(2042.07002) +CDEC1(1110.57002) +CDGC1(561.32002) +CDLC1(463.32002) +CDMU1(1612.57002) +CEC(62.82002) +CECU1(2011.82002) +CEDN2(2100.57002) +CEDW1(575.82002) +CEEC1(295.07002) +CEFO3(1330.32002) +CEKC1(856.57002) +CENW1(145.07002) +CESC1(1699.82002) +CFCC1(1585.82002) +CFRC1(1483.32002) +CFWC1(85.57002) +CGDN2(1001.82002) +CGFO3(1276.82002) +CGFW1(859.57002) +CGVC1(2744.57002) +CGWO3(843.57002) +CHAC1(1432.82002) +CHEA3(1392.82002) +CHFQ1(1614.82002) +CHMC1(1823.82002) +CHMQ1(979.32002) +CHMQ2(824.82002) +CHOC1(1318.57002) +CHPQ1(1568.32002) +CHRI1(2024.07002) +CHRO3(208.32002) +CHSN2(2178.57002) +CHTC1(396.07002) +CHUM8(1922.32002) +CHWC1(611.32002) +CICC1(88.07002) +CIFO3(1533.82002) +CINW1(473.07002) +CISC1(1050.07002) +CJAW1(596.82002) +CLBC1(1135.82002) +CLFC1(568.07002) +CLFO3(1475.07002) +CLFU1(1752.82002) +CLHC1(380.82002) +CLKI1(1165.32002) 
+CLKO3(1197.07002) +CLLC1(153.07002) +CLLO3(1136.57002) +CLM(432.57002) +CLNC1(1473.32002) +CLSC1(-0.42998) +CLSW1(152.82002) +CLVM8(2188.07002) +CMA(58.32002) +CMAC1(69.82002) +CMBM8(1785.32002) +CMFW1(1133.07002) +CMNC1(1096.07002) +CMOC1(1000.82002) +CMPQ1(1745.82002) +CMVC1(1215.57002) +CNAC1(353.57002) +CNFC1(1453.82002) +CNFO3(262.32002) +CNGC1(488.82002) +CNIC1(605.07002) +CNJC1(153.07002) +CNO(269.82002) +CNSC1(1094.32002) +CNYC1(1463.32002) +COCC1(244.57002) +CODC1(494.82002) +COE(828.07002) +COEC1(494.82002) +COGC1(581.07002) +COIN2(2005.82002) +COKC1(1728.32002) +COLO3(1365.82002) +COMN2(2082.82002) +CONM8(1755.32002) +COPM8(1864.32002) +COVC1(989.32002) +COWC1(548.07002) +COXQ1(1930.57002) +COYN2(1868.07002) +COZI1(1938.32002) +CPCM8(1809.32002) +CPFO3(1733.57002) +CPLC1(2317.32002) +CPPW1(1380.57002) +CPWA3(1660.57002) +CQFO3(1421.57002) +CQLQ2(504.07002) +CQT(87.82002) +CRAQ1(842.07002) +CRCA3(1752.32002) +CRCW1(958.32002) +CRGC1(53.57002) +CRIC1(217.07002) +CRKI1(990.82002) +CRLC1(2654.82002) +CRLO3(1711.57002) +CRMI1(1867.82002) +CROQ1(1719.07002) +CRQ(29.32002) +CRSN2(1843.32002) +CRTI1(1867.82002) +CRVC1(2594.82002) +CRVO3(95.57002) +CRWA3(308.32002) +CRWC1(1561.07002) +CRWW1(145.07002) +CRXC1(99.07002) +CRYN2(653.57002) +CRZC1(1197.82002) +CSCI1(1635.57002) +CSCO3(1553.07002) +CSDI1(1635.57002) +CSFO3(1464.32002) +CSHW1(236.32002) +CSMC1(538.32002) +CSTC1(583.57002) +CSTO3(1574.82002) +CSUC1(446.57002) +CSVC1(410.07002) +CSWC1(1238.32002) +CSXC1(783.32002) +CSZC1(1587.82002) +CTB(1155.57002) +CTLN2(2114.07002) +CTNC1(879.32002) +CTOC1(149.32002) +CUGW1(699.82002) +CUMW1(674.32002) +CUTQ1(2228.57002) +CUUC1(1457.57002) +CVAC1(38.32002) +CVBC1(335.07002) +CVFO3(671.82002) +CVSC1(1020.32002) +CVSO3(161.57002) +CWFO3(1326.07002) +CWSN2(587.32002) +CWXW1(575.82002) +CXCC1(67.57002) +CYAC1(988.32002) +CYFI1(1494.07002) +CYFW1(555.57002) +CYMC1(875.32002) +CYOC1(410.07002) +CYVC1(1786.82002) +CZFO3(1790.57002) +CZZ(971.07002) +DACC1(559.07002) 
+DAG(774.07002) +DALM8(1921.32002) +DANO3(1310.82002) +DARW1(786.57002) +DBCQ2(1799.57002) +DBLC1(572.32002) +DCCW1(817.32002) +DCEN2(692.07002) +DCKN2(587.32002) +DCRM8(1575.07002) +DCUC1(303.07002) +DCWC1(85.57002) +DDWC1(462.57002) +DEAI1(977.07002) +DECC1(1031.07002) +DEDN2(1336.82002) +DEEM8(1716.82002) +DEFO3(828.32002) +DEHI1(1377.57002) +DEIC1(683.07002) +DEMC1(1070.32002) +DENC1(1013.07002) +DERM8(1780.07002) +DESC1(734.07002) +DESN2(1602.57002) +DEVC1(539.32002) +DEW(710.32002) +DGFC1(1118.82002) +DGRC1(291.82002) +DHDI1(2190.07002) +DHLM8(2060.57002) +DIAO3(1643.32002) +DIDN2(926.07002) +DIVM8(2339.57002) +DIXO3(126.07002) +DKFC1(1050.07002) +DKYC1(2509.57002) +DLBQ1(1262.82002) +DLCC1(1732.57002) +DLN(1711.07002) +DLS(362.07002) +DLTU1(1439.82002) +DLVC1(476.57002) +DMLC1(1737.07002) +DMRA3(1642.32002) +DNPC1(1544.57002) +DNRC1(2091.32002) +DNVC1(1511.57002) +DOGC1(1949.07002) +DOGQ1(1956.32002) +DOTW1(284.57002) +DOUW1(740.82002) +DPHC1(1381.82002) +DPKI1(1763.82002) +DPSA3(1472.07002) +DPYM8(1611.07002) +DRBM8(1755.57002) +DRBN2(2007.57002) +DRCN2(2111.07002) +DRYN2(1809.07002) +DRYW1(987.57002) +DSNC1(117.32002) +DUCC1(1508.57002) +DUCN2(692.07002) +DUDC1(864.07002) +DUIC1(1047.07002) +DUNO3(119.32002) +DVLO3(246.82002) +DVOC1(984.07002) +DVRC1(159.07002) +DVSC1(1766.07002) +DVT(476.32002) +DWAI1(818.82002) +DWRN2(1586.57002) +DYCN2(1777.32002) +DYLC1(1555.82002) +DYNN2(2159.32002) +EACQ2(1800.57002) +EAT(754.32002) +EBTC1(2394.32002) +ECHO3(209.32002) +ECKC1(832.82002) +ECSC1(556.82002) +EDOC1(433.32002) +EDW(732.32002) +EED(351.57002) +EELC1(989.32002) +EEPC1(1286.82002) +EGCC1(1578.82002) +EGKO3(213.32002) +EGLC1(520.07002) +EIMO3(1608.07002) +EKA(127.82002) +EKAN2(2042.07002) +EKO(1704.82002) +EKRI1(945.07002) +ELBI1(1045.57002) +ELCC1(-20.17998) +ELDA3(726.82002) +ELEW1(714.57002) +ELJC1(176.82002) +ELKC1(879.57002) +ELKQ2(858.32002) +ELN(835.07002) +ELRN2(2205.07002) +ELRQ1(1547.82002) +ELSQ2(659.07002) +ELXC1(43.32002) +ELY(2205.07002) 
+EMCM8(1324.32002) +EMFO3(1388.57002) +EMIO3(1200.57002) +EMRC1(897.32002) +EMTI1(945.32002) +ENCQ1(829.07002) +ENCW1(409.32002) +ENFO3(1243.32002) +ENMW1(259.07002) +ENTU1(1798.57002) +ENUW1(409.32002) +ENV(1440.82002) +ENVI1(1047.82002) +EORC1(552.82002) +EPH(429.82002) +EPKC1(628.32002) +ERAW1(740.57002) +ERCC1(422.07002) +ESCC1(258.32002) +ESEC1(514.57002) +ESOC1(258.32002) +ESPC1(822.07002) +ESRC1(348.07002) +EUG(165.07002) +EUL(728.57002) +EURM8(1327.32002) +EVAQ1(2190.32002) +EVFO3(699.57002) +EXQC1(242.82002) +EZRI1(2087.07002) +FADO3(378.07002) +FAFI1(1604.07002) +FAFO3(1193.57002) +FALN2(1196.82002) +FALQ1(1641.57002) +FAT(74.32002) +FBOC1(450.82002) +FBSC1(814.82002) +FCHC1(311.57002) +FCKO3(1761.57002) +FCRM8(2060.07002) +FEFO3(964.57002) +FERI1(1202.57002) +FFFO3(1454.82002) +FFXW1(695.82002) +FGFO3(1512.32002) +FGMC1(514.32002) +FGRC1(781.07002) +FHCC1(117.57002) +FHDC1(1733.32002) +FHFI1(1408.07002) +FHLC1(448.82002) +FHR(6.07002) +FIFW1(567.82002) +FINQ1(820.57002) +FISN2(1825.82002) +FISO3(1361.07002) +FISW1(1437.07002) +FKFO3(1401.57002) +FLAC1(1773.82002) +FLEI1(2262.57002) +FLFC1(2136.07002) +FLFI1(1594.57002) +FLFO3(1524.32002) +FMFO3(1386.82002) +FMLQR(968.57002) +FMNQ1(1929.57002) +FMOC1(1062.82002) +FMRC1(1406.57002) +FNWC1(1011.57002) +FNWO3(161.57002) +FNXA3(374.82002) +FOGO3(258.07002) +FOIC1(2086.07002) +FOSO3(385.57002) +FOXN2(1742.32002) +FPRO3(178.07002) +FPWU1(1060.82002) +FRAW1(281.07002) +FRCC1(216.07002) +FRCW1(1004.32002) +FRGC1(47.57002) +FRHM8(1917.32002) +FRIC1(238.32002) +FRIW1(259.07002) +FRKW1(197.07002) +FRLO3(1545.82002) +FRMM8(1202.57002) +FRSO3(145.82002) +FRTC1(1140.57002) +FSHC1(1180.82002) +FSLC1(140.82002) +FSNC1(56.82002) +FSSO3(238.07002) +FTAW1(817.82002) +FTDC1(204.32002) +FTFW1(961.32002) +FTMM8(1759.57002) +FTNC1(581.82002) +FTSC1(607.57002) +FUL(27.07002) +FVVN2(1477.82002) +FWFW1(1071.32002) +FWNN2(653.57002) +FWSC1(1706.57002) +FWSN2(653.57002) +FZWA3(1881.82002) +GAAM8(1989.57002) +GALN2(2015.32002) 
+GARI1(2175.07002) +GARW1(817.82002) +GASC1(486.57002) +GASO3(190.82002) +GAVI1(1554.57002) +GBDA3(282.57002) +GBRC1(870.32002) +GCAW1(668.57002) +GCDC1(493.82002) +GCHA3(1354.82002) +GCKQ1(1783.82002) +GCN(1863.82002) +GCRA3(1672.57002) +GDFO3(1022.82002) +GDLC1(1788.82002) +GDPN2(1100.07002) +GDRO3(356.07002) +GDRQ2(730.82002) +GDTA3(143.32002) +GDTC1(142.57002) +GECU1(1945.57002) +GEG(700.82002) +GEOC1(810.32002) +GERW1(349.32002) +GGDA3(300.07002) +GGRC1(418.07002) +GHDQ1(1684.07002) +GHFW1(883.57002) +GILO3(1089.82002) +GISC1(960.07002) +GKSC1(1508.57002) +GLAW1(930.32002) +GLDO3(1668.32002) +GLEQ1(898.82002) +GLFM8(1308.82002) +GLNI1(2600.32002) +GLSI1(2600.32002) +GMFW1(906.57002) +GMTC1(880.82002) +GNFC1(2143.82002) +GNLC1(2143.57002) +GNLW1(768.07002) +GNNC1(1126.07002) +GNSC1(265.82002) +GNTC1(1113.07002) +GODW1(1157.57002) +GOLW1(483.07002) +GOSC1(760.57002) +GOYC1(241.57002) +GPEN2(2015.32002) +GPFO3(212.57002) +GPI(1061.32002) +GPRM8(1772.82002) +GQEC1(486.57002) +GRBO3(1554.32002) +GRCA3(1672.57002) +GRCM8(1621.32002) +GRCW1(1108.07002) +GRFW1(734.07002) +GRHC1(348.07002) +GRMO3(140.57002) +GRNQ2(1708.57002) +GRNW1(1081.57002) +GROQ2(1410.57002) +GRRC1(1461.82002) +GRSC1(1757.32002) +GRSI1(2191.32002) +GRVN2(2024.07002) +GRWA3(1651.32002) +GRZO3(1067.32002) +GSFO3(1336.07002) +GSGN2(1310.82002) +GSKI1(1861.57002) +GSNM8(1719.07002) +GSPC1(745.82002) +GSTC1(36.07002) +GSTO3(1554.32002) +GSVC1(770.07002) +GTCC1(167.32002) +GTMC1(1137.82002) +GTOC1(475.32002) +GTOM8(1825.82002) +GVPC1(1070.32002) +GVYC1(2090.07002) +GWFW1(1146.57002) +GWWW1(349.32002) +GZFC1(1009.07002) +HABC1(1530.57002) +HAIC1(607.07002) +HAMA3(639.82002) +HANM8(1377.82002) +HATC1(1158.32002) +HAWM8(1442.32002) +HAYI1(828.07002) +HBFI1(1452.32002) +HBRC1(1777.82002) +HCNC1(1646.57002) +HCPC1(442.57002) +HCTC1(349.32002) +HDLC1(944.32002) +HDLI1(1332.32002) +HDZC1(740.07002) +HEMI1(1238.32002) +HESC1(1060.07002) +HFLC1(1041.07002) +HGFW1(1036.82002) +HHAI1(1963.07002) +HHDW1(674.32002) 
+HHFO3(975.07002) +HHR(13.32002) +HIBW1(791.07002) +HIDC1(187.07002) +HIMN2(2010.07002) +HIO(145.82002) +HIRO3(1172.07002) +HJO(58.82002) +HKFW1(284.57002) +HLCC1(1023.07002) +HLKC1(1656.32002) +HLLC1(1773.07002) +HLMO3(879.57002) +HLTI1(2393.82002) +HMGC1(1545.57002) +HMS(255.82002) +HNAO3(677.82002) +HNBO3(898.32002) +HNDO3(898.32002) +HNEO3(734.32002) +HNFO3(968.32002) +HNGC1(856.57002) +HNGO3(734.32002) +HNHO3(968.32002) +HNIO3(968.32002) +HNJO3(1185.82002) +HNSW1(259.07002) +HNTC1(1770.07002) +HNYM8(1181.07002) +HOAC1(830.57002) +HODO3(485.82002) +HOHU1(1687.57002) +HOMI1(758.07002) +HONC1(357.57002) +HOOM8(1596.32002) +HOTM8(1151.32002) +HOTN2(1305.57002) +HOWO3(1421.07002) +HOXO3(485.82002) +HPDC1(524.57002) +HPEC1(220.07002) +HPFI1(2544.07002) +HPRC1(803.32002) +HPRO3(898.32002) +HPWO3(1365.07002) +HQM(44.57002) +HQSI1(1016.57002) +HRHW1(351.32002) +HRI(212.57002) +HRLO3(1512.82002) +HRWM8(1348.32002) +HRZC1(664.57002) +HSEC1(331.07002) +HSFO3(763.82002) +HSGC1(819.07002) +HSKO3(341.57002) +HSPC1(67.57002) +HSQC1(2740.07002) +HTRC1(1377.07002) +HTSC1(1022.32002) +HUFW1(300.07002) +HUGI1(1369.57002) +HUSQ1(896.82002) +HUZI1(1748.57002) +HVSA3(312.32002) +HWCI1(1637.57002) +HWD(103.32002) +HWKC1(377.07002) +HWRW1(389.07002) +HYFC1(1034.57002) +HYFO3(900.82002) +HYNI1(2486.07002) +HYSC1(1879.57002) +ICKC1(2047.32002) +ICPC1(2047.32002) +IDA(1494.82002) +IDAC1(818.57002) +IDKC1(1357.82002) +IDOC1(338.32002) +IDPC1(2203.32002) +IDWC1(1406.82002) +IGM(1229.82002) +ILLQ2(1445.32002) +IMHO3(1278.07002) +IMTW1(1231.07002) +IMWN2(1889.82002) +INDI1(1864.32002) +INTC1(1166.57002) +IPL(-25.67998) +IPLC1(38.32002) +IRFO3(1380.82002) +ISPQ1(896.57002) +ISWC1(484.82002) +IWLC1(1068.57002) +IZA(402.07002) +JAPN2(2033.32002) +JBGC1(508.57002) +JBLC1(434.57002) +JCUN2(2033.32002) +JEAN2(1075.07002) +JER(1144.57002) +JESN2(1130.32002) +JETM8(1021.82002) +JKPI1(1925.07002) +JLNQ2(826.07002) +JNLC1(661.32002) +JOJO3(961.57002) +JPRC1(1704.82002) +JRFO3(1395.32002) 
+JRHQ1(1699.32002) +JRMA3(1379.07002) +JRMI1(1081.32002) +JSDC1(810.32002) +JSNC1(1564.32002) +JSPN2(1723.57002) +JSPU1(1827.07002) +JTAC1(1461.07002) +JUFW1(205.57002) +JULC1(1065.82002) +JUMQ2(654.32002) +JWBC1(1297.32002) +KADW1(1063.82002) +KBFO3(1309.07002) +KBNC1(1290.82002) +KCFW1(640.32002) +KCPC1(701.57002) +KCYC1(286.57002) +KEDW1(1140.82002) +KEEO3(1425.57002) +KELC1(575.07002) +KESC1(363.57002) +KFAW1(840.82002) +KFLW1(840.82002) +KIMO3(733.57002) +KLS(195.57002) +KMFW1(632.32002) +KMRO3(1515.32002) +KNNC1(454.82002) +KNSN2(1484.32002) +KNWC1(1179.32002) +KOSW1(645.07002) +KRCI1(1881.07002) +KRCM8(1661.07002) +KRKC1(1640.57002) +KRNC1(1508.07002) +KRTC1(1457.57002) +KSPC1(2283.07002) +KTLC1(230.07002) +KTLW1(840.82002) +KTTC1(109.57002) +KUSW1(770.82002) +KYCN2(1758.32002) +L13(11.32002) +LADN2(2014.07002) +LAFC1(1670.07002) +LAFN2(552.32002) +LAGC1(25.07002) +LAGO3(1095.32002) +LAHC1(179.82002) +LALN2(552.32002) +LAS(692.07002) +LAX(47.07002) +LAYC1(596.32002) +LBBM8(1124.32002) +LBCO3(935.57002) +LBDC1(129.82002) +LBFO3(1413.07002) +LBIC1(8.07002) +LBRW1(470.32002) +LCBC1(35.82002) +LCFW1(1113.57002) +LCHN2(1818.57002) +LDOI1(2287.57002) +LDRC1(1432.07002) +LDWM8(1151.82002) +LEBC1(1355.07002) +LECW1(954.82002) +LEFW1(946.07002) +LEGC1(459.07002) +LEGW1(201.32002) +LENN2(2023.82002) +LESQ1(2054.32002) +LEXC1(313.57002) +LFDQ1(892.07002) +LFRM8(1132.57002) +LGB(2.32002) +LGD(1105.82002) +LGFO3(698.32002) +LGRC1(207.32002) +LGWN2(535.07002) +LHCA3(428.57002) +LIBM8(1118.57002) +LICC1(72.32002) +LIDW1(446.32002) +LIMQ1(1592.32002) +LINM8(1754.82002) +LINQR(919.57002) +LKCC1(410.07002) +LKCO3(1706.32002) +LKN(1704.82002) +LKNC1(1292.07002) +LKSO3(615.07002) +LKTI1(1369.57002) +LKWA3(1426.57002) +LLEC1(931.32002) +LLFW1(1055.82002) +LLJ(2024.07002) +LMCN2(1746.57002) +LMCO3(1680.82002) +LMDN2(1001.82002) +LMDO3(996.07002) +LMHM8(2112.32002) +LMLN2(2125.57002) +LMRM8(2148.82002) +LMRN2(1001.82002) +LMT(1346.82002) +LNBW1(353.82002) +LNCM8(1754.82002) 
+LOAC1(201.07002) +LOBC1(2497.82002) +LOFO3(259.07002) +LOIN2(1958.82002) +LOKC1(1494.57002) +LOL(1286.07002) +LONN2(1940.82002) +LOSC1(129.82002) +LOSO3(865.32002) +LPAC1(153.07002) +LPC(152.07002) +LPDC1(981.07002) +LPFI1(1852.57002) +LPOC1(624.82002) +LPOW1(865.32002) +LPRC1(620.32002) +LPRU1(1974.32002) +LPSI1(1735.57002) +LPSW1(1075.32002) +LPTC1(1507.82002) +LPWO3(509.32002) +LPZC1(639.07002) +LRCM8(1755.57002) +LRLC1(889.32002) +LRRM8(2199.82002) +LSFW1(954.82002) +LSGC1(312.82002) +LSHC1(735.07002) +LSLC1(327.57002) +LSNC1(1249.82002) +LSPC1(1715.57002) +LSRC1(150.07002) +LSTC1(1126.07002) +LSV(704.82002) +LTAI1(1554.57002) +LTHC1(1029.07002) +LTJC1(605.07002) +LTLC1(984.07002) +LTRC1(205.82002) +LUBM8(1436.32002) +LUFI1(1109.57002) +LUKO3(1197.57002) +LUNN2(2077.82002) +LVGQ1(1881.07002) +LVK(246.82002) +LVMC1(165.32002) +LVPN2(587.32002) +LVTC1(2722.57002) +LVYN2(1973.32002) +LWDC1(422.07002) +LWDI1(2604.32002) +LWNW1(1214.07002) +LWS(597.82002) +MADC1(1039.57002) +MAE(86.57002) +MAEC1(487.57002) +MAFC1(236.57002) +MAMC1(2671.82002) +MANM8(1812.57002) +MANW1(809.32002) +MAPC1(603.57002) +MARW1(824.32002) +MAYI1(1925.32002) +MBBC1(951.57002) +MBCA3(628.07002) +MBCC1(123.32002) +MBMW1(824.32002) +MBUC1(129.07002) +MCCC1(1347.07002) +MCDO3(1538.82002) +MCDW1(261.82002) +MCE(40.57002) +MCFC1(1017.57002) +MCGC1(274.32002) +MCGN2(2217.32002) +MCKI1(1136.07002) +MCPA3(1150.57002) +MCUC1(1081.57002) +MDAC1(222.57002) +MDDC1(939.57002) +MDFI1(1785.07002) +MDFO3(636.82002) +MDHC1(1317.82002) +MDLA3(1073.32002) +MDLI1(2400.07002) +MDMI1(1306.57002) +MDRW1(709.07002) +MEAA3(1087.32002) +MEEC1(71.82002) +MEFO3(1123.32002) +MEFW1(1219.82002) +MFDU1(1857.07002) +MFDW1(222.32002) +MFLC1(1031.07002) +MFR(531.82002) +MGCA3(628.07002) +MGFO3(1204.32002) +MGMI1(1857.82002) +MGSW1(872.07002) +MHBC1(93.32002) +MHEC1(502.32002) +MHLA3(1162.57002) +MHMC1(589.82002) +MHS(1579.32002) +MHSI1(1858.32002) +MHSW1(1033.07002) +MHV(854.07002) +MHWO3(1816.07002) +MHYC1(2135.57002) 
+MIAC1(1017.57002) +MIDI1(2081.82002) +MIDW1(709.07002) +MIGC1(157.57002) +MILW1(799.07002) +MIPW1(148.07002) +MISI1(966.07002) +MITW1(730.57002) +MJBN2(1472.57002) +MJCC1(875.07002) +MKBI1(2332.07002) +MKEC1(2179.57002) +MKZO3(1345.32002) +MLCC1(1154.32002) +MLFO3(565.57002) +MLGC1(1052.82002) +MLHN2(776.32002) +MLKI1(2356.57002) +MLLO3(256.57002) +MLNC1(1052.82002) +MLP(1392.82002) +MMCW1(824.32002) +MMFO3(708.57002) +MMIA3(402.07002) +MMKC1(1335.32002) +MMRO3(708.82002) +MMTA3(1073.32002) +MMTC1(2246.82002) +MMV(79.82002) +MMWA3(572.82002) +MNAC1(1603.32002) +MNCC1(1230.07002) +MNCN2(2014.07002) +MNGC1(1483.32002) +MNHC1(1819.57002) +MNLC1(1124.07002) +MNNA3(1347.82002) +MNRN2(2015.32002) +MNSI1(2367.32002) +MNTN2(1659.07002) +MOAN2(703.07002) +MOD(14.07002) +MOFI1(1656.57002) +MOGN2(1805.82002) +MOIC1(377.82002) +MOKI1(2017.07002) +MOLC1(541.32002) +MOMN2(2123.57002) +MORN2(1633.07002) +MORQ1(846.32002) +MOSI1(866.32002) +MOSO3(1423.32002) +MOUC1(903.32002) +MOWC1(1625.32002) +MPEC1(76.82002) +MPLO3(203.57002) +MPOC1(535.82002) +MPSW1(1021.32002) +MRDC1(1483.32002) +MRFO3(1228.32002) +MRGI1(2231.57002) +MRIC1(535.82002) +MRKI1(1647.32002) +MRLN2(1973.32002) +MRNC1(422.07002) +MRSC1(452.57002) +MRSO3(747.07002) +MRWA3(470.07002) +MRY(19.57002) +MRYM8(1764.57002) +MRYN2(1185.82002) +MSAC1(1579.32002) +MSCI1(878.82002) +MSEC1(244.07002) +MSFO3(1358.07002) +MSJC1(1107.32002) +MSLQ1(1013.07002) +MSO(1372.82002) +MSPM8(1311.07002) +MSQN2(855.07002) +MSRC1(1579.32002) +MSRU1(1816.32002) +MSVA3(572.82002) +MSYC1(1090.82002) +MTAW1(874.07002) +MTCW1(897.57002) +MTHI1(833.07002) +MTHN2(2139.82002) +MTHO3(1177.57002) +MTIC1(377.82002) +MTKM8(1922.32002) +MTMC1(188.07002) +MTMI1(1739.32002) +MTQC1(840.57002) +MTR(19.57002) +MTRO3(1187.32002) +MTSC1(2103.32002) +MTSN2(1341.82002) +MTTC1(2090.07002) +MTZC1(600.32002) +MUDC1(981.32002) +MUDW1(171.82002) +MULM8(2247.82002) +MUO(833.07002) +MVDC1(1233.07002) +MVDN2(1712.82002) +MVEW1(100.32002) +MVLC1(1376.82002) 
+MWH(392.82002) +MWSA3(783.07002) +MYF(76.82002) +MYL(1746.07002) +MYMA3(321.07002) +MYRC1(2228.07002) +MYRI1(1217.57002) +MYV(22.32002) +MZTC1(1625.32002) +NADC1(168.82002) +NAPC1(254.07002) +NBKO3(318.32002) +NBRC1(75.07002) +NCKC1(1016.07002) +NCLO3(1594.32002) +NCSW1(1115.07002) +NEFW1(766.57002) +NEIQ1(1161.57002) +NFEM8(1524.82002) +NFFI1(2566.57002) +NFFW1(213.82002) +NFJM8(1673.32002) +NFKC1(1544.57002) +NFL(1250.57002) +NFRC1(854.57002) +NFRO3(698.32002) +NHPC1(446.57002) +NHRU1(1995.32002) +NID(850.07002) +NINM8(1348.82002) +NIPC1(384.82002) +NISW1(944.82002) +NKSW1(61.07002) +NKX(76.82002) +NLC(50.57002) +NLSC1(791.57002) +NLSN2(575.82002) +NMPI1(809.82002) +NMSC1(342.57002) +NOIM8(1320.82002) +NPFO3(798.07002) +NPXC1(1180.57002) +NRAC1(100.82002) +NRDC1(1031.82002) +NRKW1(173.57002) +NTCC1(562.82002) +NTDN2(1618.82002) +NTPO3(1709.57002) +NTRC1(1861.82002) +NUCI1(1300.32002) +NUQ(157.07002) +NUW(0.82002) +NVRM8(1714.07002) +NWRA3(901.07002) +NWRC1(27.82002) +NXP(715.57002) +NYAN2(1873.57002) +NYL(166.07002) +NYSO3(717.32002) +NZAC1(1279.32002) +NZCM8(1965.32002) +OAGW1(718.07002) +OAK(29.07002) +OAMC1(31.57002) +OASN2(1923.82002) +OBRC1(532.57002) +OCFW1(1255.32002) +OCHO3(1167.32002) +OCMO3(1415.07002) +OCNC1(63.07002) +OCTC1(259.07002) +OCWO3(1408.57002) +ODBN2(926.07002) +ODLC1(28.82002) +ODSW1(502.82002) +OGD(1454.82002) +OGDC1(949.07002) +OGOC1(616.07002) +OGVC1(1180.57002) +OHOI1(2151.32002) +OICC1(1010.07002) +OITC1(417.57002) +OJAC1(561.32002) +OJIC1(561.32002) +OKB(63.07002) +OKFO3(1515.57002) +OKNC1(1194.82002) +OKPC1(354.32002) +OLDQ1(1015.82002) +OLNM8(1374.32002) +OMFO3(613.07002) +OMFW1(1016.82002) +OMK(792.57002) +OMTC1(828.82002) +OMWW1(1021.32002) +ONCC1(2046.07002) +ONO(708.57002) +ONOC1(120.82002) +ONSC1(823.57002) +ONT(559.07002) +ONYC1(1312.32002) +OOFO3(1648.57002) +OORC1(1651.57002) +OPCA3(532.57002) +OPLC1(875.07002) +ORCO3(72.82002) +ORDC1(266.82002) +ORIC1(96.07002) +ORWN2(1356.57002) +OTOW1(313.82002) +OTTC1(119.07002) 
+OURC1(830.82002) +OVE(96.82002) +OVNM8(1549.07002) +OVRC1(2064.07002) +OWDC1(133.32002) +OWFO3(1061.07002) +OWNC1(2132.07002) +OWYN2(1817.07002) +OXR(58.32002) +OXSI1(1699.82002) +OYFO3(1607.57002) +P68(1960.07002) +P69(1202.57002) +PACW1(1036.82002) +PAE(39.82002) +PAFO3(594.32002) +PAHN2(1302.82002) +PALW1(446.57002) +PAMC1(937.32002) +PANN2(1759.32002) +PASQ1(1977.32002) +PBCC1(1310.32002) +PBFW1(100.32002) +PBUI1(1571.32002) +PCEC1(434.82002) +PCIC1(476.57002) +PCKC1(1277.57002) +PCKI1(1751.82002) +PCLC1(410.07002) +PCON2(1698.57002) +PCQC1(671.82002) +PCRW1(1591.82002) +PCYO3(1535.82002) +PDEC1(508.57002) +PDGC1(457.07002) +PDT(411.07002) +PDX(87.57002) +PEAC1(348.57002) +PEFO3(291.57002) +PEFW1(974.07002) +PELQR(1600.32002) +PEOW1(976.82002) +PEPC1(2030.57002) +PESW1(961.57002) +PFHC1(1397.57002) +PFRC1(278.07002) +PFTC1(414.57002) +PGRC1(600.32002) +PHGM8(1989.32002) +PHRC1(955.32002) +PHX(368.32002) +PIBC1(162.57002) +PIBN2(587.32002) +PICM8(1771.07002) +PIDC1(1031.82002) +PIEC1(1624.32002) +PIEI1(1096.57002) +PIFC1(1362.82002) +PIFW1(977.82002) +PIH(1370.57002) +PIHC1(982.57002) +PION2(1798.82002) +PIPA3(1543.82002) +PIRI1(1096.57002) +PISC1(80.57002) +PITC1(789.82002) +PIVC1(1426.07002) +PKCC1(680.07002) +PKFC1(537.07002) +PKFO3(819.32002) +PKSQ1(1594.57002) +PLAM8(1179.32002) +PLCC1(433.32002) +PLEC1(165.07002) +PLFI1(1057.32002) +PLIC1(877.07002) +PLKI1(1151.57002) +PLLC1(1530.57002) +PLMC1(29.32002) +PLRC1(924.82002) +PLTC1(307.07002) +PLVM8(1337.07002) +PMD(950.57002) +PMFW1(966.07002) +PMNM8(1294.57002) +PNCC1(2143.57002) +PNCQR(1110.32002) +PNFI1(1419.82002) +PNGO3(485.82002) +PNKW1(935.32002) +PNRI1(1034.07002) +PNTM8(1515.32002) +POEM8(1519.32002) +POLM8(1514.57002) +POMC1(389.57002) +PORQ1(1387.57002) +POSC1(1823.82002) +POTC1(762.32002) +POTI1(874.32002) +POWC1(213.82002) +POWO3(502.07002) +PPDN2(692.07002) +PPHC1(1055.32002) +PPPC1(842.82002) +PPRC1(442.57002) +PPRW1(1356.07002) +PPSC1(307.57002) +PRAI1(1557.82002) +PRB(350.82002) 
+PRBC1(350.82002) +PRBQ1(1576.07002) +PRC(1651.32002) +PRCI1(1175.07002) +PRCO3(1473.32002) +PRDM8(1302.82002) +PRFO3(741.07002) +PRGC1(1675.32002) +PRHC1(461.07002) +PRIQ1(1259.07002) +PRKA3(230.32002) +PRLI1(1820.32002) +PRMC1(937.32002) +PROO3(875.32002) +PRPC1(828.82002) +PRSN2(2211.57002) +PSAC1(291.82002) +PSC(182.07002) +PSCC1(2163.07002) +PSP(794.57002) +PSPC1(1721.57002) +PSQC1(258.32002) +PSRC1(2097.82002) +PSTA3(1651.32002) +PSTC1(1748.32002) +PSTM8(1481.07002) +PTEC1(231.57002) +PTFO3(1498.32002) +PTHC1(1095.07002) +PTHW1(1409.82002) +PTNM8(2195.32002) +PTPC1(19.57002) +PTV(193.82002) +PUGC1(119.07002) +PUW(835.82002) +PVRO3(1027.82002) +PWYC1(213.82002) +PYFO3(1003.32002) +PYLC1(1131.07002) +PYNC1(1193.32002) +PYPC1(566.32002) +PYTI1(729.32002) +QBAA3(1828.07002) +QBMA3(1816.82002) +QBRA3(1443.82002) +QBYQ2(1429.57002) +QCAC1(58.32002) +QCCO3(737.32002) +QCKA3(1214.57002) +QCNW1(289.07002) +QDPA3(2360.07002) +QFSA3(1933.57002) +QGDA3(1326.57002) +QGSA3(1711.57002) +QHAA3(351.57002) +QHBA3(1070.07002) +QHQA3(726.57002) +QHUA3(1535.57002) +QISA3(1651.32002) +QLGA3(1715.07002) +QMBA3(1292.32002) +QMLA3(1715.07002) +QMMA3(1291.57002) +QNFA3(1715.07002) +QNYC1(1504.57002) +QOKA3(1282.82002) +QPFO3(569.07002) +QRMO3(1703.32002) +QRTA3(1572.32002) +QSPA3(1214.57002) +QSTA3(1042.57002) +QTCW1(566.07002) +QTUA3(1863.82002) +QTWA3(1466.32002) +QTZC1(1234.57002) +QUEQ1(925.07002) +QUPA3(628.07002) +QWSA3(2150.82002) +QYJA3(1641.32002) +QYRC1(1504.57002) +RAL(309.57002) +RAWN2(1395.07002) +RAYQ1(956.82002) +RBG(319.32002) +RBL(108.82002) +RBTN2(1664.82002) +RBVN2(2048.57002) +RBYC1(370.07002) +RBYM8(1907.82002) +RCCW1(193.32002) +RCEC1(2297.32002) +RCFC1(47.57002) +RCHC1(846.82002) +RCPC1(1286.82002) +RCRO3(1587.57002) +RCSO3(1590.57002) +RDD(155.82002) +RDKI1(2303.32002) +RDLO3(478.07002) +RDM(929.07002) +RDOC1(0.32002) +RDVC1(667.32002) +REDO3(1123.07002) +REEO3(83.82002) +RENI1(1220.57002) +REO(1261.57002) +RFCN2(653.57002) +RFSC1(363.57002) +RFTI1(1347.82002) 
+RGTC1(984.07002) +RIV(457.07002) +RJSC1(395.32002) +RKBC1(277.07002) +RKHO3(287.32002) +RKPM8(1992.07002) +RLFO3(1389.57002) +RLGW1(1181.57002) +RLKC1(990.07002) +RLKN2(2027.57002) +RLYO3(1393.32002) +RMFO3(110.57002) +RMNC1(475.32002) +RMTC1(1179.32002) +RNDC1(1477.07002) +RNDN2(1723.32002) +RNFO3(1485.82002) +RNM(475.32002) +RNO(1777.32002) +RNT(54.82002) +ROCI1(1315.82002) +ROLC1(712.07002) +RONM8(1080.32002) +RORO3(1281.07002) +ROSQ1(762.57002) +ROVC1(1031.82002) +RPSA3(171.32002) +RRAC1(155.82002) +RRFI1(1704.57002) +RRKN2(1305.07002) +RRMC1(1592.32002) +RROO3(1333.82002) +RRRC1(520.57002) +RSBU1(1657.32002) +RSCN2(1826.57002) +RSFW1(159.82002) +RSHC1(1497.07002) +RSPC1(508.07002) +RTFO3(1374.57002) +RTHN2(2138.57002) +RTLC1(1440.07002) +RUBC1(2136.07002) +RUSC1(1113.32002) +RVDC1(1716.82002) +RVYC1(324.32002) +RWCC1(111.32002) +RWDN2(575.82002) +RXFO3(1092.07002) +RYNC1(549.07002) +RZVW1(562.82002) +SABC1(2896.07002) +SAC(-4.42998) +SAFO3(1148.32002) +SAHN2(575.82002) +SAN(56.82002) +SAPC1(-5.92998) +SARC1(75.07002) +SAUC1(558.32002) +SAYC1(410.07002) +SBA(142.57002) +SBFO3(880.32002) +SBIC1(0.070023) +SBKW1(76.82002) +SBMW1(677.57002) +SBP(318.82002) +SBPC1(318.82002) +SBTC1(1166.82002) +SBVC1(244.07002) +SBYC1(745.82002) +SCAQ1(2022.82002) +SCCN2(575.82002) +SCCQ1(2190.07002) +SCDO3(190.82002) +SCFI1(1328.57002) +SCFO3(1481.32002) +SCHC1(313.57002) +SCHI1(2118.82002) +SCHO3(1417.32002) +SCK(-4.17998) +SCKC1(0.32002) +SCOO3(190.82002) +SCRN2(1851.57002) +SCSC1(508.32002) +SCTI1(1774.82002) +SCWW1(278.07002) +SDB(1131.07002) +SDDC1(363.82002) +SDFO3(1570.82002) +SDLC1(796.82002) +SDM(179.82002) +SDMM8(1981.57002) +SDMO3(393.07002) +SDMW1(315.07002) +SDOC1(416.07002) +SDRC1(1128.82002) +SEA(76.82002) +SENW1(1094.82002) +SERC1(1940.57002) +SETC1(973.07002) +SEW(37.82002) +SEXC1(208.57002) +SEYC1(1762.32002) +SFBC1(1748.32002) +SFBO3(495.57002) +SFF(705.32002) +SFKC1(532.57002) +SFKO3(477.32002) +SFNW1(205.82002) +SFO(24.07002) +SFOC1(2.82002) +SFXC1(64.82002) 
+SGEC1(438.07002) +SGFO3(879.57002) +SGGU1(1359.32002) +SGHQ1(1781.82002) +SGNW1(1145.07002) +SGPC1(1180.82002) +SGQC1(396.07002) +SGRU1(1101.82002) +SGUU1(1050.07002) +SGX(258.32002) +SGYC1(962.32002) +SHDC1(364.82002) +SHFO3(1392.82002) +SHHC1(653.57002) +SHIC1(801.57002) +SHLQ2(1300.57002) +SHMC1(1483.32002) +SHN(70.57002) +SHNC1(669.82002) +SHPW1(817.57002) +SHQC1(746.82002) +SHRO3(1182.82002) +SHRQ1(1725.82002) +SHVC1(1642.82002) +SHWI1(1061.57002) +SIAN2(1673.32002) +SIDW1(638.57002) +SIGU1(2386.57002) +SIY(970.32002) +SJBC1(207.07002) +SJC(198.57002) +SKAM8(1843.07002) +SKFI1(1924.82002) +SKKW1(1143.82002) +SKLA3(1611.07002) +SKMW1(200.07002) +SKNC1(551.82002) +SKOQ1(2292.82002) +SKOW1(663.82002) +SKYW1(982.82002) +SLAC1(739.32002) +SLE(66.57002) +SLFC1(735.07002) +SLFO3(1415.07002) +SLKO3(1701.57002) +SLMC1(2231.32002) +SLON2(704.82002) +SLPC1(1020.82002) +SLRC1(175.82002) +SLTC1(1247.07002) +SLTO3(1370.32002) +SLVO3(1715.32002) +SLWC1(23.07002) +SMBA3(691.32002) +SMBC1(1007.32002) +SMDC1(1949.07002) +SMF(-5.92998) +SMFO3(1762.07002) +SMGC1(318.82002) +SMIO3(263.57002) +SMLO3(1565.07002) +SMN(1853.32002) +SMNN2(1422.07002) +SMNQ2(540.82002) +SMO(47.07002) +SMOC1(23.07002) +SMPC1(307.32002) +SMPN2(926.07002) +SMPW1(1063.82002) +SMRC1(364.82002) +SMRO3(1701.57002) +SMRQ1(1080.32002) +SMSC1(1148.32002) +SMTC1(10.57002) +SMTI1(1695.07002) +SMTM8(1580.82002) +SMVC1(354.32002) +SMWN2(1422.07002) +SMX(103.57002) +SMYI1(1853.32002) +SNA(100.82002) +SNFW1(516.07002) +SNS(168.82002) +SNSI1(1309.82002) +SNT(2352.07002) +SNWC1(1140.32002) +SOAC1(904.07002) +SOAO3(898.32002) +SOBO3(898.32002) +SOCO3(898.32002) +SODO3(898.32002) +SOEO3(898.32002) +SOFO3(898.32002) +SOGO3(898.32002) +SOUI1(1054.57002) +SOX(306.57002) +SPAI1(1559.57002) +SPB(127.57002) +SPCC1(2179.57002) +SPCW1(668.57002) +SPEW1(246.57002) +SPGC1(1728.32002) +SPGN2(1616.07002) +SPGW1(1299.57002) +SPLW1(1121.32002) +SPMN2(1956.82002) +SPMW1(1071.57002) +SPNQ1(1798.82002) +SPSO3(1200.57002) 
+SPWC1(1020.32002) +SPXC1(142.57002) +SQFO3(1127.32002) +SQLW1(473.07002) +SQPM8(1356.57002) +SQPW1(1036.32002) +SQSC1(932.32002) +SRAC1(2722.57002) +SRBN2(1001.82002) +SRBW1(476.07002) +SRCQ1(1787.32002) +SRFI1(1497.57002) +SRGM8(1348.07002) +SRIC1(24.32002) +SRMO3(631.07002) +SRTC1(712.32002) +SRUC1(398.32002) +SRXC1(607.07002) +SRYM8(1764.57002) +SSCN2(2178.57002) +SSPW1(927.07002) +STAC1(100.82002) +STAM8(1480.57002) +STAN2(2028.32002) +STCQ1(1379.32002) +STDQ1(882.82002) +STEC1(176.82002) +STFC1(624.07002) +STFO3(1623.82002) +STGM8(1348.07002) +STHC1(275.32002) +STKM8(1499.57002) +STMN2(1994.07002) +STMQR(1764.57002) +STNI1(2352.07002) +STPC1(1949.07002) +STQC1(1651.57002) +STRO3(1525.07002) +STRQ1(932.32002) +STRW1(1555.32002) +STS(206.57002) +STSM8(1263.07002) +STTM8(1454.07002) +STUC1(1748.32002) +STVM8(1521.32002) +STWO3(1598.57002) +STYC1(1105.32002) +SUAM8(1931.32002) +SUMM8(1771.07002) +SUNN2(1812.57002) +SUNQ1(2034.07002) +SVCO3(1629.57002) +SVFI1(1654.07002) +SVFO3(584.32002) +SVMO3(1556.57002) +SVNW1(1236.32002) +SVPI1(1787.57002) +SVRQ2(1122.82002) +SWAC1(697.82002) +SWBC1(1342.82002) +SWCW1(920.32002) +SWDC1(11.32002) +SWIC1(161.32002) +SWLC1(118.07002) +SWNN2(653.57002) +SWPI1(2391.82002) +SXT(599.07002) +SYDC1(354.32002) +SYNO3(229.32002) +SYSC1(999.82002) +SZKQ2(947.32002) +TABC1(351.57002) +TACA3(131.32002) +TACN2(1975.57002) +TANC1(841.32002) +TAPC1(1359.82002) +TAYI1(2006.32002) +TBRC1(1455.07002) +TBSU1(2418.32002) +TCAC1(944.32002) +TCFI1(1440.07002) +TCFO3(583.57002) +TCFW1(1023.07002) +TCKC1(628.32002) +TCLC1(671.82002) +TCM(36.07002) +TCMO3(1430.82002) +TCNC1(442.57002) +TCRA3(1343.57002) +TEKC1(924.82002) +TEKW1(834.07002) +TENQR(1728.82002) +TEPM8(2006.82002) +TERC1(261.07002) +TEXN2(1613.07002) +TFRU1(1778.07002) +TGCC1(2091.82002) +TGFI1(1741.57002) +THAM8(1298.57002) +THBW1(1494.32002) +THDC1(267.32002) +THFI1(2051.07002) +THLN2(1777.32002) +THMI1(1956.57002) +THRQ1(875.07002) +TIGC1(973.32002) +TIJC1(37.82002) +TILO3(711.32002) 
+TILQ1(2241.57002) +TILW1(473.07002) +TIW(28.82002) +TIXC1(38.32002) +TLHC1(903.32002) +TLMO3(300.07002) +TLRW1(297.82002) +TLYO3(1027.82002) +TMCM8(1866.07002) +TMFO3(1600.57002) +TMKO3(300.07002) +TMNC1(1306.57002) +TMRO3(1355.07002) +TNRC1(203.82002) +TOFO3(1349.82002) +TOHW1(1222.57002) +TOPI1(1758.57002) +TORC1(295.82002) +TPEO3(1482.07002) +TPFW1(973.07002) +TPGC1(311.07002) +TPH(1745.07002) +TPHC1(1306.57002) +TQIN2(2066.82002) +TRCM8(1184.82002) +TRFO3(513.57002) +TRFW1(1121.32002) +TRGW1(1060.82002) +TRHC1(944.32002) +TRII1(1680.07002) +TRM(238.32002) +TRMC1(590.07002) +TRMI1(1784.82002) +TRNW1(720.82002) +TROM8(1218.07002) +TRON2(926.32002) +TRTC1(2086.07002) +TRUC1(2108.32002) +TRVQ1(919.07002) +TSCC1(502.32002) +TSDC1(1357.82002) +TSHC1(1238.32002) +TSOW1(1088.07002) +TSP(1359.82002) +TTD(138.82002) +TTRC1(338.57002) +TUFO3(1224.32002) +TUSN2(1975.57002) +TVL(2128.57002) +TVWU1(1660.32002) +TWBI1(1064.82002) +TWCC1(1359.82002) +TWDC1(402.07002) +TWDN2(855.07002) +TWF(1307.57002) +TWLM8(1864.57002) +TWMC1(485.57002) +TWRW1(720.82002) +TYBO3(1545.07002) +TYEC1(940.57002) +TYLO3(1457.82002) +U24(1439.82002) +UAO(55.07002) +UCCC1(1048.57002) +UCLC1(167.32002) +UCRC1(363.32002) +UDWC1(930.32002) +UFDN2(926.32002) +UHLC1(1564.32002) +UIL(79.82002) +UKIO3(1290.57002) +ULVN2(1044.32002) +UMCQ2(1488.57002) +UMNC1(312.82002) +UMTO3(175.07002) +UNYO3(1447.82002) +UPSO3(780.82002) +UPWW1(1060.82002) +USEA3(906.57002) +VABC1(1653.57002) +VAMA3(609.07002) +VAQC1(231.57002) +VBPC1(1949.07002) +VCB(67.82002) +VCFO3(283.57002) +VDBC1(131.07002) +VDBN2(755.82002) +VDCA3(1423.32002) +VDPA3(1438.57002) +VENO3(373.57002) +VENU1(1796.57002) +VGAC1(2548.07002) +VGRC1(1054.07002) +VGT(755.82002) +VICC1(463.82002) +VICQ1(1745.82002) +VIOC1(1624.57002) +VIS(61.07002) +VKOC1(2950.57002) +VLCC1(563.32002) +VLKC1(588.07002) +VLYC1(1235.57002) +VNCU1(1903.32002) +VNNI1(2462.82002) +VNOC1(240.57002) +VNY(312.82002) +VOFN2(597.82002) +VSTC1(167.07002) +VTUC1(208.57002) +VUO(87.57002) 
+VVDN2(575.82002) +WAGI1(1912.32002) +WAHI1(850.57002) +WALC1(2288.07002) +WARO3(1291.32002) +WASC1(82.57002) +WATC1(1306.57002) +WATQ1(1796.07002) +WBA(1973.82002) +WBRI1(1641.57002) +WCGU1(1657.07002) +WCLN2(1851.57002) +WCWO3(898.32002) +WDEC1(311.07002) +WEAC1(975.07002) +WEDC1(96.07002) +WEEC1(1230.32002) +WEFI1(1396.07002) +WEHC1(561.32002) +WEIN2(718.07002) +WESC1(304.82002) +WFHC1(1379.82002) +WFSW1(162.32002) +WGRM8(1333.82002) +WGVN2(1532.82002) +WGWC1(1031.82002) +WHDI1(1700.82002) +WHHM8(1656.57002) +WHLA3(2070.82002) +WHSW1(872.57002) +WHTW1(1465.07002) +WIKA3(983.57002) +WIMQ1(946.07002) +WIWO3(1423.07002) +WJF(753.32002) +WKFO3(320.57002) +WKPA3(735.07002) +WLBC1(1010.07002) +WLCI1(2005.57002) +WLDN2(896.82002) +WLFQ2(990.32002) +WLKC1(-0.67998) +WLLM8(1381.32002) +WLLO3(959.57002) +WLMO3(741.07002) +WLYC1(442.57002) +WMC(1532.82002) +WMFO3(1044.32002) +WMSC1(856.82002) +WNDN2(831.07002) +WODI1(1614.07002) +WODM8(1915.07002) +WPKO3(770.32002) +WPOC1(344.57002) +WPRW1(680.82002) +WRIC1(1347.82002) +WRKC1(1359.82002) +WRMM8(2064.07002) +WRNQ1(936.32002) +WRPM8(1427.57002) +WRRC1(320.07002) +WRRU1(1359.32002) +WRSC1(450.82002) +WRSM8(2222.07002) +WRTN2(994.57002) +WSBO3(539.57002) +WSDC1(377.07002) +WSFM8(1925.32002) +WSFO3(1102.57002) +WSHC1(2118.57002) +WSJN2(926.32002) +WSPC1(512.57002) +WSRO3(737.57002) +WSTQ1(1803.32002) +WTFO3(1462.57002) +WTHC1(145.82002) +WTPC1(898.82002) +WTRC1(124.82002) +WTSO3(300.07002) +WUPA3(983.57002) +WVAA3(1488.82002) +WVI(149.32002) +WVTC1(2063.57002) +WWAC1(1016.07002) +WWDC1(1636.07002) +WWNC1(1729.32002) +WWRC1(1995.07002) +WYDW1(389.07002) +WYTC1(733.32002) +XXXC1(2379.07002) +YALW1(434.07002) +YBCA3(1672.57002) +YBGA3(1672.57002) +YBLC1(216.07002) +YCGN2(1345.57002) +YCPA3(1379.07002) +YCPC1(1335.32002) +YCVA3(1535.07002) +YCWA3(1659.07002) +YEFO3(695.32002) +YFCA3(1672.57002) +YFFI1(2410.82002) +YKA(844.07002) +YKAM8(1409.57002) +YKM(409.82002) +YLCA3(1659.07002) +YLPO3(1514.57002) +YMNA3(1403.57002) 
+YNFO3(273.32002) +YOBC1(1198.07002) +YPWA3(1651.32002) +YRKC1(404.07002) +YRV(1352.82002) +YSAC1(999.82002) +YSMA3(1672.57002) +YSUA3(1897.32002) +YTBA3(1672.57002) +YTPA3(1672.57002) +YUCA3(879.57002) +YUGA3(1672.57002) +YVR(1.82002) +YVVC1(1453.82002) +YWAC1(1729.32002) +YWPA3(1640.82002) +YWSA3(1672.57002) +YXC(1142.07002) +YXX(60.07002) +YYC(1065.07002) +YYF(1119.82002) +YYVC1(2450.32002) +ZBHA3(806.07002) +ZENC1(753.82002) +ZFWA3(744.82002) +ZIOU1(1600.57002) +ZONM8(1151.82002) diff --git a/internal/test_unit/config/SID_CONUS_ADPUPA_ELEV.txt b/internal/test_unit/config/SID_CONUS_ADPUPA_ELEV.txt new file mode 100644 index 0000000000..42eb4fd1fb --- /dev/null +++ b/internal/test_unit/config/SID_CONUS_ADPUPA_ELEV.txt @@ -0,0 +1,98 @@ +SID_CONUS_ADPUPA_ELEV +72206(4.07002) +72520(316.07002) +72597(531.82002) +72518(137.82002) +71600(0.070023) +71722(214.57002) +72764(541.57002) +71836(14.82002) +71908(729.57002) +71867(261.32002) +71913(0.070023) +71945(374.82002) +72250(4.82002) +72208(6.07002) +72632(307.07002) +74001(204.32002) +72403(115.82002) +72572(1486.57002) +72662(1025.82002) +72712(182.82002) +71845(355.57002) +76644(8.82002) +76595(2.32002) +72201(0.070023) +76225(1572.07002) +72230(169.57002) +72388(926.32002) +72318(640.32002) +72440(374.32002) +72528(201.82002) +72645(222.57002) +72776(1057.82002) +71934(220.07002) +72393(57.32002) +72305(2.82002) +72493(131.32002) +72456(300.32002) +72476(1638.57002) +72426(308.57002) +72501(17.57002) +72649(277.57002) +72786(670.57002) +72261(324.32002) +72364(1244.82002) +72340(103.57002) +72317(251.07002) +72451(770.32002) +74002(12.32002) +72562(887.82002) +72681(970.57002) +72694(66.57002) +72634(400.57002) +76743(12.82002) +72558(363.07002) +72797(79.82002) +71203(868.57002) +70398(22.57002) +78073(0.070023) +72210(3.82002) +72251(12.57002) +72240(2.07002) +72265(855.82002) +72327(169.82002) +72768(688.07002) +74455(210.82002) +71603(11.57002) +72659(395.82002) +71811(3.07002) +71816(74.82002) 
+76458(12.82002) +72202(1.57002) +72274(883.32002) +72235(103.07002) +78016(0.070023) +72402(3.32002) +71815(38.57002) +72747(346.82002) +71119(718.57002) +78526(42.07002) +72248(60.82002) +72215(263.32002) +72357(359.82002) +74560(189.07002) +74494(1.82002) +74389(77.32002) +71906(62.82002) +ASDE04(0.070023) +76394(433.07002) +72233(7.07002) +72214(25.82002) +72249(190.57002) +72293(76.82002) +74004(136.07002) +72363(1078.82002) +71109(97.82002) +71823(334.57002) +71907(5.32002) diff --git a/internal/test_unit/config/STATAnalysisConfig_point_stat b/internal/test_unit/config/STATAnalysisConfig_point_stat index 4f751b32dc..db7ab415a6 100644 --- a/internal/test_unit/config/STATAnalysisConfig_point_stat +++ b/internal/test_unit/config/STATAnalysisConfig_point_stat @@ -103,7 +103,11 @@ jobs = [ -dump_row ${OUTPUT_DIR}/CONFIG_POINT_STAT_filter_mpr_sid.stat", "-job filter -fcst_var TMP -fcst_lev Z2 \ -line_type MPR -column_thresh abs(fcst-obs) >5 \ - -dump_row ${OUTPUT_DIR}/CONFIG_POINT_STAT_filter_mpr_fcst_minus_obs.stat" + -dump_row ${OUTPUT_DIR}/CONFIG_POINT_STAT_filter_mpr_fcst_minus_obs.stat", + "-job aggregate -line_type VL1L2 -by FCST_LEV \ + -out_stat ${OUTPUT_DIR}/CONFIG_POINT_STAT_agg_vl1l2.stat", + "-job aggregate_stat -line_type VL1L2 -out_line_type VCNT -by FCST_LEV \ + -out_stat ${OUTPUT_DIR}/CONFIG_POINT_STAT_agg_stat_vl1l2_to_vcnt.stat" ]; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/SeriesAnalysisConfig_climo b/internal/test_unit/config/SeriesAnalysisConfig_climo index 196c38dafa..f19bac7a20 100644 --- a/internal/test_unit/config/SeriesAnalysisConfig_climo +++ b/internal/test_unit/config/SeriesAnalysisConfig_climo @@ -37,7 +37,7 @@ regrid = { censor_thresh = []; censor_val = []; -cat_thresh = [ >CDP25, >CDP50, >CDP75 ]; +cat_thresh = [ >OCDP25, >OCDP50, >OCDP75 ]; cnt_thresh = [ NA ]; cnt_logic = UNION; @@ -50,7 +50,7 @@ fcst = { ]; } obs = { - cnt_thresh = [ NA, 
>CDP25&&OCDP25&&=30 -rirw_exact FALSE -dump_row ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_AHWI_ri.tcst", - "-job filter -amodel AHWI -rirw_track BDECK -rirw_thresh <=-30 -rirw_exact TRUE -dump_row ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_AHWI_rw.tcst", + "-job filter -amodel AHWI -rirw_track BDECK -rirw_thresh >=30 -rirw_exact FALSE -set_hdr DESC RI -dump_row ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_AHWI_ri.tcst", + "-job filter -amodel AHWI -rirw_track BDECK -rirw_thresh <=-30 -rirw_exact TRUE -set_hdr DESC RW -dump_row ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_AHWI_rw.tcst", "-job rirw -rirw_window 00 -rirw_thresh <=-15 -out_line_type CTC,CTS,MPR", "-job rirw -rirw_window 12 -rirw_thresh <=-15 -out_line_type CTC,CTS,MPR", - "-job rirw -rirw_window 12 -rirw_thresh <=-15 -out_line_type CTC,CTS -by amodel -out_stat ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_rirw.stat" + "-job rirw -rirw_window 12 -rirw_thresh <=-15 -out_line_type CTC,CTS -by amodel -set_hdr DESC AMODEL -out_stat ${MET_TEST_OUTPUT}/tc_stat/ALAL2010_rirw.stat" ]; // diff --git a/internal/test_unit/config/TCStatConfig_PROBRIRW b/internal/test_unit/config/TCStatConfig_PROBRIRW index f8645a1e1e..679b681e59 100644 --- a/internal/test_unit/config/TCStatConfig_PROBRIRW +++ b/internal/test_unit/config/TCStatConfig_PROBRIRW @@ -205,7 +205,7 @@ out_valid_mask = ""; // Array of TCStat analysis jobs to be performed on the filtered data // jobs = [ - "-job filter -dump_row ${MET_TEST_OUTPUT}/tc_stat/PROBRIRW_filter_ee.tcst", + "-job filter -set_hdr DESC EVENT_EQUAL -dump_row ${MET_TEST_OUTPUT}/tc_stat/PROBRIRW_filter_ee.tcst", "-job summary -column TK_ERR -by AMODEL -probrirw_thresh 30 -column_thresh PROBRIRW_PROB >0 -dump_row ${MET_TEST_OUTPUT}/tc_stat/PROBRIRW_summary_tk_err.tcst", "-job probrirw -column_thresh RIRW_WINDOW ==24 -by AMODEL -probrirw_thresh 30 -probrirw_bdelta_thresh >=30 -out_line_type PCT,PSTD,PRC,PJC -dump_row ${MET_TEST_OUTPUT}/tc_stat/PROBRIRW_probrirw.tcst", "-job summary -column TK_ERR -by AMODEL,LEAD -amodel 
GPMI,GPMN -event_equal TRUE", diff --git a/internal/test_unit/config/ref_config/GridStatConfig_03h b/internal/test_unit/config/ref_config/GridStatConfig_03h index 0a3daba7a1..278acf3be0 100644 --- a/internal/test_unit/config/ref_config/GridStatConfig_03h +++ b/internal/test_unit/config/ref_config/GridStatConfig_03h @@ -198,8 +198,9 @@ nc_pairs_flag = FALSE; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${MODEL}_F${FCST_TIME}_03h"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${MODEL}_F${FCST_TIME}_03h"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/ref_config/GridStatConfig_24h b/internal/test_unit/config/ref_config/GridStatConfig_24h index 8804e2d708..2d0c263303 100644 --- a/internal/test_unit/config/ref_config/GridStatConfig_24h +++ b/internal/test_unit/config/ref_config/GridStatConfig_24h @@ -198,8 +198,9 @@ nc_pairs_flag = FALSE; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "${MODEL}_F${FCST_TIME}_24h"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "${MODEL}_F${FCST_TIME}_24h"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/ref_config/PointStatConfig_ADPUPA b/internal/test_unit/config/ref_config/PointStatConfig_ADPUPA index 226be944df..2d98af4d87 100644 --- a/internal/test_unit/config/ref_config/PointStatConfig_ADPUPA +++ b/internal/test_unit/config/ref_config/PointStatConfig_ADPUPA @@ -182,8 +182,11 @@ output_flag = { duplicate_flag = NONE; rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = "${MODEL}_F${FCST_TIME}_ADPUPA"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${MODEL}_F${FCST_TIME}_ADPUPA"; 
+version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/ref_config/PointStatConfig_ONLYSF b/internal/test_unit/config/ref_config/PointStatConfig_ONLYSF index 9667c95b10..3f8a2a6367 100644 --- a/internal/test_unit/config/ref_config/PointStatConfig_ONLYSF +++ b/internal/test_unit/config/ref_config/PointStatConfig_ONLYSF @@ -192,8 +192,11 @@ output_flag = { duplicate_flag = NONE; rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = "${MODEL}_F${FCST_TIME}_ONLYSF"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${MODEL}_F${FCST_TIME}_ONLYSF"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/config/ref_config/PointStatConfig_WINDS b/internal/test_unit/config/ref_config/PointStatConfig_WINDS index 87fe9c01bd..ffb0a2f06f 100644 --- a/internal/test_unit/config/ref_config/PointStatConfig_WINDS +++ b/internal/test_unit/config/ref_config/PointStatConfig_WINDS @@ -175,8 +175,11 @@ output_flag = { duplicate_flag = NONE; rank_corr_flag = FALSE; -tmp_dir = "/tmp"; -output_prefix = "${MODEL}_F${FCST_TIME}_WINDS"; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = "${MODEL}_F${FCST_TIME}_WINDS"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/internal/test_unit/hdr/met_12_0.hdr b/internal/test_unit/hdr/met_12_0.hdr index a1113d5102..f8655a4a47 100644 --- a/internal/test_unit/hdr/met_12_0.hdr +++ b/internal/test_unit/hdr/met_12_0.hdr @@ -5,8 +5,8 @@ FHO : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_L ISC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE 
TOTAL TILE_DIM TILE_XLL TILE_YLL NSCALE ISCALE MSE ISC FENERGY2 OENERGY2 BASER FBIAS MCTC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_CAT _VAR_ MCTS : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_CAT ACC ACC_NCL ACC_NCU ACC_BCL ACC_BCU HK HK_BCL HK_BCU HSS HSS_BCL HSS_BCU GER GER_BCL GER_BCU HSS_EC HSS_EC_BCL HSS_EC_BCU EC_VALUE -MPR : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL INDEX OBS_SID OBS_LAT OBS_LON OBS_LVL OBS_ELV FCST OBS OBS_QC CLIMO_MEAN CLIMO_STDEV CLIMO_CDF_VAR_ -SEEPS : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL S12 S13 S21 S23 S31 S32 PF1 PF2 PF3 PV1 PV2 PV3 MEAN_FCST MEAN_OBS SEEPS +MPR : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL INDEX OBS_SID OBS_LAT OBS_LON OBS_LVL OBS_ELV FCST OBS OBS_QC OBS_CLIMO_MEAN OBS_CLIMO_STDEV OBS_CLIMO_CDF FCST_CLIMO_MEAN FCST_CLIMO_STDEV +SEEPS : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK 
INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL ODFL ODFH OLFD OLFH OHFD OHFL PF1 PF2 PF3 PV1 PV2 PV3 MEAN_FCST MEAN_OBS SEEPS SEEPS_MPR : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE OBS_SID OBS_LAT OBS_LON FCST OBS OBS_QC FCST_CAT OBS_CAT P1 P2 T1 T2 SEEPS NBRCNT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FBS FBS_BCL FBS_BCU FSS FSS_BCL FSS_BCU AFSS AFSS_BCL AFSS_BCU UFSS UFSS_BCL UFSS_BCU F_RATE F_RATE_BCL F_RATE_BCU O_RATE O_RATE_BCL O_RATE_BCU NBRCTC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FY_OY FY_ON FN_OY FN_ON @@ -27,9 +27,9 @@ RELP : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_L SAL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FABAR OABAR FOABAR FFABAR OOABAR MAE SL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FBAR OBAR FOBAR FFBAR OOBAR MAE SSVAR : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE 
VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_BIN BIN_i BIN_N VAR_MIN VAR_MAX VAR_MEAN FBAR OBAR FOBAR FFBAR OOBAR FBAR_NCL FBAR_NCU FSTDEV FSTDEV_NCL FSTDEV_NCU OBAR_NCL OBAR_NCU OSTDEV OSTDEV_NCL OSTDEV_NCU PR_CORR PR_CORR_NCL PR_CORR_NCU ME ME_NCL ME_NCU ESTDEV ESTDEV_NCL ESTDEV_NCU MBIAS MSE BCMSE RMSE -VL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL UFBAR VFBAR UOBAR VOBAR UVFOBAR UVFFBAR UVOOBAR F_SPEED_BAR O_SPEED_BAR DIR_ME DIR_MAE DIR_MSE -VAL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL UFABAR VFABAR UOABAR VOABAR UVFOABAR UVFFABAR UVOOABAR FA_SPEED_BAR OA_SPEED_BAR DIRA_ME DIRA_MAE DIRA_MSE -VCNT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FBAR FBAR_BCL FBAR_BCU OBAR OBAR_BCL OBAR_BCU FS_RMS FS_RMS_BCL FS_RMS_BCU OS_RMS OS_RMS_BCL OS_RMS_BCU MSVE MSVE_BCL MSVE_BCU RMSVE RMSVE_BCL RMSVE_BCU FSTDEV FSTDEV_BCL FSTDEV_BCU OSTDEV OSTDEV_BCL OSTDEV_BCU FDIR FDIR_BCL FDIR_BCU ODIR ODIR_BCL ODIR_BCU FBAR_SPEED FBAR_SPEED_BCL FBAR_SPEED_BCU OBAR_SPEED OBAR_SPEED_BCL OBAR_SPEED_BCU VDIFF_SPEED VDIFF_SPEED_BCL VDIFF_SPEED_BCU VDIFF_DIR VDIFF_DIR_BCL VDIFF_DIR_BCU SPEED_ERR SPEED_ERR_BCL SPEED_ERR_BCU SPEED_ABSERR SPEED_ABSERR_BCL SPEED_ABSERR_BCU DIR_ERR DIR_ERR_BCL DIR_ERR_BCU DIR_ABSERR DIR_ABSERR_BCL DIR_ABSERR_BCU ANOM_CORR ANOM_CORR_NCL ANOM_CORR_NCU ANOM_CORR_BCL ANOM_CORR_BCU ANOM_CORR_UNCNTR 
ANOM_CORR_UNCNTR_BCL ANOM_CORR_UNCNTR_BCU DIR_ME DIR_ME_BCL DIR_ME_BCU DIR_MAE DIR_MAE_BCL DIR_MAE_BCU DIR_MSE DIR_MSE_BCL DIR_MSE_BCU DIR_RMSE DIR_RMSE_BCL DIR_RMSE_BCU +VL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL UFBAR VFBAR UOBAR VOBAR UVFOBAR UVFFBAR UVOOBAR F_SPEED_BAR O_SPEED_BAR TOTAL_DIR DIR_ME DIR_MAE DIR_MSE +VAL1L2 : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL UFABAR VFABAR UOABAR VOABAR UVFOABAR UVFFABAR UVOOABAR FA_SPEED_BAR OA_SPEED_BAR TOTAL_DIR DIRA_ME DIRA_MAE DIRA_MSE +VCNT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL FBAR FBAR_BCL FBAR_BCU OBAR OBAR_BCL OBAR_BCU FS_RMS FS_RMS_BCL FS_RMS_BCU OS_RMS OS_RMS_BCL OS_RMS_BCU MSVE MSVE_BCL MSVE_BCU RMSVE RMSVE_BCL RMSVE_BCU FSTDEV FSTDEV_BCL FSTDEV_BCU OSTDEV OSTDEV_BCL OSTDEV_BCU FDIR FDIR_BCL FDIR_BCU ODIR ODIR_BCL ODIR_BCU FBAR_SPEED FBAR_SPEED_BCL FBAR_SPEED_BCU OBAR_SPEED OBAR_SPEED_BCL OBAR_SPEED_BCU VDIFF_SPEED VDIFF_SPEED_BCL VDIFF_SPEED_BCU VDIFF_DIR VDIFF_DIR_BCL VDIFF_DIR_BCU SPEED_ERR SPEED_ERR_BCL SPEED_ERR_BCU SPEED_ABSERR SPEED_ABSERR_BCL SPEED_ABSERR_BCU DIR_ERR DIR_ERR_BCL DIR_ERR_BCU DIR_ABSERR DIR_ABSERR_BCL DIR_ABSERR_BCU ANOM_CORR ANOM_CORR_NCL ANOM_CORR_NCU ANOM_CORR_BCL ANOM_CORR_BCU ANOM_CORR_UNCNTR ANOM_CORR_UNCNTR_BCL ANOM_CORR_UNCNTR_BCU TOTAL_DIR DIR_ME DIR_ME_BCL DIR_ME_BCU DIR_MAE DIR_MAE_BCL DIR_MAE_BCU DIR_MSE DIR_MSE_BCL DIR_MSE_BCU DIR_RMSE DIR_RMSE_BCL DIR_RMSE_BCU 
GENMPR : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL INDEX STORM_ID PROB_LEAD PROB_VAL AGEN_INIT AGEN_FHR AGEN_LAT AGEN_LON AGEN_DLAND BGEN_LAT BGEN_LON BGEN_DLAND GEN_DIST GEN_TDIFF INIT_TDIFF DEV_CAT OPS_CAT SSIDX : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE FCST_MODEL REF_MODEL N_INIT N_TERM N_VLD SS_INDEX MODE_SOA : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE N_VALID GRID_RES OBJECT_ID OBJECT_CAT CENTROID_X CENTROID_Y CENTROID_LAT CENTROID_LON AXIS_ANG LENGTH WIDTH AREA AREA_THRESH CURVATURE CURVATURE_X CURVATURE_Y COMPLEXITY INTENSITY_10 INTENSITY_25 INTENSITY_50 INTENSITY_75 INTENSITY_90 INTENSITY_50 INTENSITY_SUM diff --git a/internal/test_unit/python/unit.py b/internal/test_unit/python/unit.py new file mode 100755 index 0000000000..1fc3bf681a --- /dev/null +++ b/internal/test_unit/python/unit.py @@ -0,0 +1,396 @@ +#! /usr/bin/env python3 + +from datetime import datetime as dt +import logging +import os +from pathlib import Path +import re +import subprocess +import sys +import xml.etree.ElementTree as ET + +def unit(test_xml, file_log=None, cmd_only=False, noexit=False, memchk=False, callchk=False, log_overwrite=True): + """ + Parse a unit test xml file, run the associated tests, and display test results. 
+ + Parameters + ----------- + test_xml : pathlike + path to file containing the unit test(s) to perform + file_log : pathlike, default None + if present, write output from each test to the specified file + cmd_only : bool, default False + if true, print the test commands but do not run them (overrides file_log) + noexit : bool, default False + if true, the unit tester will continue executing subsequent + tests when a test fails + memchk : bool, default False + if true, activate valgrind with memcheck + callchk : bool, default False + if true, activate valgrind with callcheck + log_overwrite : bool, default True + when true, if file_log points to an existing file, that file will be overwritten. + when false, new log records will be appended to the existing file. + """ + + # initialize logger + logger = logging.getLogger(__name__) + logger.setLevel(logging.DEBUG) + + # create/add console handler + ch = logging.StreamHandler() + ch.setLevel(logging.INFO) + logger.addHandler(ch) + + # create/add file handler + if file_log and not cmd_only: + if log_overwrite: + file_mode = 'w' + else: + file_mode = 'a' + fh = logging.FileHandler(file_log, mode=file_mode) + fh.setLevel(logging.DEBUG) + logger.addHandler(fh) + + # parse xml file + try: + test_root = ET.parse(test_xml) + except Exception as e: + logger.exception(f"ERROR: Unable to parse xml from {test_xml}") + raise + + # parse the children of the met_test element + if test_root.getroot().tag != 'met_test': + logger.error(f"ERROR: unexpected top-level element. 
Expected 'met_test', got '{test_root.tag}'") + sys.exit(1) + # read test_dir + try: + test_dir = test_root.find('test_dir').text + mgnc = repl_env(test_dir + '/bin/mgnc.sh') + mpnc = repl_env(test_dir + '/bin/mpnc.sh') + except Exception as e: + logger.warning(f"WARNING: unable to read test_dir from {test_xml}") + pass + + tests = build_tests(test_root) + + # determine the max length of the test names + # not used, unless format of test result display is changed + name_wid = max([len(test['name']) for test in tests]) + + VALGRIND_OPT_MEM ="--leak-check=full --show-leak-kinds=all --error-limit=no -v" + VALGRIND_OPT_CALL ="--tool=callgrind --dump-instr=yes --simulate-cache=yes --collect-jumps=yes" + + # run each test + for test in tests: + # # print the test name ... may want to change this to only if cmd_only=False + logger.debug("\n") + logger.info(f"TEST: {test['name']}") + + # # prepare the output space + output_keys = [key for key in test.keys() if key.startswith('out_')] + outputs = [output for key in output_keys for output in test[key]] + for output in outputs: + try: + Path(output).unlink() + except FileNotFoundError: + pass + except Exception as e: + logger.exception() + raise + output_dir = Path(output).parent + output_dir.mkdir(parents=True, exist_ok=True) #should error/warning be raised if dir already exists? + + # # set the test environment variables + set_envs = [] + if 'env' in test.keys(): + for key, val in sorted(test['env'].items()): + os.environ[key] = val + set_cmd = f"export {key}={val}" + logger.debug(set_cmd) + set_envs.append(set_cmd) + + # # build the text command + cmd = (test['exec'] + test['param']).strip() + + if memchk: + cmd = f"valgrind {VALGRIND_OPT_MEM} {cmd}" + elif callchk: + cmd = f"valgrind {VALGRIND_OPT_CALL} {cmd}" + + + # # if writing a command file, print the environment and command, then loop + # consider tying this into logging... 
+ if cmd_only: + if 'env' in test.keys(): + for key, val in sorted(test['env'].items()): + print(f"export '{key}={val}'") + print(f"{cmd}") + if 'env' in test.keys(): + for key, val in sorted(test['env'].items()): + print(f"unset {key}") + print("\n") + + # # run and time the test command + else: + logger.debug(f"{cmd}") + t_start = dt.now() + cmd_return = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, shell=True) + t_elaps = dt.now() - t_start + + cmd_outs = cmd_return.stdout + logger.debug(f"{cmd_outs}") + logger.debug(f"Return code: {cmd_return.returncode}") + + # # check the return status and output files + ret_ok = (cmd_return.returncode == test['retval']) + if ret_ok: + out_ok = True + + for filepath in test['out_pnc']: + result = subprocess.run([mpnc, '-v', filepath], + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) + cmd_outs += ("\n"+result.stdout) + logger.debug(result.stdout) + if result.returncode: + out_ok = False + + for filepath in test['out_gnc']: + result = subprocess.run([mgnc, '-v', filepath], + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) + cmd_outs += ("\n"+result.stdout) + logger.debug(result.stdout) + if result.returncode: + out_ok = False + + for filepath in test['out_stat']: + # check stat file exists and is nonzero size + try: + filesize = os.stat(filepath).st_size + if filesize==0: + cmd_outs += (f"\nERROR: stat file empty {filepath}\n") + out_ok = False + break + except FileNotFoundError: + cmd_outs += (f"\nERROR: stat file missing {filepath}\n") + logger.debug(result.stdout) + out_ok = False + break + # check stat file has non-header lines + with open(filepath) as f: + numlines = len([l for l in f.readlines() if not l.startswith('VERSION')]) + if numlines==0: + cmd_outs += (f"\nERROR: stat data missing from file {filepath}\n") + out_ok = False + + for filepath in test['out_ps']: + # check postscript file exists and is nonzero size + try: + filesize = 
os.stat(filepath).st_size + if filesize==0: + cmd_outs += (f"\nERROR: postscript file empty {filepath}\n") + out_ok = False + break + except FileNotFoundError: + cmd_outs += (f"\nERROR: postscript file missing {filepath}\n") + out_ok = False + break + # check for ghostscript errors + result = subprocess.run(['gs', '-sDEVICE=nullpage', '-dQUIET', '-dNOPAUSE', '-dBATCH', filepath]) + if result.returncode: + cmd_outs += (f"\nERROR: ghostscript error for postscript file {filepath}") + out_ok = False + + for filepath in test['out_exist']: + # check output file exists and is nonzero size + try: + filesize = os.stat(filepath).st_size + if filesize==0: + cmd_outs += (f"\nERROR: file empty {filepath}\n") + out_ok = False + break + except FileNotFoundError: + cmd_outs += (f"\nERROR: file missing when it should exist {filepath}\n") + out_ok = False + + for filepath in test['out_not_exist']: + # check output file doesn't exist + if os.path.isfile(filepath): + cmd_outs += (f"\nERROR: file exists when it should be missing {filepath}\n") + out_ok = False + + # # unset the test environment variables + unset_envs = [] + if 'env' in test.keys(): + for key, val in sorted(test['env'].items()): + del os.environ[key] + unset_cmd = f"unset {key}" + logger.debug(unset_cmd) + unset_envs.append(unset_cmd) + + # # print the test result + test_result = "pass" if (ret_ok and out_ok) else "FAIL" + logger.info(f"\t- {test_result} - \t{round(t_elaps.total_seconds(),3)} sec") + + # # on failure, print the problematic test and exit, if requested + if not (ret_ok and out_ok): + logger.info("\n".join(set_envs) + "\n" + cmd + "\n" + cmd_outs + "\n".join(unset_envs) + "\n") + if not noexit: + sys.exit(1) + + # clean up logger/handlers (to avoid duplicate logging when this function is looped) + logger.removeHandler(ch) + try: + logger.removeHandler(fh) + except NameError: + pass + + +def build_tests(test_root): + """ + Parse the test components. 
+ + Take an ElementTree element extracted from a unit test xml file. + Return a list of all tests, where each test is represented as a dictionary, + with its keys representing each test component. + + Parameters + ---------- + test_root : ElementTree element + parsed from XML file containing the unit test(s) to perform + + Returns + ------- + test_list: + list of test dicts, containing test attributes parsed from xml object + + """ + + # define logger + logger = logging.getLogger(__name__) + + # find all tests in test_xml, and create a dictionary of attributes for each test + test_list = [] + for test_el in test_root.iter('test'): + test = {} + try: + test['name'] = test_el.attrib['name'] + except KeyError: + logger.error("ERROR: name attribute not found for test") + raise + + test['retval'] = 0 + for el in test_el: + if (el.tag=='exec' or el.tag=='param'): + test[el.tag] = repl_env(el.text) + elif el.tag=='retval': + try: + test['retval'] = int(el.text) + except ValueError: + logger.error("ERROR: retval must be an integer value") + raise + elif el.tag=='output': + test['out_pnc'] = [] + test['out_gnc'] = [] + test['out_stat'] = [] + test['out_ps'] = [] + test['out_exist'] = [] + test['out_not_exist'] = [] + output_names = { + 'point_nc' : 'out_pnc', + 'grid_nc' : 'out_gnc', + 'stat' : 'out_stat', + 'ps' : 'out_ps', + 'exist' : 'out_exist', + 'not_exist' : 'out_not_exist', + } + for output_el in el: + test[output_names[output_el.tag]].append(repl_env(output_el.text)) + + elif el.tag=='env': + env_dict = {} + for env_el in el: + try: + env_name = env_el.find('name').text + env_dict[env_name] = env_el.find('value').text + if not env_dict[env_name]: + env_dict[env_name] = '' + except AttributeError: + logger.error(f"ERROR: env pair in test \\{test['name']}\\ missing name or value") + raise + + test['env'] = env_dict + + # validate test format/details + expected_keys = ['exec', 'param', 'out_pnc', 'out_gnc', 'out_stat', 'out_ps', + 'out_exist', 'out_not_exist'] + for 
key in expected_keys: + if key not in test.keys(): + logger.error(f"ERROR: test {test['name']} missing {key} element") + sys.exit(1) + + test_list.append(test) + + return test_list + + +def repl_env(string_with_ref): + """ + Take a string with a placeholder for environment variable with syntax + ${ENV_NAME} and replace placeholder with corresponding value of environment + variable. + + Parameters + ---------- + string_with_ref : str + A string, generally path-like, that includes substring ${ENV_NAME} + + Returns + ------- + string_with_ref : str + The provided string with ${ENV_NAME} replaced by corresponding environment variable + """ + # define logger + logger = logging.getLogger(__name__) + + envar_ref_list = re.findall('\$\{\w+}', string_with_ref) + envar_ref_unique = [ + envar_ref_list[i] for i in list(range(len(envar_ref_list))) if ( + envar_ref_list[i] not in envar_ref_list[:i])] + + if len(envar_ref_unique)>0: + for envar_ref in envar_ref_unique: + envar_name = envar_ref[2:-1] + envar = os.getenv(envar_name) + if not envar: + logger.error(f"ERROR: environment variable {envar_name} not found") + string_with_ref = string_with_ref.replace(envar_ref, envar) + + return string_with_ref + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Run a unit test.") + parser.add_argument('test_xml', nargs='+') + parser.add_argument('-log', metavar='log_file', + help='if present, write output from each test to log_file') + parser.add_argument('-cmd', action='store_true', + help='if present, print the test commands but do not run them, overrides -log') + parser.add_argument('-memchk', action='store_true', + help='if present, activate valgrind with memcheck') + parser.add_argument('-callchk', action='store_true', + help='if present, activate valgrind with callcheck') + parser.add_argument('-noexit', action='store_true', + help='if present, the unit tester will continue executing subsequent tests when a test fails') + args = 
parser.parse_args() + + for i, xml in enumerate(args.test_xml): + if i==0: + new_log = True + else: + new_log = False + unit(test_xml=xml, file_log=args.log, cmd_only=args.cmd, noexit=args.noexit, memchk=args.memchk, callchk=args.callchk, + log_overwrite=new_log) + + diff --git a/internal/test_unit/unit_test.log b/internal/test_unit/unit_test.log new file mode 100644 index 0000000000..ef1c7b19b5 --- /dev/null +++ b/internal/test_unit/unit_test.log @@ -0,0 +1,1225 @@ +export MET_BASE=/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../../share/met +export MET_BUILD_BASE=/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../.. +export MET_TEST_BASE=/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit +export MET_TEST_INPUT=/d1/projects/MET/MET_test_data/unit_test +export MET_TEST_OUTPUT=/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../../test_output + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_ascii2nc.xml + +TEST: ascii2nc_TRMM_3hr + - pass - 17.767 sec +TEST: ascii2nc_GAGE_24hr + - pass - 1.165 sec +TEST: ascii2nc_GAGE_24hr_badfile + - pass - 0.53 sec +TEST: ascii2nc_duplicates + - pass - 0.532 sec +TEST: ascii2nc_SURFRAD1 + - pass - 0.975 sec +TEST: ascii2nc_insitu_turb + - pass - 3.119 sec +TEST: ascii2nc_by_var_name_PB + - pass - 146.208 sec +TEST: ascii2nc_rain_01H_sum + - pass - 0.582 sec +TEST: ascii2nc_airnow_daily_v2 + - pass - 0.799 sec +TEST: ascii2nc_airnow_hourly_aqobs + - pass - 0.886 sec +TEST: ascii2nc_airnow_hourly + - pass - 3.847 sec +TEST: ascii2nc_ndbc + - pass - 8.243 sec +TEST: ascii2nc_ismn_SNOTEL + - pass - 14.834 sec +TEST: ascii2nc_iabp + - pass - 0.542 sec + +CALLING: 
/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_ascii2nc_indy.xml + +TEST: ascii2nc_TRMM_12hr + - pass - 20.969 sec +TEST: ascii2nc_LITTLE_R + - pass - 0.605 sec +TEST: ascii2nc_LITTLE_R_BAD_RECORD + - pass - 0.551 sec +TEST: ascii2nc_SURFRAD + - pass - 0.976 sec +TEST: ascii2nc_SURFRAD_summary1 + - pass - 2.007 sec +TEST: ascii2nc_SURFRAD_summary2 + - pass - 1.483 sec +TEST: ascii2nc_SURFRAD_summary3 + - pass - 1.224 sec +TEST: ascii2nc_SURFRAD_summary4 + - pass - 1.205 sec +TEST: ascii2nc_insitu_turb_mask_sid + - pass - 1.241 sec +TEST: ascii2nc_insitu_turb_mask_grid_data + - pass - 2.994 sec +TEST: ascii2nc_insitu_turb_mask_named_grid + - pass - 2.963 sec +TEST: ascii2nc_MASK_GRID + - pass - 3.539 sec +TEST: ascii2nc_MASK_POLY + - pass - 1.298 sec +TEST: ascii2nc_WWSIS_clear_pvwatts_one_min + - pass - 18.383 sec +TEST: ascii2nc_WWSIS_clear_pvwatts_five_min + - pass - 2.641 sec +TEST: ascii2nc_WWSIS_clear_pvwatts_ten_min + - pass - 1.509 sec +TEST: ascii2nc_WWSIS_clear_pvwatts_sixty_min + - pass - 0.694 sec +TEST: ascii2nc_WWSIS_HA_pvwatts_sixty_min + - pass - 0.735 sec +TEST: ascii2nc_WWSIS_pvwatts_one_min + - pass - 18.646 sec +TEST: ascii2nc_WWSIS_pvwatts_sixty_min + - pass - 0.7 sec +TEST: ascii2nc_by_var_name + - pass - 0.534 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_madis2nc.xml + +TEST: madis2nc_METAR + - pass - 12.385 sec +TEST: madis2nc_METAR_time_summary + - pass - 16.391 sec +TEST: madis2nc_METAR_mask_sid + - pass - 0.608 sec +TEST: madis2nc_METAR_mask_grid + - pass - 1.052 sec +TEST: madis2nc_RAOB + - pass - 3.295 sec +TEST: madis2nc_PROFILER_MASK_POLY + - pass - 0.57 sec +TEST: madis2nc_MARITIME + - 
pass - 0.811 sec +TEST: madis2nc_MESONET_MASK_GRID + - pass - 6.653 sec +TEST: madis2nc_MESONET_optional_vars + - pass - 4.856 sec +TEST: madis2nc_ACARS_PROFILES + - pass - 2.095 sec +TEST: madis2nc_buf_handle + - pass - 2.626 sec +TEST: madis2nc_multiple_inputs + - pass - 2.167 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_trmm2nc.xml + +TEST: trmm2nc_3hr + - pass - 0.334 sec +TEST: trmm2nc_12hr + - pass - 0.331 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_pb2nc.xml + +TEST: pb2nc_GDAS_mask_grid_G212 + - pass - 8.247 sec +TEST: pb2nc_NDAS_no_mask + - pass - 9.265 sec +TEST: pb2nc_NDAS_mask_poly_conus + - pass - 3.676 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_pb2nc_indy.xml + +TEST: pb2nc_NDAS_mask_sid_list + - pass - 1.602 sec +TEST: pb2nc_NDAS_mask_sid_file + - pass - 1.765 sec +TEST: pb2nc_NDAS_mask_grid_data_cfg + - pass - 4.469 sec +TEST: pb2nc_compute_pbl_cape + - pass - 13.715 sec +TEST: pb2nc_NDAS_var_all + - pass - 19.439 sec +TEST: pb2nc_vertical_level_500 + - pass - 1.392 sec +TEST: pb2nc_NDAS_summary + - pass - 6.866 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_gen_vx_mask.xml + +TEST: gen_vx_mask_POLY_GFS_LATLON + - pass - 11.973 sec +TEST: gen_vx_mask_POLY_GFS_MERCATOR + - pass - 0.607 sec +TEST: 
gen_vx_mask_POLY_NAM_LAMBERT + - pass - 4.092 sec +TEST: gen_vx_mask_POLY_HMT_STEREO + - pass - 1.026 sec +TEST: gen_vx_mask_POLY_GFS_LATLON_NAK + - pass - 1.476 sec +TEST: gen_vx_mask_POLY_LATLON_RECTANGLE + - pass - 0.552 sec +TEST: gen_vx_mask_POLY_XY_RECTANGLE + - pass - 0.544 sec +TEST: gen_vx_mask_GRID_NAM_HMT_STEREO + - pass - 0.732 sec +TEST: gen_vx_mask_GRID_NAMED_GRIDS + - pass - 0.618 sec +TEST: gen_vx_mask_GRID_SPEC_STRINGS + - pass - 0.598 sec +TEST: gen_vx_mask_CIRCLE + - pass - 0.685 sec +TEST: gen_vx_mask_CIRCLE_MASK + - pass - 0.642 sec +TEST: gen_vx_mask_CIRCLE_COMPLEMENT + - pass - 0.649 sec +TEST: gen_vx_mask_TRACK + - pass - 1.096 sec +TEST: gen_vx_mask_TRACK_MASK + - pass - 1.098 sec +TEST: gen_vx_mask_DATA_APCP_24 + - pass - 1.067 sec +TEST: gen_vx_mask_POLY_PASS_THRU + - pass - 0.633 sec +TEST: gen_vx_mask_POLY_INTERSECTION + - pass - 0.633 sec +TEST: gen_vx_mask_POLY_UNION + - pass - 0.634 sec +TEST: gen_vx_mask_POLY_SYMDIFF + - pass - 0.627 sec +TEST: gen_vx_mask_DATA_INPUT_FIELD + - pass - 1.211 sec +TEST: gen_vx_mask_BOX + - pass - 0.536 sec +TEST: gen_vx_mask_SOLAR_ALT + - pass - 0.557 sec +TEST: gen_vx_mask_SOLAR_AZI + - pass - 0.673 sec +TEST: gen_vx_mask_LAT + - pass - 0.543 sec +TEST: gen_vx_mask_LON + - pass - 0.548 sec +TEST: gen_vx_mask_SHAPE + - pass - 0.549 sec +TEST: gen_vx_mask_SHAPE_STR + - pass - 0.695 sec +TEST: gen_vx_mask_SHAPE_STR_MULTI + - pass - 0.605 sec +TEST: gen_vx_mask_PYTHON + - pass - 1.605 sec +TEST: gen_vx_mask_DATA_TWO_FILE_TYPES + - pass - 1.22 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_gen_ens_prod.xml + +TEST: gen_ens_prod_NO_CTRL + - pass - 9.428 sec +TEST: gen_ens_prod_WITH_CTRL + - pass - 8.858 sec +TEST: gen_ens_prod_SINGLE_FILE_NC_NO_CTRL + - pass - 1.23 sec +TEST: gen_ens_prod_SINGLE_FILE_NC_WITH_CTRL + - pass - 
1.144 sec +TEST: gen_ens_prod_SINGLE_FILE_GRIB_NO_CTRL + - pass - 1.206 sec +TEST: gen_ens_prod_SINGLE_FILE_GRIB_WITH_CTRL + - pass - 1.213 sec +TEST: gen_ens_prod_NORMALIZE + - pass - 6.058 sec +TEST: gen_ens_prod_CLIMO_ANOM_ENS_MEMBER_ID + - pass - 0.88 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_pcp_combine.xml + +TEST: pcp_combine_sum_GRIB1 + - pass - 28.664 sec +TEST: pcp_combine_sum_GRIB1_MISSING + - pass - 14.362 sec +TEST: pcp_combine_sum_GRIB1_MULTIPLE_FIELDS + - pass - 42.993 sec +TEST: pcp_combine_sum_GRIB2 + - pass - 1.445 sec +TEST: pcp_combine_add_GRIB1 + - pass - 1.686 sec +TEST: pcp_combine_add_GRIB2 + - pass - 0.601 sec +TEST: pcp_combine_add_STAGEIV + - pass - 1.207 sec +TEST: pcp_combine_add_ACCUMS + - pass - 1.414 sec +TEST: pcp_combine_sub_GRIB1 + - pass - 1.089 sec +TEST: pcp_combine_sub_GRIB1_run2 + - pass - 0.675 sec +TEST: pcp_combine_sub_GRIB2 + - pass - 0.553 sec +TEST: pcp_combine_sub_NC_MET_06 + - pass - 0.631 sec +TEST: pcp_combine_sub_P_INTERP + - pass - 0.811 sec +TEST: pcp_combine_add_VARNAME + - pass - 0.891 sec +TEST: pcp_combine_sub_DIFFERENT_INIT + - pass - 0.669 sec +TEST: pcp_combine_sub_NEGATIVE_ACCUM + - pass - 0.694 sec +TEST: pcp_combine_sub_SUBTRACT_MULTIPLE_FIELDS + - pass - 1.045 sec +TEST: pcp_combine_derive_LIST_OF_FILES + - pass - 1.104 sec +TEST: pcp_combine_derive_MULTIPLE_FIELDS + - pass - 3.137 sec +TEST: pcp_combine_derive_VLD_THRESH + - pass - 1.208 sec +TEST: pcp_combine_derive_CUSTOM_NAMES + - pass - 0.747 sec +TEST: pcp_combine_sub_ROT_LL + - pass - 1.096 sec +TEST: pcp_combine_LAEA_GRIB2 + - pass - 1.428 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py 
/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_wwmca_regrid.xml + +TEST: wwmca_regrid_G003_NO_AGE + - pass - 2.472 sec +TEST: wwmca_regrid_G003_AGE_60 + - pass - 1.532 sec +TEST: wwmca_regrid_G003_AGE_120 + - pass - 1.531 sec +TEST: wwmca_regrid_G003_AGE_240 + - pass - 1.575 sec +TEST: wwmca_regrid_G003_WRITE_PIXEL_AGE + - pass - 1.547 sec +TEST: wwmca_regrid_GFS_LATLON + - pass - 4.694 sec +TEST: wwmca_regrid_GFS_MERCATOR + - pass - 0.699 sec +TEST: wwmca_regrid_NAM_LAMBERT + - pass - 5.288 sec +TEST: wwmca_regrid_HMT_STEREO + - pass - 0.963 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_point_stat.xml + +TEST: point_stat_GRIB1_NAM_GDAS + - pass - 49.029 sec +TEST: point_stat_GRIB1_NAM_GDAS_WINDS + - pass - 11.773 sec +TEST: point_stat_GRIB1_NAM_GDAS_MASK_SID + - pass - 43.827 sec +TEST: point_stat_GRIB2_NAM_NDAS + - pass - 36.403 sec +TEST: point_stat_GRIB2_SREF_GDAS + - pass - 28.574 sec +TEST: point_stat_GRIB1_NAM_TRMM + - pass - 15.594 sec +TEST: point_stat_GRIB2_SREF_TRMM + - pass - 15.265 sec +TEST: point_stat_NCMET_NAM_HMTGAGE + - pass - 1.756 sec +TEST: point_stat_NCMET_NAM_NDAS_SEEPS + - pass - 9.972 sec +TEST: point_stat_NCPINT_TRMM + - pass - 15.005 sec +TEST: point_stat_NCPINT_NDAS + - pass - 7.636 sec +TEST: point_stat_GRIB2_SREF_TRMM_prob + - pass - 2.426 sec +TEST: point_stat_GTG_lc + - pass - 60.144 sec +TEST: point_stat_GTG_latlon + - pass - 43.921 sec +TEST: point_stat_SID_INC_EXC + - pass - 6.573 sec +TEST: point_stat_SID_INC_EXC_CENSOR + - pass - 7.393 sec +TEST: point_stat_GRIB1_NAM_GDAS_INTERP_OPTS + - pass - 5.202 sec +TEST: point_stat_GRIB1_NAM_GDAS_INTERP_OPTS_name + - pass - 20.228 sec +TEST: point_stat_LAND_TOPO_MASK + - pass - 36.319 sec +TEST: point_stat_MPR_THRESH + - pass - 57.575 sec + 
+CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_stat_analysis_ps.xml + +TEST: stat_analysis_CONFIG_POINT_STAT + - pass - 44.426 sec +TEST: stat_analysis_POINT_STAT_SUMMARY + - pass - 34.47 sec +TEST: stat_analysis_POINT_STAT_SUMMARY_UNION + - pass - 19.293 sec +TEST: stat_analysis_POINT_STAT_FILTER_OBS_SID + - pass - 1.695 sec +TEST: stat_analysis_POINT_STAT_FILTER_TIMES + - pass - 8.536 sec +TEST: stat_analysis_POINT_STAT_SEEPS + - pass - 3.168 sec +TEST: stat_analysis_RAMPS + - pass - 3.089 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_duplicate_flag.xml + +TEST: point_stat_DUP_NONE + - pass - 0.694 sec +TEST: point_stat_DUP_UNIQUE + - pass - 0.679 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_obs_summary.xml + +TEST: ascii2nc_obs_summary + - pass - 0.571 sec +TEST: point_stat_OS_NONE + - pass - 0.684 sec +TEST: point_stat_OS_NEAREST + - pass - 0.728 sec +TEST: point_stat_OS_MIN + - pass - 0.676 sec +TEST: point_stat_OS_MAX + - pass - 0.686 sec +TEST: point_stat_OS_UW_MEAN + - pass - 0.681 sec +TEST: point_stat_OS_DW_MEAN + - pass - 0.676 sec +TEST: point_stat_OS_MEDIAN + - pass - 0.677 sec +TEST: point_stat_OS_PERC + - pass - 0.682 sec +TEST: point_stat_OS_UNIQUE_ALL + - pass - 1.406 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py 
/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_grid_stat.xml + +TEST: grid_stat_GRIB_lvl_typ_val + - pass - 102.432 sec +TEST: grid_stat_GRIB_set_attr + - pass - 26.404 sec +TEST: grid_stat_GRIB2_NAM_RTMA + - pass - 21.167 sec +TEST: grid_stat_GRIB2_NAM_RTMA_NP2 + - pass - 19.316 sec +TEST: grid_stat_GRIB1_NAM_STAGE4 + - pass - 35.211 sec +TEST: grid_stat_GRIB1_NAM_STAGE4_CENSOR + - pass - 3.779 sec +TEST: grid_stat_GTG_lc + - pass - 2.261 sec +TEST: grid_stat_GTG_latlon + - pass - 3.405 sec +TEST: grid_stat_GRIB2_SREF_STAGE4_prob_as_scalar + - pass - 2.357 sec +TEST: grid_stat_APPLY_MASK_TRUE + - pass - 5.515 sec +TEST: grid_stat_APPLY_MASK_FALSE + - pass - 5.392 sec +TEST: grid_stat_GFS_FOURIER + - pass - 8.642 sec +TEST: grid_stat_MPR_THRESH + - pass - 59.558 sec +TEST: grid_stat_UK_SEEPS + - pass - 4.344 sec +TEST: grid_stat_WRF_pres + - pass - 1.113 sec +TEST: grid_stat_GEN_ENS_PROD + - pass - 3.09 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_stat_analysis_gs.xml + +TEST: stat_analysis_CONFIG_GRID_STAT + - pass - 0.878 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_wavelet_stat.xml + +TEST: wavelet_stat_GRIB1_NAM_STAGE4 + - pass - 27.062 sec +TEST: wavelet_stat_GRIB1_NAM_STAGE4_NO_THRESH + - pass - 15.383 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_stat_analysis_ws.xml + +TEST: stat_analysis_AGG_ISC + - pass - 0.64 sec + +CALLING: 
/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_ensemble_stat.xml + +TEST: ensemble_stat_CMD_LINE + - pass - 5.766 sec +TEST: ensemble_stat_FILE_LIST + - pass - 5.425 sec +TEST: ensemble_stat_MASK_SID + - pass - 1.392 sec +TEST: ensemble_stat_MASK_SID_CTRL + - pass - 1.345 sec +TEST: ensemble_stat_MASK_SID_CENSOR + - pass - 1.732 sec +TEST: ensemble_stat_SKIP_CONST + - pass - 5.18 sec +TEST: ensemble_stat_OBSERR + - pass - 13.099 sec +TEST: ensemble_stat_SINGLE_FILE_NC_NO_CTRL + - pass - 3.044 sec +TEST: ensemble_stat_SINGLE_FILE_NC_WITH_CTRL + - pass - 3.158 sec +TEST: ensemble_stat_SINGLE_FILE_GRIB_NO_CTRL + - pass - 2.072 sec +TEST: ensemble_stat_SINGLE_FILE_GRIB_WITH_CTRL + - pass - 2.109 sec +TEST: ensemble_stat_RPS_CLIMO_BIN_PROB + - pass - 0.581 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_stat_analysis_es.xml + +TEST: stat_analysis_AGG_RHIST + - pass - 0.843 sec +TEST: stat_analysis_AGG_PHIST + - pass - 0.843 sec +TEST: stat_analysis_AGG_RELP + - pass - 1.007 sec +TEST: stat_analysis_AGG_ECNT + - pass - 1.031 sec +TEST: stat_analysis_AGG_STAT_ORANK_RHIST_PHIST + - pass - 5.348 sec +TEST: stat_analysis_AGG_STAT_ORANK_RELP + - pass - 4.691 sec +TEST: stat_analysis_AGG_STAT_ORANK_SSVAR + - pass - 5.3 sec +TEST: stat_analysis_AGG_STAT_ORANK_ECNT + - pass - 11.662 sec +TEST: stat_analysis_AGG_SSVAR + - pass - 1.132 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_mode.xml + +TEST: mode_NO_MATCH_MERGE + - pass - 2.319 sec +TEST: 
mode_NO_MERGE + - pass - 1.94 sec +TEST: mode_MERGE_BOTH + - pass - 3.724 sec +TEST: mode_MASK_POLY + - pass - 1.96 sec +TEST: mode_QUILT + - pass - 5.595 sec +TEST: mode_CONFIG_MERGE + - pass - 3.549 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_mode_multivar.xml + +TEST: mode_multivar_snow + - pass - 36.544 sec +TEST: mode_multivar_snow_3_2 + - pass - 19.229 sec +TEST: mode_multivar_snow_super + - pass - 31.774 sec +TEST: mode_multivar_FAKE_DATA + - pass - 4.152 sec +TEST: mode_multivar_FAKE_DATA_with_intensities + - pass - 6.589 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_mode_analysis.xml + +TEST: mode_analysis_BYCASE_SIMPLE + - pass - 0.758 sec +TEST: mode_analysis_BYCASE_CLUSTER + - pass - 0.55 sec +TEST: mode_analysis_MET-644_LOOKIN_BY_DIR + - pass - 0.593 sec +TEST: mode_analysis_MET-644_LOOKIN_BY_FILE + - pass - 0.553 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_plot_point_obs.xml + +TEST: plot_point_obs_G218 + - pass - 4.803 sec +TEST: plot_point_obs_TMP_ADPUPA + - pass - 4.345 sec +TEST: plot_point_obs_CONFIG + - pass - 4.309 sec +TEST: plot_point_obs_CONFIG_REGRID + - pass - 4.069 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_plot_data_plane.xml + +TEST: plot_data_plane_GRIB1 + - pass - 1.385 sec +TEST: 
plot_data_plane_GRIB1_REC + - pass - 0.934 sec +TEST: plot_data_plane_GRIB1_CODE + - pass - 0.916 sec +TEST: plot_data_plane_GRIB1_ENS + - pass - 0.825 sec +TEST: plot_data_plane_GRIB1_ENS_HI + - pass - 0.793 sec +TEST: plot_data_plane_GRIB1_rotlatlon + - pass - 0.721 sec +TEST: plot_data_plane_GRIB2 + - pass - 0.858 sec +TEST: plot_data_plane_GRIB2_ENS + - pass - 0.715 sec +TEST: plot_data_plane_GRIB2_ENS_LOW + - pass - 0.725 sec +TEST: plot_data_plane_GRIB2_PROB + - pass - 0.645 sec +TEST: plot_data_plane_NC_PINTERP + - pass - 0.755 sec +TEST: plot_data_plane_NC_MET + - pass - 0.804 sec +TEST: plot_data_plane_NCCF_lc_0 + - pass - 0.746 sec +TEST: plot_data_plane_NCCF_lc_25 + - pass - 0.815 sec +TEST: plot_data_plane_NCCF_lc_50 + - pass - 0.802 sec +TEST: plot_data_plane_NCCF_latlon_0 + - pass - 1.003 sec +TEST: plot_data_plane_NCCF_latlon_12 + - pass - 0.976 sec +TEST: plot_data_plane_NCCF_latlon_25 + - pass - 0.976 sec +TEST: plot_data_plane_NCCF_latlon_by_value + - pass - 0.956 sec +TEST: plot_data_plane_NCCF_north_to_south + - pass - 4.35 sec +TEST: plot_data_plane_NCCF_time + - pass - 0.681 sec +TEST: plot_data_plane_NCCF_time_int64 + - pass - 0.73 sec +TEST: plot_data_plane_NCCF_rotlatlon + - pass - 0.934 sec +TEST: plot_data_plane_TRMM_3B42_3hourly_nc + - pass - 1.041 sec +TEST: plot_data_plane_TRMM_3B42_daily_nc + - pass - 1.233 sec +TEST: plot_data_plane_TRMM_3B42_daily_packed + - pass - 1.209 sec +TEST: plot_data_plane_TRMM_3B42_daily_packed_CONVERT + - pass - 1.71 sec +TEST: plot_data_plane_EaSM_CMIP5_rcp85 + - pass - 0.752 sec +TEST: plot_data_plane_EaSM_CMIP5_rcp85_time_slice + - pass - 0.76 sec +TEST: plot_data_plane_EaSM_CESM + - pass - 0.771 sec +TEST: plot_data_plane_GRIB2_NBM_CWASP_L0 + - pass - 3.072 sec +TEST: plot_data_plane_GRIB2_NBM_CWASP_PERC_5 + - pass - 3.146 sec +TEST: plot_data_plane_GRIB2_NBM_CWASP_PROB_50 + - pass - 2.262 sec +TEST: plot_data_plane_GRIB2_NBM_WETBT_MIXED_LEVELS + - pass - 3.377 sec +TEST: 
plot_data_plane_GRIB2_NBM_FICEAC_A48_PERC_10 + - pass - 2.102 sec +TEST: plot_data_plane_LAEA_GRIB2 + - pass - 1.631 sec +TEST: plot_data_plane_LAEA_NCCF + - pass - 1.595 sec +TEST: plot_data_plane_LAEA_MET_NC + - pass - 1.634 sec +TEST: plot_data_plane_NCCF_POLAR_STEREO + - pass - 1.747 sec +TEST: plot_data_plane_NCCF_POLAR_ELLIPSOIDAL + - pass - 0.719 sec +TEST: plot_data_plane_GRIB2_TABLE_4.48 + - pass - 1.514 sec +TEST: plot_data_plane_WRF_west_east_stag + - pass - 0.864 sec +TEST: plot_data_plane_WRF_south_north_stag + - pass - 0.863 sec +TEST: plot_data_plane_WRF_num_press_levels_stag + - pass - 0.761 sec +TEST: plot_data_plane_WRF_num_z_levels_stag + - pass - 0.741 sec +TEST: plot_data_plane_WRF_bottom_top + - pass - 0.866 sec +TEST: plot_data_plane_WRF_bottom_top_stag + - pass - 0.852 sec +TEST: plot_data_plane_set_attr_grid + - pass - 14.222 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_wwmca_plot.xml + +TEST: wwmca_plot_NH_SH_AGE_240 + - pass - 2.607 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_series_analysis.xml + +TEST: series_analysis_CMD_LINE + - pass - 8.969 sec +TEST: series_analysis_AGGR_CMD_LINE + - pass - 10.585 sec +TEST: series_analysis_FILE_LIST + - pass - 6.317 sec +TEST: series_analysis_AGGR_FILE_LIST + - pass - 7.913 sec +TEST: series_analysis_UPPER_AIR + - pass - 3.876 sec +TEST: series_analysis_CONDITIONAL + - pass - 3.963 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_tc_dland.xml + +TEST: 
tc_dland_ONE_DEG + - pass - 17.215 sec +TEST: tc_dland_HALF_DEG + - pass - 65.738 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_tc_pairs.xml + +TEST: tc_pairs_ALAL2010 + - pass - 2.412 sec +TEST: tc_pairs_CONSENSUS + - pass - 5.688 sec +TEST: tc_pairs_INTERP12_FILL + - pass - 0.923 sec +TEST: tc_pairs_INTERP12_REPLACE + - pass - 0.938 sec +TEST: tc_pairs_PROBRIRW + - pass - 2.656 sec +TEST: tc_pairs_BASIN_MAP + - pass - 2.661 sec +TEST: tc_pairs_LEAD_REQ + - pass - 1.023 sec +TEST: tc_pairs_WRITE_VALID + - pass - 0.741 sec +TEST: tc_pairs_WRITE_VALID_PROBRIRW + - pass - 2.105 sec +TEST: tc_pairs_DIAGNOSTICS + - pass - 0.932 sec +TEST: tc_pairs_DIAGNOSTICS_CONSENSUS + - pass - 5.124 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_tc_stat.xml + +TEST: tc_stat_ALAL2010 + - pass - 24.263 sec +TEST: tc_stat_FILTER_STRINGS + - pass - 2.475 sec +TEST: tc_stat_PROBRIRW + - pass - 40.831 sec +TEST: tc_stat_LEAD_REQ + - pass - 1.483 sec +TEST: tc_stat_FALSE_ALARMS + - pass - 2.438 sec +TEST: tc_stat_DIAGNOSTICS + - pass - 7.112 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_plot_tc.xml + +TEST: plot_tc_TCMPR + - pass - 9.155 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_tc_rmw.xml + +TEST: tc_rmw_PRESSURE_LEV_OUT + - pass - 39.751 sec +TEST: 
tc_rmw_GONZALO + - pass - 9.403 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_rmw_analysis.xml + +TEST: rmw_analysis + - pass - 1.61 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_tc_diag.xml + +TEST: tc_diag_IAN + - pass - 117.499 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_tc_gen.xml + +TEST: tc_gen_2016 + - pass - 94.491 sec +TEST: tc_gen_prob + - pass - 1.05 sec +TEST: tc_gen_2021_shape + - pass - 9.322 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_met_test_scripts.xml + +TEST: test_all_gen_vx_mask_1 + - pass - 1.263 sec +TEST: test_all_gen_vx_mask_2 + - pass - 1.054 sec +TEST: test_all_gen_ens_prod + - pass - 1.72 sec +TEST: test_all_pcp_combine_1 + - pass - 1.855 sec +TEST: test_all_pcp_combine_2 + - pass - 3.97 sec +TEST: test_all_pcp_combine_3 + - pass - 5.307 sec +TEST: test_all_pcp_combine_4 + - pass - 1.066 sec +TEST: test_all_pcp_combine_5 + - pass - 1.075 sec +TEST: test_all_pcp_combine_6 + - pass - 1.563 sec +TEST: test_all_mode_1 + - pass - 2.505 sec +TEST: test_all_mode_2 + - pass - 2.463 sec +TEST: test_all_mode_3 + - pass - 3.169 sec +TEST: test_all_grid_stat_1 + - pass - 2.189 sec +TEST: test_all_grid_stat_2 + - pass - 0.646 sec +TEST: test_all_grid_stat_3 + - pass - 1.26 sec +TEST: test_all_grid_stat_4 + - pass - 
8.559 sec +TEST: test_all_pb2nc + - pass - 3.645 sec +TEST: test_all_plot_point_obs + - pass - 5.294 sec +TEST: test_all_ascii2nc_1 + - pass - 0.599 sec +TEST: test_all_ascii2nc_2 + - pass - 0.715 sec +TEST: test_all_madis2nc + - pass - 1.521 sec +TEST: test_all_point_stat + - pass - 72.135 sec +TEST: test_all_wavelet_stat_1 + - pass - 4.892 sec +TEST: test_all_wavelet_stat_2 + - pass - 2.751 sec +TEST: test_all_ensemble_stat + - pass - 9.129 sec +TEST: test_all_stat_analysis + - pass - 15.05 sec +TEST: test_all_mode_analysis_1 + - pass - 0.713 sec +TEST: test_all_mode_analysis_2 + - pass - 0.561 sec +TEST: test_all_mode_analysis_3 + - pass - 0.574 sec +TEST: test_all_plot_data_plane_1 + - pass - 1.109 sec +TEST: test_all_plot_data_plane_2 + - pass - 0.662 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_modis.xml + +TEST: modis_regrid_SURFACE_TEMPERATURE + - pass - 3.171 sec +TEST: modis_regrid_CLOUD_FRACTION + - pass - 2.371 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_ref_config_lead_00.xml + +TEST: gen_vx_mask + - pass - 2.464 sec +TEST: pb2nc_ndas_lead_00 + - pass - 8.045 sec +TEST: point_stat_lead_00_upper_air_AFWAv3.4_Noahv2.7.1 + - pass - 48.708 sec +TEST: point_stat_lead_00_surface_AFWAv3.4_Noahv2.7.1 + - pass - 24.294 sec +TEST: point_stat_lead_00_winds_AFWAv3.4_Noahv2.7.1 + - pass - 56.158 sec +TEST: point_stat_lead_00_upper_air_AFWAv3.4_Noahv3.3 + - pass - 48.776 sec +TEST: point_stat_lead_00_surface_AFWAv3.4_Noahv3.3 + - pass - 24.526 sec +TEST: point_stat_lead_00_winds_AFWAv3.4_Noahv3.3 + - pass - 56.448 sec + +CALLING: 
/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_ref_config_lead_12.xml + +TEST: gen_vx_mask + - pass - 2.225 sec +TEST: pb2nc_ndas_lead_12 + - pass - 8.146 sec +TEST: pcp_combine_ST2ml_3hr_09_12 + - pass - 0.909 sec +TEST: pcp_combine_wrf_3hr_09_12 + - pass - 1.143 sec +TEST: point_stat_lead_12_upper_air_AFWAv3.4_Noahv2.7.1 + - pass - 48.863 sec +TEST: point_stat_lead_12_surface_AFWAv3.4_Noahv2.7.1 + - pass - 25.401 sec +TEST: point_stat_lead_12_winds_AFWAv3.4_Noahv2.7.1 + - pass - 55.69 sec +TEST: point_stat_lead_12_upper_air_AFWAv3.4_Noahv3.3 + - pass - 48.919 sec +TEST: point_stat_lead_12_surface_AFWAv3.4_Noahv3.3 + - pass - 25.006 sec +TEST: point_stat_lead_12_winds_AFWAv3.4_Noahv3.3 + - pass - 55.808 sec +TEST: grid_stat_3hr_accum_time_12 + - pass - 0.883 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_ref_config_lead_24.xml + +TEST: gen_vx_mask + - pass - 2.181 sec +TEST: pb2nc_ndas_lead_24 + - pass - 7.926 sec +TEST: pcp_combine_ST2ml_3hr_21_24 + - pass - 0.76 sec +TEST: pcp_combine_wrf_3hr_21_24 + - pass - 1.115 sec +TEST: point_stat_lead_24_upper_air_AFWAv3.4_Noahv2.7.1 + - pass - 47.982 sec +TEST: point_stat_lead_24_surface_AFWAv3.4_Noahv2.7.1 + - pass - 23.972 sec +TEST: point_stat_lead_24_winds_AFWAv3.4_Noahv2.7.1 + - pass - 54.219 sec +TEST: point_stat_lead_24_upper_air_AFWAv3.4_Noahv3.3 + - pass - 48.24 sec +TEST: point_stat_lead_24_surface_AFWAv3.4_Noahv3.3 + - pass - 24.114 sec +TEST: point_stat_lead_24_winds_AFWAv3.4_Noahv3.3 + - pass - 54.602 sec +TEST: grid_stat_3hr_accum_time_24 + - pass - 0.892 sec + +CALLING: 
/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_ref_config_lead_36.xml + +TEST: gen_vx_mask + - pass - 2.186 sec +TEST: pb2nc_ndas_lead_36 + - pass - 8.025 sec +TEST: pcp_combine_ST2ml_3hr_33_36 + - pass - 0.697 sec +TEST: pcp_combine_ST2ml_24hr_12_36 + - pass - 0.596 sec +TEST: pcp_combine_wrf_3hr_33_36 + - pass - 1.092 sec +TEST: pcp_combine_wrf_24hr_12_36 + - pass - 1.108 sec +TEST: point_stat_lead_36_upper_air_AFWAv3.4_Noahv2.7.1 + - pass - 48.391 sec +TEST: point_stat_lead_36_surface_AFWAv3.4_Noahv2.7.1 + - pass - 24.7 sec +TEST: point_stat_lead_36_winds_AFWAv3.4_Noahv2.7.1 + - pass - 54.79 sec +TEST: point_stat_lead_36_upper_air_AFWAv3.4_Noahv3.3 + - pass - 48.341 sec +TEST: point_stat_lead_36_surface_AFWAv3.4_Noahv3.3 + - pass - 24.723 sec +TEST: point_stat_lead_36_winds_AFWAv3.4_Noahv3.3 + - pass - 54.725 sec +TEST: grid_stat_3hr_accum_time_36 + - pass - 0.917 sec +TEST: grid_stat_24hr_accum_time_36 + - pass - 0.896 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_ref_config_lead_48.xml + +TEST: gen_vx_mask + - pass - 2.193 sec +TEST: pb2nc_ndas_lead_48 + - pass - 8.014 sec +TEST: pcp_combine_ST2ml_3hr_45_48 + - pass - 0.72 sec +TEST: pcp_combine_wrf_3hr_45_48 + - pass - 1.101 sec +TEST: point_stat_lead_48_upper_air_AFWAv3.4_Noahv2.7.1 + - pass - 48.027 sec +TEST: point_stat_lead_48_surface_AFWAv3.4_Noahv2.7.1 + - pass - 24.714 sec +TEST: point_stat_lead_48_winds_AFWAv3.4_Noahv2.7.1 + - pass - 54.824 sec +TEST: point_stat_lead_48_upper_air_AFWAv3.4_Noahv3.3 + - pass - 48.048 sec +TEST: point_stat_lead_48_surface_AFWAv3.4_Noahv3.3 + - pass - 24.41 sec +TEST: point_stat_lead_48_winds_AFWAv3.4_Noahv3.3 + - pass - 
55.39 sec +TEST: grid_stat_3hr_accum_time_48 + - pass - 0.893 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_ref_config.xml + +TEST: stat_analysis_GO_Index + - pass - 1.468 sec +TEST: stat_analysis_GO_Index_out_stat + - pass - 1.179 sec +TEST: stat_analysis_SFC_SS_Index_out + - pass - 0.874 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_mode_graphics.xml + +TEST: mode_graphics_PLOT_MULTIPLE + - pass - 63.828 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_regrid.xml + +TEST: regrid_grid_stat_ST4_TO_HMT + - pass - 1.052 sec +TEST: regrid_grid_stat_HMT_TO_ST4 + - pass - 8.112 sec +TEST: regrid_grid_stat_BOTH_TO_DTC165 + - pass - 1.594 sec +TEST: regrid_grid_stat_BOTH_TO_NAM + - pass - 5.106 sec +TEST: regrid_grid_stat_BOTH_TO_HMT_D02 + - pass - 1.577 sec +TEST: regrid_data_plane_GFS_TO_HMT_NEAREST + - pass - 2.858 sec +TEST: regrid_data_plane_GFS_ROTLATLON_GRID_SPEC + - pass - 2.465 sec +TEST: regrid_data_plane_GFS_TO_HMT_BILIN + - pass - 2.782 sec +TEST: regrid_data_plane_GFS_TO_HMT_BUDGET + - pass - 2.539 sec +TEST: regrid_data_plane_GFS_TO_HMT_MIN_3 + - pass - 3.824 sec +TEST: regrid_data_plane_GFS_TO_HMT_MAX_3 + - pass - 3.887 sec +TEST: regrid_data_plane_GFS_TO_HMT_UW_MEAN_3 + - pass - 3.837 sec +TEST: regrid_data_plane_GFS_TO_HMT_UW_MEAN_9 + - pass - 19.083 sec +TEST: regrid_data_plane_GFS_TO_HMT_DW_MEAN_3 + - pass - 3.94 sec +TEST: regrid_data_plane_HRRR_MAXGAUSS + - pass - 4.686 sec +TEST: 
regrid_data_plane_GFS_TO_HMT_MEDIAN_3 + - pass - 3.921 sec +TEST: regrid_data_plane_GFS_TO_HMT_LS_FIT_3 + - pass - 3.982 sec +TEST: regrid_data_plane_GFS_TO_HMT_MAX_5_SQUARE + - pass - 1.92 sec +TEST: regrid_data_plane_GFS_TO_G212_CONVERT_CENSOR + - pass - 0.893 sec +TEST: regrid_data_plane_WRAP_LON + - pass - 1.438 sec +TEST: regrid_data_plane_NC_ROT_LAT_LON + - pass - 2.385 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_gsi_tools.xml + +TEST: gsid2mpr_CONV + - pass - 2.161 sec +TEST: gsid2mpr_DUP + - pass - 1.27 sec +TEST: gsid2mpr_RAD + - pass - 2.469 sec +TEST: gsidens2orank_CONV_NO_MEAN + - pass - 5.607 sec +TEST: gsidens2orank_CONV_ENS_MEAN + - pass - 4.809 sec +TEST: gsidens2orank_RAD + - pass - 4.108 sec +TEST: gsidens2orank_RAD_CHANNEL + - pass - 1.14 sec +TEST: stat_analysis_MPR_TO_CNT + - pass - 2.631 sec +TEST: stat_analysis_ORANK_TO_RHIST + - pass - 23.803 sec +TEST: stat_analysis_ORANK_TO_SSVAR + - pass - 23.779 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_aeronet.xml + +TEST: ascii2nc_AERONET_daily + - pass - 0.814 sec +TEST: ascii2nc_AERONET_v3_daily + - pass - 0.537 sec +TEST: ascii2nc_AERONET_v3_concat + - pass - 0.574 sec +TEST: ascii2nc_AERONET_vld_thresh + - pass - 0.545 sec +TEST: ascii2nc_AERONET_monthly + - pass - 0.698 sec +TEST: point_stat_GRIB2_f18_NGAC_AERONET_daily + - pass - 0.59 sec +TEST: point_stat_GRIB2_f18_NGAC_AERONET_monthly + - pass - 0.639 sec +TEST: point_stat_GRIB2_f21_NGAC_AERONET_daily + - pass - 0.595 sec +TEST: point_stat_GRIB2_f21_NGAC_AERONET_monthly + - pass - 0.639 sec + +CALLING: 
/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_shift_data_plane.xml + +TEST: shift_data_plane_GRIB1 + - pass - 4.387 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_mtd.xml + +TEST: mtd_basic + - pass - 45.285 sec +TEST: mtd_conv_time + - pass - 47.593 sec +TEST: mtd_single + - pass - 11.058 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_climatology_1.0deg.xml + +TEST: climatology_POINT_STAT_GFS_1.0DEG + - pass - 128.342 sec +TEST: climatology_POINT_STAT_GFS_1.0DEG_CLIMO_PREV_MONTH + - pass - 127.347 sec +TEST: climatology_POINT_STAT_PROB_GFS_1.0DEG + - pass - 11.152 sec +TEST: climatology_GRID_STAT_PROB_GFS_1.0DEG + - pass - 8.402 sec +TEST: climatology_STAT_ANALYSIS_1.0DEG + - pass - 3.124 sec +TEST: climatology_SERIES_ANALYSIS_1.0DEG + - pass - 168.005 sec +TEST: climatology_SERIES_ANALYSIS_1.0DEG_CONST_CLIMO + - pass - 49.084 sec +TEST: climatology_SERIES_ANALYSIS_1.0DEG_AGGR + - pass - 215.56 sec +TEST: climatology_SERIES_ANALYSIS_PROB_1.0DEG + - pass - 20.95 sec +TEST: climatology_SERIES_ANALYSIS_PROB_1.0DEG_AGGR + - pass - 24.843 sec +TEST: climatology_ENSEMBLE_STAT_1.0DEG + - pass - 31.429 sec +TEST: climatology_ENSEMBLE_STAT_1.0DEG_ONE_CDF_BIN + - pass - 11.051 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_climatology_1.5deg.xml + +TEST: 
climatology_POINT_STAT_WMO_1.5DEG + - pass - 245.695 sec +TEST: climatology_STAT_ANALYSIS_WMO_1.5DEG_MPR_AGG_STAT + - pass - 0.741 sec +TEST: climatology_STAT_ANALYSIS_WMO_1.5DEG_VAL1L2_AGG_STAT + - pass - 0.607 sec +TEST: climatology_STAT_ANALYSIS_WMO_1.5DEG_FILTER + - pass - 0.758 sec +TEST: climatology_GRID_STAT_WMO_1.5DEG + - pass - 253.773 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_climatology_2.5deg.xml + +TEST: climatology_POINT_STAT_GFS_2.5DEG + - pass - 207.139 sec +TEST: climatology_GRID_STAT_WRAP_YEAR_2.5DEG + - pass - 126.605 sec +TEST: climatology_GRID_STAT_SINGLE_MONTH_2.5DEG + - pass - 64.927 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_climatology_mixed.xml + +TEST: climatology_GRID_STAT_FCST_NCEP_1.0DEG_OBS_WMO_1.5DEG + - pass - 72.612 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_grib_tables.xml + +TEST: GRIB1_um_dcf + - pass - 2.76 sec +TEST: GRIB2_um_raw + - pass - 2.366 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_grid_weight.xml + +TEST: grid_weight_GRID_STAT_NONE + - pass - 6.082 sec +TEST: grid_weight_GRID_STAT_COS_LAT + - pass - 6.075 sec +TEST: grid_weight_GRID_STAT_AREA + - pass - 6.081 sec +TEST: grid_weight_ENSEMBLE_STAT_NONE + - pass - 2.327 sec +TEST: grid_weight_ENSEMBLE_STAT_COS_LAT + - pass - 
2.287 sec +TEST: grid_weight_ENSEMBLE_STAT_AREA + - pass - 2.291 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_netcdf.xml + +TEST: ascii2nc_no_compression + - pass - 16.393 sec +TEST: ascii2nc_compression2_by_config + - pass - 16.728 sec +TEST: ascii2nc_compression3_by_env + - pass - 16.411 sec +TEST: ascii2nc_compression4_by_argument + - pass - 16.483 sec +TEST: 365_days + - pass - 1.913 sec +TEST: netcdf_1byte_time + - pass - 0.934 sec +TEST: netcdf_months_units + - pass - 0.653 sec +TEST: netcdf_months_units_from_day2 + - pass - 0.535 sec +TEST: netcdf_months_units_to_next_month + - pass - 0.537 sec +TEST: netcdf_years_units + - pass - 0.535 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_hira.xml + +WARNING: unable to read test_dir from /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_hira.xml +TEST: point_stat_NCMET_NAM_HMTGAGE_HIRA + - pass - 10.392 sec +TEST: point_stat_HIRA_EMPTY_PROB_CAT_THRESH + - pass - 9.472 sec +TEST: stat_analysis_CONFIG_HIRA + - pass - 4.317 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_interp_shape.xml + +TEST: grid_stat_INTERP_SQUARE + - pass - 6.366 sec +TEST: grid_stat_INTERP_CIRCLE + - pass - 5.351 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py 
/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_lidar2nc.xml + +TEST: lidar2nc_CALIPSO + - pass - 1.126 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_ioda2nc.xml + +TEST: ioda2nc_mask_sid_list + - pass - 1.612 sec +TEST: ioda2nc_var_all + - pass - 0.549 sec +TEST: ioda2nc_summary + - pass - 0.566 sec +TEST: ioda2nc_same_input + - pass - 0.565 sec +TEST: ioda2nc_int_datetime + - pass - 0.575 sec +TEST: ioda2nc_v2_string_sid + - pass - 0.576 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_airnow.xml + +TEST: pb2nc_AIRNOW + - pass - 16.993 sec +TEST: point_stat_GRIB2_AIRNOW + - pass - 2.994 sec + +CALLING: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/xml/unit_python.xml + +TEST: python_numpy_grid_name + - pass - 1.728 sec +TEST: python_numpy_grid_string + - pass - 1.147 sec +TEST: python_numpy_grid_data_file + - pass - 1.163 sec +TEST: python_numpy_plot_data_plane + - pass - 1.221 sec +TEST: python_xarray_plot_data_plane + - pass - 1.193 sec +TEST: python_numpy_plot_data_plane_missing + - FAIL - 0.533 sec +export MET_PYTHON_EXE=${MET_TEST_MET_PYTHON_EXE} +/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../../share/met/../../bin/plot_data_plane \ + PYTHON_NUMPY \ + /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../../test_output/python/letter_numpy_0_to_missing.ps \ + 'name = 
"/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../../share/met/python/examples/read_ascii_numpy.py /d1/projects/MET/MET_test_data/unit_test/python/letter.txt LETTER 0.0";' \ + -plot_range 0.0 255.0 \ + -title "Python enabled numpy plot_data_plane" \ + -v 1 +DEBUG 1: Start plot_data_plane by johnhg(6088) at 2024-10-07 17:45:28Z cmd: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../../share/met/../../bin/plot_data_plane PYTHON_NUMPY /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../../test_output/python/letter_numpy_0_to_missing.ps name = "/d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../../share/met/python/examples/read_ascii_numpy.py /d1/projects/MET/MET_test_data/unit_test/python/letter.txt LETTER 0.0"; -plot_range 0.0 255.0 -title Python enabled numpy plot_data_plane -v 1 +DEBUG 1: Opening data file: PYTHON_NUMPY +sh: 1: /usr/local/python3/bin/python3: not found +ERROR : +ERROR : tmp_nc_dataplane() -> command "${MET_TEST_MET_PYTHON_EXE} /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../../share/met/python/pyembed/write_tmp_dataplane.py /tmp/tmp_met_data_386958_0 /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/../../share/met/python/examples/read_ascii_numpy /d1/projects/MET/MET_test_data/unit_test/python/letter.txt LETTER 0.0" failed ... status = 32512 +ERROR : +unset MET_PYTHON_EXE + + +ERROR: /d1/personal/johnhg/MET/MET_development/MET-feature_2887_categorical_weights/internal/test_unit/python/unit.py unit_python.xml failed. 
+ +*** UNIT TESTS FAILED *** + diff --git a/internal/test_unit/xml/unit_ascii2nc.xml b/internal/test_unit/xml/unit_ascii2nc.xml index 4424fd0e33..0b2d242ef7 100644 --- a/internal/test_unit/xml/unit_ascii2nc.xml +++ b/internal/test_unit/xml/unit_ascii2nc.xml @@ -44,6 +44,18 @@ + + &MET_BIN;/ascii2nc + 1 + \ + &DATA_DIR_OBS;/gauge/2012041012.badfile.ascii \ + &OUTPUT_DIR;/ascii2nc/gauge_2012041012_24hr.nc \ + -v 1 + + + + + &MET_BIN;/ascii2nc \ diff --git a/internal/test_unit/xml/unit_climatology_1.0deg.xml b/internal/test_unit/xml/unit_climatology_1.0deg.xml index a07d47ff6e..699026825e 100644 --- a/internal/test_unit/xml/unit_climatology_1.0deg.xml +++ b/internal/test_unit/xml/unit_climatology_1.0deg.xml @@ -154,20 +154,18 @@ &OUTPUT_DIR;/climatology_1.0deg/stat_analysis_MPR_to_PSTD.stat - +--!> &MET_BIN;/series_analysis CLIMO_MEAN_FILE_LIST "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cmean_1d.19590409", - "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cmean_1d.19590410", - "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cmean_1d.19590411" + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cmean_1d.19590410" CLIMO_STDEV_FILE_LIST "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cstdv_1d.19590409", - "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cstdv_1d.19590410", - "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cstdv_1d.19590411" + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cstdv_1d.19590410" @@ -175,11 +173,9 @@ -fcst &DATA_DIR_MODEL;/grib2/gfs/gfs_2012040900_F012.grib2 \ &DATA_DIR_MODEL;/grib2/gfs/gfs_2012040900_F024.grib2 \ &DATA_DIR_MODEL;/grib2/gfs/gfs_2012040900_F036.grib2 \ - &DATA_DIR_MODEL;/grib2/gfs/gfs_2012040900_F048.grib2 \ -obs &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120409_1200_000.grb2 \ &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120410_0000_000.grb2 \ &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120410_1200_000.grb2 \ - &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120411_0000_000.grb2 \ -paired \ -out &OUTPUT_DIR;/climatology_1.0deg/series_analysis_GFS_CLIMO_1.0DEG.nc \ -config 
&CONFIG_DIR;/SeriesAnalysisConfig_climo \ @@ -190,25 +186,112 @@ + + &MET_BIN;/series_analysis + + CLIMO_MEAN_FILE_LIST + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cmean_1d.19590409" + + + CLIMO_STDEV_FILE_LIST + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cstdv_1d.19590409" + + + + \ + -fcst &DATA_DIR_MODEL;/grib2/gfs/gfs_2012040900_F012.grib2 \ + &DATA_DIR_MODEL;/grib2/gfs/gfs_2012040900_F024.grib2 \ + &DATA_DIR_MODEL;/grib2/gfs/gfs_2012040900_F036.grib2 \ + -obs &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120409_1200_000.grb2 \ + &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120410_0000_000.grb2 \ + &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120410_1200_000.grb2 \ + -out &OUTPUT_DIR;/climatology_1.0deg/series_analysis_GFS_CLIMO_1.0DEG_CONST_CLIMO.nc \ + -config &CONFIG_DIR;/SeriesAnalysisConfig_const_climo \ + -v 3 + + + &OUTPUT_DIR;/climatology_1.0deg/series_analysis_GFS_CLIMO_1.0DEG_CONST_CLIMO.nc + + + + + &MET_BIN;/series_analysis + + CLIMO_MEAN_FILE_LIST + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cmean_1d.19590411" + + + CLIMO_STDEV_FILE_LIST + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cstdv_1d.19590411" + + + + \ + -fcst &DATA_DIR_MODEL;/grib2/gfs/gfs_2012040900_F048.grib2 \ + -obs &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120411_0000_000.grb2 \ + -paired \ + -aggr &OUTPUT_DIR;/climatology_1.0deg/series_analysis_GFS_CLIMO_1.0DEG.nc \ + -out &OUTPUT_DIR;/climatology_1.0deg/series_analysis_GFS_CLIMO_1.0DEG_AGGR.nc \ + -config &CONFIG_DIR;/SeriesAnalysisConfig_climo \ + -v 2 + + + &OUTPUT_DIR;/climatology_1.0deg/series_analysis_GFS_CLIMO_1.0DEG_AGGR.nc + + + echo "&DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F003.grib2 \ &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F009.grib2 \ &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F015.grib2 \ - &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F021.grib2 \ - &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F027.grib2 \ - &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F033.grib2 \ - 
&DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F039.grib2 \ - &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F045.grib2" \ - > &OUTPUT_DIR;/climatology_1.0deg/input_fcst_file_list; \ + &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F021.grib2" \ + > &OUTPUT_DIR;/climatology_1.0deg/20120409_fcst_file_list; \ echo "&DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120409_0000_000.grb2 \ &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120409_0600_000.grb2 \ &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120409_1200_000.grb2 \ - &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120409_1800_000.grb2 \ - &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120410_0000_000.grb2 \ + &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120409_1800_000.grb2" \ + > &OUTPUT_DIR;/climatology_1.0deg/20120409_obs_file_list; \ + &MET_BIN;/series_analysis + + DAY_INTERVAL 1 + HOUR_INTERVAL 6 + CLIMO_MEAN_FILE_LIST + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cmean_1d.19590409", + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cmean_1d.19590410", + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cmean_1d.19590411" + + + CLIMO_STDEV_FILE_LIST + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cstdv_1d.19590409", + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cstdv_1d.19590410", + "&DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg/cstdv_1d.19590411" + + + + \ + -fcst &OUTPUT_DIR;/climatology_1.0deg/20120409_fcst_file_list \ + -obs &OUTPUT_DIR;/climatology_1.0deg/20120409_obs_file_list \ + -paired \ + -out &OUTPUT_DIR;/climatology_1.0deg/series_analysis_PROB_CLIMO_1.0DEG.nc \ + -config &CONFIG_DIR;/SeriesAnalysisConfig_climo_prob \ + -v 2 + + + &OUTPUT_DIR;/climatology_1.0deg/series_analysis_PROB_CLIMO_1.0DEG.nc + + + + + echo "&DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F027.grib2 \ + &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F033.grib2 \ + &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F039.grib2 \ + &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F045.grib2" \ + > &OUTPUT_DIR;/climatology_1.0deg/20120410_fcst_file_list; \ + echo 
"&DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120410_0000_000.grb2 \ &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120410_0600_000.grb2 \ &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120410_1200_000.grb2 \ &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120410_1800_000.grb2" \ - > &OUTPUT_DIR;/climatology_1.0deg/input_obs_file_list; \ + > &OUTPUT_DIR;/climatology_1.0deg/20120410_obs_file_list; \ &MET_BIN;/series_analysis DAY_INTERVAL 1 @@ -227,15 +310,16 @@ \ - -fcst &OUTPUT_DIR;/climatology_1.0deg/input_fcst_file_list \ - -obs &OUTPUT_DIR;/climatology_1.0deg/input_obs_file_list \ + -fcst &OUTPUT_DIR;/climatology_1.0deg/20120410_fcst_file_list \ + -obs &OUTPUT_DIR;/climatology_1.0deg/20120410_obs_file_list \ -paired \ - -out &OUTPUT_DIR;/climatology_1.0deg/series_analysis_PROB_CLIMO_1.0DEG.nc \ + -aggr &OUTPUT_DIR;/climatology_1.0deg/series_analysis_PROB_CLIMO_1.0DEG.nc \ + -out &OUTPUT_DIR;/climatology_1.0deg/series_analysis_PROB_CLIMO_1.0DEG_AGGR.nc \ -config &CONFIG_DIR;/SeriesAnalysisConfig_climo_prob \ -v 2 - &OUTPUT_DIR;/climatology_1.0deg/series_analysis_PROB_CLIMO_1.0DEG.nc + &OUTPUT_DIR;/climatology_1.0deg/series_analysis_PROB_CLIMO_1.0DEG_AGGR.nc diff --git a/internal/test_unit/xml/unit_climatology_1.5deg.xml b/internal/test_unit/xml/unit_climatology_1.5deg.xml index cd1a86b353..de22d95673 100644 --- a/internal/test_unit/xml/unit_climatology_1.5deg.xml +++ b/internal/test_unit/xml/unit_climatology_1.5deg.xml @@ -54,7 +54,7 @@ \ -lookin &OUTPUT_DIR;/climatology_1.5deg/point_stat_WMO_CLIMO_1.5DEG_120000L_20120409_120000V.stat \ - -job aggregate_stat -line_type MPR -out_line_type CTC -fcst_lev P850 -interp_mthd NEAREST -by FCST_VAR -out_thresh '>CDP90' \ + -job aggregate_stat -line_type MPR -out_line_type CTC -fcst_lev P850 -interp_mthd NEAREST -by FCST_VAR -out_thresh '>OCDP90' \ -out_stat &OUTPUT_DIR;/climatology_1.5deg/stat_analysis_WMO_1.5DEG_MPR_to_CTC_out.stat @@ -84,7 +84,7 @@ \ -lookin 
&OUTPUT_DIR;/climatology_1.5deg/point_stat_WMO_CLIMO_1.5DEG_120000L_20120409_120000V.stat \ - -job filter -line_type MPR -column_thresh CLIMO_CDF 'lt0.1||gt0.9' \ + -job filter -line_type MPR -column_thresh OBS_CLIMO_CDF 'lt0.1||gt0.9' \ -dump_row &OUTPUT_DIR;/climatology_1.5deg/stat_analysis_WMO_1.5DEG_FILTER_CDF_dump.stat diff --git a/internal/test_unit/xml/unit_climatology_mixed.xml b/internal/test_unit/xml/unit_climatology_mixed.xml new file mode 100644 index 0000000000..0433b39e8d --- /dev/null +++ b/internal/test_unit/xml/unit_climatology_mixed.xml @@ -0,0 +1,39 @@ + + + + + + + + + + +]> + + + + + + &TEST_DIR; + true + + + &MET_BIN;/grid_stat + + OUTPUT_PREFIX FCST_NCEP_1.0DEG_OBS_WMO_1.5DEG + FCST_CLIMO_DIR &DATA_DIR_CLIMO;/NCEP_NCAR_40YR_1.0deg + OBS_CLIMO_DIR &DATA_DIR_CLIMO;/ERA_DAILY_1.5deg + + \ + &DATA_DIR_MODEL;/grib2/gfs/gfs_2012040900_F024.grib2 \ + &DATA_DIR_MODEL;/grib2/gfsanl/gfsanl_4_20120410_0000_000.grb2 \ + &CONFIG_DIR;/GridStatConfig_climo_FCST_NCEP_1.0DEG_OBS_WMO_1.5DEG \ + -outdir &OUTPUT_DIR;/climatology_mixed -v 2 + + + &OUTPUT_DIR;/climatology_mixed/grid_stat_FCST_NCEP_1.0DEG_OBS_WMO_1.5DEG_240000L_20120410_000000V.stat + &OUTPUT_DIR;/climatology_mixed/grid_stat_FCST_NCEP_1.0DEG_OBS_WMO_1.5DEG_240000L_20120410_000000V_pairs.nc + + + diff --git a/internal/test_unit/xml/unit_grid_stat.xml b/internal/test_unit/xml/unit_grid_stat.xml index cc24ad21d3..ad070dff60 100644 --- a/internal/test_unit/xml/unit_grid_stat.xml +++ b/internal/test_unit/xml/unit_grid_stat.xml @@ -296,7 +296,7 @@ SEEPS_FLAG BOTH SEEPS_P1_THRESH NA OUTPUT_PREFIX SEEPS - MET_SEEPS_GRID_CLIMO_NAME&DATA_DIR_CLIMO;/seeps/PPT24_seepsweights_grid.nc + SEEPS_GRID_CLIMO_NAME&DATA_DIR_CLIMO;/seeps/PPT24_seepsweights_grid.nc \ &DATA_DIR_MODEL;/seeps/gpm_2021120100_2021120200_trmmgrid.nc \ diff --git a/internal/test_unit/xml/unit_grid_weight.xml b/internal/test_unit/xml/unit_grid_weight.xml index 85005feec1..979ebad495 100644 --- a/internal/test_unit/xml/unit_grid_weight.xml +++ 
b/internal/test_unit/xml/unit_grid_weight.xml @@ -22,6 +22,7 @@ &MET_BIN;/grid_stat OUTPUT_PREFIX NO_WEIGHT + DESC NO_WEIGHT CLIMO_MEAN_FILE &DATA_DIR_CLIMO;/NCEP_1.0deg/cmean_1d.19790410 GRID_WEIGHT NONE @@ -32,7 +33,7 @@ -outdir &OUTPUT_DIR;/grid_weight -v 1 - &OUTPUT_DIR;/grid_weight/grid_stat_NO_WEIGHT_240000L_20120410_000000V.stat + &OUTPUT_DIR;/grid_weight/grid_stat_NO_WEIGHT_240000L_20120410_000000V.stat &OUTPUT_DIR;/grid_weight/grid_stat_NO_WEIGHT_240000L_20120410_000000V_pairs.nc @@ -41,6 +42,7 @@ &MET_BIN;/grid_stat OUTPUT_PREFIX COS_LAT_WEIGHT + DESC COS_LAT_WEIGHT CLIMO_MEAN_FILE &DATA_DIR_CLIMO;/NCEP_1.0deg/cmean_1d.19790410 GRID_WEIGHT COS_LAT @@ -51,7 +53,7 @@ -outdir &OUTPUT_DIR;/grid_weight -v 1 - &OUTPUT_DIR;/grid_weight/grid_stat_COS_LAT_WEIGHT_240000L_20120410_000000V.stat + &OUTPUT_DIR;/grid_weight/grid_stat_COS_LAT_WEIGHT_240000L_20120410_000000V.stat &OUTPUT_DIR;/grid_weight/grid_stat_COS_LAT_WEIGHT_240000L_20120410_000000V_pairs.nc @@ -60,6 +62,7 @@ &MET_BIN;/grid_stat OUTPUT_PREFIX AREA_WEIGHT + DESC AREA_WEIGHT CLIMO_MEAN_FILE &DATA_DIR_CLIMO;/NCEP_1.0deg/cmean_1d.19790410 GRID_WEIGHT AREA @@ -70,7 +73,7 @@ -outdir &OUTPUT_DIR;/grid_weight -v 1 - &OUTPUT_DIR;/grid_weight/grid_stat_AREA_WEIGHT_240000L_20120410_000000V.stat + &OUTPUT_DIR;/grid_weight/grid_stat_AREA_WEIGHT_240000L_20120410_000000V.stat &OUTPUT_DIR;/grid_weight/grid_stat_AREA_WEIGHT_240000L_20120410_000000V_pairs.nc @@ -78,9 +81,11 @@ &MET_BIN;/ensemble_stat - OUTPUT_PREFIX NO_WEIGHT - CLIMO_MEAN_FILE &DATA_DIR_CLIMO;/NCEP_1.0deg/cmean_1d.19790410 - GRID_WEIGHT NONE + OUTPUT_PREFIX NO_WEIGHT + DESC NO_WEIGHT + CLIMO_MEAN_FILE &DATA_DIR_CLIMO;/NCEP_1.0deg/cmean_1d.19790410 + CLIMO_STDEV_FILE &DATA_DIR_CLIMO;/NCEP_1.0deg/cstdv_1d.19790410 + GRID_WEIGHT NONE \ 6 \ @@ -104,7 +109,9 @@ &MET_BIN;/ensemble_stat OUTPUT_PREFIX COS_LAT_WEIGHT + DESC COS_LAT_WEIGHT CLIMO_MEAN_FILE &DATA_DIR_CLIMO;/NCEP_1.0deg/cmean_1d.19790410 + CLIMO_STDEV_FILE 
&DATA_DIR_CLIMO;/NCEP_1.0deg/cstdv_1d.19790410 GRID_WEIGHT COS_LAT \ @@ -129,7 +136,9 @@ &MET_BIN;/ensemble_stat OUTPUT_PREFIX AREA_WEIGHT + DESC AREA_WEIGHT CLIMO_MEAN_FILE &DATA_DIR_CLIMO;/NCEP_1.0deg/cmean_1d.19790410 + CLIMO_STDEV_FILE &DATA_DIR_CLIMO;/NCEP_1.0deg/cstdv_1d.19790410 GRID_WEIGHT AREA \ diff --git a/internal/test_unit/xml/unit_pcp_combine.xml b/internal/test_unit/xml/unit_pcp_combine.xml index a49e220d79..8d9fb0f6c8 100644 --- a/internal/test_unit/xml/unit_pcp_combine.xml +++ b/internal/test_unit/xml/unit_pcp_combine.xml @@ -33,6 +33,19 @@ + + &MET_BIN;/pcp_combine + \ + 20120409_00 3 20120412_15 12 \ + &OUTPUT_DIR;/pcp_combine/nam_2012040900_F087_APCP12.nc \ + -pcpdir &DATA_DIR_MODEL;/grib1/nam \ + -input_thresh 0.75 + + + &OUTPUT_DIR;/pcp_combine/nam_2012040900_F087_APCP12.nc + + + &MET_BIN;/pcp_combine \ @@ -292,22 +305,26 @@ - echo "&DATA_DIR_MODEL;/grib1/arw-fer-gep1/arw-fer-gep1_2012040912_F024.grib \ + echo "MISSING \ + &DATA_DIR_MODEL;/grib1/arw-fer-gep1/arw-fer-gep1_2012040912_F024.grib \ &DATA_DIR_MODEL;/grib1/arw-fer-gep5/arw-fer-gep5_2012040912_F024.grib \ &DATA_DIR_MODEL;/grib1/arw-sch-gep2/arw-sch-gep2_2012040912_F024.grib \ &DATA_DIR_MODEL;/grib1/arw-sch-gep6/arw-sch-gep6_2012040912_F024.grib \ + MISSING/optional/path/to/missing/file \ &DATA_DIR_MODEL;/grib1/arw-tom-gep0/arw-tom-gep0_2012040912_F024.grib \ &DATA_DIR_MODEL;/grib1/arw-tom-gep7/arw-tom-gep7_2012040912_F024.grib \ &DATA_DIR_MODEL;/grib1/nmm-fer-gep4/nmm-fer-gep4_2012040912_F024.grib \ - &DATA_DIR_MODEL;/grib1/nmm-fer-gep8/nmm-fer-gep8_2012040912_F024.grib" \ - > &OUTPUT_DIR;/pcp_combine/derive_file_list; \ + &DATA_DIR_MODEL;/grib1/nmm-fer-gep8/nmm-fer-gep8_2012040912_F024.grib \ + &DATA_DIR_MODEL;/path/to/missing/file" \ + > &OUTPUT_DIR;/pcp_combine/derive_file_list_missing; \ &MET_BIN;/pcp_combine \ -derive mean,stdev,vld_count \ - &OUTPUT_DIR;/pcp_combine/derive_file_list \ + &OUTPUT_DIR;/pcp_combine/derive_file_list_missing \ -field 'name="TMP"; level="Z2";' \ -field 
'name="UGRD"; level="Z10";' \ -field 'name="VGRD"; level="Z10";' \ + -input_thresh 0.7 \ &OUTPUT_DIR;/pcp_combine/derive_2012040912_F024_MULTIPLE_FIELDS.nc @@ -322,7 +339,16 @@ - &MET_BIN;/pcp_combine + echo "&DATA_DIR_MODEL;/grib1/arw-fer-gep1/arw-fer-gep1_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-fer-gep5/arw-fer-gep5_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-sch-gep2/arw-sch-gep2_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-sch-gep6/arw-sch-gep6_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-tom-gep0/arw-tom-gep0_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-tom-gep7/arw-tom-gep7_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/nmm-fer-gep4/nmm-fer-gep4_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/nmm-fer-gep8/nmm-fer-gep8_2012040912_F024.grib" \ + > &OUTPUT_DIR;/pcp_combine/derive_file_list; \ + &MET_BIN;/pcp_combine \ -derive mean,stdev,vld_count \ &OUTPUT_DIR;/pcp_combine/derive_file_list \ diff --git a/internal/test_unit/xml/unit_plot_data_plane.xml b/internal/test_unit/xml/unit_plot_data_plane.xml index 5c56a389d8..599bfd0742 100644 --- a/internal/test_unit/xml/unit_plot_data_plane.xml +++ b/internal/test_unit/xml/unit_plot_data_plane.xml @@ -680,4 +680,18 @@ + + &MET_BIN;/plot_data_plane + \ + &DATA_DIR_MODEL;/nccf/MITLL.ProxyEchoTopsCalibratedMosaic.20200831_235328_v_20200831_235328.nc \ + &OUTPUT_DIR;/plot_data_plane/EchoTops_set_attr_grid.ps \ + 'name="ProxyEchoTopsCalibratedMosaic"; level="(0,*,*)"; set_attr_grid="latlon 8008 4004 -90 -180 0.04 0.04";' \ + -title "Global Synthetic Weather Radar EchoTops" \ + -v 1 + + + &OUTPUT_DIR;/plot_data_plane/EchoTops_set_attr_grid.ps + + + diff --git a/internal/test_unit/xml/unit_point2grid.xml b/internal/test_unit/xml/unit_point2grid.xml index dd1792ccf0..c198834114 100644 --- a/internal/test_unit/xml/unit_point2grid.xml +++ b/internal/test_unit/xml/unit_point2grid.xml @@ -46,6 +46,23 @@ + + &MET_BIN;/point2grid + \ + &OUTPUT_DIR;/pb2nc/ndas.20120409.t12z.prepbufr.tm00.nc 
\ + G212 \ + &OUTPUT_DIR;/point2grid/pb2nc_WINDS.nc \ + -field 'name="UGRD"; level="*";' \ + -field 'name="VGRD"; level="*";' \ + -name UWIND,VWIND \ + -config &CONFIG_DIR;/Point2GridConfig_WINDS \ + -v 3 + + + &OUTPUT_DIR;/point2grid/pb2nc_WINDS.nc + + + &MET_BIN;/point2grid \ @@ -130,14 +147,14 @@ G212 \ &OUTPUT_DIR;/point2grid/point2grid_GOES_16_AOD_TO_G212_compute.nc \ -field 'name="AOD"; level="(*,*)";' \ - -qc 1,2,3 -method MAX \ + -goes_qc 0,1 -method MAX \ -v 1 &OUTPUT_DIR;/point2grid/point2grid_GOES_16_AOD_TO_G212_compute.nc - + &MET_BIN;/point2grid @@ -155,7 +172,7 @@ &OUTPUT_DIR;/point2grid/point2grid_GOES_16_AOD_TO_G212_gaussian.nc - + &MET_BIN;/point2grid @@ -167,14 +184,14 @@ &OUTPUT_DIR;/point2grid/point2grid_GOES_16_ADP.nc \ -field 'name="AOD_Smoke"; level="(*,*)";' \ -adp &DATA_DIR_MODEL;/goes_16/OR_ABI-L2-ADPC-M6_G16_s20192662141196_e20192662143569_c20192662144526.nc \ - -qc 1,2 -method MAX \ + -goes_qc 0,1 -method MAX \ -v 1 &OUTPUT_DIR;/point2grid/point2grid_GOES_16_ADP.nc - + &MET_BIN;/point2grid @@ -185,7 +202,7 @@ G212 \ &OUTPUT_DIR;/point2grid/point2grid_GOES_16_AOD_TO_G212_grid_map.nc \ -field 'name="AOD"; level="(*,*)";' \ - -qc 1,2,3 -method MAX \ + -goes_qc 0,1,2 -method MAX \ -v 1 @@ -205,7 +222,7 @@ G212 \ &OUTPUT_DIR;/point2grid/point2grid_GOES_16_AOD_TO_G212.nc \ -field 'name="AOD"; level="(*,*)";' \ - -qc 1,2,3 -method MAX \ + -goes_qc 0,1,2 -method MAX \ -v 1 @@ -213,6 +230,25 @@ + + &MET_BIN;/point2grid + + MET_TMP_DIR &OUTPUT_DIR;/point2grid + + \ + &DATA_DIR_MODEL;/goes_16/OR_ABI-L2-AODC-M6_G16_s20241100001171_e20241100003544_c20241100006242.nc \ + G212 \ + &OUTPUT_DIR;/point2grid/point2grid_GOES_16_ADP_Enterprise_high.nc \ + -field 'name="AOD_Smoke"; level="(*,*)";' \ + -adp &DATA_DIR_MODEL;/goes_16/OR_ABI-L2-ADPC-M6_G16_s20241100001171_e20241100003544_c20241100006361.nc \ + -goes_qc 0,1 -method MAX \ + -v 1 + + + &OUTPUT_DIR;/point2grid/point2grid_GOES_16_ADP_Enterprise_high.nc + + + &MET_BIN;/point2grid @@ -230,6 +266,77 @@ + 
+ &MET_BIN;/point2grid + + MET_TMP_DIR &OUTPUT_DIR;/point2grid + + \ + &DATA_DIR_OBS;/point_obs/iceh.2018-01-03.c00.small.nc \ + G171 \ + &OUTPUT_DIR;/point2grid/point2grid_sea_ice.nc \ + -field 'name="uvel_d"; level="(0,*,*)";' -field 'name="hi_d"; level="(0,*,*)";' \ + -v 1 + + + &OUTPUT_DIR;/point2grid/point2grid_sea_ice.nc + + + + + &MET_BIN;/point2grid + + MET_TMP_DIR &OUTPUT_DIR;/point2grid + + \ + &DATA_DIR_OBS;/point_obs/iceh.2018-01-03.c00.small.nc \ + G171 \ + &OUTPUT_DIR;/point2grid/point2grid_sea_ice_snow.nc \ + -config &CONFIG_DIR;/Point2GridConfig_lat_lon \ + -field 'name="hs_d_without_att"; level="(0,*,*)";' \ + -v 1 + + + &OUTPUT_DIR;/point2grid/point2grid_sea_ice_snow.nc + + + + + &MET_BIN;/point2grid + + MET_TMP_DIR &OUTPUT_DIR;/point2grid + + \ + &DATA_DIR_OBS;/point_obs/rtofs_glo_2ds_f006_ice_coverage.nc \ + "latlon 720 360 -80. -60. 0.5 0.5" \ + &OUTPUT_DIR;/point2grid/point2grid_rtofs_ice_coverage.nc \ + -field 'name="ice_coverage"; level="(0,*,*)";' \ + -v 1 + + + &OUTPUT_DIR;/point2grid/point2grid_rtofs_ice_coverage.nc + + + + + &MET_BIN;/point2grid + + MET_TMP_DIR &OUTPUT_DIR;/point2grid + + \ + &DATA_DIR_OBS;/point_obs/gfs.ocean.t00z.6hr_avg.f006_SST.nc \ + G231 \ + &OUTPUT_DIR;/point2grid/point2grid_gfs.ocean.SST.nc \ + -config &CONFIG_DIR;/Point2GridConfig_SST \ + -field 'name="SST"; level="(0,*,*)";' \ + -v 1 + + + &OUTPUT_DIR;/point2grid/point2grid_gfs.ocean.SST.nc + + + + &MET_BIN;/point2grid @@ -240,7 +347,7 @@ G212 \ &OUTPUT_DIR;/point2grid/point2grid_GOES_16_AOD_TO_G212_unsigned.nc \ -field 'name="AOD"; level="(*,*)";' \ - -qc 1,2,3 -method MAX \ + -qc 0,1,2 -method MAX \ -v 1 @@ -264,4 +371,24 @@ &OUTPUT_DIR;/point2grid/point2grid_2D_time_west_bering_sea.nc + + + &MET_BIN;/point2grid + + MET_TMP_DIR &OUTPUT_DIR;/point2grid + + \ + &DATA_DIR_MODEL;/cice/iceh.2018-01-03.c00.tlat_tlon.nc \ + G231 \ + &OUTPUT_DIR;/point2grid/point2grid_cice_to_G231.nc \ + -config &CONFIG_DIR;/Point2GridConfig_tlat_tlon \ + -field 'name="hi_d"; 
level="(0,*,*)"; set_attr_grid="latlon 1440 1080 -80 -180 0.1 0.1";' \ + -v 1 + + + &OUTPUT_DIR;/point2grid/point2grid_cice_to_G231.nc + + + + diff --git a/internal/test_unit/xml/unit_point_stat.xml b/internal/test_unit/xml/unit_point_stat.xml index b57d756a14..8e798e03ec 100644 --- a/internal/test_unit/xml/unit_point_stat.xml +++ b/internal/test_unit/xml/unit_point_stat.xml @@ -18,6 +18,8 @@ + &TEST_DIR; + true &MET_BIN;/point_stat @@ -70,6 +72,26 @@ + + &MET_BIN;/point_stat + + BEG_DS -300 + END_DS 300 + OUTPUT_PREFIX GRIB1_NAM_GDAS_MPR_OBTYPE + CONFIG_DIR &CONFIG_DIR; + CLIMO_FILE "&DATA_DIR_MODEL;/grib1/gfs/gfs_2012040900_F012_gNam.grib" + + \ + &DATA_DIR_MODEL;/grib1/nam/nam_2012040900_F012.grib \ + &OUTPUT_DIR;/pb2nc/gdas1.20120409.t12z.prepbufr.nc \ + &CONFIG_DIR;/PointStatConfig_MPR_OBTYPE \ + -outdir &OUTPUT_DIR;/point_stat -v 1 + + + &OUTPUT_DIR;/point_stat/point_stat_GRIB1_NAM_GDAS_MPR_OBTYPE_120000L_20120409_120000V.stat + + + &MET_BIN;/point_stat @@ -166,8 +188,6 @@ FCST_FIELD_NAME APCP FCST_FIELD_LEVEL A3 OBS_DICT fcst - SEEPS_FLAG NONE - SEEPS_P1_THRESH NA OUTPUT_PREFIX GRIB1_NAM_TRMM \ @@ -194,8 +214,6 @@ FCST_FIELD_NAME APCP FCST_FIELD_LEVEL A3 OBS_DICT fcst - SEEPS_FLAG NONE - SEEPS_P1_THRESH NA OUTPUT_PREFIX GRIB2_SREF_TRMM \ @@ -222,8 +240,6 @@ FCST_FIELD_NAME APCP_24 FCST_FIELD_LEVEL (*,*) OBS_DICT { field = [ { name = "APCP"; level = "A24"; } ]; } - SEEPS_FLAG NONE - SEEPS_P1_THRESH NA OUTPUT_PREFIX NCMET_NAM_HMTGAGE \ @@ -250,14 +266,14 @@ FCST_FIELD_NAME APCP_24 FCST_FIELD_LEVEL (*,*) OBS_DICT { field = [ { name = "TP24"; level = "L0"; is_precipitation = TRUE; } ]; } - SEEPS_FLAG BOTH SEEPS_P1_THRESH ge0.1&&le0.85 + SEEPS_POINT_CLIMO_NAME OUTPUT_PREFIX NCMET_NAM_NDAS_SEEPS \ &DATA_DIR_MODEL;/met_nc/nam/nam_2012040900_F036_APCP24.nc \ &OUTPUT_DIR;/pb2nc/ndas.20120410.t12z.prepbufr.tm00.nc \ - &CONFIG_DIR;/PointStatConfig_APCP \ + &CONFIG_DIR;/PointStatConfig_SEEPS \ -outdir &OUTPUT_DIR;/point_stat -v 1 diff --git 
a/internal/test_unit/xml/unit_point_weight.xml b/internal/test_unit/xml/unit_point_weight.xml new file mode 100644 index 0000000000..b034b54a3a --- /dev/null +++ b/internal/test_unit/xml/unit_point_weight.xml @@ -0,0 +1,148 @@ + + + + + + + + + + +]> + + + + + + &TEST_DIR; + true + + + &MET_BIN;/point_stat + + OUTPUT_PREFIX NO_WEIGHT + DESC NO_WEIGHT + POINT_WEIGHT NONE + CONFIG_DIR &CONFIG_DIR; + + \ + &DATA_DIR_MODEL;/grib2/sref_mn/sref_mean_2012040821_F015.grib2 \ + &OUTPUT_DIR;/pb2nc/gdas1.20120409.t12z.prepbufr.nc \ + &CONFIG_DIR;/PointStatConfig_point_weight \ + -outdir &OUTPUT_DIR;/point_weight -v 1 + + + &OUTPUT_DIR;/point_weight/point_stat_NO_WEIGHT_150000L_20120409_120000V.stat + + + + + &MET_BIN;/point_stat + + OUTPUT_PREFIX SID_WEIGHT + DESC SID_WEIGHT + POINT_WEIGHT SID + CONFIG_DIR &CONFIG_DIR; + + \ + &DATA_DIR_MODEL;/grib2/sref_mn/sref_mean_2012040821_F015.grib2 \ + &OUTPUT_DIR;/pb2nc/gdas1.20120409.t12z.prepbufr.nc \ + &CONFIG_DIR;/PointStatConfig_point_weight \ + -outdir &OUTPUT_DIR;/point_weight -v 1 + + + &OUTPUT_DIR;/point_weight/point_stat_SID_WEIGHT_150000L_20120409_120000V.stat + + + + + &MET_BIN;/point_stat + + OUTPUT_PREFIX PROB_NO_WEIGHT + DESC NO_WEIGHT + POINT_WEIGHT NONE + CONFIG_DIR &CONFIG_DIR; + + \ + &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F015.grib2 \ + &OUTPUT_DIR;/pb2nc/gdas1.20120409.t12z.prepbufr.nc \ + &CONFIG_DIR;/PointStatConfig_prob_point_weight \ + -outdir &OUTPUT_DIR;/point_weight -v 1 + + + &OUTPUT_DIR;/point_weight/point_stat_PROB_NO_WEIGHT_150000L_20120409_120000V.stat + + + + + &MET_BIN;/point_stat + + OUTPUT_PREFIX PROB_SID_WEIGHT + DESC SID_WEIGHT + POINT_WEIGHT SID + CONFIG_DIR &CONFIG_DIR; + + \ + &DATA_DIR_MODEL;/grib2/sref_pr/sref_prob_2012040821_F015.grib2 \ + &OUTPUT_DIR;/pb2nc/gdas1.20120409.t12z.prepbufr.nc \ + &CONFIG_DIR;/PointStatConfig_prob_point_weight \ + -outdir &OUTPUT_DIR;/point_weight -v 1 + + + &OUTPUT_DIR;/point_weight/point_stat_PROB_SID_WEIGHT_150000L_20120409_120000V.stat + + + + 
+ &MET_BIN;/ensemble_stat + + OUTPUT_PREFIX NO_WEIGHT + DESC NO_WEIGHT + POINT_WEIGHT NONE + CONFIG_DIR &CONFIG_DIR; + + \ + 6 \ + &DATA_DIR_MODEL;/grib1/arw-fer-gep1/arw-fer-gep1_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-fer-gep5/arw-fer-gep5_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-sch-gep2/arw-sch-gep2_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-sch-gep6/arw-sch-gep6_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-tom-gep3/arw-tom-gep3_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-tom-gep7/arw-tom-gep7_2012040912_F024.grib \ + &CONFIG_DIR;/EnsembleStatConfig_point_weight \ + -point_obs &OUTPUT_DIR;/ascii2nc/gauge_2012041012_24hr.nc \ + -outdir &OUTPUT_DIR;/point_weight -v 1 + + + &OUTPUT_DIR;/point_weight/ensemble_stat_NO_WEIGHT_20120410_120000V.stat + + + + + &MET_BIN;/ensemble_stat + + OUTPUT_PREFIX SID_WEIGHT + DESC SID_WEIGHT + POINT_WEIGHT SID + CONFIG_DIR &CONFIG_DIR; + + \ + 6 \ + &DATA_DIR_MODEL;/grib1/arw-fer-gep1/arw-fer-gep1_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-fer-gep5/arw-fer-gep5_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-sch-gep2/arw-sch-gep2_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-sch-gep6/arw-sch-gep6_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-tom-gep3/arw-tom-gep3_2012040912_F024.grib \ + &DATA_DIR_MODEL;/grib1/arw-tom-gep7/arw-tom-gep7_2012040912_F024.grib \ + &CONFIG_DIR;/EnsembleStatConfig_point_weight \ + -point_obs &OUTPUT_DIR;/ascii2nc/gauge_2012041012_24hr.nc \ + -outdir &OUTPUT_DIR;/point_weight -v 1 + + + &OUTPUT_DIR;/point_weight/ensemble_stat_SID_WEIGHT_20120410_120000V.stat + + + + + diff --git a/internal/test_unit/xml/unit_python.xml b/internal/test_unit/xml/unit_python.xml index af782db022..3bf4c6521f 100644 --- a/internal/test_unit/xml/unit_python.xml +++ b/internal/test_unit/xml/unit_python.xml @@ -162,9 +162,9 @@ &OUTPUT_DIR;/python/mode_python_mixed_300000L_20120410_180000V_060000A.ps - 
&OUTPUT_DIR;/python/mode_python_mixed_300000L_20120410_180000V_060000A_cts.txt - &OUTPUT_DIR;/python/mode_python_mixed_300000L_20120410_180000V_060000A_obj.txt - &OUTPUT_DIR;/python/mode_python_mixed_300000L_20120410_180000V_060000A_obj.nc + &OUTPUT_DIR;/python/mode_python_mixed_300000L_20120410_180000V_060000A_cts.txt + &OUTPUT_DIR;/python/mode_python_mixed_300000L_20120410_180000V_060000A_obj.txt + &OUTPUT_DIR;/python/mode_python_mixed_300000L_20120410_180000V_060000A_obj.nc @@ -182,9 +182,9 @@ &OUTPUT_DIR;/python/mode_python_120000L_20050807_120000V_120000A.ps - &OUTPUT_DIR;/python/mode_python_120000L_20050807_120000V_120000A_obj.txt - &OUTPUT_DIR;/python/mode_python_120000L_20050807_120000V_120000A_cts.txt - &OUTPUT_DIR;/python/mode_python_120000L_20050807_120000V_120000A_obj.nc + &OUTPUT_DIR;/python/mode_python_120000L_20050807_120000V_120000A_obj.txt + &OUTPUT_DIR;/python/mode_python_120000L_20050807_120000V_120000A_cts.txt + &OUTPUT_DIR;/python/mode_python_120000L_20050807_120000V_120000A_obj.nc @@ -200,8 +200,7 @@ -outdir &OUTPUT_DIR;/python -v 1 - &OUTPUT_DIR;/python/grid_stat_python_mixed_120000L_20120409_120000V.stat - &OUTPUT_DIR;/python/grid_stat_python_mixed_120000L_20120409_120000V_pairs.nc + &OUTPUT_DIR;/python/grid_stat_python_mixed_120000L_20120409_120000V_pairs.nc @@ -218,7 +217,7 @@ -outdir &OUTPUT_DIR;/python -v 1 - &OUTPUT_DIR;/python/grid_stat_python_120000L_20050807_120000V_pairs.nc + &OUTPUT_DIR;/python/grid_stat_python_120000L_20050807_120000V_pairs.nc @@ -237,7 +236,7 @@ -outdir &OUTPUT_DIR;/python -v 1 - &OUTPUT_DIR;/python/point_stat_python_120000L_20120409_120000V.stat + &OUTPUT_DIR;/python/point_stat_120000L_20050807_120000V.stat @@ -255,7 +254,7 @@ &OUTPUT_DIR;/python/wavelet_stat_python_120000L_20050807_120000V.stat - &OUTPUT_DIR;/python/wavelet_stat_python_120000L_20050807_120000V_isc.txt + &OUTPUT_DIR;/python/wavelet_stat_python_120000L_20050807_120000V_isc.txt &OUTPUT_DIR;/python/wavelet_stat_python_120000L_20050807_120000V.nc 
&OUTPUT_DIR;/python/wavelet_stat_python_120000L_20050807_120000V.ps @@ -274,7 +273,7 @@ &OUTPUT_DIR;/python/wavelet_stat_python_mixed_120000L_20050807_120000V.stat - &OUTPUT_DIR;/python/wavelet_stat_python_mixed_120000L_20050807_120000V_isc.txt + &OUTPUT_DIR;/python/wavelet_stat_python_mixed_120000L_20050807_120000V_isc.txt &OUTPUT_DIR;/python/wavelet_stat_python_mixed_120000L_20050807_120000V.nc &OUTPUT_DIR;/python/wavelet_stat_python_mixed_120000L_20050807_120000V.ps @@ -557,6 +556,20 @@ + + &MET_BIN;/plot_point_obs + \ + 'PYTHON_NUMPY=&MET_BASE;/python/examples/read_met_point_obs_pandas.py &OUTPUT_DIR;/pb2nc/ndas.20120409.t12z.prepbufr.tm00.nc' \ + &OUTPUT_DIR;/python/ndas.20120409.t12z.prepbufr.tm00.nr_met_nc_to_pandas.ps \ + -data_file &DATA_DIR_MODEL;/grib2/nam/nam_2012040900_F012.grib2 \ + -dotsize 2.0 \ + -v 1 + + + &OUTPUT_DIR;/python/ndas.20120409.t12z.prepbufr.tm00.nr_met_nc_to_pandas.ps + + + echo "&DATA_DIR_MODEL;/grib1/arw-fer-gep1/arw-fer-gep1_2012040912_F024.grib \ &DATA_DIR_MODEL;/grib1/arw-fer-gep5/arw-fer-gep5_2012040912_F024.grib \ diff --git a/internal/test_unit/xml/unit_ref_config_lead_12.xml b/internal/test_unit/xml/unit_ref_config_lead_12.xml index 989e548da5..5945ff2fdc 100644 --- a/internal/test_unit/xml/unit_ref_config_lead_12.xml +++ b/internal/test_unit/xml/unit_ref_config_lead_12.xml @@ -79,7 +79,7 @@ \ -subtract \ &DATA_DIR_MODEL;/grib1/ref_config/2011090200/AFWAv3.4_Noahv3.3/postprd/wrfprs_012.tm00 12 \ - &DATA_DIR_MODEL;/grib1/ref_config/2011090200/AFWAv3.4_Noahv3.3/postprd/wrfprs_009.tm00 9 \ + &DATA_DIR_MODEL;/grib1/ref_config/2011090200/AFWAv3.4_Noahv3.3/postprd/wrfprs_009.tm00 9 \ &OUTPUT_DIR;/ref_config_lead_12/pcp_combine/wrf/wrfpcp03_012.nc diff --git a/internal/test_unit/xml/unit_series_analysis.xml b/internal/test_unit/xml/unit_series_analysis.xml index c1e64416b3..96cda729dd 100644 --- a/internal/test_unit/xml/unit_series_analysis.xml +++ b/internal/test_unit/xml/unit_series_analysis.xml @@ -29,12 +29,12 @@ OBS_FIELD { 
name = "APCP"; level = [ "A06" ]; } MASK_POLY FHO_STATS "F_RATE", "O_RATE" - CTC_STATS "FY_OY", "FN_ON" + CTC_STATS "ALL" CTS_STATS "CSI", "GSS" - MCTC_STATS "F1_O1", "F2_O2", "F3_O3" + MCTC_STATS "ALL" MCTS_STATS "ACC", "ACC_NCL", "ACC_NCU" CNT_STATS "TOTAL", "ME", "ME_NCL", "ME_NCU" - SL1L2_STATS "FBAR", "OBAR" + SL1L2_STATS "ALL" SAL1L2_STATS PCT_STATS PSTD_STATS @@ -46,22 +46,56 @@ &DATA_DIR_MODEL;/grib1/gfs_hmt/gfs_2012040900_F012.grib \ &DATA_DIR_MODEL;/grib1/gfs_hmt/gfs_2012040900_F018.grib \ &DATA_DIR_MODEL;/grib1/gfs_hmt/gfs_2012040900_F024.grib \ - &DATA_DIR_MODEL;/grib1/gfs_hmt/gfs_2012040900_F030.grib \ - &DATA_DIR_MODEL;/grib1/gfs_hmt/gfs_2012040900_F036.grib \ - &DATA_DIR_MODEL;/grib1/gfs_hmt/gfs_2012040900_F042.grib \ -obs &DATA_DIR_OBS;/stage4_hmt/stage4_2012040906_06h.grib \ &DATA_DIR_OBS;/stage4_hmt/stage4_2012040912_06h.grib \ &DATA_DIR_OBS;/stage4_hmt/stage4_2012040918_06h.grib \ &DATA_DIR_OBS;/stage4_hmt/stage4_2012041000_06h.grib \ - &DATA_DIR_OBS;/stage4_hmt/stage4_2012041006_06h.grib \ + -out &OUTPUT_DIR;/series_analysis/series_analysis_CMD_LINE_APCP_06_2012040900_to_2012041000.nc \ + -config &CONFIG_DIR;/SeriesAnalysisConfig \ + -v 1 + + + &OUTPUT_DIR;/series_analysis/series_analysis_CMD_LINE_APCP_06_2012040900_to_2012041000.nc + + + + + &MET_BIN;/series_analysis + + MODEL GFS + OBTYPE STAGE4 + FCST_CAT_THRESH >0.0, >5.0 + FCST_FIELD { name = "APCP"; level = [ "A06" ]; } + OBS_CAT_THRESH >0.0, >5.0 + OBS_FIELD { name = "APCP"; level = [ "A06" ]; } + MASK_POLY + FHO_STATS "F_RATE", "O_RATE" + CTC_STATS "ALL" + CTS_STATS "CSI", "GSS" + MCTC_STATS "ALL" + MCTS_STATS "ACC", "ACC_NCL", "ACC_NCU" + CNT_STATS "TOTAL", "ME", "ME_NCL", "ME_NCU" + SL1L2_STATS "ALL" + SAL1L2_STATS + PCT_STATS + PSTD_STATS + PJC_STATS + PRC_STATS + + \ + -fcst &DATA_DIR_MODEL;/grib1/gfs_hmt/gfs_2012040900_F030.grib \ + &DATA_DIR_MODEL;/grib1/gfs_hmt/gfs_2012040900_F036.grib \ + &DATA_DIR_MODEL;/grib1/gfs_hmt/gfs_2012040900_F042.grib \ + -obs 
&DATA_DIR_OBS;/stage4_hmt/stage4_2012041006_06h.grib \ &DATA_DIR_OBS;/stage4_hmt/stage4_2012041012_06h.grib \ &DATA_DIR_OBS;/stage4_hmt/stage4_2012041018_06h.grib \ - -out &OUTPUT_DIR;/series_analysis/series_analysis_CMD_LINE_APCP_06_2012040900_to_2012041100.nc \ + -aggr &OUTPUT_DIR;/series_analysis/series_analysis_CMD_LINE_APCP_06_2012040900_to_2012041000.nc \ + -out &OUTPUT_DIR;/series_analysis/series_analysis_AGGR_CMD_LINE_APCP_06_2012040900_to_2012041018.nc \ -config &CONFIG_DIR;/SeriesAnalysisConfig \ -v 1 - &OUTPUT_DIR;/series_analysis/series_analysis_CMD_LINE_APCP_06_2012040900_to_2012041100.nc + &OUTPUT_DIR;/series_analysis/series_analysis_AGGR_CMD_LINE_APCP_06_2012040900_to_2012041018.nc @@ -69,18 +103,12 @@ echo "&DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F009.grib \ &DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F015.grib \ &DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F021.grib \ - &DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F027.grib \ - &DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F033.grib \ - &DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F039.grib \ - &DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F045.grib" \ + &DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F027.grib" \ > &OUTPUT_DIR;/series_analysis/input_fcst_file_list; \ echo "&DATA_DIR_OBS;/stage4_hmt/stage4_2012040906_06h.grib \ &DATA_DIR_OBS;/stage4_hmt/stage4_2012040912_06h.grib \ &DATA_DIR_OBS;/stage4_hmt/stage4_2012040918_06h.grib \ - &DATA_DIR_OBS;/stage4_hmt/stage4_2012041000_06h.grib \ - &DATA_DIR_OBS;/stage4_hmt/stage4_2012041006_06h.grib \ - &DATA_DIR_OBS;/stage4_hmt/stage4_2012041012_06h.grib \ - &DATA_DIR_OBS;/stage4_hmt/stage4_2012041018_06h.grib" \ + &DATA_DIR_OBS;/stage4_hmt/stage4_2012041000_06h.grib" \ > &OUTPUT_DIR;/series_analysis/input_obs_file_list; \ &MET_BIN;/series_analysis @@ -99,7 +127,7 @@ CNT_STATS SL1L2_STATS SAL1L2_STATS - PCT_STATS "OY_1", "ON_1" + PCT_STATS "ALL" PSTD_STATS "TOTAL", "ROC_AUC", "BRIER", "BRIER_NCL", "BRIER_NCU" PJC_STATS "CALIBRATION_1", "REFINEMENT_1" 
PRC_STATS "PODY_1", "POFD_1" @@ -107,12 +135,56 @@ \ -fcst &OUTPUT_DIR;/series_analysis/input_fcst_file_list \ -obs &OUTPUT_DIR;/series_analysis/input_obs_file_list \ - -out &OUTPUT_DIR;/series_analysis/series_analysis_FILE_LIST_PROB_APCP_06_2012040900_to_2012041100.nc \ + -out &OUTPUT_DIR;/series_analysis/series_analysis_FILE_LIST_PROB_APCP_06_2012040900_to_2012041000.nc \ + -config &CONFIG_DIR;/SeriesAnalysisConfig \ + -v 1 + + + &OUTPUT_DIR;/series_analysis/series_analysis_FILE_LIST_PROB_APCP_06_2012040900_to_2012041000.nc + + + + + echo "&DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F033.grib \ + &DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F039.grib \ + &DATA_DIR_MODEL;/grib1/sref/sref_2012040821_F045.grib" \ + > &OUTPUT_DIR;/series_analysis/aggregate_fcst_file_list; \ + echo "&DATA_DIR_OBS;/stage4_hmt/stage4_2012041006_06h.grib \ + &DATA_DIR_OBS;/stage4_hmt/stage4_2012041012_06h.grib \ + &DATA_DIR_OBS;/stage4_hmt/stage4_2012041018_06h.grib" \ + > &OUTPUT_DIR;/series_analysis/aggregate_obs_file_list; \ + &MET_BIN;/series_analysis + + MODEL SREF + OBTYPE STAGE4 + FCST_CAT_THRESH >=0.00, >=0.25, >=0.50, >=0.75, >=1.00 + FCST_FIELD { name = "PROB"; level = "A06"; prob = { name = "APCP"; thresh_lo = 0.25; }; } + OBS_CAT_THRESH >0.25 + OBS_FIELD { name = "APCP"; level = "A06"; } + MASK_POLY + FHO_STATS + CTC_STATS + CTS_STATS + MCTC_STATS + MCTS_STATS + CNT_STATS + SL1L2_STATS + SAL1L2_STATS + PCT_STATS "ALL" + PSTD_STATS "TOTAL", "ROC_AUC", "BRIER", "BRIER_NCL", "BRIER_NCU" + PJC_STATS "CALIBRATION_1", "REFINEMENT_1" + PRC_STATS "PODY_1", "POFD_1" + + \ + -fcst &OUTPUT_DIR;/series_analysis/aggregate_fcst_file_list \ + -obs &OUTPUT_DIR;/series_analysis/aggregate_obs_file_list \ + -aggr &OUTPUT_DIR;/series_analysis/series_analysis_FILE_LIST_PROB_APCP_06_2012040900_to_2012041000.nc \ + -out &OUTPUT_DIR;/series_analysis/series_analysis_AGGR_FILE_LIST_PROB_APCP_06_2012040900_to_2012041018.nc \ -config &CONFIG_DIR;/SeriesAnalysisConfig \ -v 1 - 
&OUTPUT_DIR;/series_analysis/series_analysis_FILE_LIST_PROB_APCP_06_2012040900_to_2012041100.nc + &OUTPUT_DIR;/series_analysis/series_analysis_AGGR_FILE_LIST_PROB_APCP_06_2012040900_to_2012041018.nc diff --git a/internal/test_unit/xml/unit_stat_analysis_ps.xml b/internal/test_unit/xml/unit_stat_analysis_ps.xml index 9fd50dcb2a..1ea5e2e19a 100644 --- a/internal/test_unit/xml/unit_stat_analysis_ps.xml +++ b/internal/test_unit/xml/unit_stat_analysis_ps.xml @@ -38,6 +38,8 @@ &OUTPUT_DIR;/stat_analysis_ps/CONFIG_POINT_STAT_agg_stat_mpr_to_wdir_dump.stat &OUTPUT_DIR;/stat_analysis_ps/CONFIG_POINT_STAT_filter_mpr_sid.stat &OUTPUT_DIR;/stat_analysis_ps/CONFIG_POINT_STAT_filter_mpr_fcst_minus_obs.stat + &OUTPUT_DIR;/stat_analysis_ps/CONFIG_POINT_STAT_agg_vl1l2.stat + &OUTPUT_DIR;/stat_analysis_ps/CONFIG_POINT_STAT_agg_stat_vl1l2_to_vcnt.stat diff --git a/internal/test_unit/xml/unit_tc_diag.xml b/internal/test_unit/xml/unit_tc_diag.xml index e0e1686718..1f75454ac0 100644 --- a/internal/test_unit/xml/unit_tc_diag.xml +++ b/internal/test_unit/xml/unit_tc_diag.xml @@ -33,9 +33,9 @@ -v 2 - &OUTPUT_DIR;/tc_diag/sal092022_gfso_doper_2022092400_cyl_grid_parent.nc - &OUTPUT_DIR;/tc_diag/sal092022_gfso_doper_2022092400_diag.nc - &OUTPUT_DIR;/tc_diag/sal092022_gfso_doper_2022092400_diag.dat + &OUTPUT_DIR;/tc_diag/sal092022_gfso_doper_2022092400_cyl_grid_parent.nc + &OUTPUT_DIR;/tc_diag/sal092022_gfso_doper_2022092400_diag.nc + &OUTPUT_DIR;/tc_diag/sal092022_gfso_doper_2022092400_diag.dat diff --git a/internal/test_unit/xml/unit_ugrid.xml b/internal/test_unit/xml/unit_ugrid.xml index 80a6a53360..5f6e517a7c 100644 --- a/internal/test_unit/xml/unit_ugrid.xml +++ b/internal/test_unit/xml/unit_ugrid.xml @@ -20,6 +20,9 @@ + &TEST_DIR; + true + &MET_BIN;/grid_stat @@ -33,7 +36,7 @@ &OUTPUT_DIR;/grid_stat_ugrid/grid_stat_UGRID_MPAS_OUT_TO_GRID_000000L_20120409_120000V.stat - &OUTPUT_DIR;/grid_stat_ugrid/grid_stat_UGRID_MPAS_OUT_TO_GRID_000000L_20120409_120000V_pairs.nc + 
&OUTPUT_DIR;/grid_stat_ugrid/grid_stat_UGRID_MPAS_OUT_TO_GRID_000000L_20120409_120000V_pairs.nc @@ -51,7 +54,7 @@ &OUTPUT_DIR;/grid_stat_ugrid/grid_stat_UGRID_MPAS_DIAG_000000L_20120409_120000V.stat - &OUTPUT_DIR;/grid_stat_ugrid/grid_stat_UGRID_MPAS_DIAG_000000L_20120409_120000V_pairs.nc + &OUTPUT_DIR;/grid_stat_ugrid/grid_stat_UGRID_MPAS_DIAG_000000L_20120409_120000V_pairs.nc diff --git a/internal/test_util/Makefile.in b/internal/test_util/Makefile.in index 91fbb38a04..32c2b09cef 100644 --- a/internal/test_util/Makefile.in +++ b/internal/test_util/Makefile.in @@ -230,6 +230,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/basic/Makefile.in b/internal/test_util/basic/Makefile.in index 8f7a6ae5f5..eacf3b24c2 100644 --- a/internal/test_util/basic/Makefile.in +++ b/internal/test_util/basic/Makefile.in @@ -230,6 +230,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/basic/vx_config/Makefile.in b/internal/test_util/basic/vx_config/Makefile.in index 259d6dae5e..11f8c27d32 100644 --- a/internal/test_util/basic/vx_config/Makefile.in +++ b/internal/test_util/basic/vx_config/Makefile.in @@ -255,6 +255,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/basic/vx_log/Makefile.in b/internal/test_util/basic/vx_log/Makefile.in index 0913f22a00..bc602758e2 100644 --- a/internal/test_util/basic/vx_log/Makefile.in +++ 
b/internal/test_util/basic/vx_log/Makefile.in @@ -224,6 +224,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/basic/vx_util/Makefile.in b/internal/test_util/basic/vx_util/Makefile.in index ebbef0522c..7c4663e135 100644 --- a/internal/test_util/basic/vx_util/Makefile.in +++ b/internal/test_util/basic/vx_util/Makefile.in @@ -262,6 +262,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/Makefile.in b/internal/test_util/libcode/Makefile.in index 4d4f244b34..38acababa3 100644 --- a/internal/test_util/libcode/Makefile.in +++ b/internal/test_util/libcode/Makefile.in @@ -234,6 +234,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_data2d/Makefile.in b/internal/test_util/libcode/vx_data2d/Makefile.in index f659f7c81a..25f644ed86 100644 --- a/internal/test_util/libcode/vx_data2d/Makefile.in +++ b/internal/test_util/libcode/vx_data2d/Makefile.in @@ -230,6 +230,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_data2d_factory/Makefile.in b/internal/test_util/libcode/vx_data2d_factory/Makefile.in index 1d93387de5..31ea2c180b 100644 --- 
a/internal/test_util/libcode/vx_data2d_factory/Makefile.in +++ b/internal/test_util/libcode/vx_data2d_factory/Makefile.in @@ -228,6 +228,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_data2d_grib/Makefile.in b/internal/test_util/libcode/vx_data2d_grib/Makefile.in index 5591f6439b..892f5eca9e 100644 --- a/internal/test_util/libcode/vx_data2d_grib/Makefile.in +++ b/internal/test_util/libcode/vx_data2d_grib/Makefile.in @@ -221,6 +221,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_data2d_nc_cf/Makefile.in b/internal/test_util/libcode/vx_data2d_nc_cf/Makefile.in index 52429cc106..749c25c8c9 100644 --- a/internal/test_util/libcode/vx_data2d_nc_cf/Makefile.in +++ b/internal/test_util/libcode/vx_data2d_nc_cf/Makefile.in @@ -220,6 +220,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_data2d_nc_met/Makefile.in b/internal/test_util/libcode/vx_data2d_nc_met/Makefile.in index 25fc3da12e..b02eb106d7 100644 --- a/internal/test_util/libcode/vx_data2d_nc_met/Makefile.in +++ b/internal/test_util/libcode/vx_data2d_nc_met/Makefile.in @@ -222,6 +222,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff 
--git a/internal/test_util/libcode/vx_geodesy/Makefile.in b/internal/test_util/libcode/vx_geodesy/Makefile.in index 63e759c489..f196c88c92 100644 --- a/internal/test_util/libcode/vx_geodesy/Makefile.in +++ b/internal/test_util/libcode/vx_geodesy/Makefile.in @@ -218,6 +218,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_grid/Makefile.in b/internal/test_util/libcode/vx_grid/Makefile.in index 05203d024a..9f7c65ed86 100644 --- a/internal/test_util/libcode/vx_grid/Makefile.in +++ b/internal/test_util/libcode/vx_grid/Makefile.in @@ -220,6 +220,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_nc_util/Makefile.in b/internal/test_util/libcode/vx_nc_util/Makefile.in index ad865d558f..af0e249f7b 100644 --- a/internal/test_util/libcode/vx_nc_util/Makefile.in +++ b/internal/test_util/libcode/vx_nc_util/Makefile.in @@ -225,6 +225,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_physics/Makefile.in b/internal/test_util/libcode/vx_physics/Makefile.in index 02f6ef57ae..20b0102618 100644 --- a/internal/test_util/libcode/vx_physics/Makefile.in +++ b/internal/test_util/libcode/vx_physics/Makefile.in @@ -218,6 +218,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ 
MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_plot_util/Makefile.in b/internal/test_util/libcode/vx_plot_util/Makefile.in index 6a950df3aa..7f6bdd6dbc 100644 --- a/internal/test_util/libcode/vx_plot_util/Makefile.in +++ b/internal/test_util/libcode/vx_plot_util/Makefile.in @@ -219,6 +219,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_ps/Makefile.in b/internal/test_util/libcode/vx_ps/Makefile.in index d288997194..78db014c97 100644 --- a/internal/test_util/libcode/vx_ps/Makefile.in +++ b/internal/test_util/libcode/vx_ps/Makefile.in @@ -218,6 +218,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_python3_utils/Makefile.in b/internal/test_util/libcode/vx_python3_utils/Makefile.in index 3d3d12cefb..b416b84940 100644 --- a/internal/test_util/libcode/vx_python3_utils/Makefile.in +++ b/internal/test_util/libcode/vx_python3_utils/Makefile.in @@ -225,6 +225,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_series_data/Makefile.in b/internal/test_util/libcode/vx_series_data/Makefile.in index 94871489e0..ff1dd2b402 100644 --- a/internal/test_util/libcode/vx_series_data/Makefile.in +++ b/internal/test_util/libcode/vx_series_data/Makefile.in @@ -220,6 +220,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = 
@MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_solar/Makefile.in b/internal/test_util/libcode/vx_solar/Makefile.in index a7026539c3..9912971dc5 100644 --- a/internal/test_util/libcode/vx_solar/Makefile.in +++ b/internal/test_util/libcode/vx_solar/Makefile.in @@ -218,6 +218,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/libcode/vx_tc_util/Makefile.in b/internal/test_util/libcode/vx_tc_util/Makefile.in index daf8dce673..a7d3c13818 100644 --- a/internal/test_util/libcode/vx_tc_util/Makefile.in +++ b/internal/test_util/libcode/vx_tc_util/Makefile.in @@ -243,6 +243,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/tools/Makefile.in b/internal/test_util/tools/Makefile.in index e84bca7acf..c22646fb06 100644 --- a/internal/test_util/tools/Makefile.in +++ b/internal/test_util/tools/Makefile.in @@ -230,6 +230,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/tools/other/Makefile.in b/internal/test_util/tools/other/Makefile.in index c054cf412f..8e6a1799bd 100644 --- a/internal/test_util/tools/other/Makefile.in +++ b/internal/test_util/tools/other/Makefile.in @@ -230,6 +230,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ 
MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/internal/test_util/tools/other/mode_time_domain/Makefile.am b/internal/test_util/tools/other/mode_time_domain/Makefile.am index 5bbe02612e..1f9be23d82 100644 --- a/internal/test_util/tools/other/mode_time_domain/Makefile.am +++ b/internal/test_util/tools/other/mode_time_domain/Makefile.am @@ -38,8 +38,6 @@ test_velocity_LDADD = \ ${top_builddir}/src/tools/other/mode_time_domain/mtd-mtd_partition.o \ ${top_builddir}/src/tools/other/mode_time_domain/mtd-mtd_read_data.o \ ${top_builddir}/src/tools/other/mode_time_domain/mtd-mtd_txt_output.o \ - ${top_builddir}/src/tools/other/mode_time_domain/mtd-nc_grid.o \ - ${top_builddir}/src/tools/other/mode_time_domain/mtd-nc_utils_local.o \ -lvx_pxm \ -lvx_plot_util \ -lvx_nav \ diff --git a/internal/test_util/tools/other/mode_time_domain/Makefile.in b/internal/test_util/tools/other/mode_time_domain/Makefile.in index 3e70894414..b1b08361df 100644 --- a/internal/test_util/tools/other/mode_time_domain/Makefile.in +++ b/internal/test_util/tools/other/mode_time_domain/Makefile.in @@ -125,8 +125,6 @@ test_velocity_DEPENDENCIES = ${top_builddir}/src/tools/other/mode_time_domain/mt ${top_builddir}/src/tools/other/mode_time_domain/mtd-mtd_partition.o \ ${top_builddir}/src/tools/other/mode_time_domain/mtd-mtd_read_data.o \ ${top_builddir}/src/tools/other/mode_time_domain/mtd-mtd_txt_output.o \ - ${top_builddir}/src/tools/other/mode_time_domain/mtd-nc_grid.o \ - ${top_builddir}/src/tools/other/mode_time_domain/mtd-nc_utils_local.o \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) @@ -245,6 +243,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = 
@MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ @@ -377,8 +376,6 @@ test_velocity_LDADD = \ ${top_builddir}/src/tools/other/mode_time_domain/mtd-mtd_partition.o \ ${top_builddir}/src/tools/other/mode_time_domain/mtd-mtd_read_data.o \ ${top_builddir}/src/tools/other/mode_time_domain/mtd-mtd_txt_output.o \ - ${top_builddir}/src/tools/other/mode_time_domain/mtd-nc_grid.o \ - ${top_builddir}/src/tools/other/mode_time_domain/mtd-nc_utils_local.o \ -lvx_pxm \ -lvx_plot_util \ -lvx_nav \ diff --git a/scripts/Rscripts/Makefile.in b/scripts/Rscripts/Makefile.in index fa8e805787..e53f9fe2df 100644 --- a/scripts/Rscripts/Makefile.in +++ b/scripts/Rscripts/Makefile.in @@ -260,6 +260,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/scripts/Rscripts/include/Makefile.in b/scripts/Rscripts/include/Makefile.in index 5d625c4e28..d62ae95412 100644 --- a/scripts/Rscripts/include/Makefile.in +++ b/scripts/Rscripts/include/Makefile.in @@ -202,6 +202,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/scripts/config/EnsembleStatConfig b/scripts/config/EnsembleStatConfig index bc84b81d8e..5353c92579 100644 --- a/scripts/config/EnsembleStatConfig +++ b/scripts/config/EnsembleStatConfig @@ -124,6 +124,8 @@ message_type_group_map = [ { key = "ONLYSF"; val = "ADPSFC,SFCSHP"; } ]; +obtype_as_group_val_flag = FALSE; + // // Ensemble bin sizes // May be set separately in each "obs.field" entry @@ -272,8 +274,10 @@ rng = { //////////////////////////////////////////////////////////////////////////////// -grid_weight_flag = NONE; -output_prefix = ""; -version = "V12.0.0"; +grid_weight_flag = NONE; 
+point_weight_flag = NONE; + +output_prefix = ""; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/scripts/config/GenEnsProdConfig b/scripts/config/GenEnsProdConfig index 74350a328d..65d13aadbd 100644 --- a/scripts/config/GenEnsProdConfig +++ b/scripts/config/GenEnsProdConfig @@ -13,7 +13,6 @@ model = "FCST"; // // Output description to be written -// May be set separately in each "obs.field" entry // desc = "NA"; diff --git a/scripts/config/GridStatConfig_APCP_12 b/scripts/config/GridStatConfig_APCP_12 index 2358000a81..3cbc179064 100644 --- a/scripts/config/GridStatConfig_APCP_12 +++ b/scripts/config/GridStatConfig_APCP_12 @@ -217,8 +217,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "APCP_12"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "APCP_12"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/scripts/config/GridStatConfig_APCP_24 b/scripts/config/GridStatConfig_APCP_24 index 39eaa7d220..383efc78c5 100644 --- a/scripts/config/GridStatConfig_APCP_24 +++ b/scripts/config/GridStatConfig_APCP_24 @@ -227,8 +227,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "APCP_24"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "APCP_24"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/scripts/config/GridStatConfig_POP_12 b/scripts/config/GridStatConfig_POP_12 index 6fb4775939..974a3a79b7 100644 --- a/scripts/config/GridStatConfig_POP_12 +++ b/scripts/config/GridStatConfig_POP_12 @@ -226,8 +226,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// 
grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = "POP_12"; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = "POP_12"; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/scripts/config/GridStatConfig_all b/scripts/config/GridStatConfig_all index a165ef836d..f39e7e90cf 100644 --- a/scripts/config/GridStatConfig_all +++ b/scripts/config/GridStatConfig_all @@ -257,8 +257,9 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// grid_weight_flag = NONE; -tmp_dir = "/tmp"; -output_prefix = ""; -version = "V12.0.0"; + +tmp_dir = "/tmp"; +output_prefix = ""; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/scripts/config/PointStatConfig b/scripts/config/PointStatConfig index 3c79f15262..1f4140de1e 100644 --- a/scripts/config/PointStatConfig +++ b/scripts/config/PointStatConfig @@ -216,8 +216,11 @@ seeps_p1_thresh = NA; //////////////////////////////////////////////////////////////////////////////// rank_corr_flag = TRUE; -tmp_dir = "/tmp"; -output_prefix = ""; -version = "V12.0.0"; + +point_weight_flag = NONE; + +tmp_dir = "/tmp"; +output_prefix = ""; +version = "V12.0.0"; //////////////////////////////////////////////////////////////////////////////// diff --git a/scripts/python/Makefile.in b/scripts/python/Makefile.in index e45eb05cd4..670db075a0 100644 --- a/scripts/python/Makefile.in +++ b/scripts/python/Makefile.in @@ -232,6 +232,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/scripts/python/examples/Makefile.am b/scripts/python/examples/Makefile.am index e0461a3564..f1712318a6 100644 --- a/scripts/python/examples/Makefile.am +++ 
b/scripts/python/examples/Makefile.am @@ -32,7 +32,8 @@ pythonexamples_DATA = \ read_ascii_numpy.py \ read_ascii_point.py \ read_ascii_xarray.py \ - read_met_point_obs.py + read_met_point_obs.py \ + read_met_point_obs_pandas.py EXTRA_DIST = ${pythonexamples_DATA} diff --git a/scripts/python/examples/Makefile.in b/scripts/python/examples/Makefile.in index 1d4a58cadf..cdf8f0feb4 100644 --- a/scripts/python/examples/Makefile.in +++ b/scripts/python/examples/Makefile.in @@ -204,6 +204,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ @@ -317,7 +318,8 @@ pythonexamples_DATA = \ read_ascii_numpy.py \ read_ascii_point.py \ read_ascii_xarray.py \ - read_met_point_obs.py + read_met_point_obs.py \ + read_met_point_obs_pandas.py EXTRA_DIST = ${pythonexamples_DATA} MAINTAINERCLEANFILES = Makefile.in diff --git a/scripts/python/examples/read_ascii_numpy.py b/scripts/python/examples/read_ascii_numpy.py index 342121f2ff..061e1aa250 100644 --- a/scripts/python/examples/read_ascii_numpy.py +++ b/scripts/python/examples/read_ascii_numpy.py @@ -85,7 +85,6 @@ def set_dataplane_attrs(): user_fill_value = float(sys.argv[3]) except: log(f"{SCRIPT_NAME} Ignored argument {sys.argv[3]}") - pass log(f"{SCRIPT_NAME} Input File:\t{repr(input_file)}") log(f"{SCRIPT_NAME} Data Name:\t{repr(data_name)}") diff --git a/scripts/python/examples/read_met_point_obs_pandas.py b/scripts/python/examples/read_met_point_obs_pandas.py new file mode 100644 index 0000000000..bf7bb5527b --- /dev/null +++ b/scripts/python/examples/read_met_point_obs_pandas.py @@ -0,0 +1,52 @@ +import os +import sys + +from met.point_nc import nc_point_obs + +# Description: Reads a point observation NetCDF file created by MET and passes +# the data to another MET tool via Python Embedding. 
This script can be copied +# to perform modifications to the data before it is passed to MET. +# Example: plot_point_obs "PYTHON_NUMPY=read_met_point_obs_pandas.py in.nc" out.ps +# Contact: George McCabe + +# Read and format the input 11-column observations: +# (1) string: Message_Type +# (2) string: Station_ID +# (3) string: Valid_Time(YYYYMMDD_HHMMSS) +# (4) numeric: Lat(Deg North) +# (5) numeric: Lon(Deg East) +# (6) numeric: Elevation(msl) +# (7) string: Var_Name(or GRIB_Code) +# (8) numeric: Level +# (9) numeric: Height(msl or agl) +# (10) string: QC_String +# (11) numeric: Observation_Value + +print(f"Python Script:\t{sys.argv[0]}") + +if len(sys.argv) != 2: + print("ERROR: read_met_point_obs_pandas.py -> Specify only 1 input file") + sys.exit(1) + +# Read the input file as the first argument +input_file = os.path.expandvars(sys.argv[1]) +print("Input File:\t" + repr(input_file)) + +# Read MET point observation NetCDF file +try: + point_obs = nc_point_obs(input_file) +except TypeError: + print(f"ERROR: Could not read MET point data file {input_file}") + sys.exit(1) + +# convert point observation data to a pandas DataFrame +df = point_obs.to_pandas() + +################################################## +# perform any modifications to the data here # +################################################## + +# convert pandas DataFrame to list format that is expected by MET +point_data = df.values.tolist() +print(f" point_data: Data Length:\t{len(point_data)}") +print(f" point_data: Data Type:\t{type(point_data)}") diff --git a/scripts/python/met/Makefile.in b/scripts/python/met/Makefile.in index 6a7570efa2..d8541387db 100644 --- a/scripts/python/met/Makefile.in +++ b/scripts/python/met/Makefile.in @@ -204,6 +204,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git 
a/scripts/python/met/dataplane.py b/scripts/python/met/dataplane.py index a28661b365..4d94c0e712 100644 --- a/scripts/python/met/dataplane.py +++ b/scripts/python/met/dataplane.py @@ -25,6 +25,9 @@ def call_python(argv): sys.exit(1) met_base.log_message(f"User python command:\t{repr(' '.join(argv[1:]))}") + if not argv[1] or not argv[1].strip(): + met_base.quit_msg(f"User python command is empty") + sys.exit(1) # argv[1] contains the user defined python script pyembed_module_name = argv[1] @@ -178,7 +181,11 @@ def validate_met_data(met_data, fill_value=None): met_base.quit(f"{method_name} The met_data is None") sys.exit(1) - nx, ny = met_data.shape + if hasattr(met_data, 'shape'): + nx, ny = met_data.shape + else: + met_base.quit(f"{method_name} The met_data does not have the shape property") + sys.exit(1) met_fill_value = met_base.MET_FILL_VALUE if dataplane.is_xarray_dataarray(met_data): diff --git a/scripts/python/met/logger.py b/scripts/python/met/logger.py index a85de36d1f..1e7b30a798 100644 --- a/scripts/python/met/logger.py +++ b/scripts/python/met/logger.py @@ -25,10 +25,6 @@ def error_message(msg): for a_msg in msgs: logger.log_message(logger.append_error_prompt(a_msg)) - #@staticmethod - #def get_met_fill_value(): - # return logger.MET_FILL_VALUE - @staticmethod def info_message(msg): print(f'{logger.PROMPT} {logger.INFO_P} {msg}') @@ -78,8 +74,8 @@ def log_msg(self, msg): @staticmethod def get_numpy_filename(tmp_filename): - return logger.replace_extension(tmp_filename, "json", "npy") if tmp_filename.endswith(".json") else \ - logger.replace_extension(tmp_filename, "nc", "npy") if tmp_filename.endswith(".nc") else f'{tmp_filename}.npy' + file_ext = os.path.splitext(tmp_filename)[1] + return logger.replace_extension(tmp_filename, file_ext, ".npy") if file_ext else f'{tmp_filename}.npy' def is_debug_enabled(self, component_name=""): return met_base_tools.is_debug_enabled(component_name) @@ -99,22 +95,27 @@ class met_base_tools(object): ENV_MET_PYTHON_DEBUG 
= "MET_PYTHON_DEBUG" ENV_MET_PYTHON_TMP_FORMAT = "MET_PYTHON_TMP_FORMAT" + @staticmethod + def convert_byte_type_to_array(ndarray_data): + array_data = [] + if isinstance(ndarray_data[0], (np.ma.MaskedArray, np.ma.core.MaskedArray)): + for byte_data in ndarray_data: + array_data.append(byte_data.tobytes(fill_value=' ').decode('utf-8').rstrip()) + else: + for byte_data in ndarray_data: + array_data.append(byte_data.decode("utf-8").rstrip()) + return array_data + @staticmethod def convert_to_array(ndarray_data): is_byte_type = False if 0 < len(ndarray_data): is_byte_type = isinstance(ndarray_data[0], (bytes, np.bytes_)) - if isinstance(ndarray_data[0], np.ndarray): - if 0 < len(ndarray_data[0]): - is_byte_type = isinstance(ndarray_data[0][0], (bytes, np.bytes_)) + if not is_byte_type and isinstance(ndarray_data[0], np.ndarray) \ + and 0 < len(ndarray_data[0]): + is_byte_type = isinstance(ndarray_data[0][0], (bytes, np.bytes_)) if is_byte_type: - array_data = [] - if isinstance(ndarray_data[0], (np.ma.MaskedArray, np.ma.core.MaskedArray)): - for byte_data in ndarray_data: - array_data.append(byte_data.tobytes(fill_value=' ').decode('utf-8').rstrip()) - else: - for byte_data in ndarray_data: - array_data.append(byte_data.decode("utf-8").rstrip()) + array_data = met_base_tools.convert_byte_type_to_array(ndarray_data) elif isinstance(ndarray_data, (np.ma.MaskedArray, np.ma.core.MaskedArray)): array_data = ndarray_data.filled(fill_value=-9999).tolist() elif isinstance(ndarray_data, np.ndarray): diff --git a/scripts/python/met/mprbase.py b/scripts/python/met/mprbase.py index 0615171313..40cc36f7fb 100644 --- a/scripts/python/met/mprbase.py +++ b/scripts/python/met/mprbase.py @@ -6,6 +6,7 @@ class mpr_data(): # Read a text file with N columns and returns the list of N column data # Skip first "col_start" columns if col_start is not 0. 
+ @staticmethod def read_mpr(input_file, col_last, col_start = 0, header=None, delim_whitespace=True, keep_default_na=False, skiprows=1, dtype='string'): diff --git a/scripts/python/met/point.py b/scripts/python/met/point.py index eb85c3711d..46576c8e59 100644 --- a/scripts/python/met/point.py +++ b/scripts/python/met/point.py @@ -181,11 +181,8 @@ def check_point_data(self): if self.use_var_id: self.check_data_member_string(self.obs_var_table,'obs_var_table') - #def convert_to_numpy(self, value_list): - # return met_point_tools.convert_to_ndarray(value_list) - def dump(self): - met_base_point.print_point_data(self.get_point_data()) + met_point_tools.print_point_data(self.get_point_data()) def get_count_string(self): return f' nobs={self.nobs} nhdr={self.nhdr} ntyp={self.nhdr_typ} nsid={self.nhdr_sid} nvld={self.nhdr_vld} nqty={self.nobs_qty} nvar={self.nobs_var}' @@ -346,7 +343,7 @@ def write_point_data(self, tmp_filename): nc_point_obs.write_nc_file(tmp_filename, self) if met_base_tools.is_debug_enabled("point"): - met_base.log_message(f"Save to a temporary NetCDF file (point)") + met_base.log_message("Save to a temporary NetCDF file (point)") else: self.write_point_data_json_numpy(tmp_filename) @@ -417,8 +414,8 @@ def __init__(self, point_data): def check_csv_record(self, csv_point_data, index): method_name = f"{self.__class__.__name__}.check_csv_record()" error_msgs = [] - # names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'] - # dtype={'typ':'str', 'sid':'str', 'vld':'str', 'var':'str', 'qc':'str'} + # names: ['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'] + # dtype: {'typ':'str', 'sid':'str', 'vld':'str', 'var':'str', 'qc':'str'} if 11 > len(csv_point_data): error_msgs.append(f"{method_name} {index}-th data: missing columns. 
should be 11 columns, not {len(csv_point_data)} columns") elif 11 < len(csv_point_data): @@ -488,7 +485,7 @@ def convert_point_data(self): self.use_var_id = not self.is_grib_code() index = 0 - #names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'] + #name: ['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'] for csv_point_record in self.point_data: # Build header map. hdr_typ_str = csv_point_record[0] @@ -539,7 +536,7 @@ def convert_point_data(self): obs_qty_map[qc_str] = qc_id qc_cnt += 1 - # names=['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'] + # names: ['typ', 'sid', 'vld', 'lat', 'lon', 'elv', 'var', 'lvl', 'hgt', 'qc', 'obs'] self.obs_vid[index] = var_id self.obs_hid[index] = hdr_idx self.obs_lvl[index] = self.get_num_value(csv_point_record[7]) @@ -627,21 +624,21 @@ def read_data(self, args): # - set self.input_name # # Here is a template - ''' - if isinstance(args, dict): - in_filename = args.get('in_name',None) - elif isinstance(args, list): - in_filename = args[0] - else: - in_filename = args - self.input_name = in_filename - ''' + # + # if isinstance(args, dict): + # in_filename = args.get('in_name',None) + # elif isinstance(args, list): + # in_filename = args[0] + # else: + # in_filename = args + # self.input_name = in_filename pass class dummy_point_obs(met_point_obs): def read_data(self, args): + # Do nothing to return an empty point_obs pass @@ -689,7 +686,7 @@ def print_data(key, data_array, show_count=COUNT_SHOW): @staticmethod def print_point_data(met_point_data, print_subset=True): - method_name = f"met_point_tools.print_point_data()" + method_name = "met_point_tools.print_point_data()" print(' === MET point data by python embedding ===') if print_subset: met_point_tools.print_data('nhdr',met_point_data['nhdr']) @@ -714,26 +711,26 @@ def print_point_data(met_point_data, print_subset=True): met_point_tools.print_data('obs_val',met_point_data['obs_val']) else: 
print(f'{method_name} All',met_point_data) - print(f" nhdr: met_point_data['nhdr']") - print(f" nobs: met_point_data['nobs']") - print(f" use_var_id: met_point_data['use_var_id']") - print(f" hdr_typ: met_point_data['hdr_typ']") - print(f"hdr_typ_table: met_point_data['hdr_typ_table']") - print(f" hdr_sid: met_point_data['hdr_sid']") - print(f"hdr_sid_table: met_point_data['hdr_sid_table']") - print(f" hdr_vld: met_point_data['hdr_vld']") - print(f"hdr_vld_table: met_point_data['hdr_vld_table']") - print(f" hdr_lat: met_point_data['hdr_lat']") - print(f" hdr_lon: met_point_data['hdr_lon']") - print(f" hdr_elv: met_point_data['hdr_elv']") - print(f" obs_hid: met_point_data['obs_hid']") - print(f" obs_vid: met_point_data['obs_vid']") - print(f"obs_var_table: met_point_data['obs_var_table']") - print(f" obs_qty: met_point_data['obs_qty']") - print(f"obs_qty_table: met_point_data['obs_qty_table']") - print(f" obs_lvl: met_point_data['obs_lvl']") - print(f" obs_hgt: met_point_data['obs_hgt']") - print(f" obs_val: met_point_data['obs_val']") + print(" nhdr: met_point_data['nhdr']") + print(" nobs: met_point_data['nobs']") + print(" use_var_id: met_point_data['use_var_id']") + print(" hdr_typ: met_point_data['hdr_typ']") + print("hdr_typ_table: met_point_data['hdr_typ_table']") + print(" hdr_sid: met_point_data['hdr_sid']") + print("hdr_sid_table: met_point_data['hdr_sid_table']") + print(" hdr_vld: met_point_data['hdr_vld']") + print("hdr_vld_table: met_point_data['hdr_vld_table']") + print(" hdr_lat: met_point_data['hdr_lat']") + print(" hdr_lon: met_point_data['hdr_lon']") + print(" hdr_elv: met_point_data['hdr_elv']") + print(" obs_hid: met_point_data['obs_hid']") + print(" obs_vid: met_point_data['obs_vid']") + print("obs_var_table: met_point_data['obs_var_table']") + print(" obs_qty: met_point_data['obs_qty']") + print("obs_qty_table: met_point_data['obs_qty_table']") + print(" obs_lvl: met_point_data['obs_lvl']") + print(" obs_hgt: met_point_data['obs_hgt']") + 
print(" obs_val: met_point_data['obs_val']") print(' === MET point data by python embedding ===') diff --git a/scripts/python/met/point_nc.py b/scripts/python/met/point_nc.py index 37063bdb0d..db73d8fae4 100644 --- a/scripts/python/met/point_nc.py +++ b/scripts/python/met/point_nc.py @@ -8,10 +8,12 @@ ''' +import sys import os import numpy as np import netCDF4 as nc +import pandas as pd from met.point import met_point_obs, met_point_tools @@ -53,6 +55,11 @@ def get_string_array(nc_group, var_name): class nc_point_obs(met_point_obs): + def __init__(self, nc_filename=None): + super().__init__() + if nc_filename: + self.read_data(nc_filename) + # args should be string, list, or dictionary def get_nc_filename(self, args): nc_filename = None @@ -67,62 +74,65 @@ def get_nc_filename(self, args): def read_data(self, nc_filename): method_name = f"{self.__class__.__name__}.read_data()" - if nc_filename is None: - self.log_error_msg(f"{method_name} The input NetCDF filename is missing") - elif not os.path.exists(nc_filename): - self.log_error_msg(f"{method_name} input NetCDF file ({nc_filename}) does not exist") - else: + if not nc_filename: + raise TypeError(f"{method_name} The input NetCDF filename is missing") + if not os.path.exists(nc_filename): + raise TypeError(f"{method_name} input NetCDF file ({nc_filename}) does not exist") + + try: dataset = nc.Dataset(nc_filename, 'r') + except OSError: + raise TypeError(f"{method_name} Could not open NetCDF file ({nc_filename})") + + attr_name = 'use_var_id' + use_var_id_str = dataset.getncattr(attr_name) if attr_name in dataset.ncattrs() else "false" + self.use_var_id = use_var_id_str.lower() == 'true' + + # Header + self.hdr_typ = dataset['hdr_typ'][:] + self.hdr_sid = dataset['hdr_sid'][:] + self.hdr_vld = dataset['hdr_vld'][:] + self.hdr_lat = dataset['hdr_lat'][:] + self.hdr_lon = dataset['hdr_lon'][:] + self.hdr_elv = dataset['hdr_elv'][:] + self.hdr_typ_table = met_point_nc_tools.get_string_array(dataset, 'hdr_typ_table') + 
self.hdr_sid_table = met_point_nc_tools.get_string_array(dataset, 'hdr_sid_table') + self.hdr_vld_table = met_point_nc_tools.get_string_array(dataset, 'hdr_vld_table') + + nc_var = dataset.variables.get('obs_unit', None) + if nc_var: + self.obs_var_unit = nc_var[:] + nc_var = dataset.variables.get('obs_desc', None) + if nc_var: + self.obs_var_desc = nc_var[:] + + nc_var = dataset.variables.get('hdr_prpt_typ', None) + if nc_var: + self.hdr_prpt_typ = nc_var[:] + nc_var = dataset.variables.get('hdr_irpt_typ', None) + if nc_var: + self.hdr_irpt_typ = nc_var[:] + nc_var = dataset.variables.get('hdr_inst_typ', None) + if nc_var: + self.hdr_inst_typ =nc_var[:] + + #Observation data + self.hdr_sid = dataset['hdr_sid'][:] + self.obs_qty = np.array(dataset['obs_qty'][:]) + self.obs_hid = np.array(dataset['obs_hid'][:]) + self.obs_lvl = np.array(dataset['obs_lvl'][:]) + self.obs_hgt = np.array(dataset['obs_hgt'][:]) + self.obs_val = np.array(dataset['obs_val'][:]) + nc_var = dataset.variables.get('obs_vid', None) + if nc_var is None: + self.use_var_id = False + nc_var = dataset.variables.get('obs_gc', None) + else: + self.obs_var_table = met_point_nc_tools.get_string_array(dataset, 'obs_var') + if nc_var: + self.obs_vid = np.array(nc_var[:]) - attr_name = 'use_var_id' - use_var_id_str = dataset.getncattr(attr_name) if attr_name in dataset.ncattrs() else "false" - self.use_var_id = use_var_id_str.lower() == 'true' - - # Header - self.hdr_typ = dataset['hdr_typ'][:] - self.hdr_sid = dataset['hdr_sid'][:] - self.hdr_vld = dataset['hdr_vld'][:] - self.hdr_lat = dataset['hdr_lat'][:] - self.hdr_lon = dataset['hdr_lon'][:] - self.hdr_elv = dataset['hdr_elv'][:] - self.hdr_typ_table = met_point_nc_tools.get_string_array(dataset, 'hdr_typ_table') - self.hdr_sid_table = met_point_nc_tools.get_string_array(dataset, 'hdr_sid_table') - self.hdr_vld_table = met_point_nc_tools.get_string_array(dataset, 'hdr_vld_table') - - nc_var = dataset.variables.get('obs_unit', None) - if nc_var: - 
self.obs_var_unit = nc_var[:] - nc_var = dataset.variables.get('obs_desc', None) - if nc_var: - self.obs_var_desc = nc_var[:] - - nc_var = dataset.variables.get('hdr_prpt_typ', None) - if nc_var: - self.hdr_prpt_typ = nc_var[:] - nc_var = dataset.variables.get('hdr_irpt_typ', None) - if nc_var: - self.hdr_irpt_typ = nc_var[:] - nc_var = dataset.variables.get('hdr_inst_typ', None) - if nc_var: - self.hdr_inst_typ =nc_var[:] - - #Observation data - self.hdr_sid = dataset['hdr_sid'][:] - self.obs_qty = np.array(dataset['obs_qty'][:]) - self.obs_hid = np.array(dataset['obs_hid'][:]) - self.obs_lvl = np.array(dataset['obs_lvl'][:]) - self.obs_hgt = np.array(dataset['obs_hgt'][:]) - self.obs_val = np.array(dataset['obs_val'][:]) - nc_var = dataset.variables.get('obs_vid', None) - if nc_var is None: - self.use_var_id = False - nc_var = dataset.variables.get('obs_gc', None) - else: - self.obs_var_table = met_point_nc_tools.get_string_array(dataset, 'obs_var') - if nc_var: - self.obs_vid = np.array(nc_var[:]) - - self.obs_qty_table = met_point_nc_tools.get_string_array(dataset, 'obs_qty_table') + self.obs_qty_table = met_point_nc_tools.get_string_array(dataset, 'obs_qty_table') def save_ncfile(self, nc_filename): met_data = self.get_point_data() @@ -274,6 +284,22 @@ def write_nc_data(nc_dataset, point_obs): print(f' === ERROR at {method_name} type(nc_dataset)={type(nc_dataset)} type(point_obs)={type(point_obs)}') raise + def to_pandas(self): + return pd.DataFrame({ + 'typ': [self.hdr_typ_table[self.hdr_typ[i]] for i in self.obs_hid], + 'sid': [self.hdr_sid_table[self.hdr_sid[i]] for i in self.obs_hid], + 'vld': [self.hdr_vld_table[self.hdr_vld[i]] for i in self.obs_hid], + 'lat': [self.hdr_lat[i] for i in self.obs_hid], + 'lon': [self.hdr_lon[i] for i in self.obs_hid], + 'elv': [self.hdr_elv[i] for i in self.obs_hid], + 'var': [self.obs_var_table[i] if self.use_var_id else f'{i}' + for i in self.obs_vid], + 'lvl': self.obs_lvl, + 'hgt': self.obs_hgt, + 'qc': [np.nan if 
np.ma.is_masked(i) else self.obs_qty_table[i] + for i in self.obs_qty], + 'obs': self.obs_val, + }) def main(argv): if len(argv) != 1 and argv[1] != ARG_PRINT_DATA: @@ -289,5 +315,5 @@ def main(argv): point_obs_data.print_point_data(met_point_data) if __name__ == '__main__': - main() + main(sys.argv) print('Done python script') diff --git a/scripts/python/pyembed/Makefile.in b/scripts/python/pyembed/Makefile.in index b8fef31f8b..1ffd7a4eb5 100644 --- a/scripts/python/pyembed/Makefile.in +++ b/scripts/python/pyembed/Makefile.in @@ -260,6 +260,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/scripts/python/tc_diag/Makefile.in b/scripts/python/tc_diag/Makefile.in index 9d098740ad..6cda357496 100644 --- a/scripts/python/tc_diag/Makefile.in +++ b/scripts/python/tc_diag/Makefile.in @@ -262,6 +262,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/scripts/python/tc_diag/atcf_tools/Makefile.in b/scripts/python/tc_diag/atcf_tools/Makefile.in index fec6217c14..67b691d180 100644 --- a/scripts/python/tc_diag/atcf_tools/Makefile.in +++ b/scripts/python/tc_diag/atcf_tools/Makefile.in @@ -204,6 +204,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/scripts/python/tc_diag/config/Makefile.in b/scripts/python/tc_diag/config/Makefile.in index cc4019b28f..df494ac1c5 100644 --- a/scripts/python/tc_diag/config/Makefile.in +++ b/scripts/python/tc_diag/config/Makefile.in @@ -204,6 +204,7 @@ 
MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/scripts/python/tc_diag/diag_lib/Makefile.in b/scripts/python/tc_diag/diag_lib/Makefile.in index aa48ed0c5f..64b9c40add 100644 --- a/scripts/python/tc_diag/diag_lib/Makefile.in +++ b/scripts/python/tc_diag/diag_lib/Makefile.in @@ -204,6 +204,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/scripts/python/tc_diag/tc_diag_driver/Makefile.in b/scripts/python/tc_diag/tc_diag_driver/Makefile.in index b4468d4bb2..41d0c24e56 100644 --- a/scripts/python/tc_diag/tc_diag_driver/Makefile.in +++ b/scripts/python/tc_diag/tc_diag_driver/Makefile.in @@ -204,6 +204,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/scripts/python/utility/Makefile.in b/scripts/python/utility/Makefile.in index 0b977854db..bb8de463d2 100644 --- a/scripts/python/utility/Makefile.in +++ b/scripts/python/utility/Makefile.in @@ -204,6 +204,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/Makefile.in b/src/Makefile.in index 175bb0381a..05916e66b0 100644 --- a/src/Makefile.in +++ b/src/Makefile.in @@ -230,6 +230,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = 
@MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/basic/Makefile.in b/src/basic/Makefile.in index 1df9e615ba..a5e04f07e1 100644 --- a/src/basic/Makefile.in +++ b/src/basic/Makefile.in @@ -232,6 +232,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/basic/enum_to_string/Makefile.in b/src/basic/enum_to_string/Makefile.in index 6c1a9607bf..69c57f75cf 100644 --- a/src/basic/enum_to_string/Makefile.in +++ b/src/basic/enum_to_string/Makefile.in @@ -249,6 +249,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/basic/vx_cal/Makefile.in b/src/basic/vx_cal/Makefile.in index a80d944f0a..bf9c15aa6c 100644 --- a/src/basic/vx_cal/Makefile.in +++ b/src/basic/vx_cal/Makefile.in @@ -256,6 +256,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/basic/vx_cal/is_leap_year.cc b/src/basic/vx_cal/is_leap_year.cc index d37854d690..a383041475 100644 --- a/src/basic/vx_cal/is_leap_year.cc +++ b/src/basic/vx_cal/is_leap_year.cc @@ -102,7 +102,7 @@ void adjuste_day_for_month_year_units(int &day, int &month, int &year, double mo // Compute remaining days from the month fraction bool day_adjusted = false; const int day_offset = (int)(month_fraction * DAYS_PER_MONTH + 0.5); - const char *method_name = "adjuste_day() --> "; + const char *method_name = "adjuste_day_for_month_year_units() -> "; day += day_offset; if (day 
== 1 && abs(month_fraction-0.5) < DAY_EPSILON) { @@ -162,7 +162,7 @@ unixtime add_to_unixtime(unixtime base_unixtime, int sec_per_unit, unixtime ut; auto time_value_ut = (unixtime)time_value; double time_fraction = time_value - (double)time_value_ut; - const char *method_name = "add_to_unixtime() -->"; + const char *method_name = "add_to_unixtime() -> "; if (sec_per_unit == SEC_MONTH || sec_per_unit == SEC_YEAR) { if (time_value < 0) { diff --git a/src/basic/vx_config/Makefile.in b/src/basic/vx_config/Makefile.in index 5fddc64397..61940aed88 100644 --- a/src/basic/vx_config/Makefile.in +++ b/src/basic/vx_config/Makefile.in @@ -304,6 +304,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/basic/vx_config/config.tab.cc b/src/basic/vx_config/config.tab.cc index 61513c670b..69427413b0 100644 --- a/src/basic/vx_config/config.tab.cc +++ b/src/basic/vx_config/config.tab.cc @@ -3229,11 +3229,10 @@ Simple_Node * s = new Simple_Node; s->op = op; -if ( (info.perc_index < 0) || (info.perc_index >= n_perc_thresh_infos) ) { +if ( info.ptype == no_perc_thresh_type ) { - mlog << Error - << "\ndo_simple_perc_thresh() -> bad perc_index ... 
" - << (info.perc_index) << "\n\n"; + mlog << Error << "\ndo_simple_perc_thresh() -> " + << "bad percentile threshold type\n\n"; exit ( 1 ); @@ -3243,7 +3242,7 @@ s->T = bad_data_double; s->PT = info.value; -s->Ptype = perc_thresh_info[info.perc_index].type; +s->Ptype = info.ptype; // // sanity check @@ -3274,7 +3273,7 @@ if ( s->Ptype == perc_thresh_freq_bias && s->PT <= 0 ) { if ( op >= 0 ) { ConcatString cs; - cs << perc_thresh_info[info.perc_index].short_name; + cs << perc_thresh_info_map.at(info.ptype).short_name; cs << info.value; fix_float(cs); @@ -3303,11 +3302,10 @@ Simple_Node * s = new Simple_Node; s->op = op; -if ( (info.perc_index < 0) || (info.perc_index >= n_perc_thresh_infos) ) { +if ( info.ptype == no_perc_thresh_type ) { - mlog << Error - << "\ndo_compound_perc_thresh() -> bad perc_index ... " - << (info.perc_index) << "\n\n"; + mlog << Error << "\ndo_compound_perc_thresh() -> " + << "bad percentile threshold type\n\n"; exit ( 1 ); @@ -3318,7 +3316,7 @@ else s->T = num.d; s->PT = info.value; -s->Ptype = perc_thresh_info[info.perc_index].type; +s->Ptype = info.ptype; // // sanity check @@ -3349,7 +3347,7 @@ if ( s->Ptype == perc_thresh_freq_bias && !is_eq(s->PT, 1.0) ) { if ( op >= 0 ) { ConcatString cs; - cs << perc_thresh_info[info.perc_index].short_name; + cs << perc_thresh_info_map.at(info.ptype).short_name; cs << info.value; fix_float(cs); cs << "(" << number_string << ")"; diff --git a/src/basic/vx_config/config.tab.yy b/src/basic/vx_config/config.tab.yy index dabe56d786..cab56d64af 100644 --- a/src/basic/vx_config/config.tab.yy +++ b/src/basic/vx_config/config.tab.yy @@ -1623,11 +1623,10 @@ Simple_Node * s = new Simple_Node; s->op = op; -if ( (info.perc_index < 0) || (info.perc_index >= n_perc_thresh_infos) ) { +if ( info.ptype == no_perc_thresh_type ) { - mlog << Error - << "\ndo_simple_perc_thresh() -> bad perc_index ... 
" - << (info.perc_index) << "\n\n"; + mlog << Error << "\ndo_simple_perc_thresh() -> " + << "bad percentile threshold type\n\n"; exit ( 1 ); @@ -1637,7 +1636,7 @@ s->T = bad_data_double; s->PT = info.value; -s->Ptype = perc_thresh_info[info.perc_index].type; +s->Ptype = info.ptype; // // sanity check @@ -1668,7 +1667,7 @@ if ( s->Ptype == perc_thresh_freq_bias && s->PT <= 0 ) { if ( op >= 0 ) { ConcatString cs; - cs << perc_thresh_info[info.perc_index].short_name; + cs << perc_thresh_info_map.at(info.ptype).short_name; cs << info.value; fix_float(cs); @@ -1697,11 +1696,10 @@ Simple_Node * s = new Simple_Node; s->op = op; -if ( (info.perc_index < 0) || (info.perc_index >= n_perc_thresh_infos) ) { +if ( info.ptype == no_perc_thresh_type ) { - mlog << Error - << "\ndo_compound_perc_thresh() -> bad perc_index ... " - << (info.perc_index) << "\n\n"; + mlog << Error << "\ndo_compound_perc_thresh() -> " + << "bad percentile threshold type\n\n"; exit ( 1 ); @@ -1712,7 +1710,7 @@ else s->T = num.d; s->PT = info.value; -s->Ptype = perc_thresh_info[info.perc_index].type; +s->Ptype = info.ptype; // // sanity check @@ -1743,7 +1741,7 @@ if ( s->Ptype == perc_thresh_freq_bias && !is_eq(s->PT, 1.0) ) { if ( op >= 0 ) { ConcatString cs; - cs << perc_thresh_info[info.perc_index].short_name; + cs << perc_thresh_info_map.at(info.ptype).short_name; cs << info.value; fix_float(cs); cs << "(" << number_string << ")"; diff --git a/src/basic/vx_config/config_constants.h b/src/basic/vx_config/config_constants.h index deb9a425fe..9f1366a681 100644 --- a/src/basic/vx_config/config_constants.h +++ b/src/basic/vx_config/config_constants.h @@ -297,7 +297,7 @@ struct InterpInfo { void clear(); void validate(); // Ensure that width and method are accordant bool operator==(const InterpInfo &) const; - InterpInfo &operator=(const InterpInfo &a) noexcept; // SoanrQube findings + InterpInfo &operator=(const InterpInfo &a) noexcept; // SonarQube findings }; 
//////////////////////////////////////////////////////////////////////// @@ -329,6 +329,7 @@ struct RegridInfo { void validate(); // ensure that width and method are accordant void validate_point(); // ensure that width and method are accordant RegridInfo &operator=(const RegridInfo &a) noexcept; // SoanrQube findings + ConcatString get_str() const; }; //////////////////////////////////////////////////////////////////////// @@ -461,6 +462,17 @@ enum class GridWeightType { //////////////////////////////////////////////////////////////////////// +// +// Enumeration for point_weight_flag configuration parameter +// + +enum class PointWeightType { + None, // Apply no point weighting + SID // Apply station ID weighting +}; + +//////////////////////////////////////////////////////////////////////// + // // Enumeration for grid_decomp_flag configuration parameter // @@ -537,6 +549,7 @@ static const char conf_key_model[] = "model"; static const char conf_key_desc[] = "desc"; static const char conf_key_obtype[] = "obtype"; static const char conf_key_output_flag[] = "output_flag"; +static const char conf_key_obtype_as_group_val_flag[] = "obtype_as_group_val_flag"; static const char conf_key_obs_window[] = "obs_window"; static const char conf_key_beg[] = "beg"; static const char conf_key_end[] = "end"; @@ -612,6 +625,8 @@ static const char conf_key_mask_sid[] = "mask.sid"; static const char conf_key_mask_llpnt[] = "mask.llpnt"; static const char conf_key_lat_thresh[] = "lat_thresh"; static const char conf_key_lon_thresh[] = "lon_thresh"; +static const char conf_key_lat_vname[] = "lat_vname"; +static const char conf_key_lon_vname[] = "lon_vname"; static const char conf_key_ci_alpha[] = "ci_alpha"; static const char conf_key_time_summary[] = "time_summary"; static const char conf_key_flag[] = "flag"; @@ -690,7 +705,9 @@ static const char conf_key_obs_to_qc_map[] = "obs_to_qc_map"; static const char conf_key_missing_thresh[] = "missing_thresh"; static const char 
conf_key_control_id[] = "control_id"; static const char conf_key_ens_member_ids[] = "ens_member_ids"; -static const char conf_key_seeps_p1_thresh[] = "seeps_p1_thresh"; +static const char conf_key_seeps_grid_climo_name[] = "seeps_grid_climo_name"; +static const char conf_key_seeps_point_climo_name[] = "seeps_point_climo_name"; +static const char conf_key_seeps_p1_thresh[] = "seeps_p1_thresh"; static const char conf_key_ugrid_coordinates_file[] = "ugrid_coordinates_file"; static const char conf_key_ugrid_dataset[] = "ugrid_dataset"; static const char conf_key_ugrid_map_config[] = "ugrid_map_config"; @@ -719,10 +736,16 @@ static const char conf_key_is_wind_direction[] = "is_wind_direction"; static const char conf_key_is_prob[] = "is_prob"; // -// Climatology parameter key names +// Climatology data parameter key names +// +static const char conf_key_climo_mean[] = "climo_mean"; +static const char conf_key_climo_mean_field[] = "climo_mean.field"; +static const char conf_key_climo_stdev[] = "climo_stdev"; +static const char conf_key_climo_stdev_field[] = "climo_stdev.field"; + +// +// Climatology distribution parameter key names // -static const char conf_key_climo_mean_field[] = "climo_mean.field"; -static const char conf_key_climo_stdev_field[] = "climo_stdev.field"; static const char conf_key_climo_cdf[] = "climo_cdf"; static const char conf_key_cdf_bins[] = "cdf_bins"; static const char conf_key_center_bins[] = "center_bins"; @@ -742,6 +765,7 @@ static const char conf_key_topo_mask[] = "topo_mask"; static const char conf_key_topo_mask_flag[] = "topo_mask.flag"; static const char conf_key_use_obs_thresh[] = "use_obs_thresh"; static const char conf_key_interp_fcst_thresh[] = "interp_fcst_thresh"; +static const char conf_key_point_weight_flag[] = "point_weight_flag"; // // Grid-Stat specific parameter key names @@ -1289,6 +1313,9 @@ static const char conf_val_bca[] = "BCA"; static const char conf_val_cos_lat[] = "COS_LAT"; static const char conf_val_area[] = "AREA"; 
+// Point weight flag values +static const char conf_val_sid[] = "SID"; + // Duplicate flag values static const char conf_val_unique[] = "UNIQUE"; diff --git a/src/basic/vx_config/config_util.cc b/src/basic/vx_config/config_util.cc index 344f997bea..a4323a23b1 100644 --- a/src/basic/vx_config/config_util.cc +++ b/src/basic/vx_config/config_util.cc @@ -14,6 +14,7 @@ #include "config_util.h" #include "enum_as_int.hpp" +#include "configobjecttype_to_string.h" #include "vx_math.h" #include "vx_util.h" @@ -265,6 +266,13 @@ RegridInfo &RegridInfo::operator=(const RegridInfo &a) noexcept { return *this; } +/////////////////////////////////////////////////////////////////////////////// + +ConcatString RegridInfo::get_str() const { + ConcatString cs(interpmthd_to_string(method)); + cs << "(" << width << ")"; + return cs; +} /////////////////////////////////////////////////////////////////////////////// @@ -592,16 +600,14 @@ StringArray parse_conf_message_type(Dictionary *dict, bool error_out) { /////////////////////////////////////////////////////////////////////////////// StringArray parse_conf_sid_list(Dictionary *dict, const char *conf_key) { - StringArray sa, cur, sid_sa; - ConcatString mask_name; + StringArray sid_sa; const char *method_name = "parse_conf_sid_list() -> "; - sa = parse_conf_string_array(dict, conf_key, method_name); + StringArray sa(parse_conf_string_array(dict, conf_key, method_name)); - // Parse station ID's to exclude from each entry + // Append to the list of station ID's for(int i=0; i " + mlog << Debug(4) << method_name << "parsing station ID masking file \"" << tmp_file << "\"\n"; // Open the mask station id file specified + ifstream in; in.open(tmp_file.c_str()); if(!in) { - mlog << Error << "\nparse_sid_mask() -> " + mlog << Error << "\n" << method_name << "Can't open the station ID masking file \"" << tmp_file << "\".\n\n"; exit(1); @@ -657,7 +659,7 @@ void parse_sid_mask(const ConcatString &mask_sid_str, // Store the first entry as the name 
of the mask in >> sid_str; - mask_name = sid_str; + mask_sid.set_name(sid_str); // Store the rest of the entries as masking station ID's while(in >> sid_str) mask_sid.add(sid_str.c_str()); @@ -665,9 +667,9 @@ void parse_sid_mask(const ConcatString &mask_sid_str, // Close the input file in.close(); - mlog << Debug(4) << "parse_sid_mask() -> " + mlog << Debug(4) << method_name << "parsed " << mask_sid.n() << " station ID's for the \"" - << mask_name << "\" mask from file \"" << tmp_file << "\"\n"; + << mask_sid.name() << "\" mask from file \"" << tmp_file << "\"\n"; } // Process list of strings else { @@ -675,19 +677,19 @@ void parse_sid_mask(const ConcatString &mask_sid_str, // Print a warning if the string contains a dot which suggests // the user was trying to specify a file name. if(check_reg_exp("[.]", mask_sid_str.c_str())) { - mlog << Warning << "\nparse_sid_mask() -> " + mlog << Warning << "\n" << method_name << "unable to process \"" << mask_sid_str << "\" as a file name and processing it as a single " << "station ID mask instead.\n\n"; } - mlog << Debug(4) << "parse_sid_mask() -> " + mlog << Debug(4) << method_name << "storing single station ID mask \"" << mask_sid_str << "\"\n"; // Check for embedded whitespace or slashes if(check_reg_exp(ws_reg_exp, mask_sid_str.c_str()) || check_reg_exp("[/]", mask_sid_str.c_str())) { - mlog << Error << "\nparse_sid_mask() -> " + mlog << Error << "\n" << method_name << "masking station ID string can't contain whitespace or " << "slashes \"" << mask_sid_str << "\".\n\n"; exit(1); @@ -700,15 +702,16 @@ void parse_sid_mask(const ConcatString &mask_sid_str, // One elements means no colon was specified if(sa.n() == 1) { mask_sid.add_css(sa[0]); - mask_name = ( mask_sid.n() == 1 ? mask_sid[0] : "MASK_SID" ); + mask_sid.set_name((mask_sid.n() == 1 ? 
+ mask_sid.sid_map().begin()->first : "MASK_SID")); } // Two elements means one colon was specified else if(sa.n() == 2) { - mask_name = sa[0]; mask_sid.add_css(sa[1]); + mask_sid.set_name(sa[0]); } else { - mlog << Error << "\nparse_sid_mask() -> " + mlog << Error << "\n" << method_name << "masking station ID string may contain at most one colon to " << "specify the mask name \"" << mask_sid_str << "\".\n\n"; exit(1); @@ -716,14 +719,26 @@ void parse_sid_mask(const ConcatString &mask_sid_str, } - // Sort the mask_sid's - mask_sid.sort(); - - return; + return mask_sid; } /////////////////////////////////////////////////////////////////////////////// +StringArray parse_sid_mask_as_list(const ConcatString &mask_sid_str) { + + MaskSID ms = parse_sid_mask(mask_sid_str); + + StringArray sa; + for(const auto &pair : ms.sid_map()) sa.add(pair.first); + + return sa; +} + +/////////////////////////////////////////////////////////////////////////////// +// +// Code for MaskLatLon struct +// +/////////////////////////////////////////////////////////////////////////////// void MaskLatLon::clear() { name.clear(); @@ -756,7 +771,6 @@ MaskLatLon &MaskLatLon::operator=(const MaskLatLon &a) noexcept { return *this; } - /////////////////////////////////////////////////////////////////////////////// vector parse_conf_llpnt_mask(Dictionary *dict) { @@ -1331,13 +1345,10 @@ BootInfo parse_conf_boot(Dictionary *dict) { return info; } - /////////////////////////////////////////////////////////////////////////////// -RegridInfo parse_conf_regrid(Dictionary *dict, bool error_out) { - Dictionary *regrid_dict = (Dictionary *) nullptr; +RegridInfo parse_conf_regrid(Dictionary *dict, RegridInfo *default_info, bool error_out) { RegridInfo info; - int v; if(!dict) { mlog << Error << "\nparse_conf_regrid() -> " @@ -1346,10 +1357,10 @@ RegridInfo parse_conf_regrid(Dictionary *dict, bool error_out) { } // Conf: regrid - regrid_dict = dict->lookup_dictionary(conf_key_regrid, false); + Dictionary 
*regrid_dict = dict->lookup_dictionary(conf_key_regrid, false); // Check that the regrid dictionary is present - if(!regrid_dict) { + if(!regrid_dict && !default_info) { if(error_out) { mlog << Error << "\nparse_conf_regrid() -> " << "can't find the \"regrid\" dictionary!\n\n"; @@ -1360,61 +1371,164 @@ RegridInfo parse_conf_regrid(Dictionary *dict, bool error_out) { } } - // Parse to_grid as an integer - v = regrid_dict->lookup_int(conf_key_to_grid, false, false); + // Conf: to_grid (optional) as an integer or string + const DictionaryEntry * entry = nullptr; + + if(regrid_dict) entry = regrid_dict->lookup(conf_key_to_grid, false); - // If integer lookup successful, convert to FieldType. - if(regrid_dict->last_lookup_status()) { - info.field = int_to_fieldtype(v); - info.enable = (info.field == FieldType::Fcst || - info.field == FieldType::Obs); + // to_grid found + if(entry) { + + // Convert integer to FieldType + if(entry->type() == IntegerType) { + info.field = int_to_fieldtype(entry->i_value()); + info.enable = (info.field == FieldType::Fcst || + info.field == FieldType::Obs); + } + // Store grid name string + else if(entry->type() == StringType) { + info.name = entry->string_value(); + info.enable = true; + } + else { + mlog << Error << "\nparse_conf_regrid() -> " + << "Unexpected type (" + << configobjecttype_to_string(entry->type()) + << ") for \"" << conf_key_to_grid + << "\" configuration entry.\n\n"; + exit(1); + } + } + // Use default RegridInfo + else if(default_info){ + info.name = default_info->name; + info.enable = default_info->enable; } - // If integer lookup unsuccessful, parse vx_grid as a string. - // Do not error out since to_grid isn't specified for climo.regrid. + // Use global default else { - info.name = regrid_dict->lookup_string(conf_key_to_grid, false); + info.name = ""; info.enable = true; } - // Conf: vld_thresh - double thr = regrid_dict->lookup_double(conf_key_vld_thresh, false); - info.vld_thresh = (is_bad_data(thr) ? 
default_vld_thresh : thr); + // Conf: vld_thresh (required) + if(regrid_dict && regrid_dict->lookup(conf_key_vld_thresh, false)) { + info.vld_thresh = regrid_dict->lookup_double(conf_key_vld_thresh); + } + // Use default RegridInfo + else if(default_info) { + info.vld_thresh = default_info->vld_thresh; + } + // Use global default + else { + info.vld_thresh = default_vld_thresh; + } - // Parse the method and width - info.method = int_to_interpmthd(regrid_dict->lookup_int(conf_key_method)); - info.width = regrid_dict->lookup_int(conf_key_width); + // Conf: method (required) + if(regrid_dict && regrid_dict->lookup(conf_key_method, false)) { + info.method = int_to_interpmthd(regrid_dict->lookup_int(conf_key_method)); + } + // Use default RegridInfo + else if(default_info) { + info.method = default_info->method; + } - // Conf: shape - v = regrid_dict->lookup_int(conf_key_shape, false); - if (regrid_dict->last_lookup_status()) { - info.shape = int_to_gridtemplate(v); + // Conf: width (required) + if(regrid_dict && regrid_dict->lookup(conf_key_width, false)) { + info.width = regrid_dict->lookup_int(conf_key_width); + } + // Use default RegridInfo + else if(default_info) { + info.width = default_info->width; + } + + // Conf: shape (optional) + if(regrid_dict && regrid_dict->lookup(conf_key_shape, false)) { + info.shape = int_to_gridtemplate(regrid_dict->lookup_int(conf_key_shape)); + } + // Use default RegridInfo + else if(default_info) { + info.shape = default_info->shape; } + // Use global default else { - // If not specified, use the default square shape info.shape = GridTemplateFactory::GridTemplates::Square; } - // Conf: gaussian dx and radius - double conf_value = regrid_dict->lookup_double(conf_key_gaussian_dx, false); - info.gaussian.dx = (is_bad_data(conf_value) ? default_gaussian_dx : conf_value); - conf_value = regrid_dict->lookup_double(conf_key_gaussian_radius, false); - info.gaussian.radius = (is_bad_data(conf_value) ? 
default_gaussian_radius : conf_value); - conf_value = regrid_dict->lookup_double(conf_key_trunc_factor, false); - info.gaussian.trunc_factor = (is_bad_data(conf_value) ? default_trunc_factor : conf_value); - if (info.method == InterpMthd::Gaussian || info.method == InterpMthd::MaxGauss) info.gaussian.compute(); + // Conf: gaussian_dx (optional) + if(regrid_dict && regrid_dict->lookup(conf_key_gaussian_dx, false)) { + info.gaussian.dx = regrid_dict->lookup_double(conf_key_gaussian_dx); + } + // Use default RegridInfo + else if(default_info) { + info.gaussian.dx = default_info->gaussian.dx; + } + // Use global default + else { + info.gaussian.dx = default_gaussian_dx; + } + + // Conf: gaussian_radius (optional) + if(regrid_dict && regrid_dict->lookup(conf_key_gaussian_radius, false)) { + info.gaussian.radius = regrid_dict->lookup_double(conf_key_gaussian_radius); + } + // Use default RegridInfo + else if(default_info) { + info.gaussian.radius = default_info->gaussian.radius; + } + // Use global default + else { + info.gaussian.radius = default_gaussian_radius; + } + + // Conf: gaussian_trunc_factor (optional) + if(regrid_dict && regrid_dict->lookup(conf_key_trunc_factor, false)) { + info.gaussian.trunc_factor = regrid_dict->lookup_double(conf_key_trunc_factor); + } + // Use default RegridInfo + else if(default_info) { + info.gaussian.trunc_factor = default_info->gaussian.trunc_factor; + } + // Use global default + else { + info.gaussian.trunc_factor = default_trunc_factor; + } + + // Compute Guassian parameters + if(info.method == InterpMthd::Gaussian || + info.method == InterpMthd::MaxGauss) { + info.gaussian.compute(); + } // MET#2437 Do not search the higher levels of config file context for convert, // censor_thresh, and censor_val. They must be specified within the // regrid dictionary itself. 
- // Conf: convert - info.convert_fx.set(regrid_dict->lookup(conf_key_convert, false)); + // Conf: convert (optional) + if(regrid_dict && regrid_dict->lookup(conf_key_convert, false)) { + info.convert_fx.set(regrid_dict->lookup(conf_key_convert)); + } + // Use default RegridInfo + else if(default_info) { + info.convert_fx = default_info->convert_fx; + } - // Conf: censor_thresh - info.censor_thresh = regrid_dict->lookup_thresh_array(conf_key_censor_thresh, false, true, false); + // Conf: censor_thresh (optional) + if(regrid_dict && regrid_dict->lookup(conf_key_censor_thresh, false)) { + info.censor_thresh = regrid_dict->lookup_thresh_array(conf_key_censor_thresh); + } + // Use default RegridInfo + else if(default_info) { + info.censor_thresh = default_info->censor_thresh; + } - // Conf: censor_val - info.censor_val = regrid_dict->lookup_num_array(conf_key_censor_val, false, true, false); + // Conf: censor_val (optional) + if(regrid_dict && regrid_dict->lookup(conf_key_censor_val, false)) { + info.censor_val = regrid_dict->lookup_num_array(conf_key_censor_val); + } + // Use default RegridInfo + else if(default_info) { + info.censor_val = default_info->censor_val; + } // Validate the settings info.validate(); @@ -2081,9 +2195,10 @@ HiRAInfo parse_conf_hira(Dictionary *dict) { GridWeightType parse_conf_grid_weight_flag(Dictionary *dict) { GridWeightType t = GridWeightType::None; int v; + const char *method_name = "parse_conf_grid_weight_flag() -> "; if(!dict) { - mlog << Error << "\nparse_conf_grid_weight_flag() -> " + mlog << Error << "\n" << method_name << "empty dictionary!\n\n"; exit(1); } @@ -2096,7 +2211,7 @@ GridWeightType parse_conf_grid_weight_flag(Dictionary *dict) { else if(v == conf_const.lookup_int(conf_val_cos_lat)) t = GridWeightType::Cos_Lat; else if(v == conf_const.lookup_int(conf_val_area)) t = GridWeightType::Area; else { - mlog << Error << "\nparse_conf_grid_weight_flag() -> " + mlog << Error << "\n" << method_name << "Unexpected config file value 
of " << v << " for \"" << conf_key_grid_weight_flag << "\".\n\n"; exit(1); @@ -2107,6 +2222,35 @@ GridWeightType parse_conf_grid_weight_flag(Dictionary *dict) { /////////////////////////////////////////////////////////////////////////////// +PointWeightType parse_conf_point_weight_flag(Dictionary *dict) { + PointWeightType t = PointWeightType::None; + int v; + const char *method_name = "parse_conf_point_weight_flag() -> "; + + if(!dict) { + mlog << Error << "\n" << method_name + << "empty dictionary!\n\n"; + exit(1); + } + + // Get the integer flag value for the current entry + v = dict->lookup_int(conf_key_point_weight_flag); + + // Convert integer to enumerated GridWeightType + if(v == conf_const.lookup_int(conf_val_none)) t = PointWeightType::None; + else if(v == conf_const.lookup_int(conf_val_sid)) t = PointWeightType::SID; + else { + mlog << Error << "\n" << method_name + << "Unexpected config file value of " << v << " for \"" + << conf_key_point_weight_flag << "\".\n\n"; + exit(1); + } + + return t; +} + +/////////////////////////////////////////////////////////////////////////////// + DuplicateType parse_conf_duplicate_flag(Dictionary *dict) { DuplicateType t = DuplicateType::None; int v; @@ -2514,28 +2658,28 @@ void check_mask_names(const StringArray &sa) { /////////////////////////////////////////////////////////////////////////////// -void check_climo_n_vx(Dictionary *dict, const int n_vx) { - int n; +void check_climo_n_vx(Dictionary *dict, const int n_input) { + int n_climo; // Check for a valid number of climatology mean fields - n = parse_conf_n_vx(dict->lookup_array(conf_key_climo_mean_field, false)); - if(n != 0 && n != n_vx) { + n_climo = parse_conf_n_vx(dict->lookup_array(conf_key_climo_mean_field, false)); + if(n_climo != 0 && n_climo != 1 && n_climo != n_input) { mlog << Error << "\ncheck_climo_n_vx() -> " << "The number of climatology mean fields in \"" - << conf_key_climo_mean_field - << "\" must be zero or match the number (" << n_vx - << ") 
in \"" << conf_key_fcst_field << "\".\n\n"; + << conf_key_climo_mean_field << "\" (" << n_climo + << ") must be 0, 1, or match the number of input fields (" + << n_input << ").\n\n"; exit(1); } // Check for a valid number of climatology standard deviation fields - n = parse_conf_n_vx(dict->lookup_array(conf_key_climo_stdev_field, false)); - if(n != 0 && n != n_vx) { + n_climo = parse_conf_n_vx(dict->lookup_array(conf_key_climo_stdev_field, false)); + if(n_climo != 0 && n_climo != 1 && n_climo != n_input) { mlog << Error << "\ncheck_climo_n_vx() -> " << "The number of climatology standard deviation fields in \"" - << conf_key_climo_stdev_field - << "\" must be zero or match the number (" - << n_vx << ") in \"" << conf_key_fcst_field << "\".\n\n"; + << conf_key_climo_stdev_field << "\" (" << n_climo + << ") must be 0, 1, or match the number of input fields (" + << n_input << ").\n\n"; exit(1); } diff --git a/src/basic/vx_config/config_util.h b/src/basic/vx_config/config_util.h index 3dae869b2b..885fa6fb2e 100644 --- a/src/basic/vx_config/config_util.h +++ b/src/basic/vx_config/config_util.h @@ -31,18 +31,32 @@ static const char conf_key_old_prepbufr_map[] = "obs_prefbufr_map"; // for ba //////////////////////////////////////////////////////////////////////// extern ConcatString parse_conf_version(Dictionary *dict); -extern ConcatString parse_conf_string(Dictionary *dict, const char *, bool check_empty = true); +extern ConcatString parse_conf_string( + Dictionary *dict, + const char *, + bool check_empty=true); extern GrdFileType parse_conf_file_type(Dictionary *dict); extern std::map - parse_conf_output_flag(Dictionary *dict, const STATLineType *, int); + parse_conf_output_flag( + Dictionary *dict, + const STATLineType *, int); extern std::map parse_conf_output_stats(Dictionary *dict); extern int parse_conf_n_vx(Dictionary *dict); -extern Dictionary parse_conf_i_vx_dict(Dictionary *dict, int index); -extern StringArray parse_conf_tc_model(Dictionary *dict, bool 
error_out = default_dictionary_error_out); -extern StringArray parse_conf_message_type(Dictionary *dict, bool error_out = default_dictionary_error_out); -extern StringArray parse_conf_sid_list(Dictionary *dict, const char *); -extern void parse_sid_mask(const ConcatString &, StringArray &, ConcatString &); +extern Dictionary parse_conf_i_vx_dict( + Dictionary *dict, + int index); +extern StringArray parse_conf_tc_model( + Dictionary *dict, + bool error_out=default_dictionary_error_out); +extern StringArray parse_conf_message_type( + Dictionary *dict, + bool error_out=default_dictionary_error_out); +extern StringArray parse_conf_sid_list( + Dictionary *dict, + const char *); +extern MaskSID parse_sid_mask(const ConcatString &); +extern StringArray parse_sid_mask_as_list(const ConcatString &); extern std::vector parse_conf_llpnt_mask(Dictionary *dict); extern StringArray parse_conf_obs_qty_inc(Dictionary *dict); @@ -51,31 +65,45 @@ extern NumArray parse_conf_ci_alpha(Dictionary *dict); extern NumArray parse_conf_eclv_points(Dictionary *dict); extern ClimoCDFInfo parse_conf_climo_cdf(Dictionary *dict); extern TimeSummaryInfo parse_conf_time_summary(Dictionary *dict); -extern std::map parse_conf_key_value_map( - Dictionary *dict, const char *conf_key_map_name, const char *caller=nullptr); +extern std::map + parse_conf_key_value_map( + Dictionary *dict, + const char *conf_key_map_name, + const char *caller=nullptr); extern void parse_add_conf_key_value_map( - Dictionary *dict, const char *conf_key_map_name, std::map *m); + Dictionary *dict, + const char *conf_key_map_name, + std::map *m); extern void parse_add_conf_key_values_map( - Dictionary *dict, const char *conf_key_map_name, - std::map *m, const char *caller=nullptr); + Dictionary *dict, + const char *conf_key_map_name, + std::map *m, + const char *caller=nullptr); extern std::map parse_conf_message_type_map(Dictionary *dict); extern std::map parse_conf_message_type_group_map(Dictionary *dict); -extern std::map 
parse_conf_metadata_map(Dictionary *dict); +extern std::map + parse_conf_metadata_map(Dictionary *dict); extern std::map parse_conf_obs_name_map(Dictionary *dict); extern std::map parse_conf_obs_to_qc_map(Dictionary *dict); extern std::map parse_conf_key_convert_map( - Dictionary *dict, const char *conf_key_map_name, const char *caller=nullptr); + Dictionary *dict, + const char *conf_key_map_name, + const char *caller=nullptr); extern BootInfo parse_conf_boot(Dictionary *dict); -extern RegridInfo parse_conf_regrid(Dictionary *dict, bool error_out = default_dictionary_error_out); +extern RegridInfo parse_conf_regrid( + Dictionary *dict, + RegridInfo *default_info=nullptr, + bool error_out=default_dictionary_error_out); extern InterpInfo parse_conf_interp(Dictionary *dict, const char *); extern NbrhdInfo parse_conf_nbrhd(Dictionary *dict, const char *); extern HiRAInfo parse_conf_hira(Dictionary *dict); extern GridWeightType parse_conf_grid_weight_flag(Dictionary *dict); +extern PointWeightType parse_conf_point_weight_flag(Dictionary *dict); extern DuplicateType parse_conf_duplicate_flag(Dictionary *dict); extern ObsSummary parse_conf_obs_summary(Dictionary *dict); extern ConcatString parse_conf_tmp_dir(Dictionary *dict); @@ -92,7 +120,9 @@ extern ConcatString parse_conf_ugrid_coordinates_file(Dictionary *dict); extern ConcatString parse_conf_ugrid_dataset(Dictionary *dict); extern ConcatString parse_conf_ugrid_map_config(Dictionary *dict); extern double parse_conf_ugrid_max_distance_km(Dictionary *dict); -extern void parse_add_conf_ugrid_metadata_map(Dictionary *dict, std::map *m); +extern void parse_add_conf_ugrid_metadata_map( + Dictionary *dict, + std::map *m); extern void check_mask_names(const StringArray &); diff --git a/src/basic/vx_config/dictionary.h b/src/basic/vx_config/dictionary.h index 742789000b..bcb4a7f34b 100644 --- a/src/basic/vx_config/dictionary.h +++ b/src/basic/vx_config/dictionary.h @@ -154,9 +154,9 @@ class DictionaryEntry { 
//////////////////////////////////////////////////////////////////////// -inline ConfigObjectType DictionaryEntry::type() const { return ( Type ); } +inline ConfigObjectType DictionaryEntry::type() const { return Type; } -inline ConcatString DictionaryEntry::name() const { return ( Name ); } +inline ConcatString DictionaryEntry::name() const { return Name; } inline bool DictionaryEntry::is_number() const { return ( (Type == IntegerType) || (Type == FloatType) ); } @@ -164,11 +164,11 @@ inline bool DictionaryEntry::is_dictionary() const { return ( Type == Dictionary inline bool DictionaryEntry::is_array() const { return ( Type == ArrayType ); } -inline int DictionaryEntry::n_args() const { return ( Nargs ); } +inline int DictionaryEntry::n_args() const { return Nargs; } -inline const IcodeVector * DictionaryEntry::icv() const { return ( v ); } +inline const IcodeVector * DictionaryEntry::icv() const { return v; } -inline Dictionary * DictionaryEntry::dict() const { return ( Dict ); } +inline Dictionary * DictionaryEntry::dict() const { return Dict; } //////////////////////////////////////////////////////////////////////// @@ -243,7 +243,7 @@ class Dictionary { virtual const DictionaryEntry * operator[](int) const; - virtual const Dictionary * parent() const; + virtual Dictionary * parent() const; virtual bool is_array() const; @@ -344,15 +344,15 @@ class Dictionary { //////////////////////////////////////////////////////////////////////// -inline int Dictionary::n_entries() const { return ( Nentries ); } +inline int Dictionary::n_entries() const { return Nentries; } -inline const Dictionary * Dictionary::parent() const { return ( Parent ); } +inline Dictionary * Dictionary::parent() const { return Parent; } inline void Dictionary::set_is_array(bool __tf) { IsArray = __tf; return; } -inline bool Dictionary::is_array() const { return ( IsArray ); } +inline bool Dictionary::is_array() const { return IsArray; } -inline bool Dictionary::last_lookup_status() const { 
return ( LastLookupStatus ); } +inline bool Dictionary::last_lookup_status() const { return LastLookupStatus; } //////////////////////////////////////////////////////////////////////// @@ -434,7 +434,7 @@ class DictionaryStack { //////////////////////////////////////////////////////////////////////// -inline int DictionaryStack::n_elements () const { return ( Nelements ); } +inline int DictionaryStack::n_elements () const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_config/icode.h b/src/basic/vx_config/icode.h index d344b797dd..6e99455933 100644 --- a/src/basic/vx_config/icode.h +++ b/src/basic/vx_config/icode.h @@ -216,7 +216,7 @@ class IcodeVector { //////////////////////////////////////////////////////////////////////// -inline int IcodeVector::length() const { return ( Ncells ); } +inline int IcodeVector::length() const { return Ncells; } //////////////////////////////////////////////////////////////////////// @@ -267,7 +267,7 @@ class CellStack { //////////////////////////////////////////////////////////////////////// -inline int CellStack::depth() const { return ( Depth ); } +inline int CellStack::depth() const { return Depth; } //////////////////////////////////////////////////////////////////////// @@ -319,7 +319,7 @@ class ICVStack { //////////////////////////////////////////////////////////////////////// -inline int ICVStack::depth() const { return ( Depth ); } +inline int ICVStack::depth() const { return Depth; } //////////////////////////////////////////////////////////////////////// @@ -358,7 +358,7 @@ class ICVQueue { //////////////////////////////////////////////////////////////////////// -inline int ICVQueue::n_elements() const { return ( Nelements ); } +inline int ICVQueue::n_elements() const { return Nelements; } //////////////////////////////////////////////////////////////////////// @@ -402,7 +402,7 @@ class ICVArray { 
//////////////////////////////////////////////////////////////////////// -inline int ICVArray::n_elements() const { return ( Nelements ); } +inline int ICVArray::n_elements() const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_config/idstack.h b/src/basic/vx_config/idstack.h index 7c274744d4..ab45105b1c 100644 --- a/src/basic/vx_config/idstack.h +++ b/src/basic/vx_config/idstack.h @@ -101,7 +101,7 @@ class IdentifierQueue { //////////////////////////////////////////////////////////////////////// -inline int IdentifierQueue::n_elements() const { return ( Nelements ); } +inline int IdentifierQueue::n_elements() const { return Nelements; } //////////////////////////////////////////////////////////////////////// @@ -153,7 +153,7 @@ class IdentifierArray { //////////////////////////////////////////////////////////////////////// -inline int IdentifierArray::n_elements() const { return ( Nelements ); } +inline int IdentifierArray::n_elements() const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_config/my_config_scanner.cc b/src/basic/vx_config/my_config_scanner.cc index 13e41a196c..25a5cec234 100644 --- a/src/basic/vx_config/my_config_scanner.cc +++ b/src/basic/vx_config/my_config_scanner.cc @@ -514,7 +514,7 @@ int do_id() { -int j, k; +int j; const char *method_name = "do_id() -> "; Column += m_strlen(configtext); @@ -618,15 +618,7 @@ for (j=0; j " << "unable to parse string \"" << configtext << "\"\n\n"; @@ -1475,10 +1436,6 @@ if ( index < 0 ) { } -configlval.pc_info.perc_index = index; -configlval.pc_info.value = value; - - return SIMPLE_PERC_THRESH; } diff --git a/src/basic/vx_config/number_stack.h b/src/basic/vx_config/number_stack.h index 298a1b73ca..0a64f7323b 100644 --- a/src/basic/vx_config/number_stack.h +++ b/src/basic/vx_config/number_stack.h @@ -95,7 +95,7 @@ class NumberStack { 
//////////////////////////////////////////////////////////////////////// -inline int NumberStack::depth() const { return ( Nelements ); } +inline int NumberStack::depth() const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_config/threshold.cc b/src/basic/vx_config/threshold.cc index 2bf216e084..ef650ef2c0 100644 --- a/src/basic/vx_config/threshold.cc +++ b/src/basic/vx_config/threshold.cc @@ -36,6 +36,12 @@ extern ThreshNode * result; extern bool test_mode; +extern const std::string scp_perc_thresh_type_str("SCP"); + +extern const std::string cdp_perc_thresh_type_str("CDP"); + +static bool print_climo_perc_thresh_log_message = true; + //////////////////////////////////////////////////////////////////////// @@ -49,6 +55,91 @@ return ( t == thresh_le || t == thresh_ge || t == thresh_eq ); } +//////////////////////////////////////////////////////////////////////// + + +bool is_climo_dist_type(PercThreshType t) + +{ + +return ( t == perc_thresh_fcst_climo_dist || + t == perc_thresh_obs_climo_dist ); + +} + + +//////////////////////////////////////////////////////////////////////// + + +bool parse_perc_thresh(const char *str, PC_info *info) + +{ + +bool match = false; + +if ( perc_thresh_info_map.empty() ) return false; + +ConcatString search_cs(str); + +for (auto const& x : perc_thresh_info_map) { + + if ( search_cs.startswith(x.second.short_name.c_str()) && + is_number(str + x.second.short_name.size()) ) { + + if ( info ) { + + info->ptype = x.first; + + info->value = atof(str + x.second.short_name.size()); + + } + + match = true; + + break; + + } + +} + + // + // MET #2924 Backward compatible support for SCP and CDP types + // + +if ( !match && + (search_cs.startswith(scp_perc_thresh_type_str.c_str()) || + search_cs.startswith(cdp_perc_thresh_type_str.c_str())) ) { + + if ( print_climo_perc_thresh_log_message ) { + + mlog << Debug(2) << R"(Please replace the deprecated "SCP" and "CDP" )" + << 
R"(threshold types with "SOCP" and "OCDP", respectively, in the ")" + << str << R"(" threshold string.)" << "\n"; + + print_climo_perc_thresh_log_message = false; + + } + + ConcatString cs; + + if ( search_cs.startswith(scp_perc_thresh_type_str.c_str()) ) { + cs << perc_thresh_info_map.at(perc_thresh_sample_obs_climo).short_name; + cs << str + scp_perc_thresh_type_str.size(); + } + else { + cs << perc_thresh_info_map.at(perc_thresh_obs_climo_dist).short_name; + cs << str + cdp_perc_thresh_type_str.size(); + } + + return parse_perc_thresh(cs.c_str(), info); + +} + +return match; + +} + + //////////////////////////////////////////////////////////////////////// @@ -119,27 +210,15 @@ if ( right_child ) { delete right_child; right_child = nullptr; } //////////////////////////////////////////////////////////////////////// -bool Or_Node::check(double x) const - -{ - -return check(x, bad_data_double, bad_data_double); - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool Or_Node::check(double x, double cmn, double csd) const +bool Or_Node::check(double x, const ClimoPntInfo *cpi) const { -const bool tf_left = left_child->check(x, cmn, csd); +const bool tf_left = left_child->check(x, cpi); if ( tf_left ) return true; -const bool tf_right = right_child->check(x, cmn, csd); +const bool tf_right = right_child->check(x, cpi); return tf_right; @@ -168,13 +247,13 @@ return n; //////////////////////////////////////////////////////////////////////// -double Or_Node::climo_prob() const +double Or_Node::obs_climo_prob() const { if ( !left_child || !right_child ) { - mlog << Error << "\nOr_Node::climo_prob() -> " + mlog << Error << "\nOr_Node::obs_climo_prob() -> " << "node not populated!\n\n"; exit ( 1 ); @@ -182,8 +261,8 @@ if ( !left_child || !right_child ) { } double prob = bad_data_double; -double prob_left = left_child->climo_prob(); -double prob_right = right_child->climo_prob(); +double prob_left = left_child->obs_climo_prob(); +double 
prob_right = right_child->obs_climo_prob(); if ( !is_bad_data(prob_left) && !is_bad_data(prob_right) ) { @@ -220,21 +299,8 @@ return ( left_child->need_perc() || right_child->need_perc() ); //////////////////////////////////////////////////////////////////////// -void Or_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr) - -{ - -set_perc(fptr, optr, cptr, 0, 0); - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void Or_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr, +void Or_Node::set_perc(const NumArray *fptr, const NumArray *optr, + const NumArray *fcptr, const NumArray *ocptr, const SingleThresh *fthr, const SingleThresh *othr) { @@ -248,8 +314,8 @@ if ( !left_child || !right_child ) { } - left_child->set_perc(fptr, optr, cptr, fthr, othr); -right_child->set_perc(fptr, optr, cptr, fthr, othr); + left_child->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); +right_child->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); return; @@ -340,27 +406,15 @@ if ( right_child ) { delete right_child; right_child = nullptr; } //////////////////////////////////////////////////////////////////////// -bool And_Node::check(double x) const - -{ - -return check(x, bad_data_double, bad_data_double); - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool And_Node::check(double x, double cmn, double csd) const +bool And_Node::check(double x, const ClimoPntInfo *cpi) const { -const bool tf_left = left_child->check(x, cmn, csd); +const bool tf_left = left_child->check(x, cpi); if ( ! 
tf_left ) return false; -const bool tf_right = right_child->check(x, cmn, csd); +const bool tf_right = right_child->check(x, cpi); return ( tf_left && tf_right ); @@ -389,13 +443,13 @@ return n; //////////////////////////////////////////////////////////////////////// -double And_Node::climo_prob() const +double And_Node::obs_climo_prob() const { if ( !left_child || !right_child ) { - mlog << Error << "\nAnd_Node::climo_prob() -> " + mlog << Error << "\nAnd_Node::obs_climo_prob() -> " << "node not populated!\n\n"; exit ( 1 ); @@ -403,8 +457,8 @@ if ( !left_child || !right_child ) { } double prob = bad_data_double; -double prob_left = left_child->climo_prob(); -double prob_right = right_child->climo_prob(); +double prob_left = left_child->obs_climo_prob(); +double prob_right = right_child->obs_climo_prob(); // // For opposing inequalities, compute the difference in percentiles @@ -459,21 +513,8 @@ return ( left_child->need_perc() || right_child->need_perc() ); //////////////////////////////////////////////////////////////////////// -void And_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr) - -{ - -set_perc(fptr, optr, cptr, 0, 0); - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void And_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr, +void And_Node::set_perc(const NumArray *fptr, const NumArray *optr, + const NumArray *fcptr, const NumArray *ocptr, const SingleThresh *fthr, const SingleThresh *othr) { @@ -487,8 +528,8 @@ if ( !left_child || !right_child ) { } - left_child->set_perc(fptr, optr, cptr, fthr, othr); -right_child->set_perc(fptr, optr, cptr, fthr, othr); + left_child->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); +right_child->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); return; @@ -578,23 +619,11 @@ if ( child ) { delete child; child = nullptr; } //////////////////////////////////////////////////////////////////////// -bool 
Not_Node::check(double x) const - -{ - -return check(x, bad_data_double, bad_data_double); - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool Not_Node::check(double x, double cmn, double csd) const +bool Not_Node::check(double x, const ClimoPntInfo *cpi) const { -const bool tf = child->check(x, cmn, csd); +const bool tf = child->check(x, cpi); return !tf; @@ -622,12 +651,12 @@ return n; //////////////////////////////////////////////////////////////////////// -double Not_Node::climo_prob() const +double Not_Node::obs_climo_prob() const { double prob = bad_data_double; -double prob_child = child->climo_prob(); +double prob_child = child->obs_climo_prob(); if ( !is_bad_data(prob_child) ) prob = 1.0 - prob_child; @@ -660,23 +689,11 @@ return child->need_perc(); //////////////////////////////////////////////////////////////////////// -void Not_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr) - -{ - -set_perc(fptr, optr, cptr, 0, 0); - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void Not_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr, +void Not_Node::set_perc(const NumArray *fptr, const NumArray *optr, + const NumArray *fcptr, const NumArray *ocptr, const SingleThresh *fthr, const SingleThresh *othr) + { if ( !child ) { @@ -688,7 +705,7 @@ if ( !child ) { } -child->set_perc(fptr, optr, cptr, fthr, othr); +child->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); return; @@ -780,35 +797,44 @@ Simple_Node::~Simple_Node() //////////////////////////////////////////////////////////////////////// -bool Simple_Node::check(double x) const +bool Simple_Node::check(double x, const ClimoPntInfo *cpi) const { -return check(x, bad_data_double, bad_data_double); +if ( op == thresh_na ) return true; -} +double tval; + // + // check climo distribution percentile thresholds + // 
-//////////////////////////////////////////////////////////////////////// +if ( is_climo_dist_type(Ptype) ) { + // + // check the pointer + // -bool Simple_Node::check(double x, double cmn, double csd) const + if(!cpi) { -{ + mlog << Error << "\nSimple_Node::check(double, const ClimoPntInfo *) const -> " + << "climatological distribution percentile threshold type requested " + << "with no ClimoPntInfo provided!\n\n"; -if ( op == thresh_na ) return true; + exit ( 1 ); -double tval; + } + + double cmn = (Ptype == perc_thresh_fcst_climo_dist ? cpi->fcmn : cpi->ocmn); + double csd = (Ptype == perc_thresh_fcst_climo_dist ? cpi->fcsd : cpi->ocsd); // - // check climo distribution percentile thresholds + // check the climo data // -if ( Ptype == perc_thresh_climo_dist ) { - if(is_bad_data(cmn) || is_bad_data(csd)) { - mlog << Error << "\nSimple_Node::check(double, double, double) const -> " + mlog << Error << "\nSimple_Node::check(double, const ClimoPntInfo *) const -> " << "climatological distribution percentile threshold \"" << s << "\" requested with invalid mean (" << cmn << ") or standard deviation (" << csd << ").\n\n"; @@ -832,7 +858,7 @@ else { if ( Ptype != no_perc_thresh_type && is_bad_data(tval) ) { - mlog << Error << "\nSimple_Node::check(double, double, double) const -> " + mlog << Error << "\nSimple_Node::check(double, const ClimoPntInfo *) const -> " << "percentile threshold \"" << s << "\" used before it was set.\n\n"; @@ -856,7 +882,7 @@ switch ( op ) { case thresh_ne: tf = !eq; break; default: - mlog << Error << "\nSimple_Node::check(double, double, double) const -> " + mlog << Error << "\nSimple_Node::check(double, const ClimoPntInfo *) const -> " << "bad op ... 
" << op << "\n\n"; exit ( 1 ); @@ -914,24 +940,12 @@ return; } -//////////////////////////////////////////////////////////////////////// - - -void Simple_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr) - -{ - -set_perc(fptr, optr, cptr, 0, 0); - -return; - -} - //////////////////////////////////////////////////////////////////////// -void Simple_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr, +void Simple_Node::set_perc(const NumArray *fptr, const NumArray *optr, + const NumArray *fcptr, const NumArray *ocptr, const SingleThresh *fthr, const SingleThresh *othr) { @@ -946,9 +960,10 @@ bool fbias_fcst = false; // handle sample percentile types // - if ( Ptype == perc_thresh_sample_fcst ) ptr = fptr; -else if ( Ptype == perc_thresh_sample_obs ) ptr = optr; -else if ( Ptype == perc_thresh_sample_climo ) ptr = cptr; + if ( Ptype == perc_thresh_sample_fcst ) ptr = fptr; +else if ( Ptype == perc_thresh_sample_obs ) ptr = optr; +else if ( Ptype == perc_thresh_sample_fcst_climo ) ptr = fcptr; +else if ( Ptype == perc_thresh_sample_obs_climo ) ptr = ocptr; // // handle bias-correction type @@ -960,7 +975,7 @@ else if ( Ptype == perc_thresh_freq_bias ) { mlog << Error << "\nSimple_Node::set_perc() -> " << "not enough information provided to define the " - << perc_thresh_info[Ptype].long_name + << perc_thresh_info_map.at(Ptype).long_name << " threshold \"" << s << "\".\n\n"; exit ( 1 ); @@ -1013,7 +1028,7 @@ else if ( Ptype == perc_thresh_freq_bias ) { mlog << Error << "\nSimple_Node::set_perc() -> " << "unsupported options for computing the " - << perc_thresh_info[Ptype].long_name + << perc_thresh_info_map.at(Ptype).long_name << " threshold \"" << s << "\".\n\n"; exit ( 1 ); @@ -1028,7 +1043,7 @@ else if ( Ptype == perc_thresh_freq_bias ) { mlog << Error << "\nSimple_Node::set_perc() -> " << "unable to compute the percentile for the " - << perc_thresh_info[Ptype].long_name + << 
perc_thresh_info_map.at(Ptype).long_name << " threshold \"" << s << "\".\n\n"; exit ( 1 ); @@ -1051,7 +1066,7 @@ else { if ( !ptr ) { mlog << Error << "\nSimple_Node::set_perc() -> " - << perc_thresh_info[Ptype].long_name + << perc_thresh_info_map.at(Ptype).long_name << " threshold \"" << s << "\" requested but no data provided.\n\n"; @@ -1086,7 +1101,7 @@ else { if ( data.n() == 0 ) { mlog << Error << "\nSimple_Node::set_perc() -> " - << "can't compute " << perc_thresh_info[Ptype].long_name + << "can't compute " << perc_thresh_info_map.at(Ptype).long_name << " threshold \"" << s << "\" because no valid data was provided.\n\n"; @@ -1216,15 +1231,15 @@ return; //////////////////////////////////////////////////////////////////////// -double Simple_Node::climo_prob() const +double Simple_Node::obs_climo_prob() const { double prob = bad_data_double; -if ( Ptype == perc_thresh_climo_dist ) { +if ( Ptype == perc_thresh_obs_climo_dist ) { - // Climo probability varies based on the threshold type + // Observation climo probability varies based on the threshold type switch ( op ) { case thresh_lt: @@ -1251,9 +1266,9 @@ if ( Ptype == perc_thresh_climo_dist ) { default: - mlog << Error << "\nSimple_Node::climo_prob() -> " - << "cannot convert climatological distribution percentile " - << "threshold to a probability!\n\n"; + mlog << Error << "\nSimple_Node::obs_climo_prob() -> " + << "cannot convert observation climatological distribution " + << "percentile threshold to a probability!\n\n"; exit ( 1 ); @@ -1272,9 +1287,10 @@ bool Simple_Node::need_perc() const { -return ( Ptype == perc_thresh_sample_fcst || - Ptype == perc_thresh_sample_obs || - Ptype == perc_thresh_sample_climo || +return ( Ptype == perc_thresh_sample_fcst || + Ptype == perc_thresh_sample_obs || + Ptype == perc_thresh_sample_fcst_climo || + Ptype == perc_thresh_sample_obs_climo || Ptype == perc_thresh_freq_bias ); } @@ -1496,17 +1512,16 @@ return; 
//////////////////////////////////////////////////////////////////////// -void SingleThresh::set(double pt, ThreshType ind, int perc_index, double t) +void SingleThresh::set(double pt, ThreshType ind, PercThreshType ptype, double t) { clear(); -if ( (perc_index < 0) || (perc_index >= n_perc_thresh_infos) ) { +if ( ptype == no_perc_thresh_type ) { - mlog << Error - << "\nSingleThresh::set(double pt, ThreshType ind, int perc_index, double t) -> " - << "bad perc_index ... " << perc_index << "\n\n"; + mlog << Error << "\nSingleThresh::set(double, ThreshType, PercThreshType, double) -> " + << "bad percentile threshold type\n\n"; exit ( 1 ); @@ -1515,12 +1530,12 @@ if ( (perc_index < 0) || (perc_index >= n_perc_thresh_infos) ) { Simple_Node * a = new Simple_Node; ConcatString cs; -cs << perc_thresh_info[perc_index].short_name << pt; +cs << perc_thresh_info_map.at(ptype).short_name << pt; if( !is_bad_data(t) ) cs << "(" << t << ")"; a->T = t; a->op = ind; -a->Ptype = perc_thresh_info[perc_index].type; +a->Ptype = ptype; a->PT = pt; a->s << thresh_type_str[ind] << cs; a->abbr_s << thresh_abbr_str[ind] << cs; @@ -1656,27 +1671,15 @@ return false; //////////////////////////////////////////////////////////////////////// -void SingleThresh::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr) - -{ - -set_perc(fptr, optr, cptr, 0, 0); - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void SingleThresh::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr, +void SingleThresh::set_perc(const NumArray *fptr, const NumArray *optr, + const NumArray *fcptr, const NumArray *ocptr, const SingleThresh *fthr, const SingleThresh *othr) + { if ( node ) { - node->set_perc(fptr, optr, cptr, fthr, othr); + node->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); } @@ -1796,23 +1799,11 @@ return; //////////////////////////////////////////////////////////////////////// -bool SingleThresh::check(double x) 
const - -{ - -return check(x, bad_data_double, bad_data_double); - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool SingleThresh::check(double x, double cmn, double csd) const +bool SingleThresh::check(double x, const ClimoPntInfo *cpi) const { -return ( node ? node->check(x, cmn, csd) : true ); +return ( node ? node->check(x, cpi) : true ); } diff --git a/src/basic/vx_config/threshold.h b/src/basic/vx_config/threshold.h index 3eb74b5a0b..c879cfe3c7 100644 --- a/src/basic/vx_config/threshold.h +++ b/src/basic/vx_config/threshold.h @@ -13,6 +13,7 @@ //////////////////////////////////////////////////////////////////////// #include +#include #include #include "concat_string.h" @@ -22,7 +23,6 @@ //////////////////////////////////////////////////////////////////////// - // // Enumeration of thresholding operations // @@ -39,7 +39,6 @@ enum ThreshType { thresh_complex = -2, no_thresh_type = -1 - }; static const int n_thresh_type = 7; @@ -55,82 +54,71 @@ static const char thresh_default_sep[] = ","; extern bool is_inclusive(ThreshType); - //////////////////////////////////////////////////////////////////////// - // // Enumeration of percentile threshold types // enum PercThreshType { - - perc_thresh_user_specified = 0, - perc_thresh_sample_fcst = 1, - perc_thresh_sample_obs = 2, - perc_thresh_sample_climo = 3, - perc_thresh_climo_dist = 4, - perc_thresh_freq_bias = 5, + perc_thresh_user_specified = 0, + perc_thresh_sample_fcst = 1, + perc_thresh_sample_obs = 2, + perc_thresh_sample_fcst_climo = 3, + perc_thresh_sample_obs_climo = 4, + perc_thresh_fcst_climo_dist = 5, + perc_thresh_obs_climo_dist = 6, + perc_thresh_freq_bias = 7, no_perc_thresh_type = -1 - }; - -static const int n_perc_thresh_type = 7; - +extern bool is_climo_dist_type(PercThreshType); struct PercThreshInfo { - - const PercThreshType type; - - const char * const short_name; - - const int short_name_length; - - const char * const long_name; - + const std::string 
short_name; + const std::string long_name; }; - -static const PercThreshInfo perc_thresh_info [] = { - - { perc_thresh_user_specified, "USP", 3, "USER_SPECIFIED_PERC" }, - - { perc_thresh_sample_fcst, "SFP", 3, "SAMPLE_FCST_PERC" }, - - { perc_thresh_sample_obs, "SOP", 3, "SAMPLE_OBS_PERC" }, - - { perc_thresh_sample_climo, "SCP", 3, "SAMPLE_CLIMO_PERC" }, - - { perc_thresh_climo_dist, "CDP", 3, "CLIMO_DIST_PERC" }, - - { perc_thresh_freq_bias, "FBIAS", 5, "FREQ_BIAS_PERC" }, - +static const std::map perc_thresh_info_map = { + { perc_thresh_user_specified, { "USP", "USER_SPECIFIED_PERC" } }, + { perc_thresh_sample_fcst, { "SFP", "SAMPLE_FCST_PERC" } }, + { perc_thresh_sample_obs, { "SOP", "SAMPLE_OBS_PERC" } }, + { perc_thresh_sample_fcst_climo, { "SFCP", "SAMPLE_FCST_CLIMO_PERC" } }, + { perc_thresh_sample_obs_climo, { "SOCP", "SAMPLE_OBS_CLIMO_PERC" } }, + { perc_thresh_fcst_climo_dist, { "FCDP", "CLIMO_FCST_DIST_PERC" } }, + { perc_thresh_obs_climo_dist, { "OCDP", "CLIMO_OBS_DIST_PERC" } }, + { perc_thresh_freq_bias, { "FBIAS", "FREQ_BIAS_PERC" } }, }; -static const int n_perc_thresh_infos = sizeof(perc_thresh_info)/sizeof(*perc_thresh_info); - static const int perc_thresh_default_precision = 0; static const double perc_thresh_default_tol = 0.05; - struct PC_info { + PercThreshType ptype; + double value; +}; - int perc_index; +extern bool parse_perc_thresh(const char *str, PC_info *info = nullptr); - double value; +struct ClimoPntInfo { + ClimoPntInfo() { clear(); } + ClimoPntInfo(double a, double b, double c, double d) : + fcmn(a), fcsd(b), ocmn(c), ocsd(d) {} + void set(double a, double b, double c, double d) { fcmn = a; fcsd = b; ocmn = c; ocsd = d; } + void clear() { fcmn = fcsd = ocmn = ocsd = bad_data_double; } + double fcmn; + double fcsd; + double ocmn; + double ocsd; }; - //////////////////////////////////////////////////////////////////////// - class SingleThresh; class Simple_Node; - 
//////////////////////////////////////////////////////////////////////// class ThreshNode { @@ -144,8 +132,7 @@ class ThreshNode { ThreshNode(); virtual ~ThreshNode(); - virtual bool check(double) const = 0; - virtual bool check(double, double, double) const = 0; + virtual bool check(double, const ClimoPntInfo *cpi = nullptr) const = 0; virtual ThreshNode * copy() const = 0; @@ -157,14 +144,14 @@ class ThreshNode { virtual double pvalue() const = 0; - virtual double climo_prob() const = 0; + virtual double obs_climo_prob() const = 0; virtual bool need_perc() const = 0; - virtual void set_perc(const NumArray *, const NumArray *, const NumArray *) = 0; - - virtual void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *) = 0; + virtual void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr) = 0; virtual void multiply_by(const double) = 0; @@ -186,31 +173,30 @@ class Or_Node : public ThreshNode { Or_Node(); ~Or_Node(); - bool check(double) const; - bool check(double, double, double) const; + bool check(double, const ClimoPntInfo *cpi = nullptr) const override; - ThreshNode * copy() const; + ThreshNode * copy() const override; - ThreshType type() const; + ThreshType type() const override; - double value() const; + double value() const override; - PercThreshType ptype() const; + PercThreshType ptype() const override; - double pvalue() const; + double pvalue() const override; - double climo_prob() const; + double obs_climo_prob() const override; - bool need_perc() const; + bool need_perc() const override; - void set_perc(const NumArray *, const NumArray *, const NumArray *); + void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr) override; - void set_perc(const NumArray *, const NumArray *, const 
NumArray *, - const SingleThresh *, const SingleThresh *); + void multiply_by(const double) override; - void multiply_by(const double); - - void get_simple_nodes(std::vector &) const; + void get_simple_nodes(std::vector &) const override; ThreshNode * left_child; ThreshNode * right_child; @@ -221,10 +207,10 @@ class Or_Node : public ThreshNode { //////////////////////////////////////////////////////////////////////// -inline ThreshType Or_Node::type() const { return ( thresh_complex ); } -inline double Or_Node::value() const { return ( bad_data_double ); } -inline PercThreshType Or_Node::ptype() const { return ( no_perc_thresh_type ); } -inline double Or_Node::pvalue() const { return ( bad_data_double ); } +inline ThreshType Or_Node::type() const { return thresh_complex ; } +inline double Or_Node::value() const { return bad_data_double ; } +inline PercThreshType Or_Node::ptype() const { return no_perc_thresh_type ; } +inline double Or_Node::pvalue() const { return bad_data_double ; } //////////////////////////////////////////////////////////////////////// @@ -237,31 +223,30 @@ class And_Node : public ThreshNode { And_Node(); ~And_Node(); - bool check(double) const; - bool check(double, double, double) const; - - ThreshType type() const; + bool check(double, const ClimoPntInfo *cpi = nullptr) const override; - double value() const; + ThreshType type() const override; - PercThreshType ptype() const; + double value() const override; - double pvalue() const; + PercThreshType ptype() const override; - double climo_prob() const; + double pvalue() const override; - bool need_perc() const; + double obs_climo_prob() const override; - void set_perc(const NumArray *, const NumArray *, const NumArray *); + bool need_perc() const override; - void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *); + void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = 
nullptr, + const SingleThresh *othr = nullptr) override; - void multiply_by(const double); + void multiply_by(const double) override; - void get_simple_nodes(std::vector &) const; + void get_simple_nodes(std::vector &) const override; - ThreshNode * copy() const; + ThreshNode * copy() const override; ThreshNode * left_child; ThreshNode * right_child; @@ -272,10 +257,10 @@ class And_Node : public ThreshNode { //////////////////////////////////////////////////////////////////////// -inline ThreshType And_Node::type() const { return ( thresh_complex ); } -inline double And_Node::value() const { return ( bad_data_double ); } -inline PercThreshType And_Node::ptype() const { return ( no_perc_thresh_type ); } -inline double And_Node::pvalue() const { return ( bad_data_double ); } +inline ThreshType And_Node::type() const { return thresh_complex ; } +inline double And_Node::value() const { return bad_data_double ; } +inline PercThreshType And_Node::ptype() const { return no_perc_thresh_type ; } +inline double And_Node::pvalue() const { return bad_data_double ; } //////////////////////////////////////////////////////////////////////// @@ -288,31 +273,30 @@ class Not_Node : public ThreshNode { Not_Node(); ~Not_Node(); - bool check(double) const; - bool check(double, double, double) const; + bool check(double, const ClimoPntInfo *cpi = nullptr) const override; - ThreshType type() const; + ThreshType type() const override; - double value() const; + double value() const override; - PercThreshType ptype() const; + PercThreshType ptype() const override; - double pvalue() const; + double pvalue() const override; - double climo_prob() const; + double obs_climo_prob() const override; - bool need_perc() const; + bool need_perc() const override; - void set_perc(const NumArray *, const NumArray *, const NumArray *); + void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr) 
override; - void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *); + void multiply_by(const double) override; - void multiply_by(const double); + void get_simple_nodes(std::vector &) const override; - void get_simple_nodes(std::vector &) const; - - ThreshNode * copy() const; + ThreshNode * copy() const override; ThreshNode * child; @@ -322,10 +306,10 @@ class Not_Node : public ThreshNode { //////////////////////////////////////////////////////////////////////// -inline ThreshType Not_Node::type() const { return ( thresh_complex ); } -inline double Not_Node::value() const { return ( bad_data_double ); } -inline PercThreshType Not_Node::ptype() const { return ( no_perc_thresh_type ); } -inline double Not_Node::pvalue() const { return ( bad_data_double ); } +inline ThreshType Not_Node::type() const { return thresh_complex ; } +inline double Not_Node::value() const { return bad_data_double ; } +inline PercThreshType Not_Node::ptype() const { return no_perc_thresh_type ; } +inline double Not_Node::pvalue() const { return bad_data_double ; } //////////////////////////////////////////////////////////////////////// @@ -354,39 +338,38 @@ class Simple_Node : public ThreshNode { void set_na(); - void set_perc(const NumArray *, const NumArray *, const NumArray *); - - void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *); + void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr) override; // // get stuff // - ThreshType type() const; + ThreshType type() const override; - double value() const; + double value() const override; - PercThreshType ptype() const; + PercThreshType ptype() const override; - double pvalue() const; + double pvalue() const override; - double climo_prob() const; + double obs_climo_prob() const override; - bool need_perc() const; + 
bool need_perc() const override; - void get_simple_nodes(std::vector &) const; + void get_simple_nodes(std::vector &) const override; // // do stuff // - ThreshNode * copy() const; + ThreshNode * copy() const override; - bool check(double) const; - bool check(double, double, double) const; + bool check(double, const ClimoPntInfo *cpi = nullptr) const override; - void multiply_by(const double); + void multiply_by(const double) override; }; @@ -394,10 +377,10 @@ class Simple_Node : public ThreshNode { //////////////////////////////////////////////////////////////////////// -inline ThreshType Simple_Node::type() const { return ( op ); } -inline double Simple_Node::value() const { return ( T ); } -inline PercThreshType Simple_Node::ptype() const { return ( Ptype ); } -inline double Simple_Node::pvalue() const { return ( PT ); } +inline ThreshType Simple_Node::type() const { return op ; } +inline double Simple_Node::value() const { return T ; } +inline PercThreshType Simple_Node::ptype() const { return Ptype ; } +inline double Simple_Node::pvalue() const { return PT ; } //////////////////////////////////////////////////////////////////////// @@ -430,14 +413,15 @@ class SingleThresh { void clear(); void set(double t, ThreshType); - void set(double pt, ThreshType, int perc_index, double t = bad_data_double); + void set(double pt, ThreshType, PercThreshType, double t = bad_data_double); void set(const ThreshNode *); void set(const char *); bool need_perc() const; - void set_perc(const NumArray *, const NumArray *, const NumArray *); - void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *); + void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr); void set_na(); @@ -445,7 +429,7 @@ class SingleThresh { double get_value() const; PercThreshType get_ptype() const; double get_pvalue() const; - double 
get_climo_prob() const; + double get_obs_climo_prob() const; void get_simple_nodes(std::vector &) const; void multiply_by(const double); @@ -453,8 +437,7 @@ class SingleThresh { ConcatString get_str(int precision = thresh_default_precision) const; ConcatString get_abbr_str(int precision = thresh_default_precision) const; - bool check(double) const; - bool check(double, double, double) const; + bool check(double, const ClimoPntInfo *cpi = nullptr) const; }; @@ -462,11 +445,11 @@ class SingleThresh { //////////////////////////////////////////////////////////////////////// -inline ThreshType SingleThresh::get_type() const { return ( node ? node->type() : thresh_na ); } -inline double SingleThresh::get_value() const { return ( node ? node->value() : bad_data_double ); } -inline PercThreshType SingleThresh::get_ptype() const { return ( node ? node->ptype() : no_perc_thresh_type ); } -inline double SingleThresh::get_pvalue() const { return ( node ? node->pvalue() : bad_data_double ); } -inline double SingleThresh::get_climo_prob() const { return ( node ? node->climo_prob() : bad_data_double ); } +inline ThreshType SingleThresh::get_type() const { return ( node ? node->type() : thresh_na ); } +inline double SingleThresh::get_value() const { return ( node ? node->value() : bad_data_double ); } +inline PercThreshType SingleThresh::get_ptype() const { return ( node ? node->ptype() : no_perc_thresh_type ); } +inline double SingleThresh::get_pvalue() const { return ( node ? node->pvalue() : bad_data_double ); } +inline double SingleThresh::get_obs_climo_prob() const { return ( node ? 
node->obs_climo_prob() : bad_data_double ); } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_log/Makefile.in b/src/basic/vx_log/Makefile.in index 272dddb3ca..9b4c7969ca 100644 --- a/src/basic/vx_log/Makefile.in +++ b/src/basic/vx_log/Makefile.in @@ -235,6 +235,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/basic/vx_log/logger.cc b/src/basic/vx_log/logger.cc index 8535cca6e4..c3d6ee1b48 100644 --- a/src/basic/vx_log/logger.cc +++ b/src/basic/vx_log/logger.cc @@ -627,7 +627,6 @@ Logger & Logger::operator<<(const string s) // // put the next character into the ConcatString msg // - // tmp[0] = s[i]; msg.add(s[i]); if (s[i] == '\n') @@ -636,7 +635,7 @@ Logger & Logger::operator<<(const string s) // this was a newline, so // put msg into the StringArray messages // - messages.add((string)msg); + messages.add((string)msg); // // clear msg, and continue checking s @@ -655,7 +654,7 @@ Logger & Logger::operator<<(const string s) { if (s[s.length() - 1] != '\n') { - messages.add((string)msg); + messages.add((string)msg); msg.clear(); } diff --git a/src/basic/vx_log/logger.h b/src/basic/vx_log/logger.h index d88333e98b..5f2c687e16 100644 --- a/src/basic/vx_log/logger.h +++ b/src/basic/vx_log/logger.h @@ -177,9 +177,9 @@ class LoggerDebug { ////////////////////////////////////////////////////////////////// -inline int LoggerDebug::value() const { return (Value); } +inline int LoggerDebug::value() const { return Value; } -inline LoggerDebug::operator int () const { return (Value); } +inline LoggerDebug::operator int () const { return Value; } ////////////////////////////////////////////////////////////////// @@ -292,9 +292,9 @@ class Logger ////////////////////////////////////////////////////////////////// -inline 
ConcatString Logger::log_filename() const { return (LogFilename); } +inline ConcatString Logger::log_filename() const { return LogFilename; } -inline int Logger::verbosity_level() const { return (VerbosityLevel); } +inline int Logger::verbosity_level() const { return VerbosityLevel; } inline bool Logger::is_open() const { return (out != nullptr); } diff --git a/src/basic/vx_log/string_array.cc b/src/basic/vx_log/string_array.cc index aacf27baf4..60ebc4190d 100644 --- a/src/basic/vx_log/string_array.cc +++ b/src/basic/vx_log/string_array.cc @@ -363,7 +363,7 @@ void StringArray::add_css(const std::string text) } Sorted = false; - + return; } @@ -450,7 +450,7 @@ void StringArray::insert(int i, const char * text) s.insert(s.begin()+i, text); Sorted = false; - + return; } @@ -464,7 +464,7 @@ bool StringArray::has(const std::string text) const { bool found = false; bool forward = true; - + if (Sorted && !IgnoreCase) { found = binary_search(s.begin(), s.end(), text); } @@ -496,13 +496,13 @@ bool StringArray::has(const std::string text, int & index, bool forward) const // bool found = false; index = -1; - + if (!s.empty()) { int count; std::string lower_text = text; std::vector::const_iterator it; if ( IgnoreCase ) transform(lower_text.begin(), lower_text.end(), lower_text.begin(), ::tolower); - + if (forward) { count = 0; for(it = s.begin(); it != s.end(); it++, count++) { @@ -552,7 +552,7 @@ bool StringArray::has(const std::string text, int & index, bool forward) const } if (found) index = count; } - + return found; } @@ -595,7 +595,7 @@ void StringArray::parse_delim(const std::string text, const char *delim) clear(); std::string str = text; - + size_t start = 0; size_t end = str.find_first_of(delim); while (end != string::npos) { @@ -608,7 +608,7 @@ void StringArray::parse_delim(const std::string text, const char *delim) s.push_back(str.substr(start).c_str()); Sorted = false; - + return; } @@ -729,15 +729,15 @@ void StringArray::sort() Sorted = true; return; } - + if ( 
!Sorted ) { std::sort(s.begin(), s.end()); } - + Sorted = true; - + return; - + } @@ -751,7 +751,7 @@ StringArray StringArray::uniq() const StringArray sa; sa.s = s; - + std::vector::iterator it; it = std::unique(sa.s.begin(), sa.s.end()); @@ -783,7 +783,7 @@ regex_t buffer; regex_t *preg = &buffer; // Check for null pointers -if( !reg_exp_str || !test_str ) return( false ); +if( !reg_exp_str || !test_str ) return false; if( regcomp(preg, reg_exp_str, REG_EXTENDED*REG_NOSUB) != 0 ) { mlog << Error << "\ncheck_reg_exp(char *, char *) -> " @@ -798,7 +798,7 @@ if( regexec(preg, test_str, 0, 0, 0) == 0 ) { valid = true; } // Free allocated memory. regfree( preg ); -return( valid ); +return valid; } diff --git a/src/basic/vx_log/string_array.h b/src/basic/vx_log/string_array.h index 41103fa513..1061eb83e8 100644 --- a/src/basic/vx_log/string_array.h +++ b/src/basic/vx_log/string_array.h @@ -132,11 +132,11 @@ class StringArray { //////////////////////////////////////////////////////////////////////// -inline int StringArray::n_elements() const { return ( (int) (s.size()) ); } +inline int StringArray::n_elements() const { return (int) s.size(); } -inline int StringArray::n () const { return ( s.size() ); } +inline int StringArray::n () const { return s.size(); } -inline int StringArray::max_length() const { return ( MaxLength ); } +inline int StringArray::max_length() const { return MaxLength; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_log/vx_log.h b/src/basic/vx_log/vx_log.h index ce8a8132cb..0ae10c3811 100644 --- a/src/basic/vx_log/vx_log.h +++ b/src/basic/vx_log/vx_log.h @@ -26,6 +26,15 @@ //////////////////////////////////////////////////////////////////////// +inline double get_exe_duration(clock_t start_clock, clock_t end_clock) { + return ((double)(end_clock - start_clock)) / CLOCKS_PER_SEC; +} + +inline double get_exe_duration(clock_t start_clock) { + return get_exe_duration(start_clock, clock()); +} + 
+//////////////////////////////////////////////////////////////////////// #endif // __VX_LOG_H__ diff --git a/src/basic/vx_math/Makefile.in b/src/basic/vx_math/Makefile.in index f42012d3ea..28c953fbbf 100644 --- a/src/basic/vx_math/Makefile.in +++ b/src/basic/vx_math/Makefile.in @@ -249,6 +249,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/basic/vx_math/affine.cc b/src/basic/vx_math/affine.cc index 9fa0965bd6..75dbc4cac5 100644 --- a/src/basic/vx_math/affine.cc +++ b/src/basic/vx_math/affine.cc @@ -1262,7 +1262,7 @@ rho = image_aspect/view_aspect; mag = min(rho, 1.0); -mag *= (view_width)/(image_width); +mag *= view_width/image_width; return mag; diff --git a/src/basic/vx_math/affine.h b/src/basic/vx_math/affine.h index 38f744a010..4c2bf5001c 100644 --- a/src/basic/vx_math/affine.h +++ b/src/basic/vx_math/affine.h @@ -129,17 +129,17 @@ class Box { //////////////////////////////////////////////////////////////////////// -inline double Box::left () const { return ( Left ); } -inline double Box::right() const { return ( Right ); } +inline double Box::left () const { return Left; } +inline double Box::right() const { return Right; } -inline double Box::bottom () const { return ( Bottom ); } -inline double Box::top () const { return ( Top ); } +inline double Box::bottom () const { return Bottom; } +inline double Box::top () const { return Top; } -inline double Box::x_ll () const { return ( Left ); } -inline double Box::y_ll () const { return ( Bottom ); } +inline double Box::x_ll () const { return Left; } +inline double Box::y_ll () const { return Bottom; } -inline double Box::x_ur () const { return ( Right ); } -inline double Box::y_ur () const { return ( Top ); } +inline double Box::x_ur () const { return Right; } +inline double Box::y_ur () const { return Top; 
} inline double Box::width () const { return ( Right - Left ); } inline double Box::height () const { return ( Top - Bottom ); } @@ -287,15 +287,15 @@ class Affine : public GeneralAffine { //////////////////////////////////////////////////////////////////////// -inline double Affine::m11() const { return ( M11 ); } -inline double Affine::m12() const { return ( M12 ); } -inline double Affine::m21() const { return ( M21 ); } -inline double Affine::m22() const { return ( M22 ); } +inline double Affine::m11() const { return M11; } +inline double Affine::m12() const { return M12; } +inline double Affine::m21() const { return M21; } +inline double Affine::m22() const { return M22; } -inline double Affine::tx () const { return ( TX ); } -inline double Affine::ty () const { return ( TY ); } +inline double Affine::tx () const { return TX; } +inline double Affine::ty () const { return TY; } -inline double Affine::det() const { return ( Det ); } +inline double Affine::det() const { return Det; } inline void Affine::calc_det() { Det = M11*M22 - M21*M12; return; } @@ -428,21 +428,21 @@ class ConformalAffine : public GeneralAffine { //////////////////////////////////////////////////////////////////////// -inline double ConformalAffine::angle() const { return ( Angle ); } +inline double ConformalAffine::angle() const { return Angle; } -inline double ConformalAffine::scale() const { return ( Scale ); } +inline double ConformalAffine::scale() const { return Scale; } -inline bool ConformalAffine::is_conformal() const { return ( true ); } +inline bool ConformalAffine::is_conformal() const { return true; } -inline double ConformalAffine::m11() const { return ( Scale*CosAngle ); } -inline double ConformalAffine::m12() const { return ( Scale*SinAngle ); } +inline double ConformalAffine::m11() const { return Scale*CosAngle; } +inline double ConformalAffine::m12() const { return Scale*SinAngle; } -inline double ConformalAffine::m21() const { return ( -Scale*SinAngle ); } -inline double 
ConformalAffine::m22() const { return ( Scale*CosAngle ); } +inline double ConformalAffine::m21() const { return -Scale*SinAngle; } +inline double ConformalAffine::m22() const { return Scale*CosAngle; } -inline double ConformalAffine::tx () const { return ( TX ); } -inline double ConformalAffine::ty () const { return ( TY ); } +inline double ConformalAffine::tx () const { return TX; } +inline double ConformalAffine::ty () const { return TY; } inline double ConformalAffine::det() const { return ( Scale*Scale ); } diff --git a/src/basic/vx_math/hist.cc b/src/basic/vx_math/hist.cc index d0bcd924e1..c5d2c89133 100644 --- a/src/basic/vx_math/hist.cc +++ b/src/basic/vx_math/hist.cc @@ -51,7 +51,7 @@ Histogram::~Histogram() { -if ( Count ) { delete [] Count; Count = (int *) nullptr; } +Count.clear(); } @@ -93,7 +93,7 @@ void Histogram::init_from_scratch() { -Count = (int *) nullptr; +Count.clear(); Nbins = 0; @@ -120,9 +120,7 @@ void Histogram::clear() { -int j; - -for (j=0; j #include @@ -28,7 +29,7 @@ class Histogram { private: - int * Count; + std::vector Count; int Nbins; @@ -100,16 +101,16 @@ class Histogram { //////////////////////////////////////////////////////////////////////// -inline int Histogram::nbins () const { return ( Nbins ); } +inline int Histogram::nbins () const { return Nbins ; } -inline double Histogram::bottom () const { return ( Bottom ); } -inline double Histogram::delta () const { return ( Delta ); } +inline double Histogram::bottom () const { return Bottom; } +inline double Histogram::delta () const { return Delta ; } -inline double Histogram::min_data_value () const { return ( MinValue ); } -inline double Histogram::max_data_value () const { return ( MaxValue ); } +inline double Histogram::min_data_value () const { return MinValue; } +inline double Histogram::max_data_value () const { return MaxValue; } -inline int Histogram::too_big_count () const { return ( TooBigCount ); } -inline int Histogram::too_small_count () const { return ( 
TooSmallCount ); } +inline int Histogram::too_big_count () const { return TooBigCount; } +inline int Histogram::too_small_count () const { return TooSmallCount; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_math/is_bad_data.h b/src/basic/vx_math/is_bad_data.h index 8b3f3ce7f7..cb1c4afa8c 100644 --- a/src/basic/vx_math/is_bad_data.h +++ b/src/basic/vx_math/is_bad_data.h @@ -28,38 +28,70 @@ inline int is_bad_data(int a) { - if(a == bad_data_int || std::isnan(a)) return(1); - else return(0); + if(a == bad_data_int || std::isnan(a)) return 1; + else return 0; } inline int is_bad_data(long long a) { - if(a == bad_data_ll || std::isnan(a)) return(1); - else return(0); + if(a == bad_data_ll || std::isnan(a)) return 1; + else return 0; } inline int is_bad_data(double a) { - if(fabs(a - bad_data_double) < default_tol || std::isnan(a)) return(1); - else return(0); + if(fabs(a - bad_data_double) < default_tol || std::isnan(a)) return 1; + else return 0; } inline int is_bad_data(float a) { - if(fabs(a - bad_data_float) < default_tol || std::isnan(a)) return(1); - else return(0); + if(fabs(a - bad_data_float) < default_tol || std::isnan(a)) return 1; + else return 0; } inline int is_bad_data(char a) { - return(a == bad_data_char); + return (a == bad_data_char); } inline int is_eq(double a, double b, double tol) { - if(fabs(a - b) < tol) return(1); - else return(0); + if(fabs(a - b) < tol) return 1; + else return 0; } inline int is_eq(double a, double b) { - return(is_eq(a, b, default_tol)); + return is_eq(a, b, default_tol); } +inline int is_eq(double a, int b) { + return is_eq(a, (double)b); +} + +inline int is_eq(int a, double b) { + return is_eq((double)a, b); +} + +inline int is_eq(double a, unixtime b) { + return is_eq(a, (double)b); +} + +inline int is_eq(unixtime a, double b) { + return is_eq((double)a, b); +} + +inline int is_eq(float a, float b) { + return is_eq((double)a, (double)b); +} + +inline int is_eq(double 
a, float b) { + return is_eq(a, (double)b); +} + +inline int is_eq(float a, double b) { + return is_eq((double)a, b); +} + +template +inline int is_eq(T a, T b) { + return (a == b); +} //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_math/legendre.cc b/src/basic/vx_math/legendre.cc index 6a7b78e269..f04741135f 100644 --- a/src/basic/vx_math/legendre.cc +++ b/src/basic/vx_math/legendre.cc @@ -117,9 +117,9 @@ void Legendre::init_from_scratch() { -P = 0; +P.clear(); -PP = 0; +PP.clear(); clear(); @@ -135,9 +135,9 @@ void Legendre::clear() { -if ( P ) { delete [] P; P = nullptr; } +P.clear(); -if ( PP ) { delete [] PP; PP = nullptr; } +PP.clear(); X = 0.0; @@ -201,9 +201,9 @@ clear(); MaxDegree = N; -P = new double [N + 1]; +P.resize(N + 1); -PP = new double [N + 1]; +PP.resize(N + 1); calc(0.0); diff --git a/src/basic/vx_math/legendre.h b/src/basic/vx_math/legendre.h index 6fcb1b508a..b9af82db02 100644 --- a/src/basic/vx_math/legendre.h +++ b/src/basic/vx_math/legendre.h @@ -19,6 +19,10 @@ #define __VX_LEGENDRE_H__ +//////////////////////////////////////////////////////////////////////// + +#include + //////////////////////////////////////////////////////////////////////// @@ -37,9 +41,9 @@ class Legendre { double X; // last x value - double * P; // allocated + std::vector P; // allocated - double * PP; // allocated + std::vector PP; // allocated public: @@ -90,15 +94,15 @@ class Legendre { //////////////////////////////////////////////////////////////////////// -inline double Legendre::value() const { return ( P[MaxDegree] ); } +inline double Legendre::value() const { return P[MaxDegree]; } -inline double Legendre::last_x() const { return ( X ); } +inline double Legendre::last_x() const { return X; } -inline double Legendre::value(int __n__) const { return ( P[__n__] ); } +inline double Legendre::value(int __n__) const { return P[__n__]; } -inline double Legendre::der_value() const { return ( PP[MaxDegree] ); } 
+inline double Legendre::der_value() const { return PP[MaxDegree]; } -inline double Legendre::der_value(int __n__) const { return ( PP[__n__] ); } +inline double Legendre::der_value(int __n__) const { return PP[__n__]; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_math/ptile.cc b/src/basic/vx_math/ptile.cc index 44156fb3fb..3618ae9ad4 100644 --- a/src/basic/vx_math/ptile.cc +++ b/src/basic/vx_math/ptile.cc @@ -215,11 +215,10 @@ if ( n <= 1 ) return 0; int i, j, ties_current, ties_total, tie_rank_start = 0, tie_rank_end; double tie_rank_mean; RankInfo *rank_info = (RankInfo *) nullptr; -double *ordered_array = (double *) nullptr; +vector ordered_array(n); double prev_v, v; rank_info = new RankInfo [n]; -ordered_array = new double [n]; // Each RankInfo structure contains a index value from 0 to n-1 and a pointer // to the data to be ranked @@ -296,9 +295,8 @@ if(ties_current != 0) { } if(rank_info) { delete [] rank_info; rank_info = (RankInfo *) nullptr; } -if(ordered_array) { delete [] ordered_array; ordered_array = (double *) nullptr; } -return(ties_total); +return ties_total; } diff --git a/src/basic/vx_math/vx_vector.h b/src/basic/vx_math/vx_vector.h index 425287b564..faa523241f 100644 --- a/src/basic/vx_math/vx_vector.h +++ b/src/basic/vx_math/vx_vector.h @@ -111,9 +111,9 @@ class Vector { //////////////////////////////////////////////////////////////////////// -inline double Vector::x() const { return ( X ); } -inline double Vector::y() const { return ( Y ); } -inline double Vector::z() const { return ( Z ); } +inline double Vector::x() const { return X; } +inline double Vector::y() const { return Y; } +inline double Vector::z() const { return Z; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/CircularTemplate.cc b/src/basic/vx_util/CircularTemplate.cc index 593b1eedec..4fb91c1f85 100644 --- a/src/basic/vx_util/CircularTemplate.cc +++ 
b/src/basic/vx_util/CircularTemplate.cc @@ -41,12 +41,12 @@ CircularTemplate::CircularTemplate(const int width, bool wrap_lon) : _wrapLon = wrap_lon; - // width of 2 is not supported - if (width == 2) { + // width of 2 is not supported + if (width == 2) { mlog << Error << "\nCircularTemplate::CircularTemplate() -> " << "unsupported width of " << width << " for circles.\n\n"; - exit(1); - } + exit(1); + } bool evenWidth = ((width % 2) == 0); @@ -59,9 +59,9 @@ CircularTemplate::CircularTemplate(const int width, bool wrap_lon) : // offset is within the circle. double radius = (width-1)/2.0; - + // Create the offsets list. - + // Need to increase the area we look at if the width is even, because // some valid offset points will actually be farther from the reference point // than the radius, because the reference point is offset from the true @@ -71,31 +71,31 @@ CircularTemplate::CircularTemplate(const int width, bool wrap_lon) : if(evenWidth) maxOffset++; int minOffset = static_cast(floor(-1 * radius)); - + for(int y = minOffset; y <= maxOffset; y++) { for(int x = minOffset; x <= maxOffset; x++) { double double_x = (double)x; double double_y = (double)y; - if(evenWidth) { + if(evenWidth) { // if width is even, the reference point is actually shifted 1/2 a grid spacing down and to the left, // from the true center of the circle. - // - // so when we calculate distance, we need to subtract .5 so that the distance reflects the distance from the center - // of the circle, instead of the distance from the reference. - // - // for example - a circle with width == 4. The reference point is the lower left corner of the center square. - // the point directly below that is at (0,-1), but it's actually (-.5, -1.5) from the center of the circle. - // - // another example - same circle. The point directly to the right of the reference point is (1,0), but it's - // actually (.5,-.5) from the center. 
- + // + // so when we calculate distance, we need to subtract .5 so that the distance reflects the distance from the center + // of the circle, instead of the distance from the reference. + // + // for example - a circle with width == 4. The reference point is the lower left corner of the center square. + // the point directly below that is at (0,-1), but it's actually (-.5, -1.5) from the center of the circle. + // + // another example - same circle. The point directly to the right of the reference point is (1,0), but it's + // actually (.5,-.5) from the center. + double_x -= 0.5; double_y -= 0.5; } double distance= sqrt((double_x * double_x) + (double_y * double_y)); - if(distance <= radius) _addOffset(x, y); + if(distance <= radius) _addOffset(x, y); } // end for x } // end for y diff --git a/src/basic/vx_util/CircularTemplate.h b/src/basic/vx_util/CircularTemplate.h index 9ba5d9417c..29b38f1980 100644 --- a/src/basic/vx_util/CircularTemplate.h +++ b/src/basic/vx_util/CircularTemplate.h @@ -61,7 +61,7 @@ class CircularTemplate : public GridTemplate { // Return the class name for error messages. 
static const char* _className(void) { - return("CircleTemplate"); + return "CircleTemplate"; } }; diff --git a/src/basic/vx_util/GridPoint.cc b/src/basic/vx_util/GridPoint.cc index 8ddd5f31b9..009cf86bf4 100644 --- a/src/basic/vx_util/GridPoint.cc +++ b/src/basic/vx_util/GridPoint.cc @@ -64,7 +64,6 @@ GridPoint::~GridPoint(void) void GridPoint::rotate(const double angle) { - //const double M_PI = 3.14159265358979323846; double angle_rad = angle * M_PI / 180.0; double cosa = cos(angle_rad); double sina = sin(angle_rad); diff --git a/src/basic/vx_util/GridPoint.h b/src/basic/vx_util/GridPoint.h index 4e3cacd53e..accb34da02 100644 --- a/src/basic/vx_util/GridPoint.h +++ b/src/basic/vx_util/GridPoint.h @@ -103,14 +103,14 @@ class GridPoint bool operator==(const GridPoint &other) const { return (this->x == other.x && - this->y == other.y); + this->y == other.y); } bool operator!=(const GridPoint &other) const { return (this->x != other.x || - this->y != other.y); + this->y != other.y); } @@ -134,7 +134,7 @@ class GridPoint static const char *_className(void) { - return("GridPoint"); + return "GridPoint"; } }; diff --git a/src/basic/vx_util/Makefile.am b/src/basic/vx_util/Makefile.am index b0daa451e9..db0896bbd6 100644 --- a/src/basic/vx_util/Makefile.am +++ b/src/basic/vx_util/Makefile.am @@ -61,6 +61,7 @@ libvx_util_a_SOURCES = ascii_table.cc ascii_table.h \ empty_string.h \ polyline.h polyline.cc \ mask_poly.h mask_poly.cc \ + mask_sid.h mask_sid.cc \ read_fortran_binary.h read_fortran_binary.cc \ met_buffer.h met_buffer.cc \ smart_buffer.h smart_buffer.cc \ diff --git a/src/basic/vx_util/Makefile.in b/src/basic/vx_util/Makefile.in index 83579ba1b4..2a7015be79 100644 --- a/src/basic/vx_util/Makefile.in +++ b/src/basic/vx_util/Makefile.in @@ -129,13 +129,14 @@ am__libvx_util_a_SOURCES_DIST = ascii_table.cc ascii_table.h \ interp_util.h two_to_one.cc two_to_one.h get_filenames.cc \ get_filenames.h util_constants.h bool_to_string.h \ empty_string.h polyline.h polyline.cc 
mask_poly.h mask_poly.cc \ - read_fortran_binary.h read_fortran_binary.cc met_buffer.h \ - met_buffer.cc smart_buffer.h smart_buffer.cc vx_util.h \ - CircularTemplate.h CircularTemplate.cc GridTemplate.h \ - GridTemplate.cc GridPoint.h GridPoint.cc GridOffset.h \ - GridOffset.cc observation.h observation.cc stat_column_defs.h \ - handle_openmp.h handle_openmp.cc RectangularTemplate.h \ - RectangularTemplate.cc python_line.h python_line.cc + mask_sid.h mask_sid.cc read_fortran_binary.h \ + read_fortran_binary.cc met_buffer.h met_buffer.cc \ + smart_buffer.h smart_buffer.cc vx_util.h CircularTemplate.h \ + CircularTemplate.cc GridTemplate.h GridTemplate.cc GridPoint.h \ + GridPoint.cc GridOffset.h GridOffset.cc observation.h \ + observation.cc stat_column_defs.h handle_openmp.h \ + handle_openmp.cc RectangularTemplate.h RectangularTemplate.cc \ + python_line.h python_line.cc @ENABLE_PYTHON_TRUE@am__objects_1 = \ @ENABLE_PYTHON_TRUE@ libvx_util_a-python_line.$(OBJEXT) am__objects_2 = $(am__objects_1) @@ -172,6 +173,7 @@ am_libvx_util_a_OBJECTS = libvx_util_a-ascii_table.$(OBJEXT) \ libvx_util_a-get_filenames.$(OBJEXT) \ libvx_util_a-polyline.$(OBJEXT) \ libvx_util_a-mask_poly.$(OBJEXT) \ + libvx_util_a-mask_sid.$(OBJEXT) \ libvx_util_a-read_fortran_binary.$(OBJEXT) \ libvx_util_a-met_buffer.$(OBJEXT) \ libvx_util_a-smart_buffer.$(OBJEXT) \ @@ -230,6 +232,7 @@ am__depfiles_remade = ./$(DEPDIR)/libvx_util_a-CircularTemplate.Po \ ./$(DEPDIR)/libvx_util_a-main.Po \ ./$(DEPDIR)/libvx_util_a-make_path.Po \ ./$(DEPDIR)/libvx_util_a-mask_poly.Po \ + ./$(DEPDIR)/libvx_util_a-mask_sid.Po \ ./$(DEPDIR)/libvx_util_a-memory.Po \ ./$(DEPDIR)/libvx_util_a-met_buffer.Po \ ./$(DEPDIR)/libvx_util_a-normalize.Po \ @@ -354,6 +357,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ @@ -507,6 +511,7 @@ 
libvx_util_a_SOURCES = ascii_table.cc ascii_table.h \ empty_string.h \ polyline.h polyline.cc \ mask_poly.h mask_poly.cc \ + mask_sid.h mask_sid.cc \ read_fortran_binary.h read_fortran_binary.cc \ met_buffer.h met_buffer.cc \ smart_buffer.h smart_buffer.cc \ @@ -601,6 +606,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_util_a-main.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_util_a-make_path.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_util_a-mask_poly.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_util_a-mask_sid.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_util_a-memory.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_util_a-met_buffer.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_util_a-normalize.Po@am__quote@ # am--include-marker @@ -1127,6 +1133,20 @@ libvx_util_a-mask_poly.obj: mask_poly.cc @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_util_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libvx_util_a-mask_poly.obj `if test -f 'mask_poly.cc'; then $(CYGPATH_W) 'mask_poly.cc'; else $(CYGPATH_W) '$(srcdir)/mask_poly.cc'; fi` +libvx_util_a-mask_sid.o: mask_sid.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_util_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libvx_util_a-mask_sid.o -MD -MP -MF $(DEPDIR)/libvx_util_a-mask_sid.Tpo -c -o libvx_util_a-mask_sid.o `test -f 'mask_sid.cc' || echo '$(srcdir)/'`mask_sid.cc +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libvx_util_a-mask_sid.Tpo $(DEPDIR)/libvx_util_a-mask_sid.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
$(AM_V_CXX)source='mask_sid.cc' object='libvx_util_a-mask_sid.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_util_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libvx_util_a-mask_sid.o `test -f 'mask_sid.cc' || echo '$(srcdir)/'`mask_sid.cc + +libvx_util_a-mask_sid.obj: mask_sid.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_util_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libvx_util_a-mask_sid.obj -MD -MP -MF $(DEPDIR)/libvx_util_a-mask_sid.Tpo -c -o libvx_util_a-mask_sid.obj `if test -f 'mask_sid.cc'; then $(CYGPATH_W) 'mask_sid.cc'; else $(CYGPATH_W) '$(srcdir)/mask_sid.cc'; fi` +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libvx_util_a-mask_sid.Tpo $(DEPDIR)/libvx_util_a-mask_sid.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='mask_sid.cc' object='libvx_util_a-mask_sid.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_util_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libvx_util_a-mask_sid.obj `if test -f 'mask_sid.cc'; then $(CYGPATH_W) 'mask_sid.cc'; else $(CYGPATH_W) '$(srcdir)/mask_sid.cc'; fi` + libvx_util_a-read_fortran_binary.o: read_fortran_binary.cc @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_util_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libvx_util_a-read_fortran_binary.o -MD -MP -MF $(DEPDIR)/libvx_util_a-read_fortran_binary.Tpo -c -o libvx_util_a-read_fortran_binary.o `test -f 'read_fortran_binary.cc' || echo '$(srcdir)/'`read_fortran_binary.cc @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libvx_util_a-read_fortran_binary.Tpo 
$(DEPDIR)/libvx_util_a-read_fortran_binary.Po @@ -1439,6 +1459,7 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libvx_util_a-main.Po -rm -f ./$(DEPDIR)/libvx_util_a-make_path.Po -rm -f ./$(DEPDIR)/libvx_util_a-mask_poly.Po + -rm -f ./$(DEPDIR)/libvx_util_a-mask_sid.Po -rm -f ./$(DEPDIR)/libvx_util_a-memory.Po -rm -f ./$(DEPDIR)/libvx_util_a-met_buffer.Po -rm -f ./$(DEPDIR)/libvx_util_a-normalize.Po @@ -1530,6 +1551,7 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libvx_util_a-main.Po -rm -f ./$(DEPDIR)/libvx_util_a-make_path.Po -rm -f ./$(DEPDIR)/libvx_util_a-mask_poly.Po + -rm -f ./$(DEPDIR)/libvx_util_a-mask_sid.Po -rm -f ./$(DEPDIR)/libvx_util_a-memory.Po -rm -f ./$(DEPDIR)/libvx_util_a-met_buffer.Po -rm -f ./$(DEPDIR)/libvx_util_a-normalize.Po diff --git a/src/basic/vx_util/RectangularTemplate.h b/src/basic/vx_util/RectangularTemplate.h index 768315b68e..c6efef462d 100644 --- a/src/basic/vx_util/RectangularTemplate.h +++ b/src/basic/vx_util/RectangularTemplate.h @@ -66,7 +66,7 @@ class RectangularTemplate : public GridTemplate { // Return the class name for error messages. 
static const char* _className(void) { - return("RectangularTemplate"); + return "RectangularTemplate"; } }; diff --git a/src/basic/vx_util/ascii_header.h b/src/basic/vx_util/ascii_header.h index 499803bbab..a75074228b 100644 --- a/src/basic/vx_util/ascii_header.h +++ b/src/basic/vx_util/ascii_header.h @@ -86,12 +86,12 @@ inline void AsciiHeaderLine::set_version (const char *s) { Version = s; Versi inline void AsciiHeaderLine::set_data_type(const char *s) { DataType = s; DataType.ws_strip(); } inline void AsciiHeaderLine::set_line_type(const char *s) { LineType = s; LineType.ws_strip(); } -inline const char * AsciiHeaderLine::version() const { return(Version.c_str()); } -inline const char * AsciiHeaderLine::data_type() const { return(DataType.c_str()); } -inline const char * AsciiHeaderLine::line_type() const { return(LineType.c_str()); } -inline const char * AsciiHeaderLine::var_index_name() const { return(VarIndexName.c_str()); } -inline int AsciiHeaderLine::var_index_offset() const { return(VarIndexOffset); } -inline int AsciiHeaderLine::var_beg_offset() const { return(VarBegOffset); } +inline const char * AsciiHeaderLine::version() const { return Version.c_str(); } +inline const char * AsciiHeaderLine::data_type() const { return DataType.c_str(); } +inline const char * AsciiHeaderLine::line_type() const { return LineType.c_str(); } +inline const char * AsciiHeaderLine::var_index_name() const { return VarIndexName.c_str(); } +inline int AsciiHeaderLine::var_index_offset() const { return VarIndexOffset; } +inline int AsciiHeaderLine::var_beg_offset() const { return VarBegOffset; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/ascii_table.h b/src/basic/vx_util/ascii_table.h index 5e6de9b239..c7a47b0c86 100644 --- a/src/basic/vx_util/ascii_table.h +++ b/src/basic/vx_util/ascii_table.h @@ -328,34 +328,34 @@ class AsciiTable { //////////////////////////////////////////////////////////////////////// -inline int 
AsciiTable::nrows() const { return ( Nrows ); } +inline int AsciiTable::nrows() const { return Nrows; } -inline int AsciiTable::ncols() const { return ( Ncols ); } +inline int AsciiTable::ncols() const { return Ncols; } -inline char AsciiTable::col_sep_char() const { return ( ColSepChar ); } +inline char AsciiTable::col_sep_char() const { return ColSepChar; } -inline char AsciiTable::pad_char() const { return ( PadChar ); } +inline char AsciiTable::pad_char() const { return PadChar; } -inline bool AsciiTable::fill_blank () const { return ( FillBlank ); } +inline bool AsciiTable::fill_blank () const { return FillBlank; } inline void AsciiTable::set_fill_blank (bool tf) { FillBlank = tf; return; } -inline int AsciiTable::table_indent () const { return ( TableIndent ); } +inline int AsciiTable::table_indent () const { return TableIndent; } -inline int AsciiTable::precision() const { return ( Precision ); } +inline int AsciiTable::precision() const { return Precision; } -inline const char * AsciiTable::f_float_format() const { return ( f_FloatFormat ); } -inline const char * AsciiTable::g_float_format() const { return ( g_FloatFormat ); } +inline const char * AsciiTable::f_float_format() const { return f_FloatFormat; } +inline const char * AsciiTable::g_float_format() const { return g_FloatFormat; } -inline bool AsciiTable::comma_string() const { return ( DoCommaString ); } +inline bool AsciiTable::comma_string() const { return DoCommaString; } -inline bool AsciiTable::delete_trailing_blank_rows() const { return ( DeleteTrailingBlankRows ); } +inline bool AsciiTable::delete_trailing_blank_rows() const { return DeleteTrailingBlankRows; } -inline bool AsciiTable::elim_trailing_whitespace() const { return ( ElimTrailingWhitespace ); } +inline bool AsciiTable::elim_trailing_whitespace() const { return ElimTrailingWhitespace; } -inline bool AsciiTable::align_decimal_points() const { return ( AlignDecimalPoints ); } +inline bool AsciiTable::align_decimal_points() const { 
return AlignDecimalPoints; } -inline bool AsciiTable::decimal_points_aligned() const { return ( DecimalPointsAligned ); } +inline bool AsciiTable::decimal_points_aligned() const { return DecimalPointsAligned; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/check_endian.h b/src/basic/vx_util/check_endian.h index d81b6db824..de682470ff 100644 --- a/src/basic/vx_util/check_endian.h +++ b/src/basic/vx_util/check_endian.h @@ -85,8 +85,8 @@ extern void shuffle_8(void *); inline void handle_big_4 (void *) { return; } inline void handle_big_8 (void *) { return; } - inline bool is_little_endian () { return ( false ); } - inline bool is_big_endian () { return ( true ); } + inline bool is_little_endian () { return false; } + inline bool is_big_endian () { return true ; } #else @@ -98,8 +98,8 @@ extern void shuffle_8(void *); inline void handle_big_4 (void * p) { shuffle_4(p); return; } inline void handle_big_8 (void * p) { shuffle_8(p); return; } - inline bool is_little_endian () { return ( true ); } - inline bool is_big_endian () { return ( false ); } + inline bool is_little_endian () { return true ; } + inline bool is_big_endian () { return false; } #endif diff --git a/src/basic/vx_util/command_line.h b/src/basic/vx_util/command_line.h index 9c8f25592a..b4734e855f 100644 --- a/src/basic/vx_util/command_line.h +++ b/src/basic/vx_util/command_line.h @@ -120,7 +120,7 @@ class CLOptionInfoArray { //////////////////////////////////////////////////////////////////////// -inline int CLOptionInfoArray::n_elements() const { return ( Nelements ); } +inline int CLOptionInfoArray::n_elements() const { return Nelements; } //////////////////////////////////////////////////////////////////////// @@ -219,9 +219,9 @@ class CommandLine { //////////////////////////////////////////////////////////////////////// -inline int CommandLine::n() const { return ( args.n_elements() ); } +inline int CommandLine::n() const { return 
args.n_elements(); } -inline int CommandLine::max_length() const { return ( args.max_length() ); } +inline int CommandLine::max_length() const { return args.max_length(); } inline void CommandLine::set_usage(UsageFunction f) { Usage = f; return; } diff --git a/src/basic/vx_util/crc_array.h b/src/basic/vx_util/crc_array.h index 5603c1eb78..0eea7d529c 100644 --- a/src/basic/vx_util/crc_array.h +++ b/src/basic/vx_util/crc_array.h @@ -22,7 +22,6 @@ #include #include "num_array.h" -#include "int_array.h" #include "is_bad_data.h" #include "nint.h" #include "vx_cal.h" @@ -65,11 +64,11 @@ class CRC_Array { CRC_Array & operator=(const CRC_Array & _a) { - if ( this == &_a ) return ( * this ); + if ( this == &_a ) return *this; assign(_a); - return ( * this ); + return *this; } @@ -94,8 +93,8 @@ class CRC_Array { // get stuff // - int n_elements() const { return ( Nelements ); } - int n () const { return ( Nelements ); } + int n_elements() const { return Nelements; } + int n () const { return Nelements; } T operator[] (int) const; @@ -108,6 +107,7 @@ class CRC_Array { void add(const T &); void add(const CRC_Array &); + void add_uniq(const T &, bool forward=true); void add_css_sec(const char *); void set(const T & val); @@ -145,7 +145,7 @@ clear(); for(int j=0; j::operator==(const CRC_Array & a) const { -if ( Nelements != a.Nelements ) return ( false ); +if ( Nelements != a.Nelements ) return false; for(int j=0; j= Nelements) ) { } -return ( e[i] ); +return e[i]; } @@ -424,7 +424,7 @@ else { } } -return ( found ); +return found; } @@ -454,7 +454,7 @@ else { } } -return ( found ); +return found; } @@ -509,6 +509,22 @@ return; //////////////////////////////////////////////////////////////////////// +template + +void CRC_Array::add_uniq(const T & k, bool forward) + +{ + +if ( !has(k, forward) ) add(k); + +return; + +} + + +//////////////////////////////////////////////////////////////////////// + + template void CRC_Array::add_css_sec(const char * text) @@ -577,7 +593,7 @@ 
for(j=0, count=0; j::min() const { -if ( Nelements == 0 ) return ( bad_data_int ); +if ( Nelements == 0 ) return bad_data_int; int j; @@ -605,7 +621,7 @@ for(j=0; j::max() const { -if(Nelements == 0) return(bad_data_int); +if(Nelements == 0) return bad_data_int; int j; @@ -633,7 +649,7 @@ for(j=0; j= N_items) ) { + + ConcatString cs = (File ? File->filename() : ""); + + mlog << Error << "\nDataLine::set_item(int) -> " + << "range check error setting line number " << LineNumber + << ", item number " << k+1 << " of " << N_items + << " from file \"" << cs << "\"\n\n"; + + exit ( 1 ); + +} + +Items[k] = item_cs; + +return; + +} + + +//////////////////////////////////////////////////////////////////////// + + const char * DataLine::get_item(int k) const { diff --git a/src/basic/vx_util/data_line.h b/src/basic/vx_util/data_line.h index bb9de49c8e..5b5b9d46b8 100644 --- a/src/basic/vx_util/data_line.h +++ b/src/basic/vx_util/data_line.h @@ -98,6 +98,12 @@ class DataLine { void dump(std::ostream &, int depth = 0) const; + // + // set stuff + // + + void set_item(int, const ConcatString &); + // // retrieve stuff // @@ -140,13 +146,13 @@ class DataLine { //////////////////////////////////////////////////////////////////////// -inline int DataLine::n_items () const { return ( N_items ); } +inline int DataLine::n_items () const { return N_items; } -inline int DataLine::line_number () const { return ( LineNumber ); } +inline int DataLine::line_number () const { return LineNumber; } -inline const char * DataLine::get_line () const { return ( Line.c_str() ); } +inline const char * DataLine::get_line () const { return Line.c_str(); } -inline const char * DataLine::get_delimiter() const { return ( Delimiter.c_str() ); } +inline const char * DataLine::get_delimiter() const { return Delimiter.c_str(); } inline void DataLine::set_is_header(bool __tf__) { IsHeader = __tf__; return; } @@ -207,13 +213,13 @@ class LineDataFile { 
//////////////////////////////////////////////////////////////////////// -inline const char * LineDataFile::filename() const { return ( Filename.c_str() ); } +inline const char * LineDataFile::filename() const { return Filename.c_str(); } -inline const char * LineDataFile::short_filename() const { return ( ShortFilename.c_str() ); } +inline const char * LineDataFile::short_filename() const { return ShortFilename.c_str(); } -inline int LineDataFile::last_line_number() const { return ( Last_Line_Number ); } +inline int LineDataFile::last_line_number() const { return Last_Line_Number; } -inline const StringArray & LineDataFile::header() const { return ( Header ); } +inline const StringArray & LineDataFile::header() const { return Header; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/data_plane.cc b/src/basic/vx_util/data_plane.cc index eb2ffbfc7a..e3edf3d0cf 100644 --- a/src/basic/vx_util/data_plane.cc +++ b/src/basic/vx_util/data_plane.cc @@ -435,7 +435,7 @@ bool DataPlane::is_all_bad_data() const { } } - return(status); + return status; } /////////////////////////////////////////////////////////////////////////////// @@ -461,7 +461,7 @@ double DataPlane::get(int x, int y) const { n = two_to_one(x, y); - return(Data[n]); + return Data[n]; } /////////////////////////////////////////////////////////////////////////////// @@ -518,7 +518,7 @@ void DataPlane::censor(const ThreshArray &censor_thresh, for(i=0; i & DataPlane::buf() { return ( Data ); } +inline const double * DataPlane::data() const { return Data.data(); } +inline std::vector & DataPlane::buf() { return Data; } //////////////////////////////////////////////////////////////////////// @@ -244,7 +244,7 @@ class DataPlaneArray { //////////////////////////////////////////////////////////////////////// -inline int DataPlaneArray::n_planes () const { return ( Nplanes ); } +inline int DataPlaneArray::n_planes () const { return Nplanes; } 
//////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/data_plane_util.cc b/src/basic/vx_util/data_plane_util.cc index f07da09913..e039a0bf4e 100644 --- a/src/basic/vx_util/data_plane_util.cc +++ b/src/basic/vx_util/data_plane_util.cc @@ -210,7 +210,9 @@ DataPlane smooth_field(const DataPlane &dp, void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, int width, GridTemplateFactory::GridTemplates shape, bool wrap_lon, SingleThresh t, - const DataPlane *cmn, const DataPlane *csd, double vld_t) { + const DataPlane *fcmn, const DataPlane *fcsd, + const DataPlane *ocmn, const DataPlane *ocsd, + double vld_t) { GridPoint *gp = nullptr; int x, y; int n_vld = 0; @@ -227,26 +229,43 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, } // Check climatology data, if needed - if(cmn && csd) { - if(!cmn->is_empty() && !csd->is_empty()) use_climo = true; - } + if(fcmn && !fcmn->is_empty() && + fcsd && !fcsd->is_empty() && + ocmn && !ocmn->is_empty() && + ocsd && !ocsd->is_empty()) use_climo = true; // Check climatology dimensions if(use_climo) { // Check dimensions - if(cmn->nx() != dp.nx() || cmn->ny() != dp.ny()) { + if(fcmn->nx() != dp.nx() || fcmn->ny() != dp.ny()) { + mlog << Error << "\nfractional_coverage() -> " + << "forecast climatology mean dimension (" + << fcmn->nx() << ", " << fcmn->ny() + << ") does not match the data dimenion (" + << dp.nx() << ", " << dp.ny() << ")!\n\n"; + exit(1); + } + if(fcsd->nx() != dp.nx() || fcsd->ny() != dp.ny()) { + mlog << Error << "\nfractional_coverage() -> " + << "forecast climatology standard deviation dimension (" + << fcsd->nx() << ", " << fcsd->ny() + << ") does not match the data dimenion (" + << dp.nx() << ", " << dp.ny() << ")!\n\n"; + exit(1); + } + if(ocmn->nx() != dp.nx() || ocmn->ny() != dp.ny()) { mlog << Error << "\nfractional_coverage() -> " - << "climatology mean dimension (" - << cmn->nx() << ", " << cmn->ny() + << "observation climatology 
mean dimension (" + << ocmn->nx() << ", " << ocmn->ny() << ") does not match the data dimenion (" << dp.nx() << ", " << dp.ny() << ")!\n\n"; exit(1); } - if(csd->nx() != dp.nx() || csd->ny() != dp.ny()) { + if(ocsd->nx() != dp.nx() || ocsd->ny() != dp.ny()) { mlog << Error << "\nfractional_coverage() -> " - << "climatology standard deviation dimension (" - << csd->nx() << ", " << csd->ny() + << "observation climatology standard deviation dimension (" + << ocsd->nx() << ", " << ocsd->ny() << ") does not match the data dimenion (" << dp.nx() << ", " << dp.ny() << ")!\n\n"; exit(1); @@ -255,7 +274,7 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, #pragma omp parallel default(none) \ shared(mlog, dp, frac_dp, shape, width, wrap_lon, t) \ - shared(use_climo, cmn, csd, vld_t, bad) \ + shared(use_climo, fcmn, fcsd, ocmn, ocsd, vld_t, bad)\ private(x, y, n_vld, n_thr, gp, v) { @@ -293,9 +312,14 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, gp = gt->getNextInGrid()) { if(is_bad_data(v = dp.get(gp->x, gp->y))) continue; n_vld++; - if(t.check(v, - (use_climo ? cmn->get(gp->x, gp->y) : bad), - (use_climo ? csd->get(gp->x, gp->y) : bad))) n_thr++; + ClimoPntInfo cpi; + if(use_climo) { + cpi.set(fcmn->get(gp->x, gp->y), + fcsd->get(gp->x, gp->y), + ocmn->get(gp->x, gp->y), + ocsd->get(gp->x, gp->y)); + } + if(t.check(v, &cpi)) n_thr++; } } // Subtract off the bottom edge, shift up, and add the top. @@ -307,9 +331,14 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, gp = gt->getNextInBotEdge()) { if(is_bad_data(v = dp.get(gp->x, gp->y))) continue; n_vld--; - if(t.check(v, - (use_climo ? cmn->get(gp->x, gp->y) : bad), - (use_climo ? 
csd->get(gp->x, gp->y) : bad))) n_thr--; + ClimoPntInfo cpi; + if(use_climo) { + cpi.set(fcmn->get(gp->x, gp->y), + fcsd->get(gp->x, gp->y), + ocmn->get(gp->x, gp->y), + ocsd->get(gp->x, gp->y)); + } + if(t.check(v, &cpi)) n_thr--; } // Increment Y @@ -321,9 +350,14 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, gp = gt->getNextInTopEdge()) { if(is_bad_data(v = dp.get(gp->x, gp->y))) continue; n_vld++; - if(t.check(v, - (use_climo ? cmn->get(gp->x, gp->y) : bad), - (use_climo ? csd->get(gp->x, gp->y) : bad))) n_thr++; + ClimoPntInfo cpi; + if(use_climo) { + cpi.set(fcmn->get(gp->x, gp->y), + fcsd->get(gp->x, gp->y), + ocmn->get(gp->x, gp->y), + ocsd->get(gp->x, gp->y)); + } + if(t.check(v, &cpi)) n_thr++; } } diff --git a/src/basic/vx_util/data_plane_util.h b/src/basic/vx_util/data_plane_util.h index 7f0b9b27a7..0cb74224ad 100644 --- a/src/basic/vx_util/data_plane_util.h +++ b/src/basic/vx_util/data_plane_util.h @@ -53,7 +53,9 @@ extern DataPlane smooth_field(const DataPlane &dp, extern void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, int width, GridTemplateFactory::GridTemplates shape, bool wrap_lon, SingleThresh t, - const DataPlane *cmn, const DataPlane *csd, double vld_t); + const DataPlane *fcmn, const DataPlane *fcsd, + const DataPlane *ocmn, const DataPlane *ocsd, + double vld_t); extern void apply_mask(const DataPlane &, const MaskPlane &, NumArray &); extern void apply_mask(DataPlane &, const MaskPlane &); diff --git a/src/basic/vx_util/int_array.h b/src/basic/vx_util/int_array.h index c435c4aba7..608f6f2dfa 100644 --- a/src/basic/vx_util/int_array.h +++ b/src/basic/vx_util/int_array.h @@ -20,8 +20,6 @@ #include -#include "num_array.h" - #include "crc_array.h" @@ -34,6 +32,12 @@ typedef CRC_Array IntArray; //////////////////////////////////////////////////////////////////////// +extern ConcatString write_css(const IntArray &); + + +//////////////////////////////////////////////////////////////////////// + + #endif /* 
__INT_ARRAY_H__ */ diff --git a/src/basic/vx_util/interp_util.cc b/src/basic/vx_util/interp_util.cc index b5cb88c412..4bf797c2c6 100644 --- a/src/basic/vx_util/interp_util.cc +++ b/src/basic/vx_util/interp_util.cc @@ -704,7 +704,7 @@ double interp_geog_match(const DataPlane &dp, const GridTemplate >, //////////////////////////////////////////////////////////////////////// double interp_nbrhd(const DataPlane &dp, const GridTemplate >, int x, int y, - double t, const SingleThresh *st, double cmn, double csd, + double t, const SingleThresh *st, const ClimoPntInfo *cpi, const MaskPlane *mp) { int count, count_thr; @@ -723,7 +723,7 @@ double interp_nbrhd(const DataPlane &dp, const GridTemplate >, int x, int y, if(is_bad_data(data)) continue; count++; - if(st->check(data, cmn, csd)) count_thr++; + if(st->check(data, cpi)) count_thr++; } // Check whether enough valid grid points were found @@ -1099,8 +1099,8 @@ double compute_horz_interp(const DataPlane &dp, const GridTemplateFactory::GridTemplates shape, bool wrap_lon, double interp_thresh, const SingleThresh *cat_thresh) { - return compute_horz_interp(dp, obs_x, obs_y, obs_v, bad_data_double, - bad_data_double, mthd, width, shape, wrap_lon, + return compute_horz_interp(dp, obs_x, obs_y, obs_v, nullptr, + mthd, width, shape, wrap_lon, interp_thresh, cat_thresh); } @@ -1108,7 +1108,7 @@ double compute_horz_interp(const DataPlane &dp, double compute_horz_interp(const DataPlane &dp, double obs_x, double obs_y, - double obs_v, double cmn, double csd, + double obs_v, const ClimoPntInfo *cpi, const InterpMthd mthd, const int width, const GridTemplateFactory::GridTemplates shape, bool wrap_lon, double interp_thresh, @@ -1157,7 +1157,7 @@ double compute_horz_interp(const DataPlane &dp, case InterpMthd::Nbrhd: // Neighborhood fractional coverage v = interp_nbrhd(dp, *gt, x, y, - interp_thresh, cat_thresh, cmn, csd); + interp_thresh, cat_thresh, cpi); break; case InterpMthd::Bilin: // Bilinear interpolation diff --git 
a/src/basic/vx_util/interp_util.h b/src/basic/vx_util/interp_util.h index e8f30975de..9e5561eb63 100644 --- a/src/basic/vx_util/interp_util.h +++ b/src/basic/vx_util/interp_util.h @@ -64,10 +64,10 @@ extern NumArray interp_points (const DataPlane &dp, const GridTemplate >, dou // GridTemplate version takes center x/y extern NumArray interp_points (const DataPlane &dp, const GridTemplate >, int x, int y); -extern double interp_min (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = 0); -extern double interp_max (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = 0); -extern double interp_median (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = 0); -extern double interp_uw_mean (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = 0); +extern double interp_min (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = nullptr); +extern double interp_max (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = nullptr); +extern double interp_median (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = nullptr); +extern double interp_uw_mean (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = nullptr); // Non-GridTemplate version takes lower-left corner x/y extern double interp_min_ll (const DataPlane &dp, int x_ll, int y_ll, int w, double t); @@ -76,19 +76,19 @@ extern double interp_median_ll (const DataPlane &dp, int x_ll, int y_ll, int extern double interp_uw_mean_ll (const DataPlane &dp, int x_ll, int y_ll, int w, double t); // GridTemplate version takes center x/y -extern double interp_dw_mean (const DataPlane &, const GridTemplate >, double obs_x, double obs_y, int i_pow, double t, const MaskPlane *mp = 0); -extern double interp_ls_fit (const DataPlane &, const GridTemplate >, 
double obs_x, double obs_y, double t, const MaskPlane *mp = 0); +extern double interp_dw_mean (const DataPlane &, const GridTemplate >, double obs_x, double obs_y, int i_pow, double t, const MaskPlane *mp = nullptr); +extern double interp_ls_fit (const DataPlane &, const GridTemplate >, double obs_x, double obs_y, double t, const MaskPlane *mp = nullptr); extern void interp_gaussian_dp(DataPlane &, const GaussianInfo &, double t); extern double interp_gaussian (const DataPlane &, const DataPlane &, double obs_x, double obs_y, int max_r, double t); -extern double interp_geog_match(const DataPlane &, const GridTemplate >, double obs_x, double obs_y, double obs_v, const MaskPlane *mp = 0); +extern double interp_geog_match(const DataPlane &, const GridTemplate >, double obs_x, double obs_y, double obs_v, const MaskPlane *mp = nullptr); extern double interp_nbrhd (const DataPlane &, const GridTemplate >, int x, int y, double t, const SingleThresh *, - double cmn, double csd, const MaskPlane *mp = 0); -extern double interp_bilin (const DataPlane &, bool wrap_lon, double obs_x, double obs_y, const MaskPlane *mp = 0); -extern double interp_xy (const DataPlane &, bool wrap_lon, int x, int y, const MaskPlane *mp = 0); + const ClimoPntInfo *, const MaskPlane *mp = nullptr); +extern double interp_bilin (const DataPlane &, bool wrap_lon, double obs_x, double obs_y, const MaskPlane *mp = nullptr); +extern double interp_xy (const DataPlane &, bool wrap_lon, int x, int y, const MaskPlane *mp = nullptr); -extern double interp_best (const DataPlane &dp, const GridTemplate >, int x, int y, double obs_v, double t, const MaskPlane *mp = 0); +extern double interp_best (const DataPlane &dp, const GridTemplate >, int x, int y, double obs_v, double t, const MaskPlane *mp = nullptr); extern void get_xy_ll (double x, double y, int w, int h, int &x_ll, int &y_ll); @@ -115,15 +115,15 @@ extern double compute_horz_interp(const DataPlane &dp, const InterpMthd mthd, const int width, const 
GridTemplateFactory::GridTemplates shape, bool wrap_lon, double interp_thresh, - const SingleThresh *cat_thresh = 0); + const SingleThresh *cat_thresh = nullptr); extern double compute_horz_interp(const DataPlane &dp, double obs_x, double obs_y, - double obs_v, double cmn, double csd, + double obs_v, const ClimoPntInfo *, const InterpMthd mthd, const int width, const GridTemplateFactory::GridTemplates shape, bool wrap_lon, double interp_thresh, - const SingleThresh *cat_thresh = 0); + const SingleThresh *cat_thresh = nullptr); extern double compute_vert_pinterp(double, double, double, double, double); extern double compute_vert_zinterp(double, double, double, double, double); diff --git a/src/basic/vx_util/is_number.h b/src/basic/vx_util/is_number.h index 991416d2d1..2807ae7c9f 100644 --- a/src/basic/vx_util/is_number.h +++ b/src/basic/vx_util/is_number.h @@ -45,7 +45,7 @@ inline std::ostream & operator<<(std::ostream & __out, const Number & __n) if ( __n.is_int ) __out << (__n.i); else __out << (__n.d); -return ( __out ); +return __out; } diff --git a/src/basic/vx_util/long_array.h b/src/basic/vx_util/long_array.h index 99380b88ad..83864f9684 100644 --- a/src/basic/vx_util/long_array.h +++ b/src/basic/vx_util/long_array.h @@ -78,9 +78,9 @@ class LongArray { //////////////////////////////////////////////////////////////////////// -inline int LongArray::n_elements() const { return ( Nelements ); } +inline int LongArray::n_elements() const { return Nelements; } -inline LongArray::operator long * () const { return ( e ); } +inline LongArray::operator long * () const { return e; } inline void LongArray::erase() { Nelements = 0; return; } diff --git a/src/basic/vx_util/main.cc b/src/basic/vx_util/main.cc index f75b648e3f..f9b55fa203 100644 --- a/src/basic/vx_util/main.cc +++ b/src/basic/vx_util/main.cc @@ -1,5 +1,5 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* -// ** Copyright UCAR (c) 2022 - 2023 +// ** Copyright UCAR (c) 2022 - 2024 // ** University 
Corporation for Atmospheric Research (UCAR) // ** National Center for Atmospheric Research (NCAR) // ** Research Applications Lab (RAL) diff --git a/src/basic/vx_util/mask_poly.h b/src/basic/vx_util/mask_poly.h index 0b9c3c1b30..76f3f0ef1a 100644 --- a/src/basic/vx_util/mask_poly.h +++ b/src/basic/vx_util/mask_poly.h @@ -95,9 +95,9 @@ class MaskPoly { //////////////////////////////////////////////////////////////////////// -inline ConcatString MaskPoly::name() const { return ( Name ); } -inline ConcatString MaskPoly::file_name() const { return ( FileName ); } -inline int MaskPoly::n_points() const { return ( Npoints ); } +inline ConcatString MaskPoly::name() const { return Name ; } +inline ConcatString MaskPoly::file_name() const { return FileName; } +inline int MaskPoly::n_points() const { return Npoints ; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/mask_sid.cc b/src/basic/vx_util/mask_sid.cc new file mode 100644 index 0000000000..7c888bfda3 --- /dev/null +++ b/src/basic/vx_util/mask_sid.cc @@ -0,0 +1,161 @@ +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +// ** Copyright UCAR (c) 1992 - 2024 +// ** University Corporation for Atmospheric Research (UCAR) +// ** National Center for Atmospheric Research (NCAR) +// ** Research Applications Lab (RAL) +// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* + +/////////////////////////////////////////////////////////////////////////////// + +#include "vx_util.h" + +#include "mask_sid.h" + +using namespace std; + +/////////////////////////////////////////////////////////////////////////////// +// +// Code for MaskSID struct +// +/////////////////////////////////////////////////////////////////////////////// + +MaskSID::MaskSID() { + init_from_scratch(); +} + +//////////////////////////////////////////////////////////////////////// + +MaskSID::~MaskSID() { + clear(); +} + 
+//////////////////////////////////////////////////////////////////////// + +MaskSID::MaskSID(const MaskSID &m) { + + init_from_scratch(); + + assign(m); +} + +//////////////////////////////////////////////////////////////////////// + +MaskSID & MaskSID::operator=(const MaskSID &m) noexcept { + + if(this == &m) return *this; + + assign(m); + + return *this; +} + +//////////////////////////////////////////////////////////////////////// + +void MaskSID::init_from_scratch() { + + clear(); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void MaskSID::clear() { + Name.clear(); + HasWeights = false; + SIDMap.clear(); +} + +//////////////////////////////////////////////////////////////////////// + +void MaskSID::assign(const MaskSID & m) { + Name = m.Name; + HasWeights = m.HasWeights; + SIDMap = m.SIDMap; + + return; +} + +/////////////////////////////////////////////////////////////////////////////// + +bool MaskSID::operator==(const MaskSID &m) const { + bool match = true; + + if(!(Name == m.Name ) || + !(SIDMap == m.SIDMap)) { + match = false; + } + + return match; +} + +/////////////////////////////////////////////////////////////////////////////// + +const std::map & MaskSID::sid_map() const { + return SIDMap; +} + +/////////////////////////////////////////////////////////////////////////////// + +void MaskSID::set_name(const string &s) { + Name = s; + + return; +} + +/////////////////////////////////////////////////////////////////////////////// + +void MaskSID::add(const string &text) { + ConcatString sid(text); + + // Default weight value of 1.0 + double weight = 1.0; + + // Check for optional weight + StringArray sa(sid.split("(")); + if(sa.n() > 1) { + sid = sa[0]; + weight = stod(sa[1]); + HasWeights = true; + } + + // Add station ID map entry + if(SIDMap.count(sid) == 0) SIDMap[sid] = weight; + + return; +} + +/////////////////////////////////////////////////////////////////////////////// + +void 
MaskSID::add_css(const string &text) { + StringArray sa; + sa.add_css(text); + for(int i=0; i 0; +} + +/////////////////////////////////////////////////////////////////////////////// + +bool MaskSID::has_sid(const string &s, double &weight) const { + bool found = false; + + if(SIDMap.count(s) == 0) { + weight = bad_data_double; + } + else { + found = true; + weight = SIDMap.at(s); + } + + return found; +} + +/////////////////////////////////////////////////////////////////////////////// + diff --git a/src/basic/vx_util/mask_sid.h b/src/basic/vx_util/mask_sid.h new file mode 100644 index 0000000000..81b7071f37 --- /dev/null +++ b/src/basic/vx_util/mask_sid.h @@ -0,0 +1,71 @@ +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +// ** Copyright UCAR (c) 1992 - 2024 +// ** University Corporation for Atmospheric Research (UCAR) +// ** National Center for Atmospheric Research (NCAR) +// ** Research Applications Lab (RAL) +// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +//////////////////////////////////////////////////////////////////////// + +#ifndef __MASK_SID_H__ +#define __MASK_SID_H__ + +#include "vx_util.h" + +//////////////////////////////////////////////////////////////////////// +// +// Class to store masking station id information +// +//////////////////////////////////////////////////////////////////////// + +class MaskSID { + + void init_from_scratch(); + + void assign(const MaskSID &); + + // Mask name + ConcatString Name; + + // Boolean for non-default weights + bool HasWeights; + + // Mapping of SID name to weight value + std::map SIDMap; + + public: + + MaskSID(); + ~MaskSID(); + MaskSID(const MaskSID &); + MaskSID & operator=(const MaskSID &) noexcept; + + void clear(); + bool operator==(const MaskSID &) const; + + int n() const; + std::string name() const; + bool has_weights() const; + const std::map & sid_map() const; + + void set_name(const std::string &); + + // Formatted 
as: station_name(numeric_weight) + void add(const std::string &); + void add_css(const std::string &); + bool has_sid(const std::string &) const ; + bool has_sid(const std::string &, double &) const; +}; + +//////////////////////////////////////////////////////////////////////// + +inline int MaskSID::n() const { return (int) SIDMap.size(); } +inline std::string MaskSID::name() const { return Name; } +inline bool MaskSID::has_weights() const { return HasWeights; } + +//////////////////////////////////////////////////////////////////////// + +#endif // __MASK_SID_H__ + +//////////////////////////////////////////////////////////////////////// + diff --git a/src/basic/vx_util/memory.cc b/src/basic/vx_util/memory.cc index 621b3be0bb..297902685b 100644 --- a/src/basic/vx_util/memory.cc +++ b/src/basic/vx_util/memory.cc @@ -1,4 +1,4 @@ -// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +/// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* // ** Copyright UCAR (c) 1992 - 2024 // ** University Corporation for Atmospheric Research (UCAR) // ** National Center for Atmospheric Research (NCAR) @@ -33,7 +33,7 @@ void oom() { void oom_grib2() { mlog << Error << "\nOut of memory reading GRIB2 data! 
Exiting!\n" - << "Check that MET and the GRIB2C library were compiled " + << "Check that MET and the GRIB2C library were compiled " << "consistently, either with or without the -D__64BIT__ " << "flag.\n\n"; exit(1); diff --git a/src/basic/vx_util/met_buffer.h b/src/basic/vx_util/met_buffer.h index 2f6ff02f42..0386e906a8 100644 --- a/src/basic/vx_util/met_buffer.h +++ b/src/basic/vx_util/met_buffer.h @@ -98,10 +98,10 @@ class MetBuffer { //////////////////////////////////////////////////////////////////////// -inline unsigned char * MetBuffer::operator()() const { return ( Buf ); } +inline unsigned char * MetBuffer::operator()() const { return Buf; } -inline bigint MetBuffer::n_bytes() const { return ( Nbytes ); } -inline bigint MetBuffer::n_alloc() const { return ( Nalloc ); } +inline bigint MetBuffer::n_bytes() const { return Nbytes; } +inline bigint MetBuffer::n_alloc() const { return Nalloc; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/ncrr_array.h b/src/basic/vx_util/ncrr_array.h index b802b7372c..e4b1fdc511 100644 --- a/src/basic/vx_util/ncrr_array.h +++ b/src/basic/vx_util/ncrr_array.h @@ -67,11 +67,11 @@ class NCRR_Array { NCRR_Array & operator=(const NCRR_Array & _a) { - if ( this == _a ) return ( * this ); + if ( this == _a ) return *this; assign(_a); - return ( * this ); + return *this; } @@ -89,9 +89,9 @@ class NCRR_Array { // get stuff // - int n_elements() const { return ( Nelements );} + int n_elements() const { return Nelements;} - int n () const { return ( Nelements );} + int n () const { return Nelements;} T & operator[](int) const; diff --git a/src/basic/vx_util/num_array.cc b/src/basic/vx_util/num_array.cc index baaac11c6a..06452934f3 100644 --- a/src/basic/vx_util/num_array.cc +++ b/src/basic/vx_util/num_array.cc @@ -19,6 +19,7 @@ #include "num_array.h" +#include "int_array.h" #include "is_bad_data.h" #include "ptile.h" #include "nint.h" @@ -122,7 +123,7 @@ void 
NumArray::init_from_scratch() { clear(); - + return; } @@ -170,7 +171,7 @@ void NumArray::assign(const NumArray & a) clear(); e = a.e; - + Sorted = a.Sorted; return; @@ -208,7 +209,7 @@ void NumArray::dump(ostream & out, int depth) const int j; for (j=0; j data (n); + vector data_loc (n); + vector data_rank (n); + if ( data.size() < n || data_loc.size() < n || data_rank.size() < n) { + mlog << Error << "\nint NumArray::rank_array() -> " << "memory allocation error\n\n"; - + exit ( 1 ); - + } // @@ -678,20 +677,13 @@ int NumArray::rank_array(int &ties) // Compute the rank of the data and store the ranks in the data_rank array // Keep track of the number of ties in the ranks. // - ties = do_rank(data, data_rank, n_vld); + ties = do_rank(data.data(), data_rank.data(), n_vld); // // Store the data_rank values // for(i=0; i & vals() const; double * buf(); int has(int, bool forward=true) const; @@ -127,12 +127,13 @@ class NumArray { //////////////////////////////////////////////////////////////////////// -inline int NumArray::n_elements() const { return ( e.size() ); } -inline int NumArray::n () const { return ( e.size() ); } -inline const double * NumArray::vals() const { return ( e.data() ); } -inline double * NumArray::buf() { return ( e.data() ); } -inline void NumArray::inc(int i, int v) { e[i] += v; return; } -inline void NumArray::inc(int i, double v) { e[i] += v; return; } +inline int NumArray::n_elements() const { return e.size(); } +inline int NumArray::n() const { return e.size(); } +inline const std::vector & + NumArray::vals() const { return e; } +inline double * NumArray::buf() { return e.data(); } +inline void NumArray::inc(int i, int v) { e[i] += v; return; } +inline void NumArray::inc(int i, double v) { e[i] += v; return; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/polyline.cc b/src/basic/vx_util/polyline.cc index f75ee6ef50..03c38ae00e 100644 --- a/src/basic/vx_util/polyline.cc +++ 
b/src/basic/vx_util/polyline.cc @@ -190,7 +190,7 @@ void Polyline::extend_points(int n) { if(!u || !v) { mlog << Error << "\nPolyline::extend_points(int) -> " - << "memory allocation error 1" << "\n\n"; + << "memory allocation error 1" << "\n\n"; exit(1); } @@ -303,7 +303,7 @@ double Polyline::angle() const { if(n_points < 3) { mlog << Error << "\nPolyline::angle() -> " - << "not enough points!\n\n"; + << "not enough points!\n\n"; exit(1); } @@ -655,7 +655,7 @@ double polyline_dist(const Polyline & a, const Polyline & b) { // done // - return ( min_dist ); + return min_dist; } /////////////////////////////////////////////////////////////////////////////// @@ -679,9 +679,9 @@ double polyline_pw_ls_mean_dist(const Polyline &a, const Polyline &b) { for(j=0; j qmp ) { dx = x_test - qx; dy = y_test - qy; - return ( sqrt( dx*dx + dy*dy ) ); + return sqrt( dx*dx + dy*dy ); } vx = rmpx - t0*cx; vy = rmpy - t0*cy; - return ( sqrt( vx*vx + vy*vy ) ); + return sqrt( vx*vx + vy*vy ); } diff --git a/src/basic/vx_util/smart_buffer.h b/src/basic/vx_util/smart_buffer.h index 2d85465381..bad1524476 100644 --- a/src/basic/vx_util/smart_buffer.h +++ b/src/basic/vx_util/smart_buffer.h @@ -90,13 +90,13 @@ class SmartBuffer { //////////////////////////////////////////////////////////////////////// -inline int SmartBuffer::size() const { return ( Size ); } +inline int SmartBuffer::size() const { return Size; } -inline bool SmartBuffer::is_empty() const { return ( Buf == 0 ); } +inline bool SmartBuffer::is_empty() const { return ( Buf == nullptr ); } -inline SmartBuffer::operator unsigned char * () const { return ( Buf ); } +inline SmartBuffer::operator unsigned char * () const { return Buf; } -inline SmartBuffer::operator void * () const { return ( Buf ); } +inline SmartBuffer::operator void * () const { return Buf; } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/stat_column_defs.h b/src/basic/vx_util/stat_column_defs.h index 
826d5101c9..984ef3c40b 100644 --- a/src/basic/vx_util/stat_column_defs.h +++ b/src/basic/vx_util/stat_column_defs.h @@ -13,7 +13,7 @@ //////////////////////////////////////////////////////////////////////// -static const char * hdr_columns [] = { +static const char * const hdr_columns [] = { "VERSION", "MODEL", "DESC", "FCST_LEAD", "FCST_VALID_BEG", "FCST_VALID_END", @@ -29,17 +29,17 @@ static const char * hdr_columns [] = { "LINE_TYPE" }; -static const char * fho_columns [] = { +static const char * const fho_columns [] = { "TOTAL", "F_RATE", "H_RATE", "O_RATE" }; -static const char * ctc_columns [] = { +static const char * const ctc_columns [] = { "TOTAL", "FY_OY", "FY_ON", "FN_OY", "FN_ON", "EC_VALUE" }; -static const char * cts_columns [] = { +static const char * const cts_columns [] = { "TOTAL", "BASER", "BASER_NCL", "BASER_NCU", "BASER_BCL", "BASER_BCU", "FMEAN", "FMEAN_NCL", "FMEAN_NCU", "FMEAN_BCL", "FMEAN_BCU", @@ -64,11 +64,11 @@ static const char * cts_columns [] = { "HSS_EC", "HSS_EC_BCL", "HSS_EC_BCU", "EC_VALUE" }; -static const char * mctc_columns [] = { +static const char * const mctc_columns [] = { "TOTAL", "N_CAT", "Fi_Oj", "EC_VALUE" }; -static const char * mcts_columns [] = { +static const char * const mcts_columns [] = { "TOTAL", "N_CAT", "ACC", "ACC_NCL", "ACC_NCU", "ACC_BCL", "ACC_BCU", "HK", "HK_BCL", "HK_BCU", @@ -77,7 +77,7 @@ static const char * mcts_columns [] = { "HSS_EC", "HSS_EC_BCL", "HSS_EC_BCU", "EC_VALUE" }; -static const char * cnt_columns [] = { +static const char * const cnt_columns [] = { "TOTAL", "FBAR", "FBAR_NCL", "FBAR_NCU", "FBAR_BCL", "FBAR_BCU", "FSTDEV", "FSTDEV_NCL", "FSTDEV_NCU", "FSTDEV_BCL", "FSTDEV_BCU", @@ -108,36 +108,36 @@ static const char * cnt_columns [] = { "SI", "SI_BCL", "SI_BCU" }; -static const char * sl1l2_columns [] = { +static const char * const sl1l2_columns [] = { "TOTAL", "FBAR", "OBAR", "FOBAR", "FFBAR", "OOBAR", "MAE" }; -static const char * sal1l2_columns [] = { +static const char * const 
sal1l2_columns [] = { "TOTAL", "FABAR", "OABAR", "FOABAR", "FFABAR", "OOABAR", "MAE" }; -static const char * vl1l2_columns [] = { +static const char * const vl1l2_columns [] = { "TOTAL", "UFBAR", "VFBAR", "UOBAR", "VOBAR", "UVFOBAR", "UVFFBAR", "UVOOBAR", "F_SPEED_BAR", - "O_SPEED_BAR", "DIR_ME", "DIR_MAE", - "DIR_MSE" + "O_SPEED_BAR", "TOTAL_DIR", "DIR_ME", + "DIR_MAE", "DIR_MSE" }; -static const char * val1l2_columns [] = { +static const char * const val1l2_columns [] = { "TOTAL", "UFABAR", "VFABAR", "UOABAR", "VOABAR", "UVFOABAR", "UVFFABAR", "UVOOABAR", "FA_SPEED_BAR", - "OA_SPEED_BAR", "DIRA_ME", "DIRA_MAE", - "DIRA_MSE" + "OA_SPEED_BAR", "TOTAL_DIR", "DIRA_ME", + "DIRA_MAE", "DIRA_MSE" }; -static const char * vcnt_columns [] = { +static const char * const vcnt_columns [] = { "TOTAL", "FBAR", "FBAR_BCL", "FBAR_BCU", "OBAR", "OBAR_BCL", "OBAR_BCU", @@ -160,18 +160,19 @@ static const char * vcnt_columns [] = { "ANOM_CORR", "ANOM_CORR_NCL", "ANOM_CORR_NCU", "ANOM_CORR_BCL", "ANOM_CORR_BCU", "ANOM_CORR_UNCNTR", "ANOM_CORR_UNCNTR_BCL", "ANOM_CORR_UNCNTR_BCU", + "TOTAL_DIR", "DIR_ME", "DIR_ME_BCL", "DIR_ME_BCU", "DIR_MAE", "DIR_MAE_BCL", "DIR_MAE_BCU", "DIR_MSE", "DIR_MSE_BCL", "DIR_MSE_BCU", "DIR_RMSE", "DIR_RMSE_BCL", "DIR_RMSE_BCU" }; -static const char * pct_columns [] = { +static const char * const pct_columns [] = { "TOTAL", "N_THRESH", "THRESH_", "OY_", "ON_" }; -static const char * pstd_columns [] = { +static const char * const pstd_columns [] = { "TOTAL", "N_THRESH", "BASER", "BASER_NCL", "BASER_NCU", "RELIABILITY", "RESOLUTION", "UNCERTAINTY", "ROC_AUC", @@ -180,36 +181,37 @@ static const char * pstd_columns [] = { "BSS", "BSS_SMPL", "THRESH_", }; -static const char * pjc_columns [] = { +static const char * const pjc_columns [] = { "TOTAL", "N_THRESH", "THRESH_", "OY_TP_", "ON_TP_", "CALIBRATION_", "REFINEMENT_", "LIKELIHOOD_", "BASER_" }; -static const char * prc_columns [] = { +static const char * const prc_columns [] = { "TOTAL", "N_THRESH", "THRESH_", 
"PODY_", "POFD_" }; -static const char * eclv_columns [] = { +static const char * const eclv_columns [] = { "TOTAL", "BASER", "VALUE_BASER", "N_PNT", "CL_", "VALUE_" }; -static const char * mpr_columns [] = { - "TOTAL", "INDEX", "OBS_SID", - "OBS_LAT", "OBS_LON", "OBS_LVL", - "OBS_ELV", "FCST", "OBS", - "OBS_QC", "CLIMO_MEAN", "CLIMO_STDEV", - "CLIMO_CDF" +static const char * const mpr_columns [] = { + "TOTAL", "INDEX", "OBS_SID", + "OBS_LAT", "OBS_LON", "OBS_LVL", + "OBS_ELV", "FCST", "OBS", + "OBS_QC", + "OBS_CLIMO_MEAN", "OBS_CLIMO_STDEV", "OBS_CLIMO_CDF", + "FCST_CLIMO_MEAN", "FCST_CLIMO_STDEV" }; -static const char * nbrctc_columns [] = { +static const char * const nbrctc_columns [] = { "TOTAL", "FY_OY", "FY_ON", "FN_OY", "FN_ON" }; -static const char * nbrcts_columns [] = { +static const char * const nbrcts_columns [] = { "TOTAL", "BASER", "BASER_NCL", "BASER_NCU", "BASER_BCL", "BASER_BCU", "FMEAN", "FMEAN_NCL", "FMEAN_NCU", "FMEAN_BCL", "FMEAN_BCU", @@ -233,7 +235,7 @@ static const char * nbrcts_columns [] = { "BAGSS", "BAGSS_BCL", "BAGSS_BCU" }; -static const char * nbrcnt_columns [] = { +static const char * const nbrcnt_columns [] = { "TOTAL", "FBS", "FBS_BCL", "FBS_BCU", "FSS", "FSS_BCL", "FSS_BCU", @@ -243,14 +245,14 @@ static const char * nbrcnt_columns [] = { "O_RATE", "O_RATE_BCL", "O_RATE_BCU" }; -static const char * grad_columns [] = { +static const char * const grad_columns [] = { "TOTAL", "FGBAR", "OGBAR", "MGBAR", "EGBAR", "S1", "S1_OG", "FGOG_RATIO", "DX", "DY" }; -static const char * dmap_columns [] = { +static const char * const dmap_columns [] = { "TOTAL", "FY", "OY", "FBIAS", "BADDELEY", "HAUSDORFF", "MED_FO", "MED_OF", "MED_MIN", "MED_MAX", "MED_MEAN", @@ -259,7 +261,7 @@ static const char * dmap_columns [] = { "G", "GBETA", "BETA_VALUE" }; -static const char * isc_columns [] = { +static const char * const isc_columns [] = { "TOTAL", "TILE_DIM", "TILE_XLL", "TILE_YLL", "NSCALE", "ISCALE", "MSE", @@ -267,7 +269,7 @@ static const char * 
isc_columns [] = { "BASER", "FBIAS" }; -static const char * ecnt_columns [] = { +static const char * const ecnt_columns [] = { "TOTAL", "N_ENS", "CRPS", "CRPSS", "IGN", "ME", "RMSE", "SPREAD", "ME_OERR", @@ -279,32 +281,33 @@ static const char * ecnt_columns [] = { "ME_LT_OBS", "IGN_CONV_OERR", "IGN_CORR_OERR" }; -static const char * rps_columns [] = { +static const char * const rps_columns [] = { "TOTAL", "N_PROB", "RPS_REL", "RPS_RES", "RPS_UNC", "RPS", "RPSS", "RPSS_SMPL", "RPS_COMP" }; -static const char * rhist_columns [] = { +static const char * const rhist_columns [] = { "TOTAL", "N_RANK", "RANK_" }; -static const char * phist_columns [] = { +static const char * const phist_columns [] = { "TOTAL", "BIN_SIZE", "N_BIN", "BIN_" }; -static const char * orank_columns [] = { - "TOTAL", "INDEX", "OBS_SID", - "OBS_LAT", "OBS_LON", "OBS_LVL", - "OBS_ELV", "OBS", "PIT", - "RANK", "N_ENS_VLD", "N_ENS", - "ENS_", "OBS_QC", "ENS_MEAN", - "CLIMO_MEAN", "SPREAD", "ENS_MEAN_OERR", - "SPREAD_OERR", "SPREAD_PLUS_OERR", "CLIMO_STDEV" +static const char * const orank_columns [] = { + "TOTAL", "INDEX", "OBS_SID", + "OBS_LAT", "OBS_LON", "OBS_LVL", + "OBS_ELV", "OBS", "PIT", + "RANK", "N_ENS_VLD", "N_ENS", + "ENS_", "OBS_QC", "ENS_MEAN", + "OBS_CLIMO_MEAN", "SPREAD", "ENS_MEAN_OERR", + "SPREAD_OERR", "SPREAD_PLUS_OERR", "OBS_CLIMO_STDEV", + "FCST_CLIMO_MEAN", "FCST_CLIMO_STDEV" }; -static const char * ssvar_columns [] = { +static const char * const ssvar_columns [] = { "TOTAL", "N_BIN", "BIN_i", "BIN_N", "VAR_MIN", "VAR_MAX", "VAR_MEAN", "FBAR", "OBAR", @@ -320,16 +323,16 @@ static const char * ssvar_columns [] = { "RMSE" }; -static const char * relp_columns [] = { +static const char * const relp_columns [] = { "TOTAL", "N_ENS", "RELP_" }; -static const char * ssidx_columns [] = { +static const char * const ssidx_columns [] = { "FCST_MODEL", "REF_MODEL", "N_INIT", "N_TERM", "N_VLD", "SS_INDEX" }; -static const char * genmpr_columns [] = { +static const char * const genmpr_columns 
[] = { "TOTAL", "INDEX", "STORM_ID", "PROB_LEAD", "PROB_VAL", "AGEN_INIT", "AGEN_FHR", @@ -339,7 +342,7 @@ static const char * genmpr_columns [] = { "DEV_CAT", "OPS_CAT" }; -static const char * job_summary_columns [] = { +static const char * const job_summary_columns [] = { "TOTAL", "MEAN", "MEAN_NCL", "MEAN_NCU", "MEAN_BCL", "MEAN_BCU", "STDEV", "STDEV_BCL", "STDEV_BCU", @@ -349,12 +352,12 @@ static const char * job_summary_columns [] = { "WMO_TYPE", "WMO_MEAN", "WMO_WEIGHTED_MEAN" }; -static const char * job_wdir_columns [] = { +static const char * const job_wdir_columns [] = { "TOTAL", "FBAR", "OBAR", "ME", "MAE" }; -static const char * job_ramp_columns [] = { +static const char * const job_ramp_columns [] = { "TYPE", "FCOLUMN", "OCOLUMN", "FTIME", "OTIME", @@ -363,7 +366,7 @@ static const char * job_ramp_columns [] = { "WINDOW_BEG", "WINDOW_END" }; -static const char * job_ramp_mpr_columns [] = { +static const char * const job_ramp_mpr_columns [] = { "TOTAL", "INDEX", "INIT", "LEAD", "VALID", "FPRV", "FCUR", "FDLT", "FRAMP", @@ -371,7 +374,7 @@ static const char * job_ramp_mpr_columns [] = { "CATEGORY" }; -static const char * seeps_mpr_columns [] = { +static const char * const seeps_mpr_columns [] = { "OBS_SID", "OBS_LAT", "OBS_LON", "FCST", "OBS", "OBS_QC", "FCST_CAT", "OBS_CAT", "P1", @@ -379,10 +382,10 @@ static const char * seeps_mpr_columns [] = { "SEEPS" }; -static const char * seeps_columns [] = { - "TOTAL", "S12", "S13", - "S21", "S23", "S31", - "S32", "PF1", "PF2", +static const char * const seeps_columns [] = { + "TOTAL", "ODFL", "ODFH", + "OLFD", "OLFH", "OHFD", + "OHFL", "PF1", "PF2", "PF3", "PV1", "PV2", "PV3", "MEAN_FCST", "MEAN_OBS", "SEEPS" @@ -455,7 +458,7 @@ inline int get_n_eclv_columns (int n) { return(4 + 2*n); } // inline int get_n_rhist_columns (int n) { return(2 + n); } // n = N_RANK inline int get_n_phist_columns (int n) { return(3 + n); } // n = N_BINS inline int get_n_relp_columns (int n) { return(2 + n); } // n = N_ENS -inline int 
get_n_orank_columns (int n) { return(20 + n); } // n = N_ENS +inline int get_n_orank_columns (int n) { return(22 + n); } // n = N_ENS //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/string_fxns.cc b/src/basic/vx_util/string_fxns.cc index e0e0d37f6b..df21a7d754 100644 --- a/src/basic/vx_util/string_fxns.cc +++ b/src/basic/vx_util/string_fxns.cc @@ -192,7 +192,7 @@ void strip_char(char *str, const char c) ptr = str + m_strlen(str) - 1; if(*ptr == c) { - *(ptr) = 0; + *ptr = 0; } return; diff --git a/src/basic/vx_util/thresh_array.cc b/src/basic/vx_util/thresh_array.cc index 4a0b421ae1..1be3bb723b 100644 --- a/src/basic/vx_util/thresh_array.cc +++ b/src/basic/vx_util/thresh_array.cc @@ -291,9 +291,9 @@ void ThreshArray::parse_thresh_str(const char *thresh_str) { //////////////////////////////////////////////////////////////////////// int ThreshArray::has(const SingleThresh &st) const { - int index, status; + int index; - status = has(st, index); + int status = has(st, index); return status; } @@ -301,13 +301,12 @@ int ThreshArray::has(const SingleThresh &st) const { //////////////////////////////////////////////////////////////////////// int ThreshArray::has(const SingleThresh &st, int & index) const { - int j; index = -1; if(Nelements == 0) return 0; - for(j=0; j t[i+1].get_value() || t[i].get_type() != t[i+1].get_type() || @@ -386,34 +382,29 @@ void ThreshArray::check_bin_thresh() const { //////////////////////////////////////////////////////////////////////// -int ThreshArray::check_bins(double v) const { - return check_bins(v, bad_data_double, bad_data_double); -} - -//////////////////////////////////////////////////////////////////////// - -int ThreshArray::check_bins(double v, double mn, double sd) const { +int ThreshArray::check_bins(double v, const ClimoPntInfo *cpi) const { int i, bin; // Check for bad data or no thresholds if(is_bad_data(v) || Nelements == 0) return bad_data_int; - // For < and <=, 
check thresholds left to right. - if(t[0].get_type() == thresh_lt || t[0].get_type() == thresh_le) { + // For < and <=, check thresholds left to right + if(t[0].get_type() == thresh_lt || + t[0].get_type() == thresh_le) { for(i=0, bin=-1; i and >=, check thresholds right to left. + // For > and >=, check thresholds right to left else { for(i=Nelements-1, bin=-1; i>=0; i--) { - if(t[i].check(v, mn, sd)) { + if(t[i].check(v, cpi)) { bin = i+1; break; } @@ -421,26 +412,19 @@ int ThreshArray::check_bins(double v, double mn, double sd) const { if(bin == -1) bin = 0; } - // The bin value returned is 1-based, not 0-based. + // The bin value returned is 1-based, not 0-based return bin; } //////////////////////////////////////////////////////////////////////// -bool ThreshArray::check_dbl(double v) const { - return check_dbl(v, bad_data_double, bad_data_double); -} - -//////////////////////////////////////////////////////////////////////// - -bool ThreshArray::check_dbl(double v, double mn, double sd) const { - int i; +bool ThreshArray::check_dbl(double v, const ClimoPntInfo *cpi) const { // // Check if the value satisifes all the thresholds in the array // - for(i=0; ithresh()[i]), &(oarr->thresh()[i])); + t[i].set_perc(fptr, optr, fcptr, ocptr, + &(farr->thresh()[i]), + &(oarr->thresh()[i])); } return; @@ -631,6 +617,13 @@ ThreshArray define_prob_bins(double beg, double end, double inc, int prec) { v += inc; } + // Add final 1.0 threshold, if needed + v = ta[(ta.n() - 1)].get_value(); + if(v < 1.0 && !is_eq(v, 1.0)) { + cs << cs_erase << ">=1.0"; + ta.add(cs.c_str()); + } + return ta; } @@ -755,11 +748,13 @@ ThreshArray process_perc_thresh_bins(const ThreshArray &ta_in) { for(i=0; i=100) { @@ -810,11 +805,11 @@ ThreshArray process_rps_cdp_thresh(const ThreshArray &ta) { SingleThresh st; ThreshArray ta_out; - // Check for evenly-spaced CDP thresholds + // Check for evenly-spaced OCDP thresholds for(int i=0; i & operator=(const TwoD_Array & _t) { - if ( this == &_t ) return 
( * this ); + if ( this == &_t ) return *this; assign(_t); - return ( * this ); + return *this; } @@ -78,14 +78,14 @@ class TwoD_Array { // get stuff // - int nx() const { return ( Nx ); } - int ny() const { return ( Ny ); } + int nx() const { return Nx; } + int ny() const { return Ny; } bool is_empty() const { return ( Nx*Ny == 0 ); } int count() const; - const T * data() const { return ( E ); } + const T * data() const { return E; } - T * buf() { return ( E ); } // careful with this + T * buf() { return E; } // careful with this T operator()(int, int) const; @@ -244,7 +244,7 @@ for (j=0,n=0; j::operator()(int _x, int _y) const { -return ( get(_x, _y) ); +return get(_x, _y); } @@ -292,7 +292,7 @@ if (E == nullptr) { exit ( 1 ); } -return ( E[two_to_one(_x, _y)] ); +return E[two_to_one(_x, _y)]; } @@ -320,15 +320,15 @@ bool TwoD_Array::f_is_on(int _x, int _y) const { -if ( s_is_on(_x, _y) ) return ( true ); +if ( s_is_on(_x, _y) ) return true; -if( (_x > 0) && s_is_on(_x - 1, _y) ) return ( true ); +if( (_x > 0) && s_is_on(_x - 1, _y) ) return true; -if( (_x > 0) && (_y > 0) && s_is_on(_x - 1, _y - 1)) return ( true ); +if( (_x > 0) && (_y > 0) && s_is_on(_x - 1, _y - 1)) return true; -if( (_y > 0 ) && s_is_on(_x, _y - 1) ) return ( true ); +if( (_y > 0 ) && s_is_on(_x, _y - 1) ) return true; -return ( false ); +return false; } @@ -355,12 +355,12 @@ int j; for (j=0; j=0; --j) { - if ( f_is_on(j, _y) ) return ( j ); + if ( f_is_on(j, _y) ) return j; } -return ( -1 ); +return -1; } diff --git a/src/basic/vx_util/util_constants.h b/src/basic/vx_util/util_constants.h index e2520efd18..c24ffdec88 100644 --- a/src/basic/vx_util/util_constants.h +++ b/src/basic/vx_util/util_constants.h @@ -102,7 +102,7 @@ static const int tmp_buf_size = 512; static const double grib_earth_radius_km = 6371.20; static const int default_nc_compression = 0; static const int default_precision = 5; -static const double default_grid_weight = 1.0; +static const double default_weight = 1.0; static 
const char default_tmp_dir[] = "/tmp"; //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/vx_util.h b/src/basic/vx_util/vx_util.h index ab8460b420..c396db1924 100644 --- a/src/basic/vx_util/vx_util.h +++ b/src/basic/vx_util/vx_util.h @@ -59,6 +59,7 @@ #include "empty_string.h" #include "polyline.h" #include "mask_poly.h" +#include "mask_sid.h" #include "read_fortran_binary.h" #include "stat_column_defs.h" diff --git a/src/basic/vx_util_math/Makefile.in b/src/basic/vx_util_math/Makefile.in index bd06680091..b742f23078 100644 --- a/src/basic/vx_util_math/Makefile.in +++ b/src/basic/vx_util_math/Makefile.in @@ -234,6 +234,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/Makefile.in b/src/libcode/Makefile.in index 5a7e7eb12d..f7629e1481 100644 --- a/src/libcode/Makefile.in +++ b/src/libcode/Makefile.in @@ -242,6 +242,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_afm/Makefile.in b/src/libcode/vx_afm/Makefile.in index 27f3023563..5498f8a896 100644 --- a/src/libcode/vx_afm/Makefile.in +++ b/src/libcode/vx_afm/Makefile.in @@ -243,6 +243,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_analysis_util/Makefile.in b/src/libcode/vx_analysis_util/Makefile.in index 10bbb3aea6..fe6a3bb1a4 100644 --- a/src/libcode/vx_analysis_util/Makefile.in +++ 
b/src/libcode/vx_analysis_util/Makefile.in @@ -250,6 +250,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_analysis_util/mode_atts.cc b/src/libcode/vx_analysis_util/mode_atts.cc index e51f63cef8..3538f47c84 100644 --- a/src/libcode/vx_analysis_util/mode_atts.cc +++ b/src/libcode/vx_analysis_util/mode_atts.cc @@ -1367,8 +1367,8 @@ if ( is_fcst_toggle_set ) { i = L.is_fcst(); - if ( ( is_fcst) && ( !i ) ) return 0; - if ( (!is_fcst) && ( i ) ) return 0; + if ( is_fcst && !i ) return 0; + if ( !is_fcst && i ) return 0; } @@ -1376,8 +1376,8 @@ if ( is_single_toggle_set ) { i = L.is_single(); - if ( ( is_single) && ( !i ) ) return 0; - if ( (!is_single) && ( i ) ) return 0; + if ( is_single && !i ) return 0; + if ( !is_single && i ) return 0; } @@ -1385,8 +1385,8 @@ if ( is_simple_toggle_set ) { i = L.is_simple(); - if ( ( is_simple) && ( !i ) ) return 0; - if ( (!is_simple) && ( i ) ) return 0; + if ( is_simple && !i ) return 0; + if ( !is_simple && i ) return 0; } @@ -1394,8 +1394,8 @@ if ( is_matched_toggle_set ) { i = L.is_matched(); - if ( ( is_matched) && ( !i ) ) return 0; - if ( (!is_matched) && ( i ) ) return 0; + if ( is_matched && !i ) return 0; + if ( !is_matched && i ) return 0; } diff --git a/src/libcode/vx_analysis_util/mode_job.cc b/src/libcode/vx_analysis_util/mode_job.cc index 368a968ea1..e97c8c3994 100644 --- a/src/libcode/vx_analysis_util/mode_job.cc +++ b/src/libcode/vx_analysis_util/mode_job.cc @@ -138,7 +138,7 @@ if ( accums ) { delete [] accums; accums = (NumArray *) nullptr; } n_lines_read = n_lines_kept = 0; // Write any remaning lines to the dump file -if ( dumpfile ) *(dumpfile) << dump_at; +if ( dumpfile ) *dumpfile << dump_at; dumpfile = (ostream *) nullptr; // don't delete @@ -346,7 +346,7 @@ n_dump++; // Write the buffer, 
if full if ( n_dump%dump_at.nrows() == 0 ) { - *(dumpfile) << dump_at; + *dumpfile << dump_at; dump_at.erase(); } diff --git a/src/libcode/vx_analysis_util/mode_job.h b/src/libcode/vx_analysis_util/mode_job.h index 57354a04b9..a84306c85b 100644 --- a/src/libcode/vx_analysis_util/mode_job.h +++ b/src/libcode/vx_analysis_util/mode_job.h @@ -100,7 +100,7 @@ class BasicModeAnalysisJob { inline void BasicModeAnalysisJob::set_precision (int p) { precision = p; return; } -inline int BasicModeAnalysisJob::get_precision () const { return(precision); } +inline int BasicModeAnalysisJob::get_precision () const { return precision; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_analysis_util/stat_job.cc b/src/libcode/vx_analysis_util/stat_job.cc index 573c0f43d6..ef7d6827e8 100644 --- a/src/libcode/vx_analysis_util/stat_job.cc +++ b/src/libcode/vx_analysis_util/stat_job.cc @@ -1174,13 +1174,13 @@ void STATAnalysisJob::parse_job_command(const char *jobstring) { else if(jc_array[i] == "-column_str_exc" ) { column_str_exc_map.clear(); } + else if(jc_array[i] == "-by" ) { + by_column.clear(); + } else if(jc_array[i] == "-set_hdr" ) { hdr_name.clear(); hdr_value.clear(); } - else if(jc_array[i] == "-by" ) { - by_column.clear(); - } else if(jc_array[i] == "-out_line_type" ) { out_line_type.clear(); } @@ -1216,7 +1216,7 @@ void STATAnalysisJob::parse_job_command(const char *jobstring) { << "unrecognized job type specified \"" << jc_array[i] << "\" in job command line: " << jobstring << "\n\n"; if(line) { delete [] line; line = (char *) nullptr; } - throw(1); + throw 1; } i++; } @@ -1450,15 +1450,15 @@ void STATAnalysisJob::parse_job_command(const char *jobstring) { } i+=2; } + else if(jc_array[i] == "-by") { + by_column.add_css(to_upper(jc_array[i+1])); + i+=1; + } else if(jc_array[i] == "-set_hdr") { hdr_name.add_css(to_upper(jc_array[i+1])); hdr_value.add_css(jc_array[i+2]); i+=2; } - else if(jc_array[i] == "-by") { - 
by_column.add_css(to_upper(jc_array[i+1])); - i+=1; - } else if(jc_array[i] == "-dump_row") { set_dump_row(jc_array[i+1].c_str()); i++; @@ -1638,7 +1638,7 @@ void STATAnalysisJob::parse_job_command(const char *jobstring) { << "\" in job command line: " << jobstring << "\n\n"; if(line) { delete [] line; line = (char *) nullptr; } - throw(1); + throw 1; } // end if } // end for @@ -1660,7 +1660,7 @@ void STATAnalysisJob::parse_job_command(const char *jobstring) { << (line_type.n() == 1 ? line_type[0] : "header") << " column named \"" << hdr_name[i] << "\"\n\n"; if(line) { delete [] line; line = (char *) nullptr; } - throw(1); + throw 1; } } // end for } @@ -1802,20 +1802,19 @@ void STATAnalysisJob::set_mask_sid(const char *c) { if(!c) return; - ConcatString mask_name; - mask_sid_str = c; // List the station ID mask mlog << Debug(1) << "Station ID Mask: " << mask_sid_str << "\n"; - parse_sid_mask(mask_sid_str, mask_sid, mask_name); + MaskSID ms = parse_sid_mask(mask_sid_str); + for(const auto &pair : ms.sid_map()) mask_sid.add(pair.first); // List the length of the station ID mask mlog << Debug(2) - << "Parsed Station ID Mask: " << mask_name - << " containing " << mask_sid.n() << " points\n"; + << "Parsed Station ID Mask (" << ms.name() + << ") containing " << mask_sid.n() << " stations\n"; return; } @@ -1854,7 +1853,8 @@ void STATAnalysisJob::set_boot_seed(const char *c) { void STATAnalysisJob::set_perc_thresh(const NumArray &f_na, const NumArray &o_na, - const NumArray &cmn_na) { + const NumArray &fcmn_na, + const NumArray &ocmn_na) { if(!out_fcst_thresh.need_perc() && !out_obs_thresh.need_perc()) return; @@ -1862,19 +1862,21 @@ void STATAnalysisJob::set_perc_thresh(const NumArray &f_na, // // Sort the input arrays // - NumArray fsort = f_na; - NumArray osort = o_na; - NumArray csort = cmn_na; - fsort.sort_array(); - osort.sort_array(); - csort.sort_array(); + NumArray f_sort = f_na; + NumArray o_sort = o_na; + NumArray fcmn_sort = fcmn_na; + NumArray ocmn_sort = 
ocmn_na; + f_sort.sort_array(); + o_sort.sort_array(); + fcmn_sort.sort_array(); + ocmn_sort.sort_array(); // // Compute percentiles // - out_fcst_thresh.set_perc(&fsort, &osort, &csort, + out_fcst_thresh.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &out_fcst_thresh, &out_obs_thresh); - out_obs_thresh.set_perc(&fsort, &osort, &csort, + out_obs_thresh.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &out_fcst_thresh, &out_obs_thresh); return; @@ -1897,7 +1899,7 @@ void STATAnalysisJob::open_dump_row_file() { << "can't open the output file \"" << dump_row << "\" for writing!\n\n"; - throw(1); + throw 1; } return; @@ -1912,7 +1914,7 @@ void STATAnalysisJob::close_dump_row_file() { // // Write any remaining lines // - *(dr_out) << dump_at; + *dr_out << dump_at; dr_out->close(); delete dr_out; @@ -1941,7 +1943,7 @@ void STATAnalysisJob::open_stat_file() { << "can't open the output STAT file \"" << stat_file << "\" for writing!\n\n"; - throw(1); + throw 1; } return; @@ -1991,6 +1993,7 @@ void STATAnalysisJob::setup_stat_file(int n_row, int n) { case STATLineType::prc: c = get_n_prc_columns(n); break; case STATLineType::eclv: c = get_n_eclv_columns(n); break; case STATLineType::mpr: c = n_mpr_columns; break; + case STATLineType::seeps: c = n_seeps_columns; break; case STATLineType::nbrctc: c = n_nbrctc_columns; break; case STATLineType::nbrcts: c = n_nbrcts_columns; break; case STATLineType::nbrcnt: c = n_nbrcnt_columns; break; @@ -2062,6 +2065,7 @@ void STATAnalysisJob::setup_stat_file(int n_row, int n) { case STATLineType::prc: write_prc_header_row (1, n, stat_at, 0, 0); break; case STATLineType::eclv: write_eclv_header_row (1, n, stat_at, 0, 0); break; case STATLineType::mpr: write_header_row (mpr_columns, n_mpr_columns, 1, stat_at, 0, 0); break; + case STATLineType::seeps: write_header_row (seeps_columns, n_seeps_columns, 1, stat_at, 0, 0); break; case STATLineType::nbrctc: write_header_row (nbrctc_columns, n_nbrctc_columns, 1, stat_at, 0, 0); break; case 
STATLineType::nbrcts: write_header_row (nbrcts_columns, n_nbrcts_columns, 1, stat_at, 0, 0); break; case STATLineType::nbrcnt: write_header_row (nbrcnt_columns, n_nbrcnt_columns, 1, stat_at, 0, 0); break; @@ -2133,7 +2137,7 @@ void STATAnalysisJob::close_stat_file() { // // Write any remaining lines // - *(stat_out) << stat_at; + *stat_out << stat_at; stat_out->close(); delete stat_out; @@ -2176,6 +2180,7 @@ void STATAnalysisJob::dump_stat_line(const STATLine &line, if(line_type.n() == 1) { switch(string_to_statlinetype(line_type[0].c_str())) { + case STATLineType::fho: write_header_row(fho_columns, n_fho_columns, 1, dump_at, 0, 0); break; @@ -2200,6 +2205,10 @@ void STATAnalysisJob::dump_stat_line(const STATLine &line, write_header_row(sal1l2_columns, n_sal1l2_columns, 1, dump_at, 0, 0); break; + case STATLineType::vcnt: + write_header_row(vcnt_columns, n_vcnt_columns, 1, dump_at, 0, 0); + break; + case STATLineType::vl1l2: write_header_row(vl1l2_columns, n_vl1l2_columns, 1, dump_at, 0, 0); break; @@ -2232,6 +2241,10 @@ void STATAnalysisJob::dump_stat_line(const STATLine &line, write_header_row(ecnt_columns, n_ecnt_columns, 1, dump_at, 0, 0); break; + case STATLineType::rps: + write_header_row(rps_columns, n_rps_columns, 1, dump_at, 0, 0); + break; + case STATLineType::isc: write_header_row(isc_columns, n_isc_columns, 1, dump_at, 0, 0); break; @@ -2248,6 +2261,14 @@ void STATAnalysisJob::dump_stat_line(const STATLine &line, write_header_row(seeps_mpr_columns, n_seeps_mpr_columns, 1, dump_at, 0, 0); break; + case STATLineType::dmap: + write_header_row(dmap_columns, n_dmap_columns, 1, dump_at, 0, 0); + break; + + case STATLineType::ssidx: + write_header_row(ssidx_columns, n_ssidx_columns, 1, dump_at, 0, 0); + break; + // Just write a STAT header line for indeterminant line types case STATLineType::mctc: case STATLineType::mcts: @@ -2267,7 +2288,7 @@ void STATAnalysisJob::dump_stat_line(const STATLine &line, default: mlog << Error << "\ndump_stat_line() -> " << 
"unexpected line type value " << line_type[0] << "\n\n"; - throw(1); + throw 1; } // end switch } // @@ -2308,7 +2329,7 @@ void STATAnalysisJob::dump_stat_line(const STATLine &line, // Write the buffer, if full // if(n_dump%dump_at.nrows() == 0) { - *(dr_out) << dump_at; + *dr_out << dump_at; dump_at.erase(); } diff --git a/src/libcode/vx_analysis_util/stat_job.h b/src/libcode/vx_analysis_util/stat_job.h index 208c6d4894..0a0c63b52d 100644 --- a/src/libcode/vx_analysis_util/stat_job.h +++ b/src/libcode/vx_analysis_util/stat_job.h @@ -144,6 +144,7 @@ class STATAnalysisJob { void set_boot_seed(const char *); void set_perc_thresh(const NumArray &, + const NumArray &, const NumArray &, const NumArray &); @@ -234,14 +235,17 @@ class STATAnalysisJob { std::map column_str_inc_map; std::map column_str_exc_map; - StringArray hdr_name; - StringArray hdr_value; - // // Store the case information for the -by option // StringArray by_column; + // + // Options for -set_hdr output + // + StringArray hdr_name; + StringArray hdr_value; + // // Variables used to the store the analysis job specification // @@ -361,7 +365,7 @@ class STATAnalysisJob { inline void STATAnalysisJob::set_job_type (const STATJobType t) { job_type = t; return; } inline void STATAnalysisJob::set_precision (int p) { precision = p; return; } -inline int STATAnalysisJob::get_precision () const { return(precision); } +inline int STATAnalysisJob::get_precision () const { return precision; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_analysis_util/stat_line.cc b/src/libcode/vx_analysis_util/stat_line.cc index c3006b5189..6f86649eab 100644 --- a/src/libcode/vx_analysis_util/stat_line.cc +++ b/src/libcode/vx_analysis_util/stat_line.cc @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,32 @@ using namespace std; +//////////////////////////////////////////////////////////////////////// + + + // + // MET #2924 Rename 
climatology column names + // + +static const map mpr_rename_map = { + { "CLIMO_MEAN", "OBS_CLIMO_MEAN" }, + { "CLIMO_STDEV", "OBS_CLIMO_STDEV" }, + { "CLIMO_CDF", "OBS_CLIMO_CDF" } +}; + +static const map orank_rename_map = { + { "CLIMO_MEAN", "OBS_CLIMO_MEAN" }, + { "CLIMO_STDEV", "OBS_CLIMO_STDEV" } +}; + +static const map< STATLineType, map > stat_lty_rename_map = { + { STATLineType::mpr, mpr_rename_map }, + { STATLineType::orank, orank_rename_map } +}; + +static StringArray print_stat_rename_message; + + //////////////////////////////////////////////////////////////////////// @@ -444,6 +471,29 @@ if ( is_bad_data(offset) ) { if ( !get_file()->header().has(col_str, offset) ) offset = bad_data_int; } + // + // If not found, check renamed columns for backward compatibility + // + +if ( is_bad_data(offset) ) { + + string s(col_str); + + if ( stat_lty_rename_map.count(Type) && + stat_lty_rename_map.at(Type).count(s) ) { + if ( !print_stat_rename_message.has(s) ) { + mlog << Debug(2) << "The \"" << s << "\" column in the " + << statlinetype_to_string(Type) + << " line type has been renamed as \"" + << (stat_lty_rename_map.at(Type)).at(s) + << "\". 
Please switch to using MET" + << met_version << " column names.\n"; + print_stat_rename_message.add(s); + } + return ( get_item((stat_lty_rename_map.at(Type)).at(s).c_str()) ); + } +} + // // Return bad data string for no match // diff --git a/src/libcode/vx_analysis_util/stat_line.h b/src/libcode/vx_analysis_util/stat_line.h index 9a30820fcc..6d099a8e59 100644 --- a/src/libcode/vx_analysis_util/stat_line.h +++ b/src/libcode/vx_analysis_util/stat_line.h @@ -118,7 +118,7 @@ class STATLine : public DataLine { //////////////////////////////////////////////////////////////////////// -inline STATLineType STATLine::type () const { return ( Type ); } +inline STATLineType STATLine::type () const { return Type; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_analysis_util/time_series.h b/src/libcode/vx_analysis_util/time_series.h index 48e523e6c8..fe3d7469ff 100644 --- a/src/libcode/vx_analysis_util/time_series.h +++ b/src/libcode/vx_analysis_util/time_series.h @@ -72,10 +72,10 @@ class TimeSeries { //////////////////////////////////////////////////////////////////////// -inline unixtime TimeSeries::time_start() const { return ( TimeStart ); } -inline int TimeSeries::time_delta() const { return ( TimeDelta ); } +inline unixtime TimeSeries::time_start() const { return TimeStart; } +inline int TimeSeries::time_delta() const { return TimeDelta; } -inline int TimeSeries::n_elements() const { return ( Nelements ); } +inline int TimeSeries::n_elements() const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_bool_calc/Makefile.in b/src/libcode/vx_bool_calc/Makefile.in index 07bead994d..531f87fa08 100644 --- a/src/libcode/vx_bool_calc/Makefile.in +++ b/src/libcode/vx_bool_calc/Makefile.in @@ -245,6 +245,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = 
@MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_bool_calc/token_stack.h b/src/libcode/vx_bool_calc/token_stack.h index 4f4cd898d4..0987216a73 100644 --- a/src/libcode/vx_bool_calc/token_stack.h +++ b/src/libcode/vx_bool_calc/token_stack.h @@ -74,7 +74,7 @@ class TokenStack { //////////////////////////////////////////////////////////////////////// -inline int TokenStack::depth() const { return ( Nelements ); } +inline int TokenStack::depth() const { return Nelements; } inline bool TokenStack::empty() const { return ( Nelements == 0 ); } diff --git a/src/libcode/vx_color/Makefile.in b/src/libcode/vx_color/Makefile.in index 08977daff3..217fc0590c 100644 --- a/src/libcode/vx_color/Makefile.in +++ b/src/libcode/vx_color/Makefile.in @@ -262,6 +262,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_color/color.h b/src/libcode/vx_color/color.h index cdf84f5113..8837f13c4c 100644 --- a/src/libcode/vx_color/color.h +++ b/src/libcode/vx_color/color.h @@ -77,9 +77,9 @@ class Color { //////////////////////////////////////////////////////////////////////// -inline unsigned char Color::red () const { return ( R ); } -inline unsigned char Color::green () const { return ( G ); } -inline unsigned char Color::blue () const { return ( B ); } +inline unsigned char Color::red () const { return R; } +inline unsigned char Color::green () const { return G; } +inline unsigned char Color::blue () const { return B; } inline bool Color::is_gray() const { return ( (R == G) && (G == B) ); } @@ -182,10 +182,10 @@ class CtableEntry { inline void CtableEntry::set_color(const Color & c) { C = c; return; } -inline const Color & CtableEntry::color() const { return ( C ); } +inline const Color & 
CtableEntry::color() const { return C; } -inline double CtableEntry::value_low() const { return ( ValueLo ); } -inline double CtableEntry::value_high() const { return ( ValueHi ); } +inline double CtableEntry::value_low() const { return ValueLo; } +inline double CtableEntry::value_high() const { return ValueHi; } //////////////////////////////////////////////////////////////////////// @@ -282,9 +282,9 @@ class ColorTable { //////////////////////////////////////////////////////////////////////// -inline int ColorTable::n_entries() const { return ( Nentries ); } +inline int ColorTable::n_entries() const { return Nentries; } -inline double ColorTable::gamma() const { return ( Gamma ); } +inline double ColorTable::gamma() const { return Gamma; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_color/color_list.h b/src/libcode/vx_color/color_list.h index 218c9344f0..9489e5cbb5 100644 --- a/src/libcode/vx_color/color_list.h +++ b/src/libcode/vx_color/color_list.h @@ -63,9 +63,9 @@ class ClistEntry { //////////////////////////////////////////////////////////////////////// -inline Dcolor ClistEntry::dc() const { return ( D ); } +inline Dcolor ClistEntry::dc() const { return D; } -inline const char * ClistEntry::name() const { return ( Name.c_str() ); } +inline const char * ClistEntry::name() const { return Name.c_str(); } //////////////////////////////////////////////////////////////////////// @@ -118,7 +118,7 @@ class ColorList { //////////////////////////////////////////////////////////////////////// -inline int ColorList::n_elements() const { return ( Nelements ); } +inline int ColorList::n_elements() const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_color/color_table.cc b/src/libcode/vx_color/color_table.cc index 7a7698c487..bb63ab05fb 100644 --- a/src/libcode/vx_color/color_table.cc +++ b/src/libcode/vx_color/color_table.cc @@ -1068,7 
+1068,7 @@ if(is_eq(old_v_lo, bad_data_value)) { new_v_lo = bad_data_value; } else { - v_lo = (old_v_lo - old_b)/(old_m); + v_lo = (old_v_lo - old_b)/old_m; new_v_lo = v_lo*new_m + new_b; } @@ -1080,7 +1080,7 @@ if(is_eq(old_v_hi, bad_data_value)) { new_v_hi = bad_data_value; } else { - v_hi = (old_v_hi - old_b)/(old_m); + v_hi = (old_v_hi - old_b)/old_m; new_v_hi = v_hi*new_m + new_b; } diff --git a/src/libcode/vx_data2d/Makefile.in b/src/libcode/vx_data2d/Makefile.in index 947fb3d73f..ead0ee6915 100644 --- a/src/libcode/vx_data2d/Makefile.in +++ b/src/libcode/vx_data2d/Makefile.in @@ -247,6 +247,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_data2d/data2d_utils.cc b/src/libcode/vx_data2d/data2d_utils.cc index 6284ca434b..6fc68a93cc 100644 --- a/src/libcode/vx_data2d/data2d_utils.cc +++ b/src/libcode/vx_data2d/data2d_utils.cc @@ -24,6 +24,54 @@ using namespace std; //////////////////////////////////////////////////////////////////////// +bool build_grid_by_grid_string(const char *grid_str, Grid &grid, + const char *caller_name, bool do_warning) { + bool status = false; + + if (nullptr != grid_str && m_strlen(grid_str) > 0) { + // Parse as a white-space separated string + + StringArray sa; + sa.parse_wsss(grid_str); + + // Search for a named grid + if (sa.n() == 1 && find_grid_by_name(sa[0].c_str(), grid)) { + status = true; + mlog << Debug(3) << "Use the grid named \"" << grid_str << "\".\n"; + } + // Parse grid definition + else if (sa.n() > 1 && parse_grid_def(sa, grid)) { + status = true; + mlog << Debug(3) << "Use the grid defined by string \"" + << grid_str << "\".\n"; + } + else if (do_warning) { + mlog << Warning << "\nbuild_grid_by_grid_string() by " << caller_name + << " unsupported " << conf_key_set_attr_grid + << " definition string (" 
<< grid_str + << ")!\n\n"; + } + } + + return status; +} + +//////////////////////////////////////////////////////////////////////// + +bool build_grid_by_grid_string(const ConcatString &grid_str, Grid &grid, + const char *caller_name, bool do_warning) { + bool status = false; + + if(grid_str.nonempty()) { + status = build_grid_by_grid_string(grid_str.c_str(), grid, + caller_name, do_warning); + } + + return status; +} + +//////////////////////////////////////////////////////////////////////// + bool derive_wdir(const DataPlane &u2d, const DataPlane &v2d, DataPlane &wdir2d) { int x, y; diff --git a/src/libcode/vx_data2d/data2d_utils.h b/src/libcode/vx_data2d/data2d_utils.h index 317eb92f72..24299475d3 100644 --- a/src/libcode/vx_data2d/data2d_utils.h +++ b/src/libcode/vx_data2d/data2d_utils.h @@ -20,6 +20,14 @@ //////////////////////////////////////////////////////////////////////// +extern bool build_grid_by_grid_string(const char *attr_grid, Grid &grid, + const char *caller_name=nullptr, + bool do_warning=true); + +extern bool build_grid_by_grid_string(const ConcatString &attr_grid, Grid &grid, + const char *caller_name=nullptr, + bool do_warning=true); + extern bool derive_wdir(const DataPlane &u2d, const DataPlane &v2d, DataPlane &wdir); diff --git a/src/libcode/vx_data2d/data_class.cc b/src/libcode/vx_data2d/data_class.cc index 23ff40f93f..f364e34e42 100644 --- a/src/libcode/vx_data2d/data_class.cc +++ b/src/libcode/vx_data2d/data_class.cc @@ -257,13 +257,21 @@ mlog << Debug(3) << "Resetting grid definition from \"" // Make sure the grid dimensions do not change // - if ( raw_nx() != grid.nx() || raw_ny() != grid.ny() ) { + if ( raw_nx() <= 0 && raw_ny() <= 0 ) { - mlog << Error << "\nMet2dDataFile::set_grid() -> " + mlog << Warning << "\nMet2dDataFile::set_grid() -> " << "When resetting the grid definition to \"" << grid.serialize() << "\", the grid dimensions " - << "cannot change (" << grid.nx() << ", " << grid.ny() + << "are changed (" << grid.nx() << ", " 
<< grid.ny() << ") != (" << raw_nx() << ", " << raw_ny() << ").\n\n"; + } + else if ( raw_nx() != grid.nx() || raw_ny() != grid.ny() ) { + + mlog << Error << "\nMet2dDataFile::set_grid() -> " + << "When resetting the grid definition to \"" + << grid.serialize() << "\", the grid dimensions " + << "cannot change to (" << grid.nx() << ", " << grid.ny() + << ") from (" << raw_nx() << ", " << raw_ny() << ").\n\n"; exit ( 1 ); diff --git a/src/libcode/vx_data2d/data_class.h b/src/libcode/vx_data2d/data_class.h index 2e2bf2c7e1..5fbf07e266 100644 --- a/src/libcode/vx_data2d/data_class.h +++ b/src/libcode/vx_data2d/data_class.h @@ -174,9 +174,9 @@ inline int Met2dDataFile::ny() const { return ( Dest_Grid ? (Dest_Grid->ny()) : inline int Met2dDataFile::raw_nx() const { return ( Raw_Grid ? (Raw_Grid->nx()) : 0 ); } inline int Met2dDataFile::raw_ny() const { return ( Raw_Grid ? (Raw_Grid->ny()) : 0 ); } -inline const char * Met2dDataFile::filename() const { return ( Filename.c_str() ); } +inline const char * Met2dDataFile::filename() const { return Filename.c_str(); } -inline int Met2dDataFile::shift_right() const { return ( ShiftRight ); } +inline int Met2dDataFile::shift_right() const { return ShiftRight; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d/level_info.h b/src/libcode/vx_data2d/level_info.h index 4a1dc28f1d..d6956ca4f4 100644 --- a/src/libcode/vx_data2d/level_info.h +++ b/src/libcode/vx_data2d/level_info.h @@ -98,15 +98,15 @@ class LevelInfo /////////////////////////////////////////////////////////////////////////////// -inline LevelType LevelInfo::type() const { return(Type); } -inline int LevelInfo::type_num() const { return(TypeNum); } -inline ConcatString LevelInfo::req_name() const { return(ReqName); } -inline ConcatString LevelInfo::name() const { return(Name); } -inline ConcatString LevelInfo::units() const { return(Units); } -inline double LevelInfo::upper() const { return(Upper); } -inline 
double LevelInfo::lower() const { return(Lower); } -inline double LevelInfo::increment()const { return(Increment);} -inline bool LevelInfo::is_offset()const { return(Is_offset);} +inline LevelType LevelInfo::type() const { return Type; } +inline int LevelInfo::type_num() const { return TypeNum; } +inline ConcatString LevelInfo::req_name() const { return ReqName; } +inline ConcatString LevelInfo::name() const { return Name; } +inline ConcatString LevelInfo::units() const { return Units; } +inline double LevelInfo::upper() const { return Upper; } +inline double LevelInfo::lower() const { return Lower; } +inline double LevelInfo::increment()const { return Increment;} +inline bool LevelInfo::is_offset()const { return Is_offset;} /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d/table_lookup.cc b/src/libcode/vx_data2d/table_lookup.cc index e0db7aab3b..8ab83eeae2 100644 --- a/src/libcode/vx_data2d/table_lookup.cc +++ b/src/libcode/vx_data2d/table_lookup.cc @@ -1083,9 +1083,9 @@ bool TableFlatFile::lookup_grib1(const char * parm_name, int table_number, int c for(vector::iterator it = matches.begin(); it < matches.end(); it++) - mlog << Debug(3) << " parm_name: " << (it)->parm_name - << ", table_number = " << (it)->table_number - << ", code = " << (it)->code << "\n"; + mlog << Debug(3) << " parm_name: " << it->parm_name + << ", table_number = " << it->table_number + << ", code = " << it->code << "\n"; mlog << Debug(3) << "Using the first match found: " << " parm_name: " << e.parm_name @@ -1146,11 +1146,11 @@ bool TableFlatFile::lookup_grib1(const char * parm_name, int table_number, int c for(vector::iterator it = matches.begin(); it < matches.end(); it++) { - mlog << Debug(3) << " parm_name: " << (it)->parm_name - << ", table_number = " << (it)->table_number - << ", code = " << (it)->code - << ", center = " << (it)->center - << ", subcenter = " << (it)->subcenter << "\n"; + mlog << Debug(3) << " parm_name: " 
<< it->parm_name + << ", table_number = " << it->table_number + << ", code = " << it->code + << ", center = " << it->center + << ", subcenter = " << it->subcenter << "\n"; } mlog << Debug(3) << "Using the first match found: " @@ -1282,10 +1282,10 @@ bool TableFlatFile::lookup_grib2(const char * parm_name, int a, int b, int c, for(vector::iterator it = matches.begin(); it < matches.end(); it++) - mlog << Debug(3) << " parm_name: " << (it)->parm_name - << ", index_a = " << (it)->index_a - << ", index_b = " << (it)->index_b - << ", index_c = " << (it)->index_c << "\n"; + mlog << Debug(3) << " parm_name: " << it->parm_name + << ", index_a = " << it->index_a + << ", index_b = " << it->index_b + << ", index_c = " << it->index_c << "\n"; mlog << Debug(3) << "Using the first match found: " << " parm_name: " << e.parm_name @@ -1349,13 +1349,13 @@ bool TableFlatFile::lookup_grib2(const char * parm_name, for(vector::iterator it = matches.begin(); it < matches.end(); it++) - mlog << Debug(3) << " parm_name: " << (it)->parm_name - << ", index_a = " << (it)->index_a - << ", grib2_mtab = " << (it)->mtab_set - << ", grib2_cntr = " << (it)->cntr - << ", grib2_ltab = " << (it)->ltab - << ", index_b = " << (it)->index_b - << ", index_c = " << (it)->index_c + mlog << Debug(3) << " parm_name: " << it->parm_name + << ", index_a = " << it->index_a + << ", grib2_mtab = " << it->mtab_set + << ", grib2_cntr = " << it->cntr + << ", grib2_ltab = " << it->ltab + << ", index_b = " << it->index_b + << ", index_c = " << it->index_c << "\n"; mlog << Debug(3) << "Using the first match found: " diff --git a/src/libcode/vx_data2d/table_lookup.h b/src/libcode/vx_data2d/table_lookup.h index a1a1d56c0e..39dd488e23 100644 --- a/src/libcode/vx_data2d/table_lookup.h +++ b/src/libcode/vx_data2d/table_lookup.h @@ -209,8 +209,8 @@ class TableFlatFile { //////////////////////////////////////////////////////////////////////// -inline int TableFlatFile::n_grib1_elements() const { return ( N_grib1_elements ); } 
-inline int TableFlatFile::n_grib2_elements() const { return ( N_grib2_elements ); } +inline int TableFlatFile::n_grib1_elements() const { return N_grib1_elements; } +inline int TableFlatFile::n_grib2_elements() const { return N_grib2_elements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d/var_info.cc b/src/libcode/vx_data2d/var_info.cc index a1775a7275..ac12513e76 100644 --- a/src/libcode/vx_data2d/var_info.cc +++ b/src/libcode/vx_data2d/var_info.cc @@ -26,6 +26,7 @@ #include "vx_cal.h" #include "vx_math.h" #include "vx_log.h" +#include "data2d_utils.h" using namespace std; @@ -115,6 +116,7 @@ void VarInfo::assign(const VarInfo &v) { nBins = v.nBins; Range = v.Range; + DefaultRegrid = v.DefaultRegrid; Regrid = v.Regrid; SetAttrName = v.SetAttrName; @@ -175,6 +177,7 @@ void VarInfo::clear() { nBins = 0; Range.clear(); + DefaultRegrid.clear(); Regrid.clear(); SetAttrName.clear(); @@ -214,26 +217,29 @@ void VarInfo::dump(ostream &out) const { // Dump out the contents out << "VarInfo::dump():\n" - << " MagicStr = " << MagicStr.contents() << "\n" - << " ReqName = " << ReqName.contents() << "\n" - << " Name = " << Name.contents() << "\n" - << " LongName = " << LongName.contents() << "\n" - << " Units = " << Units.contents() << "\n" - << " PFlag = " << PFlag << "\n" - << " PName = " << PName.contents() << "\n" - << " PUnits = " << PUnits.contents() << "\n" - << " PAsScalar = " << PAsScalar << "\n" - << " UVIndex = " << UVIndex << "\n" - << " Init = " << init_str << " (" << Init << ")\n" - << " Valid = " << valid_str << " (" << Valid << ")\n" - << " Ensemble = " << Ensemble.contents() << "\n" - << " Lead = " << lead_str << " (" << Lead << ")\n" - << " ConvertFx = " << (ConvertFx.is_set() ? 
"IsSet" : "(nul)") << "\n" - << " CensorThresh = " << CensorThresh.get_str() << "\n" - << " CensorVal = " << CensorVal.serialize() << "\n" - << " nBins = " << nBins << "\n" - << " Range = " << Range.serialize() << "\n" - << " Regrid = " << interpmthd_to_string(Regrid.method) << "\n"; + << " MagicStr = " << MagicStr.contents() << "\n" + << " ReqName = " << ReqName.contents() << "\n" + << " Name = " << Name.contents() << "\n" + << " LongName = " << LongName.contents() << "\n" + << " Units = " << Units.contents() << "\n" + << " PFlag = " << PFlag << "\n" + << " PName = " << PName.contents() << "\n" + << " PUnits = " << PUnits.contents() << "\n" + << " PAsScalar = " << PAsScalar << "\n" + << " UVIndex = " << UVIndex << "\n" + << " Init = " << init_str << " (" << Init << ")\n" + << " Valid = " << valid_str << " (" << Valid << ")\n" + << " Ensemble = " << Ensemble.contents() << "\n" + << " Lead = " << lead_str << " (" << Lead << ")\n" + << " ConvertFx = " << (ConvertFx.is_set() ? "IsSet" : "(nul)") << "\n" + << " CensorThresh = " << CensorThresh.get_str() << "\n" + << " CensorVal = " << CensorVal.serialize() << "\n" + << " nBins = " << nBins << "\n" + << " Range = " << Range.serialize() << "\n" + << " DefaultRegrid = " << interpmthd_to_string(DefaultRegrid.method) + << "(" << DefaultRegrid.width << ")\n" + << " Regrid = " << interpmthd_to_string(Regrid.method) + << "(" << Regrid.width << ")\n"; Level.dump(out); @@ -424,6 +430,13 @@ void VarInfo::set_range(const NumArray &a) { /////////////////////////////////////////////////////////////////////////////// +void VarInfo::set_default_regrid(const RegridInfo &ri) { + DefaultRegrid = ri; + return; +} + +/////////////////////////////////////////////////////////////////////////////// + void VarInfo::set_regrid(const RegridInfo &ri) { Regrid = ri; return; @@ -527,7 +540,7 @@ void VarInfo::set_dict(Dictionary &dict) { if(dict.last_lookup_status()) set_range(na); // Parse regrid, if present - Regrid = parse_conf_regrid(&dict, 
false); + Regrid = parse_conf_regrid(&dict, &DefaultRegrid, false); // Parse set_attr strings SetAttrName = @@ -541,25 +554,7 @@ void VarInfo::set_dict(Dictionary &dict) { // Parse set_attr grid s = parse_set_attr_string(dict, conf_key_set_attr_grid); - if(s.nonempty()) { - - // Parse as a white-space separated string - StringArray sa; - sa.parse_wsss(s); - - // Search for a named grid - if(sa.n() == 1 && find_grid_by_name(sa[0].c_str(), SetAttrGrid)) { - } - // Parse grid definition - else if(sa.n() > 1 && parse_grid_def(sa, SetAttrGrid)) { - } - else { - mlog << Warning << "\nVarInfo::set_dict() -> " - << "unsupported " << conf_key_set_attr_grid - << " definition string (" << s - << ")!\n\n"; - } - } + build_grid_by_grid_string(s, SetAttrGrid, "VarInfo::set_dict(Dictionary &dict) ->"); // Parse set_attr times s = parse_set_attr_string(dict, conf_key_set_attr_init); diff --git a/src/libcode/vx_data2d/var_info.h b/src/libcode/vx_data2d/var_info.h index ccb58a93f1..eba7551b67 100644 --- a/src/libcode/vx_data2d/var_info.h +++ b/src/libcode/vx_data2d/var_info.h @@ -57,7 +57,8 @@ class VarInfo int nBins; // Number of pdf bins NumArray Range; // Range of pdf bins - RegridInfo Regrid; // Regridding logic + RegridInfo DefaultRegrid; // Default regridding logic + RegridInfo Regrid; // Regridding logic // Options to override metadata ConcatString SetAttrName; @@ -98,7 +99,7 @@ class VarInfo void clear(); void clone_base() const; - + virtual void dump(std::ostream &) const; // @@ -189,6 +190,7 @@ class VarInfo void set_n_bins(const int &); void set_range(const NumArray &); + void set_default_regrid(const RegridInfo &); void set_regrid(const RegridInfo &); void set_level_info_grib(Dictionary & dict); @@ -212,48 +214,48 @@ class VarInfo inline void VarInfo::add_grib_code(Dictionary &d) { return; } -inline ConcatString VarInfo::magic_str() const { return(MagicStr); } -inline ConcatString VarInfo::req_name() const { return(ReqName); } -inline ConcatString VarInfo::name() const { 
return(Name); } -inline ConcatString VarInfo::units() const { return(Units); } -inline LevelInfo VarInfo::level() const { return(Level); } -inline ConcatString VarInfo::req_level_name() const { return(Level.req_name()); } -inline ConcatString VarInfo::level_name() const { return(Level.name()); } -inline ConcatString VarInfo::long_name() const { return(LongName); } -inline ConcatString VarInfo::ens() const { return(Ensemble); } +inline ConcatString VarInfo::magic_str() const { return MagicStr; } +inline ConcatString VarInfo::req_name() const { return ReqName; } +inline ConcatString VarInfo::name() const { return Name; } +inline ConcatString VarInfo::units() const { return Units; } +inline LevelInfo VarInfo::level() const { return Level; } +inline ConcatString VarInfo::req_level_name() const { return Level.req_name(); } +inline ConcatString VarInfo::level_name() const { return Level.name(); } +inline ConcatString VarInfo::long_name() const { return LongName; } +inline ConcatString VarInfo::ens() const { return Ensemble; } -inline bool VarInfo::p_flag() const { return(PFlag); } -inline ConcatString VarInfo::p_name() const { return(PName); } -inline ConcatString VarInfo::p_units() const { return(PUnits); } -inline SingleThresh VarInfo::p_thresh_lo() const { return(PThreshLo); } -inline SingleThresh VarInfo::p_thresh_hi() const { return(PThreshHi); } -inline bool VarInfo::p_as_scalar() const { return(PAsScalar); } +inline bool VarInfo::p_flag() const { return PFlag; } +inline ConcatString VarInfo::p_name() const { return PName; } +inline ConcatString VarInfo::p_units() const { return PUnits; } +inline SingleThresh VarInfo::p_thresh_lo() const { return PThreshLo; } +inline SingleThresh VarInfo::p_thresh_hi() const { return PThreshHi; } +inline bool VarInfo::p_as_scalar() const { return PAsScalar; } -inline int VarInfo::uv_index() const { return(UVIndex); } +inline int VarInfo::uv_index() const { return UVIndex; } -inline unixtime VarInfo::init() const { return(Init); } 
-inline unixtime VarInfo::valid() const { return(Valid); } -inline int VarInfo::lead() const { return(Lead); } +inline unixtime VarInfo::init() const { return Init; } +inline unixtime VarInfo::valid() const { return Valid; } +inline int VarInfo::lead() const { return Lead; } -inline ThreshArray VarInfo::censor_thresh() const { return(CensorThresh); } -inline NumArray VarInfo::censor_val() const { return(CensorVal); } +inline ThreshArray VarInfo::censor_thresh() const { return CensorThresh; } +inline NumArray VarInfo::censor_val() const { return CensorVal; } -inline int VarInfo::n_bins() const { return(nBins); } -inline NumArray VarInfo::range() const { return(Range); } +inline int VarInfo::n_bins() const { return nBins; } +inline NumArray VarInfo::range() const { return Range; } -inline RegridInfo VarInfo::regrid() const { return(Regrid); } +inline RegridInfo VarInfo::regrid() const { return Regrid; } inline ConcatString VarInfo::name_attr() const { return(SetAttrName.empty() ? name() : SetAttrName); } inline ConcatString VarInfo::units_attr() const { return(SetAttrUnits.empty() ? units() : SetAttrUnits); } inline ConcatString VarInfo::level_attr() const { return(SetAttrLevel.empty() ? level_name() : SetAttrLevel); } inline ConcatString VarInfo::long_name_attr() const { return(SetAttrLongName.empty() ? 
long_name() : SetAttrLongName); } -inline Grid VarInfo::grid_attr() const { return(SetAttrGrid); } +inline Grid VarInfo::grid_attr() const { return SetAttrGrid; } -inline unixtime VarInfo::init_attr() const { return(SetAttrInit); } -inline unixtime VarInfo::valid_attr() const { return(SetAttrValid); } -inline int VarInfo::lead_attr() const { return(SetAttrLead); } -inline int VarInfo::accum_attr() const { return(SetAttrAccum); } +inline unixtime VarInfo::init_attr() const { return SetAttrInit; } +inline unixtime VarInfo::valid_attr() const { return SetAttrValid; } +inline int VarInfo::lead_attr() const { return SetAttrLead; } +inline int VarInfo::accum_attr() const { return SetAttrAccum; } //////////////////////////////////////////////////////////////////////// @@ -278,7 +280,7 @@ class EnsVarInfo { private: std::vector inputs; // Vector of InputInfo - VarInfo * ctrl_info; // Field info for control member + VarInfo * ctrl_info; // Field info for control member public: EnsVarInfo(); @@ -294,7 +296,7 @@ class EnsVarInfo { void set_ctrl(VarInfo *); VarInfo * get_ctrl(int); - // Get VarInfo from first InputInfo if requested VarInfo is nullptr + // Get VarInfo from first InputInfo if requested VarInfo is nullptr VarInfo * get_var_info(int index=0); ConcatString get_file(int index=0); int get_file_index(int index=0); diff --git a/src/libcode/vx_data2d_factory/Makefile.in b/src/libcode/vx_data2d_factory/Makefile.in index e9254cb32b..ee7e40e244 100644 --- a/src/libcode/vx_data2d_factory/Makefile.in +++ b/src/libcode/vx_data2d_factory/Makefile.in @@ -248,6 +248,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_data2d_factory/parse_file_list.cc b/src/libcode/vx_data2d_factory/parse_file_list.cc index 2e2be2a632..dd31e1a213 100644 --- 
a/src/libcode/vx_data2d_factory/parse_file_list.cc +++ b/src/libcode/vx_data2d_factory/parse_file_list.cc @@ -213,11 +213,18 @@ GrdFileType ftype = FileType_None; for ( int i=0; itri) { @@ -725,6 +726,11 @@ void read_pds(const GribRecord &r, int &bms_flag, accum = 0; break; + case 123: // Average of N uninitialized analyses, starting at the reference time, at intervals of P2. + valid_ut = init_ut; + accum = 0; + break; + case 136: // Climatological Standard Deviation valid_ut = init_ut; accum = 0; diff --git a/src/libcode/vx_data2d_grib/grib_classes.cc b/src/libcode/vx_data2d_grib/grib_classes.cc index 5e3a26ab42..8a0c626eb1 100644 --- a/src/libcode/vx_data2d_grib/grib_classes.cc +++ b/src/libcode/vx_data2d_grib/grib_classes.cc @@ -1805,7 +1805,7 @@ else if ((h.type == 3) || (h.type == 13)) file << " lat_sp: " << char3_to_int(h.grid_type.lambert_conf.lat_sp) << "\n"; file << " lon_sp: " << char3_to_int(h.grid_type.lambert_conf.lon_sp) << "\n\n"; } -else if ((h.type == 5)) +else if (h.type == 5) { file << " lat1: " << char3_to_int(h.grid_type.stereographic.lat1) << "\n"; @@ -1822,7 +1822,7 @@ else if ((h.type == 5)) file << " scan_flag: " << (int) h.grid_type.stereographic.scan_flag << "\n\n"; } -else if ((h.type == 10)) +else if (h.type == 10) { file << " lat1: " << char3_to_int(h.grid_type.rot_latlon_grid.lat1) << "\n"; diff --git a/src/libcode/vx_data2d_grib/grib_strings.cc b/src/libcode/vx_data2d_grib/grib_strings.cc index d8239f7e3d..b8707b9040 100644 --- a/src/libcode/vx_data2d_grib/grib_strings.cc +++ b/src/libcode/vx_data2d_grib/grib_strings.cc @@ -111,11 +111,11 @@ ConcatString get_grib_level_list_str(int k, int grib_level) if(match > 0) { switch(k) { - case(0): // GRIB Level Name + case 0: // GRIB Level Name str = grib_level_list[match].name; break; - case(1): // GRIB Level Abbreviation + case 1: // GRIB Level Abbreviation str = grib_level_list[match].abbr; break; diff --git a/src/libcode/vx_data2d_grib/grib_utils.cc 
b/src/libcode/vx_data2d_grib/grib_utils.cc index e353471d3d..e89c51d806 100644 --- a/src/libcode/vx_data2d_grib/grib_utils.cc +++ b/src/libcode/vx_data2d_grib/grib_utils.cc @@ -676,7 +676,7 @@ c[0] &= 127; else if ( n == 3 ) answer = (double) 0.001*parity*char3_to_int(c); else answer = (double) bad_data_float; -return ( answer ); +return answer; } diff --git a/src/libcode/vx_data2d_grib/var_info_grib.h b/src/libcode/vx_data2d_grib/var_info_grib.h index 4d37432e9f..d4a5aa2a10 100644 --- a/src/libcode/vx_data2d_grib/var_info_grib.h +++ b/src/libcode/vx_data2d_grib/var_info_grib.h @@ -116,15 +116,15 @@ class VarInfoGrib : public VarInfo /////////////////////////////////////////////////////////////////////////////// -inline GrdFileType VarInfoGrib::file_type() const { return(FileType_Gb1); } -inline int VarInfoGrib::ptv() const { return(PTV); } -inline int VarInfoGrib::code() const { return(Code); } -inline int VarInfoGrib::lvl_type() const { return(LvlType); } -inline int VarInfoGrib::p_code() const { return(PCode); } -inline int VarInfoGrib::center() const { return(Center); } -inline int VarInfoGrib::subcenter() const { return(Subcenter); } -inline int VarInfoGrib::field_rec() const { return(FieldRec); } -inline int VarInfoGrib::tri() const { return(TRI); } +inline GrdFileType VarInfoGrib::file_type() const { return FileType_Gb1; } +inline int VarInfoGrib::ptv() const { return PTV; } +inline int VarInfoGrib::code() const { return Code; } +inline int VarInfoGrib::lvl_type() const { return LvlType; } +inline int VarInfoGrib::p_code() const { return PCode; } +inline int VarInfoGrib::center() const { return Center; } +inline int VarInfoGrib::subcenter() const { return Subcenter; } +inline int VarInfoGrib::field_rec() const { return FieldRec; } +inline int VarInfoGrib::tri() const { return TRI; } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_grib2/Makefile.in b/src/libcode/vx_data2d_grib2/Makefile.in 
index e26d1e102b..ed24c03da3 100644 --- a/src/libcode/vx_data2d_grib2/Makefile.in +++ b/src/libcode/vx_data2d_grib2/Makefile.in @@ -238,6 +238,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_data2d_grib2/data2d_grib2.h b/src/libcode/vx_data2d_grib2/data2d_grib2.h index 46c50ee2f5..3811f7cfa8 100644 --- a/src/libcode/vx_data2d_grib2/data2d_grib2.h +++ b/src/libcode/vx_data2d_grib2/data2d_grib2.h @@ -164,7 +164,7 @@ class MetGrib2DataFile : public Met2dDataFile { //////////////////////////////////////////////////////////////////////// -inline GrdFileType MetGrib2DataFile::file_type () const { return ( FileType_Gb2 ); } +inline GrdFileType MetGrib2DataFile::file_type () const { return FileType_Gb2; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_grib2/var_info_grib2.h b/src/libcode/vx_data2d_grib2/var_info_grib2.h index 7848aefb7f..2ce946d0db 100644 --- a/src/libcode/vx_data2d_grib2/var_info_grib2.h +++ b/src/libcode/vx_data2d_grib2/var_info_grib2.h @@ -147,31 +147,31 @@ class VarInfoGrib2 : public VarInfo /////////////////////////////////////////////////////////////////////////////// -inline GrdFileType VarInfoGrib2::file_type() const { return(FileType_Gb2); } -inline int VarInfoGrib2::record() const { return(Record); } -inline int VarInfoGrib2::discipline() const { return(Discipline); } -inline int VarInfoGrib2::m_table() const { return(MTable); } -inline int VarInfoGrib2::l_table() const { return(LTable); } -inline int VarInfoGrib2::parm_cat() const { return(ParmCat); } -inline int VarInfoGrib2::parm() const { return(Parm); } -inline int VarInfoGrib2::pdt() const { return(PDTmpl); } -inline int VarInfoGrib2::process() const { return(Process); } -inline int VarInfoGrib2::ens_type() const { 
return(EnsType); } -inline int VarInfoGrib2::der_type() const { return(DerType); } -inline int VarInfoGrib2::stat_type() const { return(StatType); } -inline int VarInfoGrib2::perc_val() const { return(PercVal); } - -inline int VarInfoGrib2::aerosol_type() const { return(AerosolType); } -inline int VarInfoGrib2::aerosol_interval_type() const { return(AerosolIntervalType); } -inline double VarInfoGrib2::aerosol_size_lower() const { return(AerosolSizeLower); } -inline double VarInfoGrib2::aerosol_size_upper() const { return(AerosolSizeUpper); } - -inline int VarInfoGrib2::n_ipdtmpl() const { - return(IPDTmplIndex.n()); } +inline GrdFileType VarInfoGrib2::file_type() const { return FileType_Gb2; } +inline int VarInfoGrib2::record() const { return Record; } +inline int VarInfoGrib2::discipline() const { return Discipline; } +inline int VarInfoGrib2::m_table() const { return MTable; } +inline int VarInfoGrib2::l_table() const { return LTable; } +inline int VarInfoGrib2::parm_cat() const { return ParmCat; } +inline int VarInfoGrib2::parm() const { return Parm; } +inline int VarInfoGrib2::pdt() const { return PDTmpl; } +inline int VarInfoGrib2::process() const { return Process; } +inline int VarInfoGrib2::ens_type() const { return EnsType; } +inline int VarInfoGrib2::der_type() const { return DerType; } +inline int VarInfoGrib2::stat_type() const { return StatType; } +inline int VarInfoGrib2::perc_val() const { return PercVal; } + +inline int VarInfoGrib2::aerosol_type() const { return AerosolType; } +inline int VarInfoGrib2::aerosol_interval_type() const { return AerosolIntervalType; } +inline double VarInfoGrib2::aerosol_size_lower() const { return AerosolSizeLower; } +inline double VarInfoGrib2::aerosol_size_upper() const { return AerosolSizeUpper; } + +inline int VarInfoGrib2::n_ipdtmpl() const { + return IPDTmplIndex.n(); } inline int VarInfoGrib2::ipdtmpl_index(int i) const { - return(IPDTmplIndex[i]); } + return IPDTmplIndex[i]; } inline int 
VarInfoGrib2::ipdtmpl_val(int i) const { - return(IPDTmplVal[i]); } + return IPDTmplVal[i]; } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nc_cf/Makefile.in b/src/libcode/vx_data2d_nc_cf/Makefile.in index cd972c6936..fb06b420b5 100644 --- a/src/libcode/vx_data2d_nc_cf/Makefile.in +++ b/src/libcode/vx_data2d_nc_cf/Makefile.in @@ -240,6 +240,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_data2d_nc_cf/data2d_nc_cf.cc b/src/libcode/vx_data2d_nc_cf/data2d_nc_cf.cc index 1a11af2b0d..cc77b7f17a 100644 --- a/src/libcode/vx_data2d_nc_cf/data2d_nc_cf.cc +++ b/src/libcode/vx_data2d_nc_cf/data2d_nc_cf.cc @@ -159,11 +159,14 @@ bool MetNcCFDataFile::data_plane(VarInfo &vinfo, DataPlane &plane) { // Not sure why we do this - NcVarInfo *data_var = (NcVarInfo *)nullptr; - VarInfoNcCF *vinfo_nc = (VarInfoNcCF *)&vinfo; + auto data_var = (NcVarInfo *)nullptr; + auto vinfo_nc = (VarInfoNcCF *)&vinfo; static const string method_name = "MetNcCFDataFile::data_plane(VarInfo &, DataPlane &) -> "; + Grid grid_attr = vinfo.grid_attr(); + _file->update_grid(grid_attr); + // Initialize the data plane plane.clear(); @@ -339,6 +342,9 @@ int MetNcCFDataFile::data_plane_array(VarInfo &vinfo, static const string method_name = "MetNcCFDataFile::data_plane_array(VarInfo &, DataPlaneArray &) -> "; + Grid grid_attr = vinfo.grid_attr(); + _file->update_grid(grid_attr); + // Initialize plane_array.clear(); @@ -673,4 +679,12 @@ long MetNcCFDataFile::convert_value_to_offset(double z_value, string z_dim_name) return z_offset; } +//////////////////////////////////////////////////////////////////////// + +Grid MetNcCFDataFile::build_grid_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, + const long lat_counts, const long 
lon_counts) { + return (nullptr != _file) ? _file->build_grid_from_lat_lon_vars(lat_var, lon_var, lat_counts, lon_counts) : grid(); +} + + //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nc_cf/data2d_nc_cf.h b/src/libcode/vx_data2d_nc_cf/data2d_nc_cf.h index a80b176c3a..05e68fc54f 100644 --- a/src/libcode/vx_data2d_nc_cf/data2d_nc_cf.h +++ b/src/libcode/vx_data2d_nc_cf/data2d_nc_cf.h @@ -102,6 +102,9 @@ class MetNcCFDataFile : public Met2dDataFile { void dump(std::ostream &, int = 0) const; + Grid build_grid_from_lat_lon_vars(netCDF::NcVar *lat_var, netCDF::NcVar *lon_var, + const long lat_counts, const long lon_counts); + }; diff --git a/src/libcode/vx_data2d_nc_cf/nc_cf_file.cc b/src/libcode/vx_data2d_nc_cf/nc_cf_file.cc index eebf2ae1b6..be015948a0 100644 --- a/src/libcode/vx_data2d_nc_cf/nc_cf_file.cc +++ b/src/libcode/vx_data2d_nc_cf/nc_cf_file.cc @@ -57,8 +57,6 @@ static ConcatString y_dim_var_name; static double get_nc_var_att_double(const NcVar *nc_var, const char *att_name, bool is_required=true); -#define USE_BUFFER 1 - //////////////////////////////////////////////////////////////////////// @@ -136,6 +134,9 @@ void NcCfFile::close() _dims = (NcDim **)nullptr; } + grid_ready = false; + has_attr_grid = false; + _numDims = 0; _dimNames.clear(); @@ -213,8 +214,8 @@ bool NcCfFile::open(const char * filepath) // Pull out the variables int max_dim_count = 0; - NcVar *z_var = (NcVar *)nullptr; - NcVar *valid_time_var = (NcVar *)nullptr; + auto z_var = (NcVar *)nullptr; + auto valid_time_var = (NcVar *)nullptr; ConcatString att_value; StringArray varNames; @@ -311,7 +312,7 @@ bool NcCfFile::open(const char * filepath) // Parse the units for the time variable. ut = sec_per_unit = 0; if (get_var_units(valid_time_var, units)) { - if (units.length() == 0) { + if (units.empty()) { mlog << Warning << "\n" << method_name << "the \"time\" variable must contain a \"units\" attribute. 
" << "Using valid time of 0\n\n"; @@ -324,7 +325,7 @@ bool NcCfFile::open(const char * filepath) } NcVar bounds_time_var; - NcVar *nc_time_var = (NcVar *)nullptr; + auto nc_time_var = (NcVar *)nullptr; bool use_bounds_var = false; ConcatString bounds_var_name; nc_time_var = valid_time_var; @@ -341,12 +342,12 @@ bool NcCfFile::open(const char * filepath) if (bounds_att) delete bounds_att; // Determine the number of times present. - int n_times = (int) get_data_size(valid_time_var); + int n_times = get_data_size(valid_time_var); int tim_buf_size = n_times; if (use_bounds_var) tim_buf_size *= 2; - double *time_values = new double[tim_buf_size]; + vector time_values(tim_buf_size); - if( get_nc_data(nc_time_var, time_values) ) { + if( get_nc_data(nc_time_var, time_values.data()) ) { bool no_leap_year = get_att_no_leap_year(valid_time_var); if( time_dim_count > 1 ) { double latest_time = bad_data_double; @@ -380,7 +381,6 @@ bool NcCfFile::open(const char * filepath) } } else ValidTime.add(0); //Initialize - delete [] time_values; } NcVar init_time_var = get_var(_ncFile, "forecast_reference_time"); @@ -403,7 +403,7 @@ bool NcCfFile::open(const char * filepath) // Parse the units for the time variable. if (get_var_units(&init_time_var, units)) { - if (units.length() == 0) { + if (units.empty()) { mlog << Warning << "\n" << method_name << "the \"forecast_reference_time\" variable must contain a \"units\" attribute.\n\n"; ut = sec_per_unit = 0; @@ -418,8 +418,8 @@ bool NcCfFile::open(const char * filepath) ut = sec_per_unit = 0; } - double time_value = get_nc_time(&init_time_var,(int)0); - InitTime = (unixtime)ut + sec_per_unit * time_value; + double time_value = get_nc_time(&init_time_var,0); + InitTime = ut + (unixtime)(sec_per_unit * time_value); } // Pull out the grid. 
This must be done after pulling out the dimension @@ -436,7 +436,8 @@ bool NcCfFile::open(const char * filepath) StringArray z_dims; StringArray t_dims; StringArray dimNames; - string var_x_dim_name, var_y_dim_name; + string var_x_dim_name; + string var_y_dim_name; if (IS_VALID_NC_P(_xDim)) var_x_dim_name = GET_NC_NAME_P(_xDim); if (IS_VALID_NC_P(_yDim)) var_y_dim_name = GET_NC_NAME_P(_yDim); for (int j=0; j z_values(z_count); - if( get_nc_data(z_var, z_values) ) { + if( get_nc_data(z_var, z_values.data()) ) { for(int i=0; i " + mlog << Error << "\n" << method_name << "bad status for var->get()\n\n"; exit(1); } @@ -922,7 +944,7 @@ double NcCfFile::getData(NcVar * var, const LongArray & a) const // done mlog << Debug(6) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; return d; } @@ -951,7 +973,7 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const if (dim_count != a.n_elements()) { mlog << Error << "\n" << method_name - << "needed " << (dim_count) << " arguments for variable " + << "needed " << dim_count << " arguments for variable " << (GET_NC_NAME_P(v)) << ", got " << (a.n_elements()) << "\n\n"; exit(1); } @@ -966,7 +988,7 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const // find varinfo's bool found = false; - NcVarInfo *var = (NcVarInfo *)nullptr; + auto var = (NcVarInfo *)nullptr; for (int j = 0; j < Nvars; ++j) { @@ -991,13 +1013,19 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const for (int j = 0; j < a.n_elements(); ++j) { - if (a[j] == vx_data2d_star) + if (a[j] != vx_data2d_star) continue; + + ++count; + if ( var == nullptr || ((j != var->x_slot) && (j != var->y_slot)) ) { - ++count; - if ( var == nullptr || ((j != var->x_slot) && (j != var->y_slot)) ) - { + if (has_attr_grid) { + mlog << Debug(3) << "\n" << method_name + << "star found in unknown slot (" << j << ") 
for " << GET_NC_NAME_P(v) << "\n\n"; + } + else { mlog << Error << "\n" << method_name << "star found in bad slot (" << j << ") for " << GET_NC_NAME_P(v) << "\n\n"; + exit(1); } } @@ -1011,13 +1039,22 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const } // check slots - additional logic to satisfy Fortify Null Dereference - int x_slot_tmp = 0; - int y_slot_tmp = 0; + int x_slot_tmp = 0; + int y_slot_tmp = 0; if (var == nullptr || var->x_slot < 0 || var->y_slot < 0) { - mlog << Error << "\n" << method_name - << "bad x|y|z slot\n\n"; - exit(1); + if (has_attr_grid) { + mlog << Warning << "\n" << method_name + << "bad x|y|z slot (" << var->x_slot << "|" << var->y_slot + << "|" << var->z_slot << "|" << var->t_slot <<")\n\n"; + x_slot_tmp = dim_count - 1; + y_slot_tmp = dim_count - 2; + } + else { + mlog << Error << "\n" << method_name + << "bad x|y|z slot\n\n"; + exit(1); + } } else { x_slot_tmp = var->x_slot; @@ -1040,6 +1077,18 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const const int nx = grid.nx(); const int ny = grid.ny(); + size_t data_size = 1; + for (int k=0; kgetDim(k).getSize(); + } + if (data_size == 1) data_size = v->getDim(x_slot).getSize() * v->getDim(y_slot).getSize(); + if (!is_eq(data_size, (size_t)nx*ny)) { + mlog << Error << "\n" << method_name + << "Allocated DataPlane from Grid (" << nx*ny << ") does not match with the variable size (" + << data_size << "). 
Please check set_attr_grid settings (nx and ny) if applied.\n\n"; + exit(1); + } + plane.clear(); plane.set_size(nx, ny); @@ -1051,7 +1100,7 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const // get the data const int plane_size = nx * ny; - double *d = new double[plane_size]; + vector d(plane_size); size_t dim_size; LongArray offsets; @@ -1074,7 +1123,7 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const offsets[y_slot] = 0; lengths[y_slot] = ny; - get_nc_data(v, d, lengths, offsets); + get_nc_data(v, d.data(), lengths, offsets); int offset = 0; if( x_slot > y_slot ) { @@ -1091,8 +1140,8 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const plane.set(value, x, y_offset); - } // for x - } // for y + } /* for y */ + } /* for x */ } else { for (int x = 0; x< nx; ++x) { @@ -1108,15 +1157,13 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const plane.set(value, x, y_offset); - } // for y - } // for x + } /* for y */ + } /* for x */ } - delete [] d; - // done mlog << Debug(6) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; return true; } @@ -1130,7 +1177,7 @@ bool NcCfFile::getData(const char *var_name, NcVarInfo *&info) const { info = find_var_name(var_name); - if (info == 0) + if (info == nullptr) return false; bool found = getData(info->var, a, plane); @@ -1462,9 +1509,9 @@ void NcCfFile::read_netcdf_grid() !((_xDim && _yDim) || (x_dim_var_name.nonempty() && y_dim_var_name.nonempty()))) { - mlog << Error << "\nNcCfFile::read_netcdf_grid() -> " + mlog << Warning << "\nNcCfFile::read_netcdf_grid() -> " << "Couldn't figure out projection from information in netCDF file.\n\n"; - exit(1); + return; } return; @@ -1475,6 +1522,25 @@ void NcCfFile::read_netcdf_grid() //////////////////////////////////////////////////////////////////////// +Grid 
NcCfFile::build_grid_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, + const long lat_counts, const long lon_counts) { + Grid grid_ll; + bool swap_to_north = false; + LatLonData data = get_data_from_lat_lon_vars(lat_var, lon_var, + lat_counts, lon_counts, + swap_to_north); + + data.dump(); + + grid_ll.set(data); // resets swap_to_north to false + if (swap_to_north) grid_ll.set_swap_to_north(true); + return grid_ll; +} + + +//////////////////////////////////////////////////////////////////////// + + void NcCfFile::get_grid_from_grid_mapping(const NcVarAtt *grid_mapping_att) { static const string method_name = "NcCfFile::get_grid_from_grid_mapping()"; @@ -1487,9 +1553,9 @@ void NcCfFile::get_grid_from_grid_mapping(const NcVarAtt *grid_mapping_att) bool status = get_att_value_chars(grid_mapping_att, mapping_name); if (!status) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Cannot extract grid mapping name from netCDF file.\n\n"; - exit(1); + return; } NcVar *grid_mapping_var = nullptr; @@ -1503,12 +1569,12 @@ void NcCfFile::get_grid_from_grid_mapping(const NcVarAtt *grid_mapping_att) } } /* endfor - i */ - if ((grid_mapping_var == 0) || (IS_INVALID_NC_P(grid_mapping_var))) + if ((nullptr == grid_mapping_var) || (IS_INVALID_NC_P(grid_mapping_var))) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Cannot extract grid mapping variable (" << mapping_name << ") from netCDF file.\n\n"; - exit(1); + return; } // Get the name of the grid mapping @@ -1517,9 +1583,10 @@ void NcCfFile::get_grid_from_grid_mapping(const NcVarAtt *grid_mapping_att) if (IS_INVALID_NC_P(grid_mapping_name_att)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Cannot get coordinate system name from netCDF file.\n\n"; - exit(1); + if (grid_mapping_name_att) delete grid_mapping_name_att; + return; } //string grid_mapping_name = 
grid_mapping_name_att->getValues(att->as_string(0); @@ -1587,10 +1654,10 @@ void NcCfFile::get_grid_from_grid_mapping(const NcVarAtt *grid_mapping_att) } else { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Unknown grid mapping name (" << grid_mapping_name << ") found in netCDF file.\n\n"; - exit(1); + return; } } @@ -1654,9 +1721,9 @@ void NcCfFile::get_grid_mapping_lambert_azimuthal_equal_area(const NcVar *grid_m x_coord_units_name == "meters") x_coord_to_m_cf = 1.0; else if (x_coord_units_name == "km") x_coord_to_m_cf = 1000.0; else { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "The X coordinates must be in meters or kilometers for MET.\n\n"; - exit(1); + return; } } } @@ -1679,9 +1746,9 @@ void NcCfFile::get_grid_mapping_lambert_azimuthal_equal_area(const NcVar *grid_m y_coord_units_name == "meters" ) y_coord_to_m_cf = 1.0; else if (y_coord_units_name == "km") y_coord_to_m_cf = 1000.0; else { - mlog << Error << "\n" << method_name << " -> " - << "The X coordinates must be in meters or kilometers for MET.\n\n"; - exit(1); + mlog << Warning << "\n" << method_name << " -> " + << "The Y coordinates must be in meters or kilometers for MET.\n\n"; + return; } } } @@ -1718,11 +1785,11 @@ void NcCfFile::get_grid_mapping_lambert_azimuthal_equal_area(const NcVar *grid_m double curr_delta = fabs(x_values[i] - x_values[i-1]); if (fabs(curr_delta - dx_m_a) > DELTA_TOLERANCE) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "MET can only process Lambert Azimuthal Equal Area files " << "where the delta along the x-axis is constant (" << curr_delta << " != " << dx_m_a << ")\n\n"; - exit(1); + return; } } @@ -1731,11 +1798,11 @@ void NcCfFile::get_grid_mapping_lambert_azimuthal_equal_area(const NcVar *grid_m double curr_delta = fabs(y_values[i] - y_values[i-1]); if (fabs(curr_delta - dy_m_a) > 
DELTA_TOLERANCE) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "MET can only process Lambert Azimuthal Equal Area files " << "where the delta along the y-axis is constant (" << curr_delta << " != " << dy_m_a << ")\n\n"; - exit(1); + return; } } @@ -1787,11 +1854,11 @@ void NcCfFile::get_grid_mapping_lambert_azimuthal_equal_area(const NcVar *grid_m if(!is_bad_data(false_easting) && !is_eq(false_easting, 0.0)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "MET cannot process Lambert Azimuthal Equal Area files " << "with non-zero false_easting (" << false_easting << ").\n\n"; - exit(1); + return; } // false_northing @@ -1801,11 +1868,11 @@ void NcCfFile::get_grid_mapping_lambert_azimuthal_equal_area(const NcVar *grid_m if(!is_bad_data(false_northing) && !is_eq(false_northing, 0.0)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "MET cannot process Lambert Azimuthal Equal Area files " << "with non-zero false_northing (" << false_northing << ").\n\n"; - exit(1); + return; } // Calculate the pin indices. 
The pin will be located at the grid's reference @@ -1826,6 +1893,8 @@ void NcCfFile::get_grid_mapping_lambert_azimuthal_equal_area(const NcVar *grid_m grid.set(data); if (dy_m < 0) grid.set_swap_to_north(true); + grid_ready = true; + } @@ -1844,10 +1913,10 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin grid_mapping_var, (string)"standard_parallel"); if (IS_INVALID_NC_P(std_parallel_att)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Cannot get standard_parallel attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // longitude_of_central_meridian @@ -1856,10 +1925,10 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin grid_mapping_var, (string)"longitude_of_central_meridian"); if (IS_INVALID_NC_P(central_lon_att)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Cannot get longitude_of_central_meridian attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // latitude_of_projection_origin @@ -1868,10 +1937,10 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin grid_mapping_var, (string)"latitude_of_projection_origin"); if (IS_INVALID_NC_P(proj_origin_lat_att)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Cannot get latitude_of_projection_origin attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // Look for the x/y dimensions and x/y coordinate variables @@ -1880,9 +1949,9 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin if (get_data_size(_xCoordVar) != (int) GET_NC_SIZE_P(_xDim) || get_data_size(_yCoordVar) != (int) GET_NC_SIZE_P(_yDim)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << 
" -> " << "Coordinate variables don't match dimension sizes in netCDF file.\n\n"; - exit(1); + return; } // Make sure that the coordinate variables are given in meters. If we get @@ -1906,9 +1975,9 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin x_coord_units_name == "meters") x_coord_to_m_cf = 1.0; else if (x_coord_units_name == "km") x_coord_to_m_cf = 1000.0; else { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "The X coordinates must be in meters or kilometers for MET.\n\n"; - exit(1); + return; } } } @@ -1930,9 +1999,9 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin y_coord_units_name == "meters" ) y_coord_to_m_cf = 1.0; else if (y_coord_units_name == "km") y_coord_to_m_cf = 1000.0; else { - mlog << Error << "\n" << method_name << " -> " - << "The X coordinates must be in meters or kilometers for MET.\n\n"; - exit(1); + mlog << Warning << "\n" << method_name << " -> " + << "The Y coordinates must be in meters or kilometers for MET.\n\n"; + return; } } } @@ -1964,9 +2033,9 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin if (fabs(dx_m_a - dy_m_a) > DELTA_TOLERANCE) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "MET can only process Lambert Conformal files where the x-axis and y-axis deltas are the same\n\n"; - exit(1); + return; } // As a sanity check, make sure that the deltas are constant through the @@ -1977,9 +2046,9 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin double curr_delta = fabs(x_values[i] - x_values[i-1]); if (fabs(curr_delta - dx_m_a) > DELTA_TOLERANCE) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "MET can only process Lambert Conformal files where the delta along the x-axis is constant\n\n"; - exit(1); + return; } } @@ -1988,9 
+2057,9 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin double curr_delta = fabs(y_values[i] - y_values[i-1]); if (fabs(curr_delta - dy_m_a) > DELTA_TOLERANCE) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "MET can only process Lambert Conformal files where the delta along the y-axis is constant\n\n"; - exit(1); + return; } } @@ -2032,6 +2101,7 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin grid.set(data); if (dy_m < 0) grid.set_swap_to_north(true); + grid_ready = true; if(std_parallel_att) delete std_parallel_att; if(central_lon_att) delete central_lon_att; @@ -2125,32 +2195,32 @@ void NcCfFile::get_grid_mapping_latitude_longitude(const NcVar *grid_mapping_var if (_xDim == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find X dimension (degrees_east) in netCDF file.\n\n"; - exit(1); + return; } if (_yDim == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find Y dimension (degrees_north) in netCDF file.\n\n"; - exit(1); + return; } if (_xCoordVar == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find X coord variable (" << GET_NC_NAME_P(_xDim) << ") in netCDF file.\n\n"; - exit(1); + return; } if (_yCoordVar == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find Y coord variable (" << GET_NC_NAME_P(_yDim) << ") in netCDF file.\n\n"; - exit(1); + return; } long lon_counts = _xDim->getSize(); @@ -2158,9 +2228,9 @@ void NcCfFile::get_grid_mapping_latitude_longitude(const NcVar *grid_mapping_var if (get_data_size(_xCoordVar) != lon_counts || get_data_size(_yCoordVar) != lat_counts) { - mlog << Error << "\n" << method_name << " -> " + mlog << 
Warning << "\n" << method_name << " -> " << "Coordinate variables don't match dimension sizes in netCDF file.\n\n"; - exit(1); + return; } get_grid_from_lat_lon_vars(_yCoordVar, _xCoordVar, lat_counts, lon_counts); @@ -2225,7 +2295,7 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va { double x_coord_to_m_cf = 1.0; double y_coord_to_m_cf = 1.0; - static const string method_name = "NcCfFile::get_grid_mapping_polar_stereographic() --> "; + static const string method_name = "NcCfFile::get_grid_mapping_polar_stereographic() -> "; // Get projection attributes // proj_origin_lat: either 90.0 or -90.0, to decide the northern/southern hemisphere @@ -2258,17 +2328,17 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va << "This is an ellipsoidal earth.\n\n"; } else if(!has_scale_factor && !has_standard_parallel) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "The attribute \"scale_factor_at_projection_origin\" and \"standard_parallel\" of the " << GET_NC_NAME_P(grid_mapping_var) << " variable do not exist.\n\n"; - exit(1); + return; } else if(has_scale_factor && !is_eq(proj_origin_scale_factor, 1.0)) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "Unexpected attribute value of " << proj_origin_scale_factor << " for the scale_factor_at_projection_origin attribute of the " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // Look for the x/y dimensions and x/y coordinate variables @@ -2277,9 +2347,9 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va if (get_data_size(_xCoordVar) != (int) GET_NC_SIZE_P(_xDim) || get_data_size(_yCoordVar) != (int) GET_NC_SIZE_P(_yDim)) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "Coordinate variables don't match dimension sizes in netCDF file.\n\n"; - exit(1); + return; } // Make sure that the coordinate 
variables are given in meters. If we get @@ -2303,10 +2373,10 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va x_coord_units_name == "meters") x_coord_to_m_cf = 1.0; else if ( x_coord_units_name == "km") x_coord_to_m_cf = 1000.0; else { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "The X coordinates (" << x_coord_units_name << ") must be in meters or kilometers for MET.\n\n"; - exit(1); + return; } } } @@ -2328,9 +2398,9 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va y_coord_units_name == "meters" ) y_coord_to_m_cf = 1.0; else if ( y_coord_units_name == "km") y_coord_to_m_cf = 1000.0; else { - mlog << Error << "\n" << method_name - << "The X coordinates must be in meters or kilometers for MET.\n\n"; - exit(1); + mlog << Warning << "\n" << method_name + << "The Y coordinates must be in meters or kilometers for MET.\n\n"; + return; } } } @@ -2362,9 +2432,9 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va if (fabs(dx_m_a - dy_m_a) > DELTA_TOLERANCE) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "MET can only process Polar Stereographic files where the x-axis and y-axis deltas are the same.\n\n"; - exit(1); + return; } if (is_eq(semi_major_axis, bad_data_double)) @@ -2451,6 +2521,7 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va data.dump(); grid.set(data); + grid_ready = true; //Note: do not set grid.set_swap_to_north() @@ -2592,10 +2663,10 @@ void NcCfFile::get_grid_mapping_rotated_latitude_longitude(const NcVar *grid_map grid_mapping_var, (string)"grid_north_pole_latitude"); if (IS_INVALID_NC_P(grid_np_lat_att)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Cannot get grid_north_pole_latitude attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // 
grid_north_pole_longitude @@ -2604,10 +2675,10 @@ void NcCfFile::get_grid_mapping_rotated_latitude_longitude(const NcVar *grid_map grid_mapping_var, (string)"grid_north_pole_longitude"); if (IS_INVALID_NC_P(grid_np_lon_att)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Cannot get grid_north_pole_longitude attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // Look for the grid_latitude and grid_longitude dimensions @@ -2684,32 +2755,32 @@ void NcCfFile::get_grid_mapping_rotated_latitude_longitude(const NcVar *grid_map if (_xDim == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find X dimension (degrees_east) in netCDF file.\n\n"; - exit(1); + return; } if (_yDim == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find Y dimension (degrees_north) in netCDF file.\n\n"; - exit(1); + return; } if (_xCoordVar == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find X coord variable (" << GET_NC_NAME_P(_xDim) << ") in netCDF file.\n\n"; - exit(1); + return; } if (_yCoordVar == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find Y coord variable (" << GET_NC_NAME_P(_yDim) << ") in netCDF file.\n\n"; - exit(1); + return; } long lon_counts = _xDim->getSize(); @@ -2717,9 +2788,9 @@ void NcCfFile::get_grid_mapping_rotated_latitude_longitude(const NcVar *grid_map if (get_data_size(_xCoordVar) != lon_counts || get_data_size(_yCoordVar) != lat_counts) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Coordinate variables don't match dimension sizes in netCDF file.\n\n"; - exit(1); + return; } // Store spacing in LatLon data 
structure @@ -2813,10 +2884,10 @@ void NcCfFile::get_grid_mapping_geostationary( grid_mapping_var, (string)"perspective_point_height"); if (IS_INVALID_NC_P(perspective_point_height_att)) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Cannot get perspective_point_height attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // semi_major_axis @@ -2824,10 +2895,10 @@ void NcCfFile::get_grid_mapping_geostationary( grid_mapping_var, (string)"semi_major_axis"); if (IS_INVALID_NC_P(semi_major_axis_att)) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Cannot get semi_major_axis attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // semi_minor_axis @@ -2835,10 +2906,10 @@ void NcCfFile::get_grid_mapping_geostationary( grid_mapping_var, (string)"semi_minor_axis"); if (IS_INVALID_NC_P(semi_minor_axis_att)) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Cannot get semi_minor_axis attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // inverse_flattening @@ -2846,10 +2917,10 @@ void NcCfFile::get_grid_mapping_geostationary( grid_mapping_var, (string)"inverse_flattening"); if (IS_INVALID_NC_P(inverse_flattening_att)) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Cannot get inverse_flattening attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // latitude_of_projection_origin @@ -2857,10 +2928,10 @@ void NcCfFile::get_grid_mapping_geostationary( grid_mapping_var, (string)"latitude_of_projection_origin"); if (IS_INVALID_NC_P(proj_origin_lat_att)) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Cannot get latitude_of_projection_origin attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; 
- exit(1); + return; } // longitude_of_projection_origin @@ -2868,10 +2939,10 @@ void NcCfFile::get_grid_mapping_geostationary( grid_mapping_var, (string)"longitude_of_projection_origin"); if (IS_INVALID_NC_P(proj_origin_lon_att)) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Cannot get longitude_of_projection_origin attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // sweep_angle_axis @@ -2879,10 +2950,10 @@ void NcCfFile::get_grid_mapping_geostationary( grid_mapping_var, (string)"sweep_angle_axis"); if (IS_INVALID_NC_P(sweep_angle_axis_att)) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Cannot get sweep_angle_axis attribute from " << GET_NC_NAME_P(grid_mapping_var) << " variable.\n\n"; - exit(1); + return; } // Look for the x/y dimensions and x/y coordinate variables @@ -2891,21 +2962,21 @@ void NcCfFile::get_grid_mapping_geostationary( bool do_exit = false; if (_xDim == nullptr) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Didn't find X dimension (projection_x_coordinate) in netCDF file.\n\n"; do_exit = true; } if (_yDim == nullptr) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Didn't find Y dimension (projection_y_coordinate) in netCDF file.\n\n"; do_exit = true; } if (_xCoordVar == nullptr) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Didn't find X coord variable (" << GET_NC_NAME_P(_xDim) << ") in netCDF file.\n\n"; do_exit = true; @@ -2913,7 +2984,7 @@ void NcCfFile::get_grid_mapping_geostationary( if (_yCoordVar == nullptr) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Didn't find Y coord variable (" << GET_NC_NAME_P(_yDim) << ") in netCDF file.\n\n"; do_exit = true; @@ -2922,12 +2993,12 @@ void NcCfFile::get_grid_mapping_geostationary( if 
(get_data_size(_xCoordVar) != (int) GET_NC_SIZE_P(_xDim) || get_data_size(_yCoordVar) != (int) GET_NC_SIZE_P(_yDim)) { - mlog << Error << "\n" << method_name + mlog << Warning << "\n" << method_name << "-> Coordinate variables don't match dimension sizes in netCDF file.\n\n"; do_exit = true; } - if (do_exit) exit(1); + if (do_exit) return; // Figure out the dx/dy and x/y pin values from the dimension variables @@ -2936,7 +3007,6 @@ void NcCfFile::get_grid_mapping_geostationary( get_nc_data(_xCoordVar, x_values); - long y_counts = GET_NC_SIZE_P(_yDim); double y_values[y_counts]; @@ -2958,7 +3028,7 @@ void NcCfFile::get_grid_mapping_geostationary( NcVar *var_y_bound = (NcVar *)nullptr; for (int j=0; j 0) { data.x_image_bounds = new double[bound_count]; data.y_image_bounds = new double[bound_count]; - if (0 != var_x_bound) get_nc_data(var_x_bound, data.x_image_bounds); - if (0 != var_y_bound) get_nc_data(var_y_bound, data.y_image_bounds); + if (nullptr != var_x_bound) get_nc_data(var_x_bound, data.x_image_bounds); + if (nullptr != var_y_bound) get_nc_data(var_y_bound, data.y_image_bounds); } double flatten = 1.0/data.inverse_flattening; @@ -3015,6 +3085,7 @@ void NcCfFile::get_grid_mapping_geostationary( // Note: Computing lat/lon was deferred because it took 1 minutes grid.set(data); + grid_ready = true; if (perspective_point_height_att) delete perspective_point_height_att; if (semi_major_axis_att) delete semi_major_axis_att; @@ -3093,19 +3164,19 @@ bool NcCfFile::get_grid_from_coordinates(const NcVar *data_var) { } if (_xCoordVar == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find X coord variable (" << x_dim_var_name << ") in netCDF file.\n\n"; if (coordinates_att) delete coordinates_att; - return true; + return false; } if (_yCoordVar == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find Y coord variable (" << 
y_dim_var_name << ") in netCDF file.\n\n"; if (coordinates_att) delete coordinates_att; - return true; + return false; } StringArray dimNames; @@ -3134,10 +3205,10 @@ bool NcCfFile::get_grid_from_coordinates(const NcVar *data_var) { if ((x_size != lon_counts && x_size != latlon_counts) || (y_size != lat_counts && x_size != latlon_counts)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Coordinate variables don't match dimension sizes in netCDF file.\n\n"; if (coordinates_att) delete coordinates_att; - exit(1); + return false; } if (coordinates_att) { @@ -3180,7 +3251,7 @@ bool NcCfFile::get_grid_from_dimensions() } if (!has_var(_ncFile, dim_name.c_str())) { - mlog << Debug(4) << method_name << " -> " << "The coordinate variable \"" + mlog << Debug(6) << method_name << " -> " << "The coordinate variable \"" << _dims[dim_num]->getName() << "\" does not exist.\n"; continue; } @@ -3261,18 +3332,18 @@ bool NcCfFile::get_grid_from_dimensions() if (_xCoordVar == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find X coord variable (" << GET_NC_NAME_P(_xDim) << ") in netCDF file.\n\n"; - exit(1); + return false; } if (_yCoordVar == nullptr) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Didn't find Y coord variable (" << GET_NC_NAME_P(_yDim) << ") in netCDF file.\n\n"; - exit(1); + return false; } long lat_counts = GET_NC_SIZE_P(_yDim); @@ -3308,19 +3379,25 @@ void NcCfFile::get_grid_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, LatLonData NcCfFile::get_data_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, const long lat_counts, const long lon_counts, bool &swap_to_north) { - static const string method_name = "get_data_from_lat_lon_vars()"; + static const string method_name = "NcCfFile::get_data_from_lat_lon_vars()"; // Figure out the dlat/dlon values from the dimension variables 
+ LatLonData data; + data.name = latlon_proj_type; + data.Nlat = (int)lat_counts; + data.Nlon = (int)lon_counts; + long x_size = get_data_size(lon_var); long y_size = get_data_size(lat_var); long latlon_counts = lon_counts*lat_counts; bool two_dim_coord = (x_size == latlon_counts) && (y_size == latlon_counts ); + if( !two_dim_coord && (x_size != lon_counts || y_size != lat_counts)) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Coordinate variables don't match dimension sizes in netCDF file.\n\n"; - exit(1); + return data; } double lat_values[lat_counts]; @@ -3346,6 +3423,8 @@ LatLonData NcCfFile::get_data_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, get_nc_data(lat_var,lat_values); get_nc_data(lon_var,lon_values); } + data.lat_ll = lat_values[0]; + data.lon_ll = rescale_lon(-lon_values[0]); // Calculate dlat and dlon assuming they are constant. @@ -3357,6 +3436,9 @@ LatLonData NcCfFile::get_data_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, << " lon[" << (lon_counts-1) << "]=" << lon_values[lon_counts-1] << " dlon=" << dlon << "\n"; + data.delta_lat = dlat; + data.delta_lon = dlon; + ConcatString point_nccf; bool skip_sanity_check = get_att_value_string(_ncFile, nc_att_met_point_nccf, point_nccf); if (!skip_sanity_check) { @@ -3387,7 +3469,7 @@ LatLonData NcCfFile::get_data_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, << i-1 << "]=" << lat_values[i-1] << " lat[" << i << "]=" << lat_values[i] << " " << fabs(curr_delta - dlat) << " > " << degree_tolerance << "\n"; - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "MET can only process Latitude/Longitude files where the latitudes are evenly spaced (dlat=" << dlat <<", delta[" << i << "]=" << curr_delta << ")\n\n"; sanity_check_failed = true; @@ -3407,7 +3489,7 @@ LatLonData NcCfFile::get_data_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, << i-1 << "]=" << lon_values[i-1] << " lon[" << i << 
"]=" << lon_values[i] << " " << fabs(curr_delta - dlon) << " > " << degree_tolerance << "\n"; - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "MET can only process Latitude/Longitude files where the longitudes are evenly spaced (dlon=" << dlon <<", delta[" << i << "]=" << curr_delta << ")\n\n"; sanity_check_failed = true; @@ -3416,9 +3498,9 @@ LatLonData NcCfFile::get_data_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, } if (sanity_check_failed) { - mlog << Error << "\n" << method_name << " -> " + mlog << Warning << "\n" << method_name << " -> " << "Please check the input data is the lat/lon projection\n\n"; - exit(1); + return data; } } @@ -3431,16 +3513,6 @@ LatLonData NcCfFile::get_data_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, // guaranteed anywhere that I see. But if this is not the case, then we // will probably also need to reorder the data itself. - LatLonData data; - - data.name = latlon_proj_type; - data.lat_ll = lat_values[0]; - data.lon_ll = rescale_lon(-lon_values[0]); - data.delta_lat = dlat; - data.delta_lon = dlon; - data.Nlat = lat_counts; - data.Nlon = lon_counts; - if (dlat < 0) { swap_to_north = true; data.delta_lat = -dlat; @@ -3449,6 +3521,7 @@ LatLonData NcCfFile::get_data_from_lat_lon_vars(NcVar *lat_var, NcVar *lon_var, else { swap_to_north = false; } + grid_ready = true; return data; diff --git a/src/libcode/vx_data2d_nc_cf/nc_cf_file.h b/src/libcode/vx_data2d_nc_cf/nc_cf_file.h index d3e50a30d0..f6f0b8b3fa 100644 --- a/src/libcode/vx_data2d_nc_cf/nc_cf_file.h +++ b/src/libcode/vx_data2d_nc_cf/nc_cf_file.h @@ -56,22 +56,23 @@ class NcCfFile { int getNx() const { - if (_xDim == 0) + if (_xDim == nullptr) return 0; - return GET_NC_SIZE_P(_xDim); + return (int)GET_NC_SIZE_P(_xDim); } - + int getNy() const { - if (_yDim == 0) + if (_yDim == nullptr) return 0; - return GET_NC_SIZE_P(_yDim); + return (int)GET_NC_SIZE_P(_yDim); } - + NcVarInfo *get_time_var_info() const { return 
_time_var_info; } - + + // // time // @@ -93,6 +94,7 @@ class NcCfFile { int Nvars; NcVarInfo * Var; // allocated + StringArray coord_var_names; // // Grid @@ -110,14 +112,20 @@ class NcCfFile { bool getData(const char *, const LongArray &, DataPlane &, NcVarInfo *&) const; + bool update_grid(const Grid &); + + Grid build_grid_from_lat_lon_vars(netCDF::NcVar *lat_var, netCDF::NcVar *lon_var, + const long lat_counts, const long lon_counts); NcVarInfo* find_var_name(const char * var_name) const; NcVarInfo* find_var_by_dim_name(const char *dim_name) const; private: static const double DELTA_TOLERANCE; - + netCDF::NcFile * _ncFile; // allocated + bool grid_ready; + bool has_attr_grid; // // dimensions @@ -142,7 +150,7 @@ class NcCfFile { netCDF::NcVar *_xCoordVar; netCDF::NcVar *_yCoordVar; NcVarInfo *_time_var_info; - + void init_from_scratch(); NcCfFile(const NcCfFile &); @@ -164,7 +172,7 @@ class NcCfFile { void read_netcdf_grid(); void get_grid_from_grid_mapping(const netCDF::NcVarAtt *grid_mapping_att); - + void get_grid_mapping_albers_conical_equal_area(const netCDF::NcVar *grid_mapping_var); void get_grid_mapping_azimuthal_equidistant(const netCDF::NcVar *grid_mapping_var); void get_grid_mapping_lambert_azimuthal_equal_area(const netCDF::NcVar *grid_mapping_var); @@ -179,7 +187,7 @@ class NcCfFile { void get_grid_mapping_transverse_mercator(const netCDF::NcVar *grid_mapping_var); void get_grid_mapping_vertical_perspective(const netCDF::NcVar *grid_mapping_var); void get_grid_mapping_geostationary(const netCDF::NcVar *grid_mapping_var); - + bool get_grid_from_coordinates(const netCDF::NcVar *data_var); bool get_grid_from_dimensions(); void get_grid_from_lat_lon_vars(netCDF::NcVar *lat_var, netCDF::NcVar *lon_var, diff --git a/src/libcode/vx_data2d_nc_cf/var_info_nc_cf.h b/src/libcode/vx_data2d_nc_cf/var_info_nc_cf.h index 36b3f7b892..439369d95f 100644 --- a/src/libcode/vx_data2d_nc_cf/var_info_nc_cf.h +++ b/src/libcode/vx_data2d_nc_cf/var_info_nc_cf.h @@ -86,14 
+86,14 @@ class VarInfoNcCF : public VarInfo /////////////////////////////////////////////////////////////////////////////// -inline GrdFileType VarInfoNcCF::file_type() const { return(FileType_NcCF); } -inline const LongArray & VarInfoNcCF::dimension() const { return(Dimension); } -inline int VarInfoNcCF::dimension(int i) const { return(Dimension[i]); } -inline int VarInfoNcCF::n_dimension() const { return(Dimension.n_elements());} -inline const NumArray & VarInfoNcCF::dim_value() const { return(Dim_value); } -inline double VarInfoNcCF::dim_value(int i) const { return(Dim_value[i]); } -inline const BoolArray & VarInfoNcCF::is_offset() const { return(Is_offset); } -inline bool VarInfoNcCF::is_offset(int i) const { return(Is_offset[i]); } +inline GrdFileType VarInfoNcCF::file_type() const { return FileType_NcCF; } +inline const LongArray & VarInfoNcCF::dimension() const { return Dimension; } +inline int VarInfoNcCF::dimension(int i) const { return Dimension[i]; } +inline int VarInfoNcCF::n_dimension() const { return Dimension.n_elements();} +inline const NumArray & VarInfoNcCF::dim_value() const { return Dim_value; } +inline double VarInfoNcCF::dim_value(int i) const { return Dim_value[i]; } +inline const BoolArray & VarInfoNcCF::is_offset() const { return Is_offset; } +inline bool VarInfoNcCF::is_offset(int i) const { return Is_offset[i]; } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nc_met/Makefile.in b/src/libcode/vx_data2d_nc_met/Makefile.in index e160b86a38..a40eb0c86a 100644 --- a/src/libcode/vx_data2d_nc_met/Makefile.in +++ b/src/libcode/vx_data2d_nc_met/Makefile.in @@ -242,6 +242,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_data2d_nc_met/data2d_nc_met.h 
b/src/libcode/vx_data2d_nc_met/data2d_nc_met.h index db7f166ce1..d4ae16fc55 100644 --- a/src/libcode/vx_data2d_nc_met/data2d_nc_met.h +++ b/src/libcode/vx_data2d_nc_met/data2d_nc_met.h @@ -85,7 +85,7 @@ class MetNcMetDataFile : public Met2dDataFile { //////////////////////////////////////////////////////////////////////// -inline GrdFileType MetNcMetDataFile::file_type () const { return ( FileType_NcMet ); } +inline GrdFileType MetNcMetDataFile::file_type () const { return FileType_NcMet; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nc_met/get_met_grid.cc b/src/libcode/vx_data2d_nc_met/get_met_grid.cc index 90a6cd798b..0c428c6ab2 100644 --- a/src/libcode/vx_data2d_nc_met/get_met_grid.cc +++ b/src/libcode/vx_data2d_nc_met/get_met_grid.cc @@ -699,10 +699,9 @@ void get_semilatlon_var(NcFile *ncfile, const char * var_name, NumArray &out_na) // Store the requested data in the specified NumArray object long count = get_data_size(&nc_var); - double * data_values = new double[ count ]; - get_nc_data(&nc_var, data_values); + vector data_values(count); + get_nc_data(&nc_var, data_values.data()); for(int i=0; igetName()) << ", got " << (a.n_elements()) << "\n\n"; + << "needed " << dimCount << " arguments for variable " + << var->getName() << ", got " << a.n_elements() << "\n\n"; exit ( 1 ); @@ -457,7 +457,7 @@ int dimCount = GET_NC_DIM_COUNT_P(v); if ( dimCount != a.n_elements() ) { mlog << Error << "\n" << method_name - << "needed " << (dimCount) << " arguments for variable " + << "needed " << dimCount << " arguments for variable " << (GET_NC_NAME_P(v)) << ", got " << (a.n_elements()) << "\n\n"; exit ( 1 ); @@ -587,24 +587,24 @@ plane.set_size(Nx, Ny); dim[x_slot] = Nx; dim[y_slot] = Ny; - double *data_array = new double[cell_count]; - double *double_array = new double[cell_count]; + vector data_array(cell_count); + vector double_array(cell_count); clock_time = clock(); - get_nc_data(v, double_array, dim, cur); 
- copy_nc_data_as_double(data_array, double_array, x_slot, y_slot, Nx, Ny, + get_nc_data(v, double_array.data(), dim, cur); + copy_nc_data_as_double(data_array.data(), double_array.data(), x_slot, y_slot, Nx, Ny, missing_value, fill_value); nc_time = clock(); if (mlog.verbosity_level() >= 7) { double duration_sec = (double)(nc_time - clock_time)/CLOCKS_PER_SEC; - check_nc_data_2d(data_array, Nx, Ny, missing_value); + check_nc_data_2d(data_array.data(), Nx, Ny, missing_value); mlog << Debug(7) << method_name_short << "took " << duration_sec << " seconds to read NetCDF data\n"; } - plane.set_block(data_array, Nx, Ny); + plane.set_block(data_array.data(), Nx, Ny); if (mlog.verbosity_level() >= 7) { double duration_sec = (double)(clock() - nc_time)/CLOCKS_PER_SEC; @@ -612,9 +612,6 @@ plane.set_size(Nx, Ny); << " seconds to fill data plane\n"; } - if (data_array) delete[] data_array; - if (double_array) delete[] double_array; - // // done // diff --git a/src/libcode/vx_data2d_nc_met/var_info_nc_met.h b/src/libcode/vx_data2d_nc_met/var_info_nc_met.h index 737a22a714..750662da18 100644 --- a/src/libcode/vx_data2d_nc_met/var_info_nc_met.h +++ b/src/libcode/vx_data2d_nc_met/var_info_nc_met.h @@ -78,10 +78,10 @@ class VarInfoNcMet : public VarInfo /////////////////////////////////////////////////////////////////////////////// -inline GrdFileType VarInfoNcMet::file_type() const { return(FileType_NcMet); } -inline const LongArray & VarInfoNcMet::dimension() const { return(Dimension); } -inline int VarInfoNcMet::dimension(int i) const { return(Dimension[i]); } -inline int VarInfoNcMet::n_dimension() const { return(Dimension.n_elements()); } +inline GrdFileType VarInfoNcMet::file_type() const { return FileType_NcMet; } +inline const LongArray & VarInfoNcMet::dimension() const { return Dimension; } +inline int VarInfoNcMet::dimension(int i) const { return Dimension[i]; } +inline int VarInfoNcMet::n_dimension() const { return Dimension.n_elements(); } 
/////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nc_wrf/Makefile.in b/src/libcode/vx_data2d_nc_wrf/Makefile.in index dbe34976c3..682fe818bc 100644 --- a/src/libcode/vx_data2d_nc_wrf/Makefile.in +++ b/src/libcode/vx_data2d_nc_wrf/Makefile.in @@ -242,6 +242,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_data2d_nc_wrf/get_wrf_grid.cc b/src/libcode/vx_data2d_nc_wrf/get_wrf_grid.cc index 312a5397a4..801ceeb253 100644 --- a/src/libcode/vx_data2d_nc_wrf/get_wrf_grid.cc +++ b/src/libcode/vx_data2d_nc_wrf/get_wrf_grid.cc @@ -439,7 +439,7 @@ lon_rad = -u; lon_deg = lon_rad*deg_per_rad; -return ( lon_deg ); +return lon_deg; } @@ -455,7 +455,7 @@ double lat; lat = 2.0*atand(exp(v)) - 90.0; -return ( lat ); +return lat; } diff --git a/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.cc b/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.cc index 6beba9a08d..edb45079e6 100644 --- a/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.cc +++ b/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.cc @@ -239,7 +239,7 @@ void VarInfoNcWrf::set_magic(const ConcatString &nstr, const ConcatString &lstr) level_value = unix_time; as_offset = false; } - else if (is_number(ptr2)) { + else if (is_number(ptr2)) { if (as_offset) level = atoi(ptr2); else { level = vx_data2d_dim_by_value; diff --git a/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.h b/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.h index 4bb201cb15..44f4f48cfd 100644 --- a/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.h +++ b/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.h @@ -247,14 +247,14 @@ class VarInfoNcWrf : public VarInfo /////////////////////////////////////////////////////////////////////////////// -inline GrdFileType VarInfoNcWrf::file_type() const { 
return(FileType_NcWrf); } -inline const LongArray & VarInfoNcWrf::dimension() const { return(Dimension); } -inline int VarInfoNcWrf::dimension(int i) const { return(Dimension[i]); } -inline int VarInfoNcWrf::n_dimension() const { return(Dimension.n_elements()); } -inline const NumArray & VarInfoNcWrf::dim_value() const { return(Dim_value); } -inline double VarInfoNcWrf::dim_value(int i) const { return(Dim_value[i]); } -inline const BoolArray & VarInfoNcWrf::is_offset() const { return(Is_offset); } -inline bool VarInfoNcWrf::is_offset(int i) const { return(Is_offset[i]); } +inline GrdFileType VarInfoNcWrf::file_type() const { return FileType_NcWrf; } +inline const LongArray & VarInfoNcWrf::dimension() const { return Dimension; } +inline int VarInfoNcWrf::dimension(int i) const { return Dimension[i]; } +inline int VarInfoNcWrf::n_dimension() const { return Dimension.n_elements(); } +inline const NumArray & VarInfoNcWrf::dim_value() const { return Dim_value; } +inline double VarInfoNcWrf::dim_value(int i) const { return Dim_value[i]; } +inline const BoolArray & VarInfoNcWrf::is_offset() const { return Is_offset; } +inline bool VarInfoNcWrf::is_offset(int i) const { return Is_offset[i]; } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nc_wrf/wrf_file.cc b/src/libcode/vx_data2d_nc_wrf/wrf_file.cc index f950201ea3..56820aea09 100644 --- a/src/libcode/vx_data2d_nc_wrf/wrf_file.cc +++ b/src/libcode/vx_data2d_nc_wrf/wrf_file.cc @@ -317,7 +317,7 @@ InitTime = parse_init_time(att_value.c_str()); dimNames.clear(); get_dim_names(&v, &dimNames); string c; - for (k=0; k<(dim_count); ++k) { + for (k=0; kgetDimCount(); if ( dim_count != a.n_elements() ) { mlog << Error << "\n" << method_name - << "needed " << (dim_count) << " arguments for variable " + << "needed " << dim_count << " arguments for variable " << (GET_NC_NAME_P(var)) << ", got " << (a.n_elements()) << "\n\n"; exit ( 1 ); @@ -585,7 +585,7 @@ if ( 
dim_count != a.n_elements() ) { mlog << Warning << "\n" << method_name << "needed " << dim_count << " arguments for variable " - << (var_name) << ", got " << (a.n_elements()) << "\n\n"; + << var_name << ", got " << a.n_elements() << "\n\n"; exit ( 1 ); @@ -594,7 +594,7 @@ if ( dim_count != a.n_elements() ) { if (dim_count >= max_wrf_args ) { mlog << Warning << "\n" << method_name - << " too may arguments for variable \"" << (var_name) << "\"\n\n"; + << " too may arguments for variable \"" << var_name << "\"\n\n"; return false; @@ -626,7 +626,7 @@ for (j=0; j d(Ny); LongArray offsets; LongArray lengths; @@ -863,7 +863,7 @@ lengths[y_slot] = Ny; int type_id = GET_NC_TYPE_ID_P(v); for (x=0; x " - << "can't parse values for \"" << key << "\" from python \"" + << "can't parse values for \"" << key << "\" from python \"" << Py_TYPE(obj)->tp_name << "\" object.\n"; } diff --git a/src/libcode/vx_data2d_python/python_dataplane.cc b/src/libcode/vx_data2d_python/python_dataplane.cc index 9f53bb7cf8..6921b1eed6 100644 --- a/src/libcode/vx_data2d_python/python_dataplane.cc +++ b/src/libcode/vx_data2d_python/python_dataplane.cc @@ -114,11 +114,11 @@ bool straight_python_dataplane(const char * user_script_name, { -PyObject * module_obj = 0; -PyObject * module_dict_obj = 0; -PyObject * key_obj = 0; -PyObject * numpy_array_obj = 0; -PyObject * attrs_dict_obj = 0; +PyObject * module_obj = nullptr; +PyObject * module_dict_obj = nullptr; +PyObject * key_obj = nullptr; +PyObject * numpy_array_obj = nullptr; +PyObject * attrs_dict_obj = nullptr; ConcatString cs, user_dir, user_base; const char *method_name = "straight_python_dataplane() -> "; diff --git a/src/libcode/vx_data2d_python/var_info_python.h b/src/libcode/vx_data2d_python/var_info_python.h index b0c8000b47..abdfec9021 100644 --- a/src/libcode/vx_data2d_python/var_info_python.h +++ b/src/libcode/vx_data2d_python/var_info_python.h @@ -73,7 +73,7 @@ class VarInfoPython : public VarInfo 
/////////////////////////////////////////////////////////////////////////////// -inline GrdFileType VarInfoPython::file_type() const { return(Type); } +inline GrdFileType VarInfoPython::file_type() const { return Type; } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_ugrid/Makefile.in b/src/libcode/vx_data2d_ugrid/Makefile.in index 7712baafe9..b6f107c162 100644 --- a/src/libcode/vx_data2d_ugrid/Makefile.in +++ b/src/libcode/vx_data2d_ugrid/Makefile.in @@ -240,6 +240,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_data2d_ugrid/ugrid_file.cc b/src/libcode/vx_data2d_ugrid/ugrid_file.cc index d157f75f0c..4a2e22c909 100644 --- a/src/libcode/vx_data2d_ugrid/ugrid_file.cc +++ b/src/libcode/vx_data2d_ugrid/ugrid_file.cc @@ -350,7 +350,7 @@ bool UGridFile::open_metadata(const char * filepath) int n_times = IS_VALID_NC_P(_tDim) ? get_dim_size(_tDim) : (int) get_data_size(valid_time_var); int tim_buf_size = n_times; - double *time_values = new double[tim_buf_size]; + vector time_values(tim_buf_size); if(2 == time_dim_count) { for(int i=0; i 1 ) { double latest_time = bad_data_double; @@ -396,7 +396,6 @@ bool UGridFile::open_metadata(const char * filepath) } } else ValidTime.add(0); //Initialize - delete [] time_values; } // Pull out the grid. 
This must be done after pulling out the dimension @@ -440,14 +439,13 @@ bool UGridFile::open_metadata(const char * filepath) if (IS_VALID_NC_P(z_var)) { int z_count = (int) get_data_size(z_var); - double *z_values = new double[z_count]; + vector z_values(z_count); - if( get_nc_data(z_var, z_values) ) { + if( get_nc_data(z_var, z_values.data()) ) { for(int i=0; i d(plane_size); int length; size_t dim_size; @@ -726,7 +724,7 @@ bool UGridFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const } } - get_nc_data(v, d, lengths, offsets); + get_nc_data(v, d.data(), lengths, offsets); double min_value = 10e10; double max_value = -min_value; @@ -744,8 +742,6 @@ bool UGridFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const } // for x - if (nullptr != d) delete [] d; - // done ConcatString log_message; for (int idx=0; idx _lat(face_count); + vector _lon(face_count); if (IS_INVALID_NC_P(_latVar)) { mlog << Error << "\n" << method_name << "latitude variable is missing\n\n"; exit(1); } - else if (!get_nc_data(_latVar,_lat)) { + else if (!get_nc_data(_latVar,_lat.data())) { mlog << Error << "\n" << method_name << "fail to read latitude values\n\n"; exit(1); } @@ -922,7 +918,7 @@ void UGridFile::read_netcdf_grid() mlog << Error << "\n" << method_name << "longitude variable is missing\n\n"; exit(1); } - else if (!get_nc_data(_lonVar,_lon)) { + else if (!get_nc_data(_lonVar,_lon.data())) { mlog << Error << "\n" << method_name << "fail to read latitude values\n\n"; exit(1); } @@ -943,7 +939,7 @@ void UGridFile::read_netcdf_grid() // Convert longitude from degrees east to west for (int idx=0; idx= 0 ); } diff --git a/src/libcode/vx_gis/shapetype_to_string.cc b/src/libcode/vx_gis/shapetype_to_string.cc index 55e8f318c4..e292ffe878 100644 --- a/src/libcode/vx_gis/shapetype_to_string.cc +++ b/src/libcode/vx_gis/shapetype_to_string.cc @@ -28,7 +28,6 @@ #include "shapetype_to_string.h" - using namespace std; diff --git a/src/libcode/vx_gis/shp_array.h 
b/src/libcode/vx_gis/shp_array.h index f0bfe543c0..cf3f4b846b 100644 --- a/src/libcode/vx_gis/shp_array.h +++ b/src/libcode/vx_gis/shp_array.h @@ -67,11 +67,11 @@ class Shp_Array { Shp_Array & operator=(const Shp_Array & _a) { - if ( this == &_a ) return ( * this ); + if ( this == &_a ) return *this; assign(_a); - return ( * this ); + return *this; } @@ -95,9 +95,9 @@ class Shp_Array { // get stuff // - int n() const { return ( Nelements ); } + int n() const { return Nelements; } - int n_elements() const { return ( Nelements ); } + int n_elements() const { return Nelements; } // // do stuff @@ -371,7 +371,7 @@ if ( (N < 0) || (N >= Nelements) ) { exit ( 1 ); } -return ( E[N] ); +return E[N]; } @@ -385,7 +385,7 @@ T * Shp_Array::buf() const { -return ( E ); +return E; } diff --git a/src/libcode/vx_gis/shp_file.h b/src/libcode/vx_gis/shp_file.h index 2dd900aefc..64ae681880 100644 --- a/src/libcode/vx_gis/shp_file.h +++ b/src/libcode/vx_gis/shp_file.h @@ -159,11 +159,11 @@ class ShpFile { //////////////////////////////////////////////////////////////////////// -inline const ShpFileHeader * ShpFile::header() const { return ( &Header ); } +inline const ShpFileHeader * ShpFile::header() const { return &Header; } -inline int ShpFile::shape_type() const { return ( Header.shape_type ); } +inline int ShpFile::shape_type() const { return Header.shape_type; } -inline bool ShpFile::at_eof() const { return ( At_Eof ); } +inline bool ShpFile::at_eof() const { return At_Eof; } inline bool ShpFile::is_open() const { return ( fd >= 0 ); } diff --git a/src/libcode/vx_gnomon/Makefile.in b/src/libcode/vx_gnomon/Makefile.in index adec4f8fb1..62729f1c9f 100644 --- a/src/libcode/vx_gnomon/Makefile.in +++ b/src/libcode/vx_gnomon/Makefile.in @@ -234,6 +234,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = 
@MET_ECKITLIB@ diff --git a/src/libcode/vx_grid/Makefile.in b/src/libcode/vx_grid/Makefile.in index 99bb769bf1..c42467a57a 100644 --- a/src/libcode/vx_grid/Makefile.in +++ b/src/libcode/vx_grid/Makefile.in @@ -276,6 +276,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_grid/earth_rotation.cc b/src/libcode/vx_grid/earth_rotation.cc index 3da1347f23..a904fb9615 100644 --- a/src/libcode/vx_grid/earth_rotation.cc +++ b/src/libcode/vx_grid/earth_rotation.cc @@ -179,7 +179,13 @@ M23 = clat*clon; M33 = slat; */ -set_np(lat_center, lon_center, lon_center - 180.0); + // + // MET #2841 define the rotation by subtracting 90 degrees + // instead of 180 to define TCRMW grids as pointing east + // instead of north. + // + +set_np(lat_center, lon_center, lon_center - 90.0); // // diff --git a/src/libcode/vx_grid/goes_grid.cc b/src/libcode/vx_grid/goes_grid.cc index 58c5c143b0..296dc27a11 100644 --- a/src/libcode/vx_grid/goes_grid.cc +++ b/src/libcode/vx_grid/goes_grid.cc @@ -121,22 +121,21 @@ void GoesImagerGrid::latlon_to_xy(double lat, double lon, double & x_idx, double float sy = -rc*cos_clat*sin(del_lon_angle); float sz = rc*sin(c_lat); - // // check that point is on disk of the earth + // check that point is on disk of the earth if((Data.H*(Data.H - sx)) < (sy*sy + Data.radius_ratio2*sz*sz)) { x_idx = -1; y_idx = -1; return; } - float rl = sqrt((sx*sx + sy*sy + sz*sz)); - float xx = asin((-sy/rl)); - float yy = atan((sz/sx)); + float rl = sqrt(sx*sx + sy*sy + sz*sz); + float xx = asin(-sy/rl); + float yy = atan(sz/sx); x_idx = round((xx - Data.x_image_bounds[0])/Data.dx_rad); y_idx = round((Data.y_image_bounds[0] - yy)/Data.dy_rad); - // cerr << "lat: " << lat << " lon: " << lon << " ximage: " << xx << " yimage: " << yy << endl; return; } @@ -432,13 +431,13 @@ 
void GoesImagerData::compute_lat_lon() mlog << Error << method_name << " index=" << index << " too big than " << buf_len << "\n"; else { - if (std::isnan(lat_rad)) lat = bad_data_float; + if (std::isnan(lat_rad)) lat = bad_data_float; else { lat = lat_rad * deg_per_rad; if (lat > lat_max) {lat_max = lat; idx_lat_max = index; } if (lat < lat_min) {lat_min = lat; idx_lat_min = index; } } - if (std::isnan(lon_rad)) lon = bad_data_float; + if (std::isnan(lon_rad)) lon = bad_data_float; else { lon = lon_of_projection_origin - (lon_rad * deg_per_rad); if (lon > lon_max) {lon_max = lon; idx_lon_max = index; } diff --git a/src/libcode/vx_grid/goes_grid.h b/src/libcode/vx_grid/goes_grid.h index 039d75c638..b3157bfeac 100644 --- a/src/libcode/vx_grid/goes_grid.h +++ b/src/libcode/vx_grid/goes_grid.h @@ -80,7 +80,7 @@ class GoesImagerGrid : public GridRep { //////////////////////////////////////////////////////////////////////// -inline double GoesImagerGrid::scale_km() const { return ( 1.0 ); } +inline double GoesImagerGrid::scale_km() const { return 1.0; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_grid/laea_grid.cc b/src/libcode/vx_grid/laea_grid.cc index 6d59f25f6e..b14ddf159a 100644 --- a/src/libcode/vx_grid/laea_grid.cc +++ b/src/libcode/vx_grid/laea_grid.cc @@ -455,7 +455,7 @@ v = new double [n]; if ( !u || !v ) { mlog << Error << "\nLaeaGrid::xy_closedpolyline_area() -> " - << "memory allocation error\n\n"; + << "memory allocation error\n\n"; exit ( 1 ); diff --git a/src/libcode/vx_grid/laea_grid.h b/src/libcode/vx_grid/laea_grid.h index 3a9c92751a..ea1b78315a 100644 --- a/src/libcode/vx_grid/laea_grid.h +++ b/src/libcode/vx_grid/laea_grid.h @@ -144,9 +144,9 @@ class LaeaGrid : public GridRep { //////////////////////////////////////////////////////////////////////// -inline bool LaeaGrid::is_north () const { return ( true ); } -inline bool LaeaGrid::is_south () const { return ( false ); } -inline double 
LaeaGrid::scale_km() const { return ( -1.0 ); } +inline bool LaeaGrid::is_north () const { return true; } +inline bool LaeaGrid::is_south () const { return false; } +inline double LaeaGrid::scale_km() const { return -1.0; } inline void LaeaGrid::set_so2(double) { return; } diff --git a/src/libcode/vx_grid/latlon_grid.h b/src/libcode/vx_grid/latlon_grid.h index bcac543474..c4dd812bc2 100644 --- a/src/libcode/vx_grid/latlon_grid.h +++ b/src/libcode/vx_grid/latlon_grid.h @@ -89,8 +89,8 @@ class LatLonGrid : public GridRep { //////////////////////////////////////////////////////////////////////// -inline double LatLonGrid::scale_km() const { return ( -1.0 ); } -inline bool LatLonGrid::wrap_lon() const { return ( wrapLon ); } +inline double LatLonGrid::scale_km() const { return -1.0; } +inline bool LatLonGrid::wrap_lon() const { return wrapLon; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_grid/lc_grid.h b/src/libcode/vx_grid/lc_grid.h index 26134eab3d..be111dd40e 100644 --- a/src/libcode/vx_grid/lc_grid.h +++ b/src/libcode/vx_grid/lc_grid.h @@ -128,14 +128,14 @@ class LambertGrid : public GridRep { //////////////////////////////////////////////////////////////////////// -inline bool LambertGrid::is_north() const { return ( IsNorthHemisphere ); } -inline bool LambertGrid::is_south() const { return ( ! 
IsNorthHemisphere ); } +inline bool LambertGrid::is_north() const { return IsNorthHemisphere; } +inline bool LambertGrid::is_south() const { return !IsNorthHemisphere; } -inline double LambertGrid::scale_km() const { return ( Data.d_km ); } +inline double LambertGrid::scale_km() const { return Data.d_km; } -inline bool LambertGrid::has_so2() const { return ( Has_SO2 ); } +inline bool LambertGrid::has_so2() const { return Has_SO2; } -inline double LambertGrid::so2_angle() const { return ( SO2_Angle ); } +inline double LambertGrid::so2_angle() const { return SO2_Angle; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_grid/st_grid.cc b/src/libcode/vx_grid/st_grid.cc index 840a6a69f8..762f7d4968 100644 --- a/src/libcode/vx_grid/st_grid.cc +++ b/src/libcode/vx_grid/st_grid.cc @@ -791,7 +791,7 @@ double stereographic_alpha(double scale_lat, double r_km, double d_km) double alpha; -alpha = (1.0 + sind(fabs(scale_lat)))*((r_km)/(d_km)); +alpha = (1.0 + sind(fabs(scale_lat)))*(r_km/d_km); return alpha; diff --git a/src/libcode/vx_grid/st_grid.h b/src/libcode/vx_grid/st_grid.h index 3d23bfe514..8f251871a7 100644 --- a/src/libcode/vx_grid/st_grid.h +++ b/src/libcode/vx_grid/st_grid.h @@ -107,10 +107,10 @@ class StereographicGrid : public GridRep { //////////////////////////////////////////////////////////////////////// -inline bool StereographicGrid::is_north () const { return ( IsNorthHemisphere ); } -inline bool StereographicGrid::is_south () const { return ( ! 
IsNorthHemisphere ); } +inline bool StereographicGrid::is_north () const { return IsNorthHemisphere; } +inline bool StereographicGrid::is_south () const { return !IsNorthHemisphere; } -inline double StereographicGrid::scale_km () const { return ( Data.d_km ); } +inline double StereographicGrid::scale_km () const { return Data.d_km; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_grid/tcrmw_grid.cc b/src/libcode/vx_grid/tcrmw_grid.cc index a2e1738e1b..e695e1c49d 100644 --- a/src/libcode/vx_grid/tcrmw_grid.cc +++ b/src/libcode/vx_grid/tcrmw_grid.cc @@ -136,7 +136,7 @@ Ir.set_xyz(1.0, 0.0, 0.0); Jr.set_xyz(0.0, 1.0, 0.0); Kr.set_xyz(0.0, 0.0, 1.0); -Range_n = 0; +Range_n = 0; Azimuth_n = 0; Range_max_km = 0.0; @@ -288,6 +288,7 @@ y = (lat_rot - RData.rot_lat_ll)/(RData.delta_rot_lat); x = lon_rot/(RData.delta_rot_lon); +x = Nx - x; // MET #2841 switch from counterclockwise to clockwise RotatedLatLonGrid::xy_to_latlon(x, y, lat, lon); @@ -310,6 +311,8 @@ const double range_max_deg = deg_per_km*Range_max_km; RotatedLatLonGrid::latlon_to_xy(lat, lon, x, y); +x = Nx - x; // MET #2841 switch from counterclockwise to clockwise + azi_deg = x*(RData.delta_rot_lon); range_deg = range_max_deg - y*(RData.delta_rot_lat); @@ -324,54 +327,23 @@ return; //////////////////////////////////////////////////////////////////////// -void TcrmwGrid::wind_ne_to_ra (const double lat, const double lon, - const double east_component, const double north_component, - double & radial_component, double & azimuthal_component) const +void TcrmwGrid::wind_ne_to_rt (const double azi_deg, + const double u_wind, const double v_wind, + double & radial_wind, double & tangential_wind) const { -Vector E, N, V; -Vector B_range, B_azi; -double azi_deg, range_deg, range_km; - - -latlon_to_range_azi(lat, lon, range_km, azi_deg); - -range_deg = deg_per_km*range_km; - -E = latlon_to_east (lat, lon); -N = latlon_to_north (lat, lon); - -V = east_component*E + 
north_component*N; - - -range_azi_to_basis(range_deg, azi_deg, B_range, B_azi); - - - - radial_component = dot(V, B_range); - -azimuthal_component = dot(V, B_azi); - - - - - -return; +double rcos = cosd(azi_deg); +double rsin = sind(azi_deg); +if (is_bad_data(u_wind) || is_bad_data(v_wind)) { + radial_wind = bad_data_double; + tangential_wind = bad_data_double; +} +else { + radial_wind = rcos*u_wind + rsin*v_wind; + tangential_wind = -1.0*rsin*u_wind + rcos*v_wind; } - - -//////////////////////////////////////////////////////////////////////// - - -void TcrmwGrid::wind_ne_to_ra_conventional (const double lat, const double lon, - const double east_component, const double north_component, - double & radial_component, double & azimuthal_component) const - -{ - -wind_ne_to_ra(lat, lon, east_component, north_component, radial_component, azimuthal_component); return; @@ -381,33 +353,17 @@ return; //////////////////////////////////////////////////////////////////////// -void TcrmwGrid::range_azi_to_basis(const double range_deg, const double azi_deg, Vector & B_range, Vector & B_azi) const +void TcrmwGrid::wind_ne_to_rt (const double lat, const double lon, + const double u_wind, const double v_wind, + double & radial_wind, double & tangential_wind) const { -double u, v, w; - - -u = cosd(range_deg)*sind(azi_deg); - -v = cosd(range_deg)*cosd(azi_deg); - -w = -sind(range_deg); - - - -B_range = u*Ir + v*Jr + w*Kr; - +double range_km, azi_deg; -u = cosd(azi_deg); - -v = -sind(azi_deg); - -w = 0.0; - - -B_azi = u*Ir + v*Jr + w*Kr; +latlon_to_range_azi(lat, lon, range_km, azi_deg); +wind_ne_to_rt(azi_deg, u_wind, v_wind, radial_wind, tangential_wind); return; @@ -425,8 +381,9 @@ RotatedLatLonGrid::latlon_to_xy(true_lat, true_lon, x, y); x -= Nx*floor(x/Nx); -x -= Nx*floor(x/Nx); +x = Nx - x; // MET #2841 switch from counterclockwise to clockwise +y -= Ny*floor(y/Ny); return; @@ -442,7 +399,9 @@ void TcrmwGrid::xy_to_latlon(double x, double y, double & true_lat, double & tru x -= 
Nx*floor(x/Nx); -x -= Nx*floor(x/Nx); +x = Nx - x; // MET #2841 switch from counterclockwise to clockwise + +y -= Ny*floor(y/Ny); RotatedLatLonGrid::xy_to_latlon(x, y, true_lat, true_lon); @@ -500,7 +459,3 @@ return; //////////////////////////////////////////////////////////////////////// - - - - diff --git a/src/libcode/vx_grid/tcrmw_grid.h b/src/libcode/vx_grid/tcrmw_grid.h index 8af34d9261..ee45b5e228 100644 --- a/src/libcode/vx_grid/tcrmw_grid.h +++ b/src/libcode/vx_grid/tcrmw_grid.h @@ -35,11 +35,8 @@ class TcrmwGrid : public RotatedLatLonGrid { void calc_ijk(); // calculate rotated basis vectors - void range_azi_to_basis(const double range_deg, const double azi_deg, Vector & B_range, Vector & B_azi) const; - TcrmwData TData; - Vector Ir, Jr, Kr; int Range_n, Azimuth_n; // # of points in the radial and azimuthal directions @@ -89,40 +86,32 @@ class TcrmwGrid : public RotatedLatLonGrid { void xy_to_latlon(double x, double y, double & true_lat, double & true_lon) const; + void wind_ne_to_rt(const double azi_deg, + const double u_wind, const double v_wind, + double & radial_wind, double & tangential_wind) const; + void wind_ne_to_rt(const double lat, const double lon, + const double u_wind, const double v_wind, + double & radial_wind, double & tangential_wind) const; - void wind_ne_to_ra(const double lat, const double lon, - const double east_component, const double north_component, - double & radial_component, double & azimuthal_component) const; - - - // - // possibly toggles the signs of the radial and/or azimuthal components - // - // to align with the conventions used in the TC community - // - void wind_ne_to_ra_conventional (const double lat, const double lon, - const double east_component, const double north_component, - double & radial_component, double & azimuthal_component) const; - }; //////////////////////////////////////////////////////////////////////// -inline int TcrmwGrid::range_n () const { return ( Range_n ); } -inline int TcrmwGrid::azimuth_n 
() const { return ( Azimuth_n ); } +inline int TcrmwGrid::range_n () const { return Range_n; } +inline int TcrmwGrid::azimuth_n () const { return Azimuth_n; } -inline double TcrmwGrid::range_max_km () const { return ( Range_max_km ); } +inline double TcrmwGrid::range_max_km () const { return Range_max_km; } -inline double TcrmwGrid::range_delta_km () const { return ( Range_max_km/(Range_n - 1) ); } +inline double TcrmwGrid::range_delta_km () const { return Range_max_km/(Range_n - 1); } -inline double TcrmwGrid::azimuth_delta_deg () const { return ( 360.0/Azimuth_n ); } +inline double TcrmwGrid::azimuth_delta_deg () const { return 360.0/Azimuth_n; } -inline double TcrmwGrid::lat_center_deg () const { return ( Lat_Center_Deg ); } -inline double TcrmwGrid::lon_center_deg () const { return ( Lon_Center_Deg ); } +inline double TcrmwGrid::lat_center_deg () const { return Lat_Center_Deg; } +inline double TcrmwGrid::lon_center_deg () const { return Lon_Center_Deg; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_grid/unstructured_grid.cc b/src/libcode/vx_grid/unstructured_grid.cc index d1a56ff9f0..98cf8bccb9 100644 --- a/src/libcode/vx_grid/unstructured_grid.cc +++ b/src/libcode/vx_grid/unstructured_grid.cc @@ -421,7 +421,7 @@ void UnstructuredData::set_points(int count, const std::vector &ptL n_face = count; point_lonlat.reserve(count); for (int i=0; i= UGRID_DEBUG_LEVEL) mlog << Debug(UGRID_DEBUG_LEVEL) << "UnstructuredData::set_points(int, std::vector &) first: (" diff --git a/src/libcode/vx_grid/unstructured_grid.h b/src/libcode/vx_grid/unstructured_grid.h index d3c0d40c66..3f46913472 100644 --- a/src/libcode/vx_grid/unstructured_grid.h +++ b/src/libcode/vx_grid/unstructured_grid.h @@ -85,8 +85,8 @@ class UnstructuredGrid : public GridRep { //////////////////////////////////////////////////////////////////////// -inline double UnstructuredGrid::scale_km() const { return ( -1.0 ); } -inline bool 
UnstructuredGrid::wrap_lon() const { return ( wrapLon ); } +inline double UnstructuredGrid::scale_km() const { return -1.0; } +inline bool UnstructuredGrid::wrap_lon() const { return wrapLon; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_gsl_prob/Makefile.in b/src/libcode/vx_gsl_prob/Makefile.in index e3ca39e845..298ff0f95d 100644 --- a/src/libcode/vx_gsl_prob/Makefile.in +++ b/src/libcode/vx_gsl_prob/Makefile.in @@ -242,6 +242,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_gsl_prob/gsl_cdf.cc b/src/libcode/vx_gsl_prob/gsl_cdf.cc index 414ad355e0..6b2cfb8f17 100644 --- a/src/libcode/vx_gsl_prob/gsl_cdf.cc +++ b/src/libcode/vx_gsl_prob/gsl_cdf.cc @@ -259,7 +259,7 @@ bot = gsl_ran_fdist_pdf(x, deg_freedom_1, deg_freedom_2); x_new = x - (top/bot); -return ( x_new ); +return x_new; } diff --git a/src/libcode/vx_gsl_prob/gsl_randist.cc b/src/libcode/vx_gsl_prob/gsl_randist.cc index 956e912727..755ce1f4c3 100644 --- a/src/libcode/vx_gsl_prob/gsl_randist.cc +++ b/src/libcode/vx_gsl_prob/gsl_randist.cc @@ -219,31 +219,31 @@ double ran_draw(const gsl_rng *r, DistType t, double p1, double p2) { // Switch on the distribution type switch(t) { - case(DistType::Normal): + case DistType::Normal: v = gsl_ran_gaussian(r, p1); break; - case(DistType::Exponential): + case DistType::Exponential: v = gsl_ran_exponential(r, p1); break; - case(DistType::ChiSquared): + case DistType::ChiSquared: v = gsl_ran_chisq(r, p1); break; - case(DistType::Gamma): + case DistType::Gamma: v = gsl_ran_gamma(r, p1, p2); break; - case(DistType::Uniform): + case DistType::Uniform: v = gsl_ran_flat(r, p1, p2); break; - case(DistType::Beta): + case DistType::Beta: v = gsl_ran_beta(r, p1, p2); break; - case(DistType::None): + case 
DistType::None: default: v = 0.0; break; @@ -269,31 +269,31 @@ double dist_var(DistType t, double p1, double p2) { // Switch on the distribution type switch(t) { - case(DistType::Normal): + case DistType::Normal: v = p1*p1; break; - case(DistType::Exponential): + case DistType::Exponential: v = 1.0 / (p1*p1); break; - case(DistType::ChiSquared): + case DistType::ChiSquared: v = 2*p1; break; - case(DistType::Gamma): + case DistType::Gamma: v = p1 / (p2*p2); break; - case(DistType::Uniform): + case DistType::Uniform: v = ((p2-p1)*(p2-p1)) / 12.0; break; - case(DistType::Beta): + case DistType::Beta: v = (p1*p2) / ((p1+p2)*(p1+p2)*(p1+p2+1.0)); break; - case(DistType::None): + case DistType::None: default: v = 0.0; break; @@ -312,7 +312,7 @@ int get_seed() { curr_time = time(nullptr); // Swap the first and fourth bytes and the second and third bytes - u = (unsigned char *) &(curr_time); + u = (unsigned char *) &curr_time; t = u[0]; u[0] = u[3]; u[3] = t; diff --git a/src/libcode/vx_gsl_prob/gsl_wavelet2d.cc b/src/libcode/vx_gsl_prob/gsl_wavelet2d.cc index b13af92956..eee5c46905 100644 --- a/src/libcode/vx_gsl_prob/gsl_wavelet2d.cc +++ b/src/libcode/vx_gsl_prob/gsl_wavelet2d.cc @@ -89,7 +89,7 @@ gsl_wavelet_workspace * wavelet_workspace_set(int n) { exit(1); } - return(work); + return work; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_nav/Makefile.in b/src/libcode/vx_nav/Makefile.in index e91a581ff9..75018bdc9b 100644 --- a/src/libcode/vx_nav/Makefile.in +++ b/src/libcode/vx_nav/Makefile.in @@ -234,6 +234,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_nav/nav.cc b/src/libcode/vx_nav/nav.cc index 6109be7192..3ebcfe2596 100644 --- a/src/libcode/vx_nav/nav.cc +++ b/src/libcode/vx_nav/nav.cc @@ 
-514,7 +514,7 @@ else { } -lon += twopi*floor( 0.5 - ((lon)/twopi) ); +lon += twopi*floor( 0.5 - (lon/twopi) ); lon = lon * deg_per_rad; //lon *= cf; diff --git a/src/libcode/vx_nc_obs/Makefile.in b/src/libcode/vx_nc_obs/Makefile.in index b88b793888..709b4c356d 100644 --- a/src/libcode/vx_nc_obs/Makefile.in +++ b/src/libcode/vx_nc_obs/Makefile.in @@ -244,6 +244,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_nc_obs/met_point_data.cc b/src/libcode/vx_nc_obs/met_point_data.cc index 220e7f86cf..7f3264e434 100644 --- a/src/libcode/vx_nc_obs/met_point_data.cc +++ b/src/libcode/vx_nc_obs/met_point_data.cc @@ -59,7 +59,6 @@ void MetPointData::init_from_scratch() { use_arr_vars = false; } - //////////////////////////////////////////////////////////////////////// void MetPointData::clear() { @@ -127,7 +126,6 @@ bool MetPointData::get_lons(float *hdr_lons) { return true; } - //////////////////////////////////////////////////////////////////////// bool MetPointData::is_same_obs_values(const float obs_arr1[OBS_ARRAY_LEN], @@ -150,17 +148,10 @@ void MetPointData::set_obs_cnt(int obs_cnt) { obs_data->obs_cnt = obs_cnt; } - - - //////////////////////////////////////////////////////////////////////// - - - // - // Code for class MetPointDataPython - // - - +// +// Code for class MetPointDataPython +// //////////////////////////////////////////////////////////////////////// MetPointDataPython::MetPointDataPython() { @@ -178,14 +169,12 @@ MetPointDataPython::MetPointDataPython(MetPointDataPython &d) { header_data.assign(*d.get_header_data()); } - //////////////////////////////////////////////////////////////////////// MetPointDataPython::~MetPointDataPython() { clear(); } - //////////////////////////////////////////////////////////////////////// void 
MetPointDataPython::allocate(int obs_cnt) { @@ -193,63 +182,47 @@ void MetPointDataPython::allocate(int obs_cnt) { obs_data->allocate(); } -//////////////////////////////////////////////////////////////////////// - - - /////////////////////////////////////////////////////////////////////////////// -// struct MetPointObsData - -MetPointObsData::MetPointObsData(): - obs_cnt(0), - obs_ids((int *)0), - obs_hids((int *)0), - obs_qids((int *)0), - obs_lvls((float *)0), - obs_hgts((float *)0), - obs_vals((float *)0), - obs_arr((float *)0), - is_obs_array(false) -{ -} +// +// Code for struct MetPointObsData +// +/////////////////////////////////////////////////////////////////////////////// +MetPointObsData::MetPointObsData() { + clear(); +} /////////////////////////////////////////////////////////////////////////////// void MetPointObsData::allocate() { - if (is_obs_array) obs_arr = new float[obs_cnt*OBS_ARRAY_LEN]; // nobs * 5 + if (is_obs_array) { + obs_arr.resize(obs_cnt*OBS_ARRAY_LEN, bad_data_float); // nobs * 5 + } else { - obs_ids = new int[obs_cnt]; // grib_code or var_id - obs_hids = new int[obs_cnt]; - obs_qids = new int[obs_cnt]; - obs_lvls = new float[obs_cnt]; - obs_hgts = new float[obs_cnt]; - obs_vals = new float[obs_cnt]; + obs_ids.resize(obs_cnt, bad_data_int); // grib_code or var_id + obs_hids.resize(obs_cnt, bad_data_int); + obs_qids.resize(obs_cnt, bad_data_int); + obs_lvls.resize(obs_cnt, bad_data_float); + obs_hgts.resize(obs_cnt, bad_data_float); + obs_vals.resize(obs_cnt, bad_data_float); } } /////////////////////////////////////////////////////////////////////////////// void MetPointObsData::assign(MetPointObsData &o) { + clear(); obs_cnt = o.obs_cnt; is_obs_array = o.is_obs_array; - - clear(); - allocate(); - if (is_obs_array) - for (int idx=0; idx= obs_cnt) { + mlog << Error << "\n" << method_name + << "index value (" << index << ") out of range for " + << obs_cnt << " observations.\n\n"; + exit(1); + } + if(obs_qids[index] < 0 || 
obs_qids[index] >= qty_names.n()) { + mlog << Error << "\n" << method_name + << "observation quality index (" << obs_qids[index] + << ") out of range for " << qty_names.n() + << " quality strings.\n\n"; + exit(1); + } + + return qty_names[(obs_qids[index])]; +} + +/////////////////////////////////////////////////////////////////////////////// + // struct MetPointHeader MetPointHeader::MetPointHeader() diff --git a/src/libcode/vx_nc_obs/met_point_data.h b/src/libcode/vx_nc_obs/met_point_data.h index 939c63ba35..83ba31e729 100644 --- a/src/libcode/vx_nc_obs/met_point_data.h +++ b/src/libcode/vx_nc_obs/met_point_data.h @@ -18,6 +18,7 @@ #include +#include #include "nc_utils.h" @@ -61,13 +62,13 @@ struct MetPointObsData { int obs_cnt; bool is_obs_array; - int *obs_ids; // grib_code or var_id - int *obs_hids; - int *obs_qids; - float *obs_lvls; - float *obs_hgts; - float *obs_vals; - float *obs_arr; // nobs * 5 + std::vector obs_ids; // grib_code or var_id + std::vector obs_hids; + std::vector obs_qids; + std::vector obs_lvls; + std::vector obs_hgts; + std::vector obs_vals; + std::vector obs_arr; // nobs * 5 StringArray var_names; StringArray qty_names; @@ -78,7 +79,8 @@ struct MetPointObsData { void clear_numbers(); void clear_strings(); bool fill_obs_buf(int buf_size, int offset, float *obs_arr, int *qty_idx_arr); - float get_obs_val(int index); + float get_obs_val(int index) const; + std::string get_obs_qty(int index) const; }; diff --git a/src/libcode/vx_nc_obs/nc_obs_util.cc b/src/libcode/vx_nc_obs/nc_obs_util.cc index 0b6fa731e3..712c3d90d1 100644 --- a/src/libcode/vx_nc_obs/nc_obs_util.cc +++ b/src/libcode/vx_nc_obs/nc_obs_util.cc @@ -87,12 +87,15 @@ bool NcPointObsData::read_obs_data_numbers(NetcdfObsVars obs_vars, bool stop) { clear_numbers(); obs_cnt = obs_vars.obs_cnt; + if (!IS_INVALID_NC(obs_vars.obs_arr_var)) is_obs_array = true; + + // Resize arrays for input data + allocate(); + StringArray missing_vars; StringArray failed_vars; - if 
(!IS_INVALID_NC(obs_vars.obs_arr_var)) { - is_obs_array = true; - obs_arr = new float[obs_cnt*OBS_ARRAY_LEN]; - if (!get_nc_data(&obs_vars.obs_arr_var, obs_arr)) { + if(is_obs_array) { + if (!get_nc_data(&obs_vars.obs_arr_var, obs_arr.data())) { succeed = false; failed_vars.add(nc_var_obs_arr); } @@ -103,8 +106,7 @@ bool NcPointObsData::read_obs_data_numbers(NetcdfObsVars obs_vars, bool stop) { missing_vars.add(nc_var_obs_hid); } else { - obs_hids = new int[obs_cnt]; - if (!get_nc_data(&obs_vars.obs_hid_var, obs_hids)) { + if (!get_nc_data(&obs_vars.obs_hid_var, obs_hids.data())) { succeed = false; failed_vars.add(nc_var_obs_hid); } @@ -114,8 +116,7 @@ bool NcPointObsData::read_obs_data_numbers(NetcdfObsVars obs_vars, bool stop) { missing_vars.add(nc_var_obs_lvl); } else { - obs_lvls = new float[obs_cnt]; - if (!get_nc_data(&obs_vars.obs_lvl_var, obs_lvls)) { + if (!get_nc_data(&obs_vars.obs_lvl_var, obs_lvls.data())) { succeed = false; failed_vars.add(nc_var_obs_lvl); } @@ -125,8 +126,7 @@ bool NcPointObsData::read_obs_data_numbers(NetcdfObsVars obs_vars, bool stop) { missing_vars.add(nc_var_obs_hgt); } else { - obs_hgts = new float[obs_cnt]; - if (!get_nc_data(&obs_vars.obs_hgt_var, obs_hgts)) { + if (!get_nc_data(&obs_vars.obs_hgt_var, obs_hgts.data())) { succeed = false; failed_vars.add(nc_var_obs_hgt); } @@ -136,28 +136,34 @@ bool NcPointObsData::read_obs_data_numbers(NetcdfObsVars obs_vars, bool stop) { missing_vars.add(nc_var_obs_val); } else { - obs_vals = new float[obs_cnt]; - if (!get_nc_data(&obs_vars.obs_val_var, obs_vals)) { + if (!get_nc_data(&obs_vars.obs_val_var, obs_vals.data())) { succeed = false; failed_vars.add(nc_var_obs_val); } } if (IS_VALID_NC(obs_vars.obs_gc_var)) { - obs_ids = new int[obs_cnt]; - if (!get_nc_data(&obs_vars.obs_gc_var, obs_ids)) { + if (!get_nc_data(&obs_vars.obs_gc_var, obs_ids.data())) { succeed = false; failed_vars.add(nc_var_obs_gc); } } else if (IS_VALID_NC(obs_vars.obs_vid_var)) { - obs_ids = new int[obs_cnt]; - if 
(!get_nc_data(&obs_vars.obs_vid_var, obs_ids)) { + if (!get_nc_data(&obs_vars.obs_vid_var, obs_ids.data())) { succeed = false; failed_vars.add(nc_var_obs_vid); } } else succeed = false; - + if (IS_INVALID_NC(obs_vars.obs_qty_var)) { + succeed = false; + missing_vars.add(nc_var_obs_qty); + } + else { + if (!get_nc_data(&obs_vars.obs_qty_var, obs_qids.data())) { + succeed = false; + failed_vars.add(nc_var_obs_qty); + } + } } for (int idx=0; idx NC_BUFFER_SIZE_32K) - ? NC_BUFFER_SIZE_32K : (nhdr_count)); + ? NC_BUFFER_SIZE_32K : nhdr_count); // // Allocate space to store the data @@ -597,12 +603,12 @@ void NetcdfObsVars::read_header_data(MetPointHeader &hdr_data) { char hdr_typ_block[buf_size][typ_len]; char hdr_sid_block[buf_size][sid_len]; char hdr_vld_block[buf_size][vld_len]; - int *hdr_typ_idx_block = new int[buf_size]; - int *hdr_sid_idx_block = new int[buf_size]; - int *hdr_vld_idx_block = new int[buf_size]; - float *hdr_lat_block = new float[buf_size]; - float *hdr_lon_block = new float[buf_size]; - float *hdr_elv_block = new float[buf_size]; + vector hdr_typ_idx_block(buf_size); + vector hdr_sid_idx_block(buf_size); + vector hdr_vld_idx_block(buf_size); + vector hdr_lat_block(buf_size); + vector hdr_lon_block(buf_size); + vector hdr_elv_block(buf_size); LongArray offsets; // = { 0, 0 }; LongArray lengths; // = { 1, 1 }; @@ -679,7 +685,7 @@ void NetcdfObsVars::read_header_data(MetPointHeader &hdr_data) { else { // Get the corresponding header message type (index, not string) if(!get_nc_data(&hdr_typ_var, - hdr_typ_idx_block, lengths_1D, offsets_1D)) { + hdr_typ_idx_block.data(), lengths_1D, offsets_1D)) { mlog << Error << "\n" << method_name << "trouble getting hdr_typ\n\n"; exit(1); @@ -687,7 +693,7 @@ void NetcdfObsVars::read_header_data(MetPointHeader &hdr_data) { // Get the corresponding header station id (index, not string) if(!get_nc_data(&hdr_sid_var, - hdr_sid_idx_block, lengths_1D, offsets_1D)) { + hdr_sid_idx_block.data(), lengths_1D, offsets_1D)) { 
mlog << Error << "\n" << method_name << "trouble getting hdr_sid\n\n"; exit(1); @@ -695,7 +701,7 @@ void NetcdfObsVars::read_header_data(MetPointHeader &hdr_data) { // Get the corresponding header valid time (index, not string) if(!get_nc_data(&hdr_vld_var, - hdr_vld_idx_block, lengths_1D, offsets_1D)) { + hdr_vld_idx_block.data(), lengths_1D, offsets_1D)) { mlog << Error << "\n" << method_name << "trouble getting hdr_vld\n\n"; exit(1); @@ -705,19 +711,19 @@ void NetcdfObsVars::read_header_data(MetPointHeader &hdr_data) { // Get the header for this observation // if(!get_nc_data(&hdr_lat_var, - hdr_lat_block, lengths_1D, offsets_1D)) { + hdr_lat_block.data(), lengths_1D, offsets_1D)) { mlog << Error << "\n" << method_name << "trouble getting hdr_lat\n\n"; exit(1); } if(!get_nc_data(&hdr_lon_var, - hdr_lon_block, lengths_1D, offsets_1D)) { + hdr_lon_block.data(), lengths_1D, offsets_1D)) { mlog << Error << "\n" << method_name << "trouble getting hdr_lon\n\n"; exit(1); } if(!get_nc_data(&hdr_elv_var, - hdr_elv_block, lengths_1D, offsets_1D)) { + hdr_elv_block.data(), lengths_1D, offsets_1D)) { mlog << Error << "\n" << method_name << "trouble getting hdr_elv\n\n"; exit(1); @@ -733,13 +739,6 @@ void NetcdfObsVars::read_header_data(MetPointHeader &hdr_data) { } } - delete[] hdr_typ_idx_block; - delete[] hdr_sid_idx_block; - delete[] hdr_vld_idx_block; - delete[] hdr_lat_block; - delete[] hdr_lon_block; - delete[] hdr_elv_block; - if (!has_array_vars) { int tmp_dim_size; @@ -854,40 +853,40 @@ bool NetcdfObsVars::read_obs_data(int buf_size, int offset, } } else { - int *obs_hid_buf = new int[buf_size]; - int *obs_vid_buf = new int[buf_size]; - float *obs_lvl_buf = new float[buf_size]; - float *obs_hgt_buf = new float[buf_size]; - float *obs_val_buf = new float[buf_size]; + vector obs_hid_buf(buf_size); + vector obs_vid_buf(buf_size); + vector obs_lvl_buf(buf_size); + vector obs_hgt_buf(buf_size); + vector obs_val_buf(buf_size); lengths[1] = 1; - 
if(!get_nc_data(&obs_hid_var, obs_hid_buf, lengths, offsets)) { + if(!get_nc_data(&obs_hid_var, obs_hid_buf.data(), lengths, offsets)) { mlog << Error << "\n" << method_name << "can't read the record for observation " << "index " << offset << "\n\n"; result = false; } if(!get_nc_data((IS_INVALID_NC(obs_gc_var) ? &obs_vid_var : &obs_gc_var), - obs_vid_buf, lengths, offsets)) { + obs_vid_buf.data(), lengths, offsets)) { mlog << Error << "\n" << method_name << "can't read the record (vid or gc) for observation " << "index " << offset << "\n\n"; result = false; } - if(!get_nc_data(&obs_lvl_var, obs_lvl_buf, lengths, offsets)) { + if(!get_nc_data(&obs_lvl_var, obs_lvl_buf.data(), lengths, offsets)) { mlog << Error << "\n" << method_name << "can't read the record (lvl) for observation " << "index " << offset << "\n\n"; result = false; } - if(!get_nc_data(&obs_hgt_var, obs_hgt_buf, lengths, offsets)) { + if(!get_nc_data(&obs_hgt_var, obs_hgt_buf.data(), lengths, offsets)) { mlog << Error << "\n" << method_name << "can't read the record (hgt) for observation " << "index " << offset << "\n\n"; result = false; } - if(!get_nc_data(&obs_val_var, obs_val_buf, lengths, offsets)) { + if(!get_nc_data(&obs_val_var, obs_val_buf.data(), lengths, offsets)) { mlog << Error << "\n" << method_name << "can't read the record (val) for observation " << "index " << offset << "\n\n"; @@ -912,11 +911,6 @@ bool NetcdfObsVars::read_obs_data(int buf_size, int offset, } } - delete[] obs_hid_buf; - delete[] obs_vid_buf; - delete[] obs_lvl_buf; - delete[] obs_hgt_buf; - delete[] obs_val_buf; } return result; } @@ -948,10 +942,10 @@ void NetcdfObsVars::read_pb_hdr_data(MetPointHeader &hdr_data) { // Read PB report type int buf_size = ((pb_hdr_count > NC_BUFFER_SIZE_32K) - ? NC_BUFFER_SIZE_32K : (pb_hdr_count)); - int *hdr_prpt_typ_block = new int[buf_size]; - int *hdr_irpt_typ_block = new int[buf_size]; - int *hdr_inst_typ_block = new int[buf_size]; + ? 
NC_BUFFER_SIZE_32K : pb_hdr_count); + vector hdr_prpt_typ_block(buf_size); + vector hdr_irpt_typ_block(buf_size); + vector hdr_inst_typ_block(buf_size); for(int i_start=0; i_start NC_BUFFER_SIZE_32K) buf_size2 = NC_BUFFER_SIZE_32K; @@ -961,7 +955,7 @@ void NetcdfObsVars::read_pb_hdr_data(MetPointHeader &hdr_data) { if (has_hdr_prpt_typ_var) { // Get the corresponding header PB message type (string) if(!get_nc_data(&hdr_prpt_typ_var, - hdr_prpt_typ_block, lengths, offsets)) { + hdr_prpt_typ_block.data(), lengths, offsets)) { mlog << Error << "\n" << method_name << "trouble getting hdr_prpt_typ\n\n"; exit(1); @@ -971,7 +965,7 @@ void NetcdfObsVars::read_pb_hdr_data(MetPointHeader &hdr_data) { if (has_hdr_irpt_typ_var) { // Get the corresponding header In message type (string) if(!get_nc_data(&hdr_irpt_typ_var, - hdr_irpt_typ_block, lengths, offsets)) { + hdr_irpt_typ_block.data(), lengths, offsets)) { mlog << Error << "\n" << method_name << "trouble getting hdr_irpt_typ\n\n"; exit(1); @@ -981,7 +975,7 @@ void NetcdfObsVars::read_pb_hdr_data(MetPointHeader &hdr_data) { if (has_hdr_inst_typ_var) { // Get the corresponding header instrument type (string) if(!get_nc_data(&hdr_inst_typ_var, - hdr_inst_typ_block, lengths, offsets)) { + hdr_inst_typ_block.data(), lengths, offsets)) { mlog << Error << "\n" << method_name << "trouble getting hdr_inst_typ\n\n"; exit(1); @@ -995,10 +989,6 @@ void NetcdfObsVars::read_pb_hdr_data(MetPointHeader &hdr_data) { } } - delete[] hdr_prpt_typ_block; - delete[] hdr_irpt_typ_block; - delete[] hdr_inst_typ_block; - } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_nc_util/Makefile.am b/src/libcode/vx_nc_util/Makefile.am index a4e8499352..108ce97639 100644 --- a/src/libcode/vx_nc_util/Makefile.am +++ b/src/libcode/vx_nc_util/Makefile.am @@ -13,7 +13,7 @@ include ${top_srcdir}/Make-include noinst_LIBRARIES = libvx_nc_util.a libvx_nc_util_a_SOURCES = \ nc_var_info.cc nc_var_info.h \ - 
nc_utils.cc nc_utils.h nc_utils.hpp \ + nc_utils.cc nc_utils.h nc_utils_core.h nc_utils.hpp \ write_netcdf.cc write_netcdf.h \ grid_output.cc grid_output.h \ load_tc_data.cc load_tc_data.h \ diff --git a/src/libcode/vx_nc_util/Makefile.in b/src/libcode/vx_nc_util/Makefile.in index 9843b5647b..8079c5d628 100644 --- a/src/libcode/vx_nc_util/Makefile.in +++ b/src/libcode/vx_nc_util/Makefile.in @@ -242,6 +242,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ @@ -353,7 +354,7 @@ MAINTAINERCLEANFILES = Makefile.in noinst_LIBRARIES = libvx_nc_util.a libvx_nc_util_a_SOURCES = \ nc_var_info.cc nc_var_info.h \ - nc_utils.cc nc_utils.h nc_utils.hpp \ + nc_utils.cc nc_utils.h nc_utils_core.h nc_utils.hpp \ write_netcdf.cc write_netcdf.h \ grid_output.cc grid_output.h \ load_tc_data.cc load_tc_data.h \ diff --git a/src/libcode/vx_nc_util/nc_constants.h b/src/libcode/vx_nc_util/nc_constants.h index c308d0c484..28d8909321 100644 --- a/src/libcode/vx_nc_util/nc_constants.h +++ b/src/libcode/vx_nc_util/nc_constants.h @@ -29,10 +29,10 @@ // NetCDF keywords -static const char * const CONFIG_NetCDF_Dimension = "NetCDF_Dimension"; +constexpr char CONFIG_NetCDF_Dimension[] = "NetCDF_Dimension"; // Flag value used to indicate a range of values within a dimension -static const int range_flag = bad_data_int; +constexpr int range_flag = bad_data_int; //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_nc_util/nc_utils.cc b/src/libcode/vx_nc_util/nc_utils.cc index 87a46d9391..36b71cd509 100644 --- a/src/libcode/vx_nc_util/nc_utils.cc +++ b/src/libcode/vx_nc_util/nc_utils.cc @@ -240,10 +240,9 @@ double get_att_value_double(const NcAtt *att) { void get_att_value_doubles(const NcAtt *att, NumArray &value) { value.erase(); - double *values = new 
double[att->getAttLength()]; - att->getValues(values); + vector values(att->getAttLength()); + att->getValues(values.data()); for(unsigned int i=0; i<=att->getAttLength(); i++) value.add(values[i]); - if(values) { delete [] values; values = 0; } return; } @@ -537,6 +536,16 @@ bool get_nc_att_value(const NcVar *var, const ConcatString &att_name, //////////////////////////////////////////////////////////////////////// +bool get_nc_att_values(const NcVar *var, const ConcatString &att_name, + unsigned short *att_val, bool exit_on_error) { + static const char *method_name = "get_nc_att_value(NcVar,float) -> "; + bool status = get_nc_att_values_(var, att_name, att_val, exit_on_error, + method_name); + return status; +} + +//////////////////////////////////////////////////////////////////////// + bool get_nc_att_value(const NcVarAtt *att, ConcatString &att_val) { bool status = false; @@ -877,14 +886,12 @@ void add_att(NcVar *var, const string &att_name, const double att_val) { int get_var_names(NcFile *nc, StringArray *var_names) { - NcVar var; int i = 0; int var_count = nc->getVarCount(); multimap mapVar = GET_NC_VARS_P(nc); - for (multimap::iterator it_var = mapVar.begin(); - it_var != mapVar.end(); ++it_var) { - var = (*it_var).second; + for (auto &kv : mapVar) { + NcVar var = kv.second; var_names->add(var.getName()); i++; } @@ -1389,8 +1396,8 @@ bool get_nc_data(NcVar *var, float *data) { switch ( type_id ) { case NcType::nc_DOUBLE: { - double *packed_data = new double[cell_count]; - if (get_nc_data_t(var, packed_data)) { + vector packed_data(cell_count); + if (get_nc_data_t(var, packed_data.data())) { double fill_value; bool has_fill_value = get_var_fill_value(var, fill_value); for (int idx=0; idx packed_data(cell_count, (long long)bad_data_int); - var->getVar(packed_data); - copy_nc_data_(var, data, packed_data, cell_count, + var->getVar(packed_data.data()); + copy_nc_data_(var, data, packed_data.data(), cell_count, "int64", add_offset, scale_factor); - delete [] 
packed_data; } break; case NcType::nc_INT: { - int *packed_data = new int[cell_count]; + vector packed_data(cell_count); - var->getVar(packed_data); - copy_nc_data_(var, data, packed_data, cell_count, + var->getVar(packed_data.data()); + copy_nc_data_(var, data, packed_data.data(), cell_count, "int", add_offset, scale_factor); - delete [] packed_data; } break; case NcType::nc_SHORT: { short missing_value; bool has_missing = get_var_fill_value(var, missing_value); - short *packed_data = new short[cell_count]; + vector packed_data(cell_count); - var->getVar(packed_data); + var->getVar(packed_data.data()); if (unsigned_value) { - unsigned short *ushort_data = new unsigned short[cell_count]; + vector ushort_data(cell_count); for (int idx=0; idx packed_data(cell_count); - var->getVar(packed_data); - copy_nc_data_(var, data, packed_data, cell_count, + var->getVar(packed_data.data()); + copy_nc_data_(var, data, packed_data.data(), cell_count, "unsigned short", add_offset, scale_factor); - delete [] packed_data; } break; case NcType::nc_BYTE: { ncbyte missing_value; bool has_missing = get_var_fill_value(var, missing_value); - ncbyte *packed_data = new ncbyte[cell_count]; + vector packed_data(cell_count); - var->getVar(packed_data); + var->getVar(packed_data.data()); if (unsigned_value) { - unsigned char *ubyte_data = new unsigned char[cell_count]; + vector ubyte_data(cell_count); for (int idx=0; idx packed_data(cell_count); - var->getVar(packed_data); - copy_nc_data_(var, data, packed_data, cell_count, + var->getVar(packed_data.data()); + copy_nc_data_(var, data, packed_data.data(), cell_count, "unsigned char", add_offset, scale_factor); - delete [] packed_data; } break; default: @@ -1593,9 +1590,9 @@ bool get_nc_data(NcVar *var, double *data) { switch ( type_id ) { case NcType::nc_FLOAT: { - float *packed_data = new float[cell_count]; + vector packed_data(cell_count); - var->getVar(packed_data); + var->getVar(packed_data.data()); float fill_value; bool has_fill_value = 
get_var_fill_value(var, fill_value); @@ -1604,96 +1601,87 @@ bool get_nc_data(NcVar *var, double *data) { data[idx] = bad_data_double; else data[idx] = (double)packed_data[idx]; } - delete [] packed_data; } break; case NcType::nc_INT64: { - long long *packed_data = new long long[cell_count]; + vector packed_data(cell_count); - var->getVar(packed_data); - copy_nc_data_(var, data, packed_data, cell_count, + var->getVar(packed_data.data()); + copy_nc_data_(var, data, packed_data.data(), cell_count, "int64", add_offset, scale_factor); - delete [] packed_data; } break; case NcType::nc_INT: { - int *packed_data = new int[cell_count]; + vector packed_data(cell_count); - var->getVar(packed_data); - copy_nc_data_(var, data, packed_data, cell_count, + var->getVar(packed_data.data()); + copy_nc_data_(var, data, packed_data.data(), cell_count, "int", add_offset, scale_factor); - delete [] packed_data; } break; case NcType::nc_SHORT: { short missing_value; bool has_missing = get_var_fill_value(var, missing_value); - short *packed_data = new short[cell_count]; - var->getVar(packed_data); + vector packed_data(cell_count); + var->getVar(packed_data.data()); if (unsigned_value) { - unsigned short *ushort_data = new unsigned short[cell_count]; + vector ushort_data(cell_count); for (int idx=0; idx packed_data(cell_count); - var->getVar(packed_data); - copy_nc_data_(var, data, packed_data, cell_count, + var->getVar(packed_data.data()); + copy_nc_data_(var, data, packed_data.data(), cell_count, "ushort", add_offset, scale_factor); - delete [] packed_data; } break; case NcType::nc_BYTE: { ncbyte missing_value; bool has_missing = get_var_fill_value(var, missing_value); - ncbyte *packed_data = new ncbyte[cell_count]; + vector packed_data(cell_count); - var->getVar(packed_data); + var->getVar(packed_data.data()); if (unsigned_value) { - unsigned char *ubyte_data = new unsigned char[cell_count]; + vector ubyte_data(cell_count); for (int idx=0; idx packed_data(cell_count); - 
var->getVar(packed_data); - copy_nc_data_(var, data, packed_data, cell_count, + var->getVar(packed_data.data()); + copy_nc_data_(var, data, packed_data.data(), cell_count, "ncubyte", add_offset, scale_factor); - delete [] packed_data; } break; default: @@ -1750,20 +1738,25 @@ bool get_nc_data(NcVar *var, char **data) { //////////////////////////////////////////////////////////////////////// -bool get_nc_data(NcVar *var, uchar *data) { +bool get_nc_data(NcVar *var, uchar *data, bool allow_conversion) { bool return_status = false; int cell_count = get_data_size(var); int data_type = GET_NC_TYPE_ID_P(var); static const char *method_name = "get_nc_data(NcVar *, uchar *) -> "; if (NC_UBYTE == data_type) return_status = get_nc_data_t(var, data); - else if (NC_BYTE == data_type && has_unsigned_attribute(var)) { - ncbyte *signed_data = new ncbyte[cell_count]; - if (return_status = get_nc_data_t(var, signed_data)) { + else if (NC_BYTE == data_type) { + if (!has_unsigned_attribute(var) && !allow_conversion) { + mlog << Debug(1) << "\n" << method_name + << "INFO: Unexpected conversion from 'ncbyte' for variable \"" + << GET_NC_NAME_P(var) << "\".\n\n"; + } + vector signed_data(cell_count); + return_status = get_nc_data_t(var, signed_data.data()); + if (return_status) { for (int idx=0; idx short_data(cell_count); + if (return_status = get_nc_data_t(var, short_data.data())) { for (int idx=0; idx values(data_size); - if (get_nc_data(var, values)) { + if (get_nc_data(var, values.data())) { unixtime ut; int sec_per_unit; bool no_leap_year = get_att_no_leap_year(var); @@ -1929,7 +1921,6 @@ int get_index_at_nc_data(NcVar *var, double value, const string dim_name, bool i } } } - if (values) delete [] values; ConcatString value_str; if (is_time && (value > 10000000.)) value_str << unix_to_yyyymmdd_hhmmss(value); @@ -2804,65 +2795,45 @@ void copy_nc_atts(NcVar *var_from, NcVar *var_to, const bool all_attrs) { void copy_nc_data_char(NcVar *var_from, NcVar *var_to, int data_size) { 
//const string method_name = "copy_nc_data_char"; - char *data = new char[data_size]; - var_from->getVar(data); - var_to->putVar(data); - // mlog << Error << "\n" << method_name << " -> error writing the variable " - // << GET_NC_NAME_P(var_to) << " to the netCDF file\n\n"; - // exit(1); - delete[] data; + vector data(data_size); + var_from->getVar(data.data()); + var_to->putVar(data.data()); } //////////////////////////////////////////////////////////////////////// void copy_nc_data_double(NcVar *var_from, NcVar *var_to, int data_size) { //const string method_name = "copy_nc_data_double"; - double *data = new double[data_size]; - var_from->getVar(data); - var_to->putVar(data); - // mlog << Error << "\n" << method_name << " -> error writing the variable " - // << GET_NC_NAME_P(var_to) << " to the netCDF file\n\n"; - // exit(1); - delete[] data; + vector data(data_size); + var_from->getVar(data.data()); + var_to->putVar(data.data()); } //////////////////////////////////////////////////////////////////////// void copy_nc_data_float(NcVar *var_from, NcVar *var_to, int data_size) { //const string method_name = "copy_nc_data_float"; - float *data = new float[data_size]; - var_from->getVar(data); - var_to->putVar(data); - // mlog << Error << "\n" << method_name << " -> error writing the variable " - // << GET_NC_NAME_P(var_to) << " to the netCDF file\n\n"; - // exit(1); - delete[] data; + vector data(data_size); + var_from->getVar(data.data()); + var_to->putVar(data.data()); } //////////////////////////////////////////////////////////////////////// void copy_nc_data_int(NcVar *var_from, NcVar *var_to, int data_size) { //const string method_name = "copy_nc_data_int"; - int *data = new int[data_size]; - var_from->getVar(data); - var_to->putVar(data); - // mlog << Error << "\n" << method_name << " -> error writing the variable " - // << GET_NC_NAME_P(var_to) << " to the netCDF file\n\n"; - // exit(1); - delete[] data; + vector data(data_size); + 
var_from->getVar(data.data()); + var_to->putVar(data.data()); } //////////////////////////////////////////////////////////////////////// void copy_nc_data_short(NcVar *var_from, NcVar *var_to, int data_size) { //const string method_name = "copy_nc_data_double"; - short *data = new short[data_size]; - var_from->getVar(data); - var_to->putVar(data); - // mlog << Error << "\n" << method_name << " -> error writing the variable " - // << GET_NC_NAME_P(var_to) << " to the netCDF file\n\n"; - // exit(1); - delete[] data; + vector data(data_size); + var_from->getVar(data.data()); + var_to->putVar(data.data()); } //////////////////////////////////////////////////////////////////////// @@ -2872,9 +2843,6 @@ void copy_nc_data_string(NcVar *var_from, NcVar *var_to, int data_size) { string *data = new string[data_size]; var_from->getVar(data); var_to->putVar(data); - // mlog << Error << "\n" << method_name << " -> error writing the variable " - // << GET_NC_NAME_P(var_to) << " to the netCDF file\n\n"; - // exit(1); delete[] data; } @@ -3307,22 +3275,21 @@ bool is_nc_name_time(const ConcatString name) { //////////////////////////////////////////////////////////////////////// bool is_nc_attr_lat(const ConcatString name) { - bool is_latitude = (is_nc_name_lat(name) || name == "x" || name == "X"); + bool is_latitude = (is_nc_name_lat(name) || name == "y" || name == "Y"); return is_latitude; } //////////////////////////////////////////////////////////////////////// bool is_nc_attr_lon(const ConcatString name) { - bool is_longitude = (is_nc_name_lon(name) || name == "y" || name == "Y"); + bool is_longitude = (is_nc_name_lon(name) || name == "x" || name == "X"); return is_longitude; } //////////////////////////////////////////////////////////////////////// bool is_nc_attr_time(const ConcatString name) { - bool is_time = (is_nc_name_time(name) || name == "T"); - return is_time; + return is_nc_name_time(name); } //////////////////////////////////////////////////////////////////////// @@ 
-3330,31 +3297,49 @@ bool is_nc_attr_time(const ConcatString name) { NcVar get_nc_var_lat(const NcFile *nc) { NcVar var; bool found = false; + int max_dim_cnt = 0; + ConcatString att_val; + ConcatString coordinates_att; multimap mapVar = GET_NC_VARS_P(nc); static const char *method_name = "get_nc_var_lat() "; - for (multimap::iterator it_var = mapVar.begin(); - it_var != mapVar.end(); ++it_var) { - ConcatString name = (*it_var).first; - //if (is_nc_name_lat(name)) found = true; - if (get_var_standard_name(&(*it_var).second, name)) { - if (is_nc_name_lat(name)) found = true; + for (const auto &kv : mapVar) { + ConcatString name = kv.first; + if (is_nc_name_lat(name)) found = true; + if (!found && get_var_standard_name(&kv.second, att_val)) { + if (is_nc_name_lat(att_val)) found = true; } - if (!found && get_var_units(&(*it_var).second, name)) { - if (is_nc_unit_latitude(name.c_str())) { - if (get_nc_att_value(&(*it_var).second, axis_att_name, name)) { - if (is_nc_attr_lat(name)) found = true; - } - else if (get_nc_att_value(&(*it_var).second, - coordinate_axis_type_att_name, name)) { - if (is_nc_attr_lat(name)) found = true; - } + if (!found && get_var_units(&kv.second, att_val) + && is_nc_unit_latitude(att_val.c_str())) { + if (get_nc_att_value(&kv.second, axis_att_name, att_val)) { + if (is_nc_attr_lat(att_val)) found = true; + } + else if (get_nc_att_value(&kv.second, + coordinate_axis_type_att_name, att_val)) { + if (is_nc_attr_lat(att_val)) found = true; } } if (found) { - var = (*it_var).second; + var = kv.second; break; } + int dim_count = GET_NC_DIM_COUNT(kv.second); + if (dim_count > max_dim_cnt) { + max_dim_cnt = dim_count; + if (get_nc_att_value(&kv.second, coordinates_att_name, att_val)) coordinates_att = att_val; + } + } + + if (!found && !coordinates_att.empty()) { + StringArray coord_names = coordinates_att.split(" "); + for (int i=0; i< coord_names.n(); i++) { + NcVar var_lat = get_nc_var((NcFile *)nc, coord_names[i].c_str()); + if 
(get_var_units(&var_lat, att_val) && is_nc_unit_latitude(att_val.c_str())) { + found = true; + var = var_lat; + break; + } + } } if (found) { @@ -3372,31 +3357,50 @@ NcVar get_nc_var_lat(const NcFile *nc) { NcVar get_nc_var_lon(const NcFile *nc) { NcVar var; bool found = false; + int max_dim_cnt = 0; + ConcatString att_val; + ConcatString coordinates_att; multimap mapVar = GET_NC_VARS_P(nc); static const char *method_name = "get_nc_var_lon() "; - for (multimap::iterator it_var = mapVar.begin(); - it_var != mapVar.end(); ++it_var) { - ConcatString name = (*it_var).first; - //if (is_nc_name_lon(name)) found = true; - if (get_var_standard_name(&(*it_var).second, name)) { - if (is_nc_name_lon(name)) found = true; + for (const auto &kv : mapVar) { + ConcatString name = kv.first; + if (is_nc_name_lon(name)) found = true; + if (!found && get_var_standard_name(&kv.second, att_val)) { + if (is_nc_name_lon(att_val)) found = true; } - if (!found && get_var_units(&(*it_var).second, name)) { - if (is_nc_unit_longitude(name.c_str())) { - if (get_nc_att_value(&(*it_var).second, axis_att_name, name)) { - if (is_nc_attr_lon(name)) found = true; - } - else if (get_nc_att_value(&(*it_var).second, - coordinate_axis_type_att_name, name)) { - if (is_nc_attr_lon(name)) found = true; - } + if (!found && get_var_units(&kv.second, att_val) + && is_nc_unit_longitude(att_val.c_str())) { + if (get_nc_att_value(&kv.second, axis_att_name, att_val)) { + if (is_nc_attr_lon(att_val)) found = true; + } + else if (get_nc_att_value(&kv.second, + coordinate_axis_type_att_name, att_val)) { + if (is_nc_attr_lon(att_val)) found = true; } } if (found) { - var = (*it_var).second; + var = kv.second; break; } + + int dim_count = GET_NC_DIM_COUNT(kv.second); + if (dim_count > max_dim_cnt) { + max_dim_cnt = dim_count; + if (get_nc_att_value(&kv.second, coordinates_att_name, att_val)) coordinates_att = att_val; + } + } + + if (!found && !coordinates_att.empty()) { + StringArray coord_names = 
coordinates_att.split(" "); + for (int i=0; i< coord_names.n(); i++) { + NcVar var_lon = get_nc_var((NcFile *)nc, coord_names[i].c_str()); + if (get_var_units(&var_lon, att_val) && is_nc_unit_longitude(att_val.c_str())) { + found = true; + var = var_lon; + break; + } + } } if (found) { @@ -3414,33 +3418,51 @@ NcVar get_nc_var_lon(const NcFile *nc) { NcVar get_nc_var_time(const NcFile *nc) { NcVar var; bool found = false; + int max_dim_cnt = 0; + ConcatString att_val; + ConcatString coordinates_att; multimap mapVar = GET_NC_VARS_P(nc); static const char *method_name = "get_nc_var_time() "; - for (multimap::iterator it_var = mapVar.begin(); - it_var != mapVar.end(); ++it_var) { - ConcatString name = (*it_var).first; - //if (is_nc_name_time(name)) found = true; - if (get_var_standard_name(&(*it_var).second, name)) { - if (is_nc_name_time(name)) found = true; + for (auto &kv : mapVar) { + ConcatString name = kv.first; + if (!found && is_nc_name_time(name)) found = true; + if (get_var_standard_name(&kv.second, att_val)) { + if (is_nc_name_time(att_val)) found = true; mlog << Debug(7) << method_name << "checked variable \"" << name << "\" is_time: " << found << "\n"; } - if (!found && get_var_units(&(*it_var).second, name)) { - if (is_nc_unit_time(name.c_str())) { - if (get_nc_att_value(&(*it_var).second, axis_att_name, name)) { - if (is_nc_attr_time(name)) found = true; - } - else if (get_nc_att_value(&(*it_var).second, - coordinate_axis_type_att_name, name)) { - if (is_nc_attr_time(name)) found = true; - } + if (!found && get_var_units(&kv.second, att_val) + && is_nc_unit_time(att_val.c_str())) { + if (get_nc_att_value(&kv.second, axis_att_name, att_val)) { + if (is_nc_attr_time(att_val)) found = true; + } + else if (get_nc_att_value(&kv.second, + coordinate_axis_type_att_name, att_val)) { + if (is_nc_attr_time(att_val)) found = true; } } if (found) { - var = (*it_var).second; + var = kv.second; break; } + int dim_count = GET_NC_DIM_COUNT(kv.second); + if (dim_count > 
max_dim_cnt) { + max_dim_cnt = dim_count; + if (get_nc_att_value(&kv.second, coordinates_att_name, att_val)) coordinates_att = att_val; + } + } + + if (!found && !coordinates_att.empty()) { + StringArray coord_names = coordinates_att.split(" "); + for (int i=0; i< coord_names.n(); i++) { + NcVar var_time = get_nc_var((NcFile *)nc, coord_names[i].c_str()); + if (get_var_units(&var_time, att_val) && is_nc_unit_time(att_val.c_str())) { + found = true; + var = var_time; + break; + } + } } if (found) { @@ -3478,10 +3500,8 @@ NcFile *open_ncfile(const char * nc_name, bool write) { // Implement the old API var->num_vals() int get_data_size(NcVar *var) { - int dimCount = 0; int data_size = 1; - - dimCount = var->getDimCount(); + int dimCount = var->getDimCount(); for (int i=0; igetDim(i).getSize(); } diff --git a/src/libcode/vx_nc_util/nc_utils.h b/src/libcode/vx_nc_util/nc_utils.h index 260462018f..671b3bf609 100644 --- a/src/libcode/vx_nc_util/nc_utils.h +++ b/src/libcode/vx_nc_util/nc_utils.h @@ -18,51 +18,19 @@ #include -#ifndef ncbyte -typedef signed char ncbyte; // from ncvalues.h -#endif /* ncbyte */ -#ifndef uchar -typedef unsigned char uchar; -#endif /* uchar */ - #include "concat_string.h" #include "int_array.h" #include "long_array.h" #include "num_array.h" #include "nc_var_info.h" -//////////////////////////////////////////////////////////////////////// - -static const std::string C_unknown_str = std::string("unknown"); - -#define IS_VALID_NC(ncObj) (!ncObj.isNull()) -#define IS_VALID_NC_P(ncObjPtr) ((ncObjPtr != nullptr && !ncObjPtr->isNull())) - -#define IS_INVALID_NC(ncObj) ncObj.isNull() -#define IS_INVALID_NC_P(ncObjPtr) (ncObjPtr == nullptr || ncObjPtr->isNull()) - -#define GET_NC_NAME(ncObj) ncObj.getName() -#define GET_NC_NAME_P(ncObjPtr) ncObjPtr->getName() - -#define GET_NC_SIZE(ncObj) ncObj.getSize() -#define GET_NC_SIZE_P(ncObjPtr) ncObjPtr->getSize() - -#define GET_SAFE_NC_NAME(ncObj) (ncObj.isNull() ? 
C_unknown_str : ncObj.getName()) -#define GET_SAFE_NC_NAME_P(ncObjPtr) (IS_INVALID_NC_P(ncObjPtr) ? C_unknown_str : ncObjPtr->getName()) - -#define GET_NC_TYPE_ID(ncObj) ncObj.getType().getId() -#define GET_NC_TYPE_ID_P(ncObjPtr) ncObjPtr->getType().getId() -#define GET_NC_TYPE_NAME(ncObj) ncObj.getType().getName() -#define GET_NC_TYPE_NAME_P(ncObjPtr) ncObjPtr->getType().getName() - -#define GET_NC_DIM_COUNT(ncObj) ncObj.getDimCount() -#define GET_NC_DIM_COUNT_P(ncObjPtr) ncObjPtr->getDimCount() +#include "nc_utils.hpp" -#define GET_NC_VAR_COUNT(ncObj) ncObj.getVarCount() -#define GET_NC_VAR_COUNT_P(ncObjPtr) ncObjPtr->getVarCount() +//////////////////////////////////////////////////////////////////////// -#define GET_NC_VARS(ncObj) ncObj.getVars() -#define GET_NC_VARS_P(ncObjPtr) ncObjPtr->getVars() +#ifndef uchar +typedef unsigned char uchar; +#endif /* uchar */ //////////////////////////////////////////////////////////////////////// @@ -84,49 +52,49 @@ static const std::string C_unknown_str = std::string("unknown"); #define OBS_BUFFER_SIZE (128 * 1024) -static const char nc_dim_nhdr[] = "nhdr"; -static const char nc_dim_nhdr_typ[] = "nhdr_typ"; -static const char nc_dim_nhdr_sid[] = "nhdr_sid"; -static const char nc_dim_nhdr_vld[] = "nhdr_vld"; -static const char nc_dim_npbhdr[] = "npbhdr"; -static const char nc_dim_nobs[] = "nobs"; -static const char nc_dim_nqty[] = "nobs_qty"; -static const char nc_dim_hdr_arr[] = "hdr_arr_len"; -static const char nc_dim_obs_arr[] = "obs_arr_len"; -static const char nc_dim_mxstr[] = "mxstr"; -static const char nc_dim_mxstr2[] = "mxstr2"; -static const char nc_dim_mxstr3[] = "mxstr3"; -static const char nc_dim_nvar[] = "obs_var_num"; -static const char nc_dim_unit[] = "unit_len"; -static const char nc_dim_desc[] = "desc_len"; -static const char nc_var_desc[] = "obs_desc"; -static const char nc_var_hdr_arr[] = "hdr_arr"; -static const char nc_var_hdr_lat[] = "hdr_lat"; -static const char nc_var_hdr_lon[] = "hdr_lon"; -static 
const char nc_var_hdr_elv[] = "hdr_elv"; -static const char nc_var_hdr_typ[] = "hdr_typ"; -static const char nc_var_hdr_sid[] = "hdr_sid"; -static const char nc_var_hdr_vld[] = "hdr_vld"; -static const char nc_var_hdr_prpt_typ[] = "hdr_prpt_typ"; -static const char nc_var_hdr_irpt_typ[] = "hdr_irpt_typ"; -static const char nc_var_hdr_inst_typ[] = "hdr_inst_typ"; -static const char nc_var_hdr_typ_tbl[] = "hdr_typ_table"; -static const char nc_var_hdr_sid_tbl[] = "hdr_sid_table"; -static const char nc_var_hdr_vld_tbl[] = "hdr_vld_table"; -static const char nc_var_obs_arr[] = "obs_arr"; -static const char nc_var_obs_hid[] = "obs_hid"; -static const char nc_var_obs_gc[] = "obs_gc"; -static const char nc_var_obs_vid[] = "obs_vid"; -static const char nc_var_obs_lvl[] = "obs_lvl"; -static const char nc_var_obs_hgt[] = "obs_hgt"; -static const char nc_var_obs_val[] = "obs_val"; -static const char nc_var_obs_qty[] = "obs_qty"; -static const char nc_var_obs_qty_tbl[] = "obs_qty_table"; -static const char nc_var_obs_var[] = "obs_var"; -static const char nc_var_unit[] = "obs_unit"; -static const std::string nc_att_use_var_id = "use_var_id"; -static const char nc_att_obs_version[] = "MET_Obs_version"; -static const char nc_att_met_point_nccf[] = "MET_point_NCCF"; +constexpr char nc_dim_nhdr[] = "nhdr"; +constexpr char nc_dim_nhdr_typ[] = "nhdr_typ"; +constexpr char nc_dim_nhdr_sid[] = "nhdr_sid"; +constexpr char nc_dim_nhdr_vld[] = "nhdr_vld"; +constexpr char nc_dim_npbhdr[] = "npbhdr"; +constexpr char nc_dim_nobs[] = "nobs"; +constexpr char nc_dim_nqty[] = "nobs_qty"; +constexpr char nc_dim_hdr_arr[] = "hdr_arr_len"; +constexpr char nc_dim_obs_arr[] = "obs_arr_len"; +constexpr char nc_dim_mxstr[] = "mxstr"; +constexpr char nc_dim_mxstr2[] = "mxstr2"; +constexpr char nc_dim_mxstr3[] = "mxstr3"; +constexpr char nc_dim_nvar[] = "obs_var_num"; +constexpr char nc_dim_unit[] = "unit_len"; +constexpr char nc_dim_desc[] = "desc_len"; +constexpr char nc_var_desc[] = "obs_desc"; 
+constexpr char nc_var_hdr_arr[] = "hdr_arr"; +constexpr char nc_var_hdr_lat[] = "hdr_lat"; +constexpr char nc_var_hdr_lon[] = "hdr_lon"; +constexpr char nc_var_hdr_elv[] = "hdr_elv"; +constexpr char nc_var_hdr_typ[] = "hdr_typ"; +constexpr char nc_var_hdr_sid[] = "hdr_sid"; +constexpr char nc_var_hdr_vld[] = "hdr_vld"; +constexpr char nc_var_hdr_prpt_typ[] = "hdr_prpt_typ"; +constexpr char nc_var_hdr_irpt_typ[] = "hdr_irpt_typ"; +constexpr char nc_var_hdr_inst_typ[] = "hdr_inst_typ"; +constexpr char nc_var_hdr_typ_tbl[] = "hdr_typ_table"; +constexpr char nc_var_hdr_sid_tbl[] = "hdr_sid_table"; +constexpr char nc_var_hdr_vld_tbl[] = "hdr_vld_table"; +constexpr char nc_var_obs_arr[] = "obs_arr"; +constexpr char nc_var_obs_hid[] = "obs_hid"; +constexpr char nc_var_obs_gc[] = "obs_gc"; +constexpr char nc_var_obs_vid[] = "obs_vid"; +constexpr char nc_var_obs_lvl[] = "obs_lvl"; +constexpr char nc_var_obs_hgt[] = "obs_hgt"; +constexpr char nc_var_obs_val[] = "obs_val"; +constexpr char nc_var_obs_qty[] = "obs_qty"; +constexpr char nc_var_obs_qty_tbl[] = "obs_qty_table"; +constexpr char nc_var_obs_var[] = "obs_var"; +constexpr char nc_var_unit[] = "obs_unit"; +constexpr char nc_att_use_var_id[] = "use_var_id"; +constexpr char nc_att_obs_version[] = "MET_Obs_version"; +constexpr char nc_att_met_point_nccf[] = "MET_point_NCCF"; static const std::string add_offset_att_name = "add_offset"; static const std::string axis_att_name = "axis"; @@ -135,27 +103,25 @@ static const std::string coordinates_att_name = "coordinates"; static const std::string coordinate_axis_type_att_name = "_CoordinateAxisType"; static const std::string cf_att_name = "Conventions"; static const std::string description_att_name = "description"; -static const std::string fill_value_att_name = "_FillValue"; static const std::string grid_mapping_att_name = "grid_mapping"; static const std::string grid_mapping_name_att_name = "grid_mapping_name"; static const std::string long_name_att_name = "long_name"; 
-static const std::string missing_value_att_name = "missing_value"; static const std::string projection_att_name = "Projection"; static const std::string scale_factor_att_name = "scale_factor"; static const std::string standard_name_att_name = "standard_name"; static const std::string units_att_name = "units"; -static const char nc_time_unit_exp[] = "^[a-z|A-Z]* *since *[0-9]\\{1,4\\}-[0-9]\\{1,2\\}-[0-9]\\{1,2\\}"; -static const char nc_time_unit_ymd_exp[] = "[0-9]\\{1,4\\}-[0-9]\\{1,2\\}-[0-9]\\{1,2\\}"; +constexpr char nc_time_unit_exp[] = "^[a-z|A-Z]* *since *[0-9]\\{1,4\\}-[0-9]\\{1,2\\}-[0-9]\\{1,2\\}"; +constexpr char nc_time_unit_ymd_exp[] = "[0-9]\\{1,4\\}-[0-9]\\{1,2\\}-[0-9]\\{1,2\\}"; -static const char MET_NC_Obs_ver_1_2[] = "1.02"; -static const char MET_NC_Obs_version[] = "1.02"; +constexpr char MET_NC_Obs_ver_1_2[] = "1.02"; +constexpr char MET_NC_Obs_version[] = "1.02"; -static const int exit_code_no_error = 0; -static const int exit_code_no_dim = 1; -static const int exit_code_no_hdr_vars = 2; -static const int exit_code_no_loc_vars = 3; -static const int exit_code_no_obs_vars = 4; +constexpr int exit_code_no_error = 0; +constexpr int exit_code_no_dim = 1; +constexpr int exit_code_no_hdr_vars = 2; +constexpr int exit_code_no_loc_vars = 3; +constexpr int exit_code_no_obs_vars = 4; //////////////////////////////////////////////////////////////////////// @@ -183,9 +149,6 @@ extern bool get_att_no_leap_year(const netCDF::NcVar *); extern bool get_cf_conventions(const netCDF::NcFile *, ConcatString&); -extern netCDF::NcVarAtt *get_nc_att(const netCDF::NcVar *, const ConcatString &, bool exit_on_error = false); -extern netCDF::NcGroupAtt *get_nc_att(const netCDF::NcFile *, const ConcatString &, bool exit_on_error = false); - extern bool get_nc_att_value(const netCDF::NcVarAtt *, std::string &); extern bool get_nc_att_value(const netCDF::NcVarAtt *, int &, bool exit_on_error = true); extern bool get_nc_att_value(const netCDF::NcVarAtt *, float &, bool 
exit_on_error = true); @@ -195,6 +158,7 @@ extern bool get_nc_att_value(const netCDF::NcVar *, const ConcatString &, Concat extern bool get_nc_att_value(const netCDF::NcVar *, const ConcatString &, int &, bool exit_on_error = false); extern bool get_nc_att_value(const netCDF::NcVar *, const ConcatString &, float &, bool exit_on_error = false); extern bool get_nc_att_value(const netCDF::NcVar *, const ConcatString &, double &, bool exit_on_error = false); +extern bool get_nc_att_values(const netCDF::NcVar *, const ConcatString &, unsigned short *, bool exit_on_error = false); extern bool has_att(netCDF::NcFile *, const ConcatString name, bool exit_on_error=false); extern bool has_att(netCDF::NcVar *, const ConcatString name, bool do_log=false); @@ -257,11 +221,10 @@ extern ConcatString* get_string_val(netCDF::NcVar *var, const int index, const i extern bool get_nc_data(netCDF::NcVar *, int *data); extern bool get_nc_data(netCDF::NcVar *, char *data); extern bool get_nc_data(netCDF::NcVar *, char **data); -extern bool get_nc_data(netCDF::NcVar *, uchar *data); +extern bool get_nc_data(netCDF::NcVar *, uchar *data, bool allow_conversion=false); extern bool get_nc_data(netCDF::NcVar *, float *data); extern bool get_nc_data(netCDF::NcVar *, double *data); extern bool get_nc_data(netCDF::NcVar *, time_t *data); -extern bool get_nc_data(netCDF::NcVar *, ncbyte *data); extern bool get_nc_data(netCDF::NcVar *, unsigned short *data); extern bool get_nc_data(netCDF::NcVar *, int *data, const LongArray &curs); @@ -359,14 +322,8 @@ extern netCDF::NcDim add_dim(netCDF::NcFile *, const std::string &); extern netCDF::NcDim add_dim(netCDF::NcFile *, const std::string &, const size_t); extern bool has_dim(netCDF::NcFile *, const char *dim_name); extern bool get_dim(const netCDF::NcFile *, const ConcatString &, int &, bool error_out = false); -extern int get_dim_count(const netCDF::NcVar *); extern int get_dim_count(const netCDF::NcFile *); -extern int get_dim_size(const 
netCDF::NcDim *); -extern int get_dim_size(const netCDF::NcVar *, const int dim_offset); extern int get_dim_value(const netCDF::NcFile *, const std::string &, const bool error_out = false); -extern netCDF::NcDim get_nc_dim(const netCDF::NcFile *, const std::string &dim_name); -extern netCDF::NcDim get_nc_dim(const netCDF::NcVar *, const std::string &dim_name); -extern netCDF::NcDim get_nc_dim(const netCDF::NcVar *, const int dim_offset); extern bool get_dim_names(const netCDF::NcVar *var, StringArray *dimNames); extern bool get_dim_names(const netCDF::NcFile *nc, StringArray *dimNames); @@ -376,7 +333,6 @@ extern netCDF::NcVar get_nc_var_time(const netCDF::NcFile *nc); extern int get_index_at_nc_data(netCDF::NcVar *var, double value, const std::string dim_name, bool is_time=false); extern netCDF::NcFile* open_ncfile(const char * nc_name, bool write = false); -extern int get_data_size(netCDF::NcVar *); extern unixtime get_reference_unixtime(netCDF::NcVar *time_var, int &sec_per_unit, bool &no_leap_year); @@ -390,10 +346,6 @@ extern void parse_time_string(const char *str, unixtime &ut); //////////////////////////////////////////////////////////////////////// -#include "nc_utils.hpp" - -//////////////////////////////////////////////////////////////////////// - #endif /* __NC_UTILS_H__ */ //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_nc_util/nc_utils.hpp b/src/libcode/vx_nc_util/nc_utils.hpp index 4a6c0a1a92..7298a7e4d8 100644 --- a/src/libcode/vx_nc_util/nc_utils.hpp +++ b/src/libcode/vx_nc_util/nc_utils.hpp @@ -13,6 +13,10 @@ //////////////////////////////////////////////////////////////////////// +#include "nc_utils_core.h" + +//////////////////////////////////////////////////////////////////////// + extern bool get_att_value(const netCDF::NcAtt *att, int &att_val); extern bool get_att_value(const netCDF::NcAtt *att, ConcatString &value); extern bool get_att_value(const netCDF::NcAtt *att, ncbyte &att_val); @@ 
-93,12 +97,33 @@ bool get_nc_att_value_(const netCDF::NcVar *var, const ConcatString &att_name, if (!status) { mlog << Error << "\n" << caller_name << get_log_msg_for_att(att, GET_SAFE_NC_NAME_P(var), att_name); - if (exit_on_error) { - if (att) delete att; - exit(1); - } } if (att) delete att; + if (!status && exit_on_error) exit(1); + + return status; +} + +//////////////////////////////////////////////////////////////////////// + +template +bool get_nc_att_values_(const netCDF::NcVar *var, const ConcatString &att_name, + T *att_vals, bool exit_on_error, + const char *caller_name) { + // caller should initialize att_vals + + // + // Retrieve the NetCDF variable attribute. + // + netCDF::NcVarAtt *att = get_nc_att(var, att_name); + bool status = IS_VALID_NC_P(att); + if (status) att->getValues(att_vals); + else { + mlog << Error << "\n" << caller_name + << get_log_msg_for_att(att, GET_SAFE_NC_NAME_P(var), att_name); + } + if (att) delete att; + if (!status && exit_on_error) exit(1); return status; } @@ -196,7 +221,7 @@ bool get_var_fill_value(const netCDF::NcVar *var, T &att_val) { if (att) delete att; - return(found); + return found; } //////////////////////////////////////////////////////////////////////// @@ -211,46 +236,100 @@ void apply_scale_factor_(T *data, const int cell_count, clock_t start_clock = clock(); const char *method_name = "apply_scale_factor(T) "; - if (cell_count > 0) { - int idx; - int positive_cnt = 0; - int unpacked_count = 0; - T min_value, max_value; - T raw_min_val, raw_max_val; + if (cell_count <= 0) return; - idx = 0; - if (has_fill_value) { - for (; idx data[idx]) raw_min_val = data[idx]; - if (raw_max_val < data[idx]) raw_max_val = data[idx]; - data[idx] = (data[idx] * scale_factor) + add_offset; - if (data[idx] > 0) positive_cnt++; - if (min_value > data[idx]) min_value = data[idx]; - if (max_value < data[idx]) max_value = data[idx]; - unpacked_count++; - } + if (!is_eq(nc_fill_value, data[idx])) break; + data[idx] = met_fill_value; 
+ } + } + + int tmp_idx = (idx < cell_count) ? idx : 0; + raw_min_val = raw_max_val = data[tmp_idx]; + min_value = max_value = (T)(((double)data[tmp_idx] * scale_factor) + add_offset); + for (; idx data[idx]) raw_min_val = data[idx]; + if (raw_max_val < data[idx]) raw_max_val = data[idx]; + data[idx] = (data[idx] * scale_factor) + add_offset; + if (data[idx] > 0) positive_cnt++; + if (min_value > data[idx]) min_value = data[idx]; + if (max_value < data[idx]) max_value = data[idx]; + unpacked_count++; } - //cout << typeid(nc_fill_value).name(); - mlog << Debug(debug_level) << method_name << var_name - << "(" << typeid(data[0]).name() << "): unpacked data: count=" - << unpacked_count << " out of " << cell_count - << ", scale_factor=" << scale_factor<< " add_offset=" << add_offset - << ". FillValue(" << data_type << ")=" << nc_fill_value << "\n"; - mlog << Debug(debug_level) << method_name - << " data range [" << min_value << " - " << max_value - << "] raw data: [" << raw_min_val << " - " << raw_max_val - << "] Positive count: " << positive_cnt << "\n"; + } + mlog << Debug(debug_level) << method_name << var_name + << "(data_type=" << typeid(data[0]).name() << "): unpacked data: count=" + << unpacked_count << " out of " << cell_count + << ", scale_factor=" << scale_factor<< " add_offset=" << add_offset + << ". 
FillValue(" << data_type << ")=" << nc_fill_value << "\n"; + mlog << Debug(debug_level) << method_name + << " data range [" << min_value << " - " << max_value + << "] raw data: [" << raw_min_val << " - " << raw_max_val + << "] Positive count: " << positive_cnt << "\n"; + + mlog << Debug(debug_level) << method_name << " took " + << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + return; +} + +//////////////////////////////////////////////////////////////////////// + +template +void update_missing_values(T *data, const long cell_count, + const T nc_fill_value, const T met_fill_value, + const char *data_type, const char *var_name) { + int missing_count = 0; + const int debug_level = 7; + clock_t start_clock = clock(); + const char *method_name = "update_missing_values(T) "; + + if (cell_count <= 0) return; + + T max_value; + T min_value; + long idx = 0; + int positive_cnt = 0; + + // Set met_fill_value (-9999) for FillValues (missing values) + for (; idx data[idx]) min_value = data[idx]; + if (max_value < data[idx]) max_value = data[idx]; + } + } + mlog << Debug(debug_level) << method_name << var_name + << "(data_type=" << typeid(data[0]).name() << "): FillValue(" << data_type << ")=" << nc_fill_value << "\n"; + mlog << Debug(debug_level) << method_name + << " data range [" << min_value << " - " << max_value + << "] Positive count: " << positive_cnt << "\n"; + if (0 < missing_count) { + mlog << Debug(3) << method_name << var_name + << "(data_type=" << typeid(data[0]).name() << "): found " << missing_count << " FillValues out of " << cell_count << "\n"; } mlog << Debug(debug_level) << method_name << " took " << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; @@ -269,7 +348,7 @@ bool get_nc_data_t(netCDF::NcVar *var, T *data) { if (return_status) { var->getVar(data); } - return(return_status); + return return_status; } //////////////////////////////////////////////////////////////////////// @@ -286,26 +365,31 @@ bool 
get_nc_data_(netCDF::NcVar *var, T *data, const T met_missing) { bool return_status = get_nc_data_t(var, data); if (return_status) { + T nc_missing; + const int cell_count = get_data_size(var); + bool has_missing_attr = get_var_fill_value(var, nc_missing); + if (!has_missing_attr) nc_missing = met_missing; + //scale_factor and add_offset if (has_add_offset_attr(var) || has_scale_factor_attr(var)) { - T nc_missing; - const int cell_count = get_data_size(var); double add_offset = get_var_add_offset(var); double scale_factor = get_var_scale_factor(var); - bool has_missing_attr = get_var_fill_value(var, nc_missing); - if (!has_missing_attr) nc_missing = met_missing; apply_scale_factor_(data, cell_count, add_offset, scale_factor, nc_missing, met_missing, has_missing_attr, "", GET_NC_NAME_P(var).c_str()); } + else if (has_missing_attr) { + update_missing_values(data, cell_count, nc_missing, met_missing, + "", GET_NC_NAME_P(var).c_str()); + } } - return(return_status); + return return_status; } //////////////////////////////////////////////////////////////////////// template -bool get_nc_data_(netCDF::NcVar *var, T *data, T bad_data, const LongArray &dims, const LongArray &curs) { +bool get_nc_data_(netCDF::NcVar *var, T *data, T met_missing, const LongArray &dims, const LongArray &curs) { bool return_status = false; const char *method_name = "get_nc_data_(T, *dims, *curs) "; @@ -346,7 +430,7 @@ bool get_nc_data_(netCDF::NcVar *var, T *data, T bad_data, const LongArray &dims } for (int idx1=0; idx1getVar(start, count, data); return_status = true; + T nc_missing; + bool has_missing_attr = get_var_fill_value(var, nc_missing); + if (!has_missing_attr) nc_missing = met_missing; + //scale_factor and add_offset if (has_add_offset_attr(var) || has_scale_factor_attr(var)) { - T nc_missing; double add_offset = get_var_add_offset(var); double scale_factor = get_var_scale_factor(var); - bool has_missing_attr = get_var_fill_value(var, nc_missing); - if (!has_missing_attr) nc_missing 
= bad_data; apply_scale_factor_(data, data_size, add_offset, scale_factor, - nc_missing, bad_data, has_missing_attr, + nc_missing, met_missing, has_missing_attr, "", GET_NC_NAME_P(var).c_str()); } + else if (has_missing_attr) { + update_missing_values(data, data_size, nc_missing, met_missing, + "", GET_NC_NAME_P(var).c_str()); + } } - return(return_status); + return return_status; } //////////////////////////////////////////////////////////////////////// @@ -412,26 +501,31 @@ bool get_nc_data_(netCDF::NcVar *var, T *data, T met_missing, const long dim, co var->getVar(start, count, data); return_status = true; + T nc_missing; + bool has_missing_attr = get_var_fill_value(var, nc_missing); + if (!has_missing_attr) nc_missing = met_missing; + //scale_factor and add_offset if (has_add_offset_attr(var) || has_scale_factor_attr(var)) { - T nc_missing; double add_offset = get_var_add_offset(var); double scale_factor = get_var_scale_factor(var); - bool has_missing_attr = get_var_fill_value(var, nc_missing); - if (!has_missing_attr) nc_missing = met_missing; apply_scale_factor_(data, dim, add_offset, scale_factor, nc_missing, met_missing, has_missing_attr, "", GET_NC_NAME_P(var).c_str()); } + else if (has_missing_attr) { + update_missing_values(data, dim, nc_missing, met_missing, + "", GET_NC_NAME_P(var).c_str()); + } } - return(return_status); + return return_status; } //////////////////////////////////////////////////////////////////////// // read a single data template -bool get_nc_data_(netCDF::NcVar *var, T *data, T bad_data, const LongArray &curs) { +bool get_nc_data_(netCDF::NcVar *var, T *data, T met_missing, const LongArray &curs) { bool return_status = false; //const char *method_name = "get_nc_data_(*curs) "; @@ -445,9 +539,9 @@ bool get_nc_data_(netCDF::NcVar *var, T *data, T bad_data, const LongArray &curs } // Retrieve the NetCDF value from the NetCDF variable. 
- return_status = get_nc_data_(var, data, bad_data, dims, curs); + return_status = get_nc_data_(var, data, met_missing, dims, curs); } - return(return_status); + return return_status; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_nc_util/nc_utils_core.h b/src/libcode/vx_nc_util/nc_utils_core.h new file mode 100644 index 0000000000..93838a2dbe --- /dev/null +++ b/src/libcode/vx_nc_util/nc_utils_core.h @@ -0,0 +1,80 @@ +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +// ** Copyright UCAR (c) 1992 - 2024 +// ** University Corporation for Atmospheric Research (UCAR) +// ** National Center for Atmospheric Research (NCAR) +// ** Research Applications Lab (RAL) +// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* + +//////////////////////////////////////////////////////////////////////// + +#ifndef __NC_UTILS_CORE_H__ +#define __NC_UTILS_CORE_H__ + +//////////////////////////////////////////////////////////////////////// + + +//////////////////////////////////////////////////////////////////////// + +#ifndef ncbyte +typedef signed char ncbyte; /* from ncvalues.h */ +#endif /* ncbyte */ + +//////////////////////////////////////////////////////////////////////// + +#define IS_VALID_NC(ncObj) (!ncObj.isNull()) +#define IS_VALID_NC_P(ncObjPtr) ((ncObjPtr != nullptr && !ncObjPtr->isNull())) + +#define IS_INVALID_NC(ncObj) ncObj.isNull() +#define IS_INVALID_NC_P(ncObjPtr) (ncObjPtr == nullptr || ncObjPtr->isNull()) + +#define GET_NC_NAME(ncObj) ncObj.getName() +#define GET_NC_NAME_P(ncObjPtr) ncObjPtr->getName() + +#define GET_NC_SIZE(ncObj) ncObj.getSize() +#define GET_NC_SIZE_P(ncObjPtr) ncObjPtr->getSize() + +#define GET_SAFE_NC_NAME(ncObj) (ncObj.isNull() ? C_unknown_str : ncObj.getName()) +#define GET_SAFE_NC_NAME_P(ncObjPtr) (IS_INVALID_NC_P(ncObjPtr) ? 
C_unknown_str : ncObjPtr->getName()) + +#define GET_NC_TYPE_ID(ncObj) ncObj.getType().getId() +#define GET_NC_TYPE_ID_P(ncObjPtr) ncObjPtr->getType().getId() +#define GET_NC_TYPE_NAME(ncObj) ncObj.getType().getName() +#define GET_NC_TYPE_NAME_P(ncObjPtr) ncObjPtr->getType().getName() + +#define GET_NC_DIM_COUNT(ncObj) ncObj.getDimCount() +#define GET_NC_DIM_COUNT_P(ncObjPtr) ncObjPtr->getDimCount() + +#define GET_NC_VAR_COUNT(ncObj) ncObj.getVarCount() +#define GET_NC_VAR_COUNT_P(ncObjPtr) ncObjPtr->getVarCount() + +#define GET_NC_VARS(ncObj) ncObj.getVars() +#define GET_NC_VARS_P(ncObjPtr) ncObjPtr->getVars() + +//////////////////////////////////////////////////////////////////////// + +static const std::string C_unknown_str = std::string("unknown"); + +static const std::string fill_value_att_name = "_FillValue"; +static const std::string missing_value_att_name = "missing_value"; + +//////////////////////////////////////////////////////////////////////// + + +extern int get_data_size(netCDF::NcVar *); +extern int get_dim_count(const netCDF::NcVar *); +extern int get_dim_size(const netCDF::NcDim *); +extern int get_dim_size(const netCDF::NcVar *, const int dim_offset); + +extern netCDF::NcVarAtt *get_nc_att(const netCDF::NcVar *, const ConcatString &, bool exit_on_error = false); +extern netCDF::NcGroupAtt *get_nc_att(const netCDF::NcFile *, const ConcatString &, bool exit_on_error = false); + +extern netCDF::NcDim get_nc_dim(const netCDF::NcFile *, const std::string &dim_name); +extern netCDF::NcDim get_nc_dim(const netCDF::NcVar *, const std::string &dim_name); +extern netCDF::NcDim get_nc_dim(const netCDF::NcVar *, const int dim_offset); + +//////////////////////////////////////////////////////////////////////// + +#endif /* __NC_UTILS_CORE_H__ */ + +//////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_nc_util/write_netcdf.cc b/src/libcode/vx_nc_util/write_netcdf.cc index 6a747cf7df..e156f85ce7 100644 --- 
a/src/libcode/vx_nc_util/write_netcdf.cc +++ b/src/libcode/vx_nc_util/write_netcdf.cc @@ -113,8 +113,8 @@ void write_netcdf_latlon_1d(NcFile *f_out, NcDim *lat_dim, NcDim *lon_dim, NcVar lat_var; NcVar lon_var; // Allocate space for lat/lon values - float *lat_data = new float [grid.ny()]; - float *lon_data = new float [grid.nx()]; + vector lat_data(grid.ny()); + vector lon_data(grid.nx()); // Define Variables lat_var = f_out->addVar("lat", ncFloat, *lat_dim); @@ -142,13 +142,10 @@ void write_netcdf_latlon_1d(NcFile *f_out, NcDim *lat_dim, NcDim *lon_dim, } // Write the lat data - put_nc_data(&lat_var, &lat_data[0], lat_dim->getSize(), 0); + put_nc_data(&lat_var, lat_data.data(), lat_dim->getSize(), 0); // Write the lon data - put_nc_data(&lon_var, &lon_data[0], lon_dim->getSize(), 0); - - if ( lat_data ) { delete [] lat_data; lat_data = nullptr; } - if ( lon_data ) { delete [] lon_data; lon_data = nullptr; } + put_nc_data(&lon_var, lon_data.data(), lon_dim->getSize(), 0); return; } @@ -164,8 +161,8 @@ void write_netcdf_latlon_2d(NcFile *f_out, NcDim *lat_dim, NcDim *lon_dim, long counts[2] = {grid.ny(), grid.nx()}; long offsets[2] = {0 , 0}; // Allocate space for lat/lon values - float *lat_data = new float [grid.nx()*grid.ny()]; - float *lon_data = new float [grid.nx()*grid.ny()]; + vector lat_data(grid.nx()*grid.ny()); + vector lon_data(grid.nx()*grid.ny()); // Define Variables dims.push_back(*lat_dim); @@ -196,13 +193,10 @@ void write_netcdf_latlon_2d(NcFile *f_out, NcDim *lat_dim, NcDim *lon_dim, } // Write the lat data - put_nc_data(&lat_var, &lat_data[0], counts, offsets); + put_nc_data(&lat_var, lat_data.data(), counts, offsets); // Write the lon data - put_nc_data(&lon_var, &lon_data[0], counts, offsets); - - if ( lat_data ) { delete [] lat_data; lat_data = nullptr; } - if ( lon_data ) { delete [] lon_data; lon_data = nullptr; } + put_nc_data(&lon_var, lon_data.data(), counts, offsets); return; } @@ -216,7 +210,7 @@ void write_netcdf_grid_weight(NcFile 
*f_out, NcDim *lat_dim, NcDim *lon_dim, vector dims; vector count; // Allocate space for weight values - float *wgt_data = new float [wgt_dp.nx()*wgt_dp.ny()]; + vector wgt_data(wgt_dp.nx()*wgt_dp.ny()); // Define Variables dims.push_back(*lat_dim); @@ -256,10 +250,9 @@ void write_netcdf_grid_weight(NcFile *f_out, NcDim *lat_dim, NcDim *lon_dim, // Write the weights count.push_back(wgt_dp.ny()); count.push_back(wgt_dp.nx()); - put_nc_data_with_dims(&wgt_var, &wgt_data[0], wgt_dp.ny(), wgt_dp.nx()); + put_nc_data_with_dims(&wgt_var, wgt_data.data(), wgt_dp.ny(), wgt_dp.nx()); // Clean up - if(wgt_data) { delete [] wgt_data; wgt_data = (float *) nullptr; } return; } diff --git a/src/libcode/vx_pb_util/Makefile.in b/src/libcode/vx_pb_util/Makefile.in index 4155cbd87d..c60be5fb82 100644 --- a/src/libcode/vx_pb_util/Makefile.in +++ b/src/libcode/vx_pb_util/Makefile.in @@ -240,6 +240,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_physics/Makefile.in b/src/libcode/vx_physics/Makefile.in index 4a92b393e6..88c09d0f76 100644 --- a/src/libcode/vx_physics/Makefile.in +++ b/src/libcode/vx_physics/Makefile.in @@ -234,6 +234,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_plot_util/Makefile.in b/src/libcode/vx_plot_util/Makefile.in index b04ac2aff8..16cd426cde 100644 --- a/src/libcode/vx_plot_util/Makefile.in +++ b/src/libcode/vx_plot_util/Makefile.in @@ -239,6 +239,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ 
MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_pointdata_python/Makefile.in b/src/libcode/vx_pointdata_python/Makefile.in index 9df1cb65f5..e2ff7db4d5 100644 --- a/src/libcode/vx_pointdata_python/Makefile.in +++ b/src/libcode/vx_pointdata_python/Makefile.in @@ -241,6 +241,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_pointdata_python/python_pointdata.cc b/src/libcode/vx_pointdata_python/python_pointdata.cc index df27f81c9a..78a40e942a 100644 --- a/src/libcode/vx_pointdata_python/python_pointdata.cc +++ b/src/libcode/vx_pointdata_python/python_pointdata.cc @@ -253,12 +253,12 @@ MetPointHeader *header_data = met_pd_out.get_header_data(); check_header_data(header_data, method_name); - set_array_from_python(python_met_point_data, numpy_array_obs_qty, obs_data->obs_qids); - set_array_from_python(python_met_point_data, numpy_array_obs_hid, obs_data->obs_hids); - set_array_from_python(python_met_point_data, numpy_array_obs_vid, obs_data->obs_ids); - set_array_from_python(python_met_point_data, numpy_array_obs_lvl, obs_data->obs_lvls); - set_array_from_python(python_met_point_data, numpy_array_obs_hgt, obs_data->obs_hgts); - set_array_from_python(python_met_point_data, numpy_array_obs_val, obs_data->obs_vals); + set_array_from_python(python_met_point_data, numpy_array_obs_qty, obs_data->obs_qids.data()); + set_array_from_python(python_met_point_data, numpy_array_obs_hid, obs_data->obs_hids.data()); + set_array_from_python(python_met_point_data, numpy_array_obs_vid, obs_data->obs_ids.data()); + set_array_from_python(python_met_point_data, numpy_array_obs_lvl, obs_data->obs_lvls.data()); + set_array_from_python(python_met_point_data, numpy_array_obs_hgt, obs_data->obs_hgts.data()); + 
set_array_from_python(python_met_point_data, numpy_array_obs_val, obs_data->obs_vals.data()); set_str_array_from_python(python_met_point_data, numpy_array_obs_qty_table, &obs_data->qty_names); set_str_array_from_python(python_met_point_data, numpy_array_obs_var_table, &obs_data->var_names); @@ -350,8 +350,10 @@ bool process_point_data_list(PyObject *python_point_data, MetPointDataPython &me // get valid time index vld_time = obs.getValidTime(); if ( !header_data->vld_num_array.has(vld_time, vld_idx) ) { + // MET #2897 keep vld_array and vld_num_array in sync + header_data->vld_array.add(obs.getValidTimeString()); header_data->vld_num_array.add(vld_time); - header_data->vld_num_array.has(vld_time, vld_idx); + vld_idx = header_data->vld_num_array.n() - 1; } if (!is_eq(prev_lat, lat) || !is_eq(prev_lon, lon) || !is_eq(prev_elv, elv) @@ -363,7 +365,6 @@ bool process_point_data_list(PyObject *python_point_data, MetPointDataPython &me header_data->sid_idx_array.add(sid); header_data->typ_idx_array.add(typ_idx); header_data->vld_idx_array.add(vld_idx); - header_data->vld_array.add(obs.getValidTimeString()); prev_lat = lat; prev_lon = lon; @@ -763,7 +764,7 @@ void print_met_data(MetPointObsData *obs_data, MetPointHeader *header_data, << header_data->vld_idx_array.n() << ", lat=" << header_data->lat_array.n() << ", lon=" << header_data->lon_array.n() << ", elv=" - << header_data->elv_array.n() << ", message_type=" + << header_data->elv_array.n() << ", message_type=" << header_data->typ_array.n() << ", station_id=" << header_data->sid_array.n() << ", valid_time=" << header_data->vld_array.n() << ", prpt=" @@ -774,7 +775,7 @@ void print_met_data(MetPointObsData *obs_data, MetPointHeader *header_data, log_count = (header_data->hdr_count > min_count) ? 
min_count : header_data->hdr_count; mlog << Debug(debug_level) << method_name - << "header_data: message_type,station_id,time_time,lat,lon.elv\n"; + << "header_data: message_type,station_id,time_time,lat,lon,elv\n"; for (int idx=0; idx s(N, 0); +vector av(a.n(), nullptr); - av[j] = s + k; +for (int j=0; j<(a.n()); ++j) { c = a[j].c_str(); len = c.length(); - m_strncpy(s + k, c.text(), len, method_name); + m_strncpy(s.data() + k, c.text(), len, method_name); + + av[j] = s.data() + k; k += (len + 1); } -set(a.n(), av); +set(a.n(), av.data()); // // done // -if ( s ) { delete [] s; s = nullptr; } - -if ( av ) { delete [] av; av = nullptr; } - return; } @@ -196,27 +183,23 @@ void Wchar_Argv::set(int _argc, char ** _argv) clear(); -int j, k; -int argv_len; -int * len = nullptr; - +int k; Argc = _argc; -len = new int [Argc]; - - // // total length of the argument string ... // -argv_len = 0; +int argv_len = 0; +vector len(_argc, 0); -for (j=0; j<_argc; ++j) { +for (int j=0; j<_argc; ++j) { - len[j] = m_strlen(_argv[j]); // we're using the len array here because - // we don't want to call m_strlen more than - // once on each argv value + // we're using the len array here because + // we don't want to call m_strlen more than + // once on each argv value + if (_argv) len[j] = m_strlen(_argv[j]); argv_len += len[j]; @@ -235,7 +218,7 @@ for (j=0; j<_argc; ++j) { W_Buf = new wchar_t [argv_len]; -for (j=0; j " << "mbstowcs failed for string \"" << _argv[j] << "\"\n\n"; @@ -273,7 +256,7 @@ W_Argv = new wchar_t * [Argc]; k = 0; -for (j=0; j seeps_climo_grid_map_00; -static const char *def_seeps_filename = - "MET_BASE/climo/seeps/PPT24_seepsweights.nc"; -static const char *def_seeps_grid_filename = - "MET_BASE/climo/seeps/PPT24_seepsweights_grid.nc"; - static const char *var_name_sid = "sid"; static const char *var_name_lat = "lat"; static const char *var_name_lon = "lon"; @@ -55,48 +50,37 @@ double weighted_average(double, double, double, double); 
//////////////////////////////////////////////////////////////////////// -SeepsClimo *get_seeps_climo() { - if (! seeps_climo) seeps_climo = new SeepsClimo(); +SeepsClimo *get_seeps_climo(const ConcatString &seeps_point_climo_name) { + if (! seeps_climo) seeps_climo = new SeepsClimo(seeps_point_climo_name); return seeps_climo; } //////////////////////////////////////////////////////////////////////// void release_seeps_climo() { - if (seeps_climo) { delete seeps_climo; seeps_climo = 0; } + if (seeps_climo) { delete seeps_climo; seeps_climo = nullptr; } } //////////////////////////////////////////////////////////////////////// -SeepsClimoGrid *get_seeps_climo_grid(int month, int hour) { - bool not_found = true; - SeepsClimoGrid *seeps_climo_grid = nullptr; - for (map::iterator it=seeps_climo_grid_map_00.begin(); - it!=seeps_climo_grid_map_00.end(); ++it) { - if (it->first == month) { - not_found = false; - seeps_climo_grid = (SeepsClimoGrid *)it->second; - break; - } - } +SeepsClimoGrid *get_seeps_climo_grid(int month, const ConcatString &seeps_grid_climo_name, int hour) { - if (not_found) { - seeps_climo_grid = new SeepsClimoGrid(month, hour); - seeps_climo_grid_map_00[month] = seeps_climo_grid; + if (seeps_climo_grid_map_00.count(month) == 0) { + seeps_climo_grid_map_00[month] = nullptr; + seeps_climo_grid_map_00[month] = new SeepsClimoGrid(month, hour, seeps_grid_climo_name); } - return seeps_climo_grid; + + return seeps_climo_grid_map_00[month]; } //////////////////////////////////////////////////////////////////////// void release_seeps_climo_grid(int month, int hour) { - for (map::iterator it=seeps_climo_grid_map_00.begin(); - it!=seeps_climo_grid_map_00.end(); ++it) { - if (it->first == month) { - delete it->second; - seeps_climo_grid_map_00.erase(it); - break; - } + + if (seeps_climo_grid_map_00.count(month) > 0) { + delete seeps_climo_grid_map_00[month]; + seeps_climo_grid_map_00[month] = nullptr; + seeps_climo_grid_map_00.erase(month); } } @@ -108,25 
+92,29 @@ double weighted_average(double v1, double w1, double v2, double w2) { v1 * w1 + v2 * w2); } - //////////////////////////////////////////////////////////////////////// - void SeepsAggScore::clear() { + // Syntax used to define SEEPS obs/fcast categories (s_* or c_*) + // o{d|l|h} : obs in {dry(0)|light(1)|heavy(2)} category + // f{d|l|h} : fcsts in {dry(0)|light(1)|heavy(2)} category + n_obs = 0; - c12 = c13 = c21 = c23 = c31 = c32 = 0; - s12 = s13 = s21 = s23 = s31 = s32 = 0.; - pv1 = pv2 = pv3 = 0.; - pf1 = pf2 = pf3 = 0.; - mean_fcst = mean_obs = bad_data_double; - weighted_score = score = bad_data_double; + c_odfl = c_odfh = c_olfd = c_olfh = c_ohfd = c_ohfl = 0; + s_odfl = s_odfh = s_olfd = s_olfh = s_ohfd = s_ohfl = 0.0; + pv1 = pv2 = pv3 = 0.0; + pf1 = pf2 = pf3 = 0.0; + mean_fcst = mean_fcst_wgt = bad_data_double; + mean_obs = mean_obs_wgt = bad_data_double; + score = score_wgt = bad_data_double; } //////////////////////////////////////////////////////////////////////// SeepsAggScore & SeepsAggScore::operator+=(const SeepsAggScore &c) { + const char *method_name = "SeepsAggScore::operator+=() -> "; // Check for degenerate case if(n_obs == 0 && c.n_obs == 0) return *this; @@ -139,20 +127,23 @@ SeepsAggScore & SeepsAggScore::operator+=(const SeepsAggScore &c) { n_obs += c.n_obs; // Increment counts - c12 += c.c12; - c13 += c.c13; - c21 += c.c21; - c23 += c.c23; - c31 += c.c31; - c32 += c.c32; - + c_odfl += c.c_odfl; + c_odfh += c.c_odfh; + c_olfd += c.c_olfd; + c_olfh += c.c_olfh; + c_ohfd += c.c_ohfd; + c_ohfl += c.c_ohfl; + // Compute weighted averages - s12 = weighted_average(s12, w1, c.s12, w2); - s13 = weighted_average(s13, w1, c.s13, w2); - s21 = weighted_average(s21, w1, c.s21, w2); - s23 = weighted_average(s23, w1, c.s23, w2); - s31 = weighted_average(s31, w1, c.s31, w2); - s32 = weighted_average(s32, w1, c.s32, w2); + s_odfl = weighted_average(s_odfl, w1, c.s_odfl, w2); + s_odfh = weighted_average(s_odfh, w1, c.s_odfh, w2); + s_olfd = 
weighted_average(s_olfd, w1, c.s_olfd, w2); + s_olfh = weighted_average(s_olfh, w1, c.s_olfh, w2); + s_ohfd = weighted_average(s_ohfd, w1, c.s_ohfd, w2); + s_ohfl = weighted_average(s_ohfl, w1, c.s_ohfl, w2); + mlog << Debug(9) << method_name + << "s_odfl, o_odfh => " + << s_odfl << " " << s_odfh << "\n"; pv1 = weighted_average(pv1, w1, c.pv1, w2); pv2 = weighted_average(pv2, w1, c.pv2, w2); @@ -162,21 +153,25 @@ SeepsAggScore & SeepsAggScore::operator+=(const SeepsAggScore &c) { pf2 = weighted_average(pf2, w1, c.pf2, w2); pf3 = weighted_average(pf3, w1, c.pf3, w2); - mean_fcst = weighted_average(mean_fcst, w1, c.mean_fcst, w2); - mean_obs = weighted_average(mean_obs, w1, c.mean_obs, w2); + mean_fcst = weighted_average(mean_fcst, w1, c.mean_fcst, w2); + mean_fcst_wgt = weighted_average(mean_fcst_wgt, w1, c.mean_fcst_wgt, w2); - score = weighted_average(score, w1, c.score, w2); - weighted_score = weighted_average(weighted_score, w1, c.weighted_score, w2); + mean_obs = weighted_average(mean_obs, w1, c.mean_obs, w2); + mean_obs_wgt = weighted_average(mean_obs_wgt, w1, c.mean_obs_wgt, w2); + + score = weighted_average(score, w1, c.score, w2); + score_wgt = weighted_average(score_wgt, w1, c.score_wgt, w2); return *this; } - //////////////////////////////////////////////////////////////////////// +SeepsClimoBase::SeepsClimoBase(const ConcatString &seeps_climo_name) : climo_file_name{seeps_climo_name} { -SeepsClimoBase::SeepsClimoBase() { clear(); + seeps_ready = false; + } //////////////////////////////////////////////////////////////////////// @@ -188,13 +183,57 @@ SeepsClimoBase::~SeepsClimoBase() { //////////////////////////////////////////////////////////////////////// void SeepsClimoBase::clear() { - seeps_ready = false; filtered_count = 0; seeps_p1_thresh.clear(); } //////////////////////////////////////////////////////////////////////// +ConcatString SeepsClimoBase::get_climo_filename() { + ConcatString log_seeps_filename; + ConcatString seeps_filename; + const 
char *method_name = "SeepsClimoBase::get_climo_filename() -> "; + + // Use the environment variable, if set. + ConcatString env_climo_name = get_env_climo_name(); + bool use_env = get_env(env_climo_name.c_str(), seeps_filename); + if(!use_env) { + seeps_filename = climo_file_name.nonempty() ? climo_file_name : get_def_climo_name(); + } + seeps_filename = replace_path(seeps_filename); + + seeps_ready = file_exists(seeps_filename.c_str()); + if (seeps_ready) { + mlog << Debug(7) << method_name + << "SEEPS climo name=\"" + << seeps_filename.c_str() << "\"\n"; + } + else { + ConcatString message = ""; + ConcatString message2 = ""; + if (use_env) { + message.add("from the environment variable "); + message.add(env_climo_name); + message2.add("Correct the environment variable"); + } + else { + message.add(climo_file_name.nonempty() + ? "from the configuration" : "from the default"); + message2.add("Correct the configuration"); + } + mlog << Warning << "\n" << method_name + << "The SEEPS climo name \"" << seeps_filename.c_str() + << "\" " << message << " does not exist!\n" + << message2 << " to set its location " + << "or disable output for SEEPS and SEEPS_MPR.\n\n"; + exit(1); + } + + return seeps_filename; +} + +//////////////////////////////////////////////////////////////////////// + void SeepsClimoBase::set_p1_thresh(const SingleThresh &p1_thresh) { seeps_p1_thresh = p1_thresh; } @@ -203,19 +242,12 @@ void SeepsClimoBase::set_p1_thresh(const SingleThresh &p1_thresh) { //////////////////////////////////////////////////////////////////////// -SeepsClimo::SeepsClimo() { +SeepsClimo::SeepsClimo(const ConcatString &seeps_climo_name) : SeepsClimoBase{seeps_climo_name} { + + clear(); + ConcatString seeps_name = get_climo_filename(); + if (file_exists(seeps_name.c_str())) read_seeps_climo_grid(seeps_name); - ConcatString seeps_name = get_seeps_climo_filename(); - seeps_ready = file_exists(seeps_name.c_str()); - if (seeps_ready) read_seeps_scores(seeps_name); - else { - mlog 
<< Error << "\nSeepsClimo::SeepsClimo() -> " - << "The SEEPS point climo data \"" << seeps_name << "\" is missing!\n" - << "Set the " << MET_ENV_SEEPS_POINT_CLIMO_NAME - << " environment variable to define its location " - << "or disable output for SEEPS and SEEPS_MPR.\n\n"; - exit(1); - } } //////////////////////////////////////////////////////////////////////// @@ -249,13 +281,17 @@ SeepsClimoRecord *SeepsClimo::create_climo_record( double *p1, double *p2, double *t1, double *t2, double *scores) { int offset; SeepsClimoRecord *record = new SeepsClimoRecord(); + const char *method_name = "SeepsClimo::create_climo_record() -> "; record->sid = sid; record->lat = lat; record->lon = lon; record->elv = elv; if (standalone_debug_seeps && SAMPLE_STATION_ID == sid) { - cout << " sid=" << sid << ", lat=" << lat << ", lon=" << lon << ", elv=" << elv << "\n"; + cout << method_name + << "sid=" << sid << ", lat=" << lat + << ", lon=" << lon << ", elv=" << elv + << "\n"; } for (int idx=0; idxp1[idx] = p1[idx]; @@ -264,7 +300,8 @@ SeepsClimoRecord *SeepsClimo::create_climo_record( record->t2[idx] = t2[idx]; if (standalone_debug_seeps && SAMPLE_STATION_ID == sid) { - cout << str_format("\t%2d: %6.3f %6.3f %6.3f %6.3f ", + cout << method_name + << str_format("\t%2d: %6.3f %6.3f %6.3f %6.3f ", (idx+1), record->p1[idx], record->p2[idx], record->t1[idx], record->t2[idx]); } @@ -272,7 +309,8 @@ SeepsClimoRecord *SeepsClimo::create_climo_record( offset = idx*SEEPS_MATRIX_SIZE + idx_m; record->scores[idx][idx_m] = scores[offset]; if (standalone_debug_seeps && SAMPLE_STATION_ID == sid) { - cout << str_format(" %.3f", record->scores[idx][idx_m]); + cout << method_name + << str_format(" %.3f", record->scores[idx][idx_m]); } } if (standalone_debug_seeps && SAMPLE_STATION_ID == sid) cout << "\n"; @@ -287,7 +325,7 @@ SeepsRecord *SeepsClimo::get_record(int sid, int month, int hour) { SeepsRecord *record = nullptr; const char *method_name = "SeepsClimo::get_record() -> "; - if (seeps_ready) { 
+ if (is_seeps_ready()) { SeepsClimoRecord *climo_record = nullptr; map::iterator it; if (hour < 6 || hour >= 18) { @@ -300,7 +338,7 @@ SeepsRecord *SeepsClimo::get_record(int sid, int month, int hour) { } if (nullptr != climo_record) { double p1 = climo_record->p1[month-1]; - if (seeps_p1_thresh.check(p1)) { + if (check_seeps_p1_thresh(p1)) { record = new SeepsRecord; record->sid = climo_record->sid; record->lat = climo_record->lat; @@ -311,14 +349,24 @@ SeepsRecord *SeepsClimo::get_record(int sid, int month, int hour) { record->p2 = climo_record->p2[month-1]; record->t1 = climo_record->t1[month-1]; record->t2 = climo_record->t2[month-1]; + mlog << Debug(9) << method_name + << "record info: sid, lat, lon, month, p1, p2, t1, t2 => " + << record->sid << " " << record->lat << " " + << record->lon << " " << record->month << " " + << record->p1 << " " << record->p2 << " " + << record->t1 << " " << record->t2 << "\n"; for (int idx=0; idxscores[idx] = climo_record->scores[month-1][idx]; + record->scores[idx] = climo_record->scores[month-1][idx]; + mlog << Debug(7) << method_name + << "record info (SEEPS matrix): score => " + << record->scores[idx] << "\n"; } } else if (!is_eq(p1, bad_data_double)) { - filtered_count++; - mlog << Debug(7) << method_name << " filtered by threshold p1=" - << climo_record->p1[month-1] <<"\n"; + increase_filtered_count(); + mlog << Debug(7) << method_name + << "filtered by threshold p1=" + << climo_record->p1[month-1] << "\n"; } } } @@ -330,45 +378,21 @@ SeepsRecord *SeepsClimo::get_record(int sid, int month, int hour) { << "or disable output for SEEPS and SEEPS_MPR.\n\n"; exit(1); } + mlog << Debug(9) << method_name + << "sid = " << sid + << ", month = " << month << ", hour = " << hour + << ", filtered_count = " << get_filtered_count() << "\n"; return record; } //////////////////////////////////////////////////////////////////////// -ConcatString SeepsClimo::get_seeps_climo_filename() { - ConcatString seeps_filename; - const char 
*method_name = "SeepsClimo::get_seeps_climo_filename() -> "; - - // Use the environment variable, if set. - bool use_env = get_env(MET_ENV_SEEPS_POINT_CLIMO_NAME, seeps_filename); - if(use_env) seeps_filename = replace_path(seeps_filename); - else seeps_filename = replace_path(def_seeps_filename); - - if (seeps_ready = file_exists(seeps_filename.c_str())) { - mlog << Debug(7) << method_name << "SEEPS point climo name=\"" - << seeps_filename.c_str() << "\"\n"; - } - else { - ConcatString message = ""; - if (use_env) { - message.add("from the env. name "); - message.add(MET_ENV_SEEPS_POINT_CLIMO_NAME); - } - mlog << Warning << "\n" << method_name - << "The SEEPS point climo name \"" << seeps_filename.c_str() - << "\"" << message << " does not exist!\n\n"; - } - - return seeps_filename; -} - -//////////////////////////////////////////////////////////////////////// - -double SeepsClimo::get_score(int sid, double p_fcst, double p_obs, - int month, int hour) { +double SeepsClimo::get_seeps_category(int sid, double p_fcst, double p_obs, + int month, int hour) { double score = bad_data_double; SeepsRecord *record = get_record(sid, month, hour); + const char *method_name = "SeepsClimo::get_seeps_category() -> "; if (nullptr != record) { // Determine location in contingency table @@ -376,6 +400,10 @@ double SeepsClimo::get_score(int sid, double p_fcst, double p_obs, int jc = (p_fcst>record->t1)+(p_fcst>record->t2); score = record->scores[(jc*3)+ic]; + mlog << Debug(9) << method_name + << "ic, jc, score => " + << ic << " " << jc << " " + << score << "\n"; delete record; } @@ -388,6 +416,7 @@ SeepsScore *SeepsClimo::get_seeps_score(int sid, double p_fcst, double p_obs, int month, int hour) { SeepsScore *score = nullptr; SeepsRecord *record = get_record(sid, month, hour); + const char *method_name = "SeepsClimo::get_seeps_score() -> "; if (nullptr != record) { score = new SeepsScore(); @@ -395,18 +424,27 @@ SeepsScore *SeepsClimo::get_seeps_score(int sid, double p_fcst, double 
p_obs, score->p2 = record->p2; score->t1 = record->t1; score->t2 = record->t2; - + mlog << Debug(9) << method_name + << "p1, p2, t1, t2 => " + << score->p1 << " " << score->p2 << " " + << score->t1 << " " << score->t2 + << "\n"; + score->obs_cat = (p_obs>record->t1)+(p_obs>record->t2); score->fcst_cat = (p_fcst>record->t1)+(p_fcst>record->t2); score->s_idx = (score->fcst_cat*3)+score->obs_cat; score->score = record->scores[score->s_idx]; + mlog << Debug(9) << method_name + << "obs_cat, fc_cat, s_idx, score => " + << score->obs_cat << " " << score->fcst_cat + << " " << score->s_idx << " " << score->score + << "\n"; delete record; } return score; } - //////////////////////////////////////////////////////////////////////// void SeepsClimo::print_all() { @@ -467,9 +505,9 @@ void SeepsClimo::print_record(SeepsRecord *record, bool with_header) { //////////////////////////////////////////////////////////////////////// -void SeepsClimo::read_seeps_scores(ConcatString filename) { +void SeepsClimo::read_seeps_climo_grid(const ConcatString &filename) { clock_t clock_time = clock(); - const char *method_name = "SeepsClimo::read_records() -> "; + const char *method_name = "SeepsClimo::read_seeps_climo_grid() -> "; try { double p1_00_buf[SEEPS_MONTH]; @@ -484,25 +522,31 @@ void SeepsClimo::read_seeps_scores(ConcatString filename) { double matrix_12_buf[SEEPS_MONTH*SEEPS_MATRIX_SIZE]; NcFile *nc_file = open_ncfile(filename.c_str()); + clear(); + // dimensions: month = 12 ; nstn = 5293 ; nmatrix = 9 ; get_dim(nc_file, dim_name_nstn, nstn, true); - mlog << Debug(6) << method_name << "dimensions nstn = " << nstn << "\n"; - if (standalone_debug_seeps) cout << "dimensions nstn = " << nstn << "\n"; - - int *sid_array = new int[nstn]; - double *lat_array = new double[nstn]; - double *lon_array = new double[nstn]; - double *elv_array = new double[nstn]; - double *p1_00_array = new double[nstn*SEEPS_MONTH]; - double *p2_00_array = new double[nstn*SEEPS_MONTH]; - double *t1_00_array = new 
double[nstn*SEEPS_MONTH]; - double *t2_00_array = new double[nstn*SEEPS_MONTH]; - double *p1_12_array = new double[nstn*SEEPS_MONTH]; - double *p2_12_array = new double[nstn*SEEPS_MONTH]; - double *t1_12_array = new double[nstn*SEEPS_MONTH]; - double *t2_12_array = new double[nstn*SEEPS_MONTH]; - double *matrix_00_array = new double[nstn*SEEPS_MONTH*SEEPS_MATRIX_SIZE]; - double *matrix_12_array = new double[nstn*SEEPS_MONTH*SEEPS_MATRIX_SIZE]; + mlog << Debug(6) << method_name + << "dimensions nstn = " << nstn << "\n"; + if (standalone_debug_seeps) { + cout << method_name + << "dimensions nstn = " << nstn << "\n"; + } + + vector sid_array(nstn); + vector lat_array(nstn); + vector lon_array(nstn); + vector elv_array(nstn); + vector p1_00_array(nstn*SEEPS_MONTH); + vector p2_00_array(nstn*SEEPS_MONTH); + vector t1_00_array(nstn*SEEPS_MONTH); + vector t2_00_array(nstn*SEEPS_MONTH); + vector p1_12_array(nstn*SEEPS_MONTH); + vector p2_12_array(nstn*SEEPS_MONTH); + vector t1_12_array(nstn*SEEPS_MONTH); + vector t2_12_array(nstn*SEEPS_MONTH); + vector matrix_00_array(nstn*SEEPS_MONTH*SEEPS_MATRIX_SIZE); + vector matrix_12_array(nstn*SEEPS_MONTH*SEEPS_MATRIX_SIZE); NcVar var_sid = get_nc_var(nc_file, var_name_sid); NcVar var_lat = get_nc_var(nc_file, var_name_lat); @@ -519,72 +563,72 @@ void SeepsClimo::read_seeps_scores(ConcatString filename) { NcVar var_matrix_00 = get_nc_var(nc_file, var_name_matrix_00); NcVar var_matrix_12 = get_nc_var(nc_file, var_name_matrix_12); - if (IS_INVALID_NC(var_sid) || !get_nc_data(&var_sid, sid_array)) { + if (IS_INVALID_NC(var_sid) || !get_nc_data(&var_sid, sid_array.data())) { mlog << Error << "\n" << method_name << "Did not get sid\n\n"; exit(1); } - if (IS_INVALID_NC(var_lat) || !get_nc_data(&var_lat, lat_array)) { + if (IS_INVALID_NC(var_lat) || !get_nc_data(&var_lat, lat_array.data())) { mlog << Error << "\n" << method_name << "Did not get lat\n\n"; exit(1); } - if (IS_INVALID_NC(var_lon) || !get_nc_data(&var_lon, lon_array)) { + if 
(IS_INVALID_NC(var_lon) || !get_nc_data(&var_lon, lon_array.data())) { mlog << Error << "\n" << method_name << "Did not get lon\n\n"; exit(1); } - if (IS_INVALID_NC(var_elv) || !get_nc_data(&var_elv, elv_array)) { + if (IS_INVALID_NC(var_elv) || !get_nc_data(&var_elv, elv_array.data())) { mlog << Error << "\n" << method_name << "Did not get elv\n\n"; exit(1); } - if (IS_INVALID_NC(var_p1_00) || !get_nc_data(&var_p1_00, p1_00_array)) { + if (IS_INVALID_NC(var_p1_00) || !get_nc_data(&var_p1_00, p1_00_array.data())) { mlog << Error << "\n" << method_name << "Did not get p1_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_p2_00) || !get_nc_data(&var_p2_00, p2_00_array)) { + if (IS_INVALID_NC(var_p2_00) || !get_nc_data(&var_p2_00, p2_00_array.data())) { mlog << Error << "\n" << method_name << "Did not get p2_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_t1_00) || !get_nc_data(&var_t1_00, t1_00_array)) { + if (IS_INVALID_NC(var_t1_00) || !get_nc_data(&var_t1_00, t1_00_array.data())) { mlog << Error << "\n" << method_name << "Did not get t1_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_t2_00) || !get_nc_data(&var_t2_00, t2_00_array)) { + if (IS_INVALID_NC(var_t2_00) || !get_nc_data(&var_t2_00, t2_00_array.data())) { mlog << Error << "\n" << method_name << "Did not get t2_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_p1_12) || !get_nc_data(&var_p1_12, p1_12_array)) { + if (IS_INVALID_NC(var_p1_12) || !get_nc_data(&var_p1_12, p1_12_array.data())) { mlog << Error << "\n" << method_name << "Did not get p1_12\n\n"; exit(1); } - if (IS_INVALID_NC(var_p2_12) || !get_nc_data(&var_p2_12, p2_12_array)) { + if (IS_INVALID_NC(var_p2_12) || !get_nc_data(&var_p2_12, p2_12_array.data())) { mlog << Error << "\n" << method_name << "Did not get p2_12\n\n"; exit(1); } - if (IS_INVALID_NC(var_t1_12) || !get_nc_data(&var_t1_12, t1_12_array)) { + if (IS_INVALID_NC(var_t1_12) || !get_nc_data(&var_t1_12, t1_12_array.data())) { mlog << Error << "\n" << method_name << "Did not get t1_12\n\n"; exit(1); } - if 
(IS_INVALID_NC(var_t2_12) || !get_nc_data(&var_t2_12, t2_12_array)) { + if (IS_INVALID_NC(var_t2_12) || !get_nc_data(&var_t2_12, t2_12_array.data())) { mlog << Error << "\n" << method_name << "Did not get t2_12\n\n"; exit(1); } - if (IS_INVALID_NC(var_matrix_00) || !get_nc_data(&var_matrix_00, matrix_00_array)) { + if (IS_INVALID_NC(var_matrix_00) || !get_nc_data(&var_matrix_00, matrix_00_array.data())) { mlog << Error << "\n" << method_name << "Did not get matrix_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_matrix_12) || !get_nc_data(&var_matrix_12, matrix_12_array)) { + if (IS_INVALID_NC(var_matrix_12) || !get_nc_data(&var_matrix_12, matrix_12_array.data())) { mlog << Error << "\n" << method_name << "Did not get matrix_12\n\n"; exit(1); @@ -619,30 +663,19 @@ void SeepsClimo::read_seeps_scores(ConcatString filename) { seeps_score_12_map[sid] = rec_12; } - if (sid_array) delete [] sid_array; - if (lat_array) delete [] lat_array; - if (lon_array) delete [] lon_array; - if (elv_array) delete [] elv_array; - if (p1_00_array) delete [] p1_00_array; - if (p2_00_array) delete [] p2_00_array; - if (t1_00_array) delete [] t1_00_array; - if (t2_00_array) delete [] t2_00_array; - if (p1_12_array) delete [] p1_12_array; - if (p2_12_array) delete [] p2_12_array; - if (t1_12_array) delete [] t1_12_array; - if (t2_12_array) delete [] t2_12_array; - if (matrix_00_array) delete [] matrix_00_array; - if (matrix_12_array) delete [] matrix_12_array; - nc_file->close(); float duration = (float)(clock() - clock_time)/CLOCKS_PER_SEC; - mlog << Debug(6) << method_name << "took " << duration << " seconds\n"; - if (standalone_debug_seeps) cout << method_name << "took " << duration << " seconds\n"; + mlog << Debug(6) << method_name + << "took " << duration << " seconds\n"; + if (standalone_debug_seeps) { + cout << method_name + << "took " << duration << " seconds\n"; + } } catch(int i_err) { - seeps_ready = false; + set_seeps_ready(false); mlog << Error << "\n" << method_name << "encountered 
an error on reading " << filename << ". ecception_code=" << i_err << "\n\n"; @@ -651,51 +684,45 @@ void SeepsClimo::read_seeps_scores(ConcatString filename) { } - - //////////////////////////////////////////////////////////////////////// - -SeepsClimoGrid::SeepsClimoGrid(int month, int hour) : month{month}, hour{hour} +SeepsClimoGrid::SeepsClimoGrid(int month, int hour, const ConcatString &seeps_climo_name) + : month{month}, hour{hour}, SeepsClimoBase{seeps_climo_name} { + clear(); - p1_buf = p2_buf = t1_buf = t2_buf = nullptr; - s12_buf = s13_buf = s21_buf = s23_buf = s31_buf = s32_buf = nullptr; + ConcatString seeps_name = get_climo_filename(); + if (file_exists(seeps_name.c_str())) read_seeps_climo_grid(seeps_name); +} - ConcatString seeps_name = get_seeps_climo_filename(); - seeps_ready = file_exists(seeps_name.c_str()); - if (seeps_ready) read_seeps_scores(seeps_name); - else { - mlog << Error << "\nSeepsClimoGrid::SeepsClimoGrid -> " - << "The SEEPS grid climo data \"" << seeps_name << "\" is missing!\n" - << "Set the " << MET_ENV_SEEPS_GRID_CLIMO_NAME - << " environment variable to define its location " - << "or disable output for SEEPS.\n\n"; - exit(1); - } +//////////////////////////////////////////////////////////////////////// +SeepsClimoGrid::~SeepsClimoGrid() { + clear(); } //////////////////////////////////////////////////////////////////////// -SeepsClimoGrid::~SeepsClimoGrid() { +void SeepsClimoGrid::init_from_scratch() { clear(); } //////////////////////////////////////////////////////////////////////// void SeepsClimoGrid::clear() { + SeepsClimoBase::clear(); - if (nullptr != p1_buf) { delete [] p1_buf; p1_buf = nullptr; } - if (nullptr != p2_buf) { delete [] p2_buf; p2_buf = nullptr; } - if (nullptr != t1_buf) { delete [] t1_buf; t1_buf = nullptr; } - if (nullptr != t2_buf) { delete [] t2_buf; t2_buf = nullptr; } - if (nullptr != s12_buf) { delete [] s12_buf; s12_buf = nullptr; } - if (nullptr != s13_buf) { delete [] s13_buf; s13_buf = nullptr; } 
- if (nullptr != s21_buf) { delete [] s21_buf; s21_buf = nullptr; } - if (nullptr != s23_buf) { delete [] s23_buf; s23_buf = nullptr; } - if (nullptr != s31_buf) { delete [] s31_buf; s31_buf = nullptr; } - if (nullptr != s32_buf) { delete [] s32_buf; s32_buf = nullptr; } + + p1_buf.clear(); + p2_buf.clear(); + t1_buf.clear(); + t2_buf.clear(); + s_odfl_buf.clear(); + s_odfh_buf.clear(); + s_olfd_buf.clear(); + s_olfh_buf.clear(); + s_ohfd_buf.clear(); + s_ohfl_buf.clear(); }; //////////////////////////////////////////////////////////////////////// @@ -704,15 +731,16 @@ SeepsScore *SeepsClimoGrid::get_record(int ix, int iy, double p_fcst, double p_obs) { SeepsScore *seeps_record = nullptr; const char *method_name = "SeepsClimoGrid::get_record() -> "; + if (!is_eq(p_fcst, -9999.0) && !is_eq(p_obs, -9999.0)) { int offset = iy * nx + ix; double p1 = p1_buf[offset]; - if (seeps_p1_thresh.check(p1)) { + if (check_seeps_p1_thresh(p1)) { // Determine location in contingency table int ic = (p_obs>t1_buf[offset])+(p_obs>t2_buf[offset]); int jc = (p_fcst>t1_buf[offset])+(p_fcst>t2_buf[offset]); - double score = get_score(offset, ic, jc); + double score = get_seeps_score(offset, ic, jc); seeps_record = new SeepsScore(); seeps_record->obs_cat = ic; @@ -725,8 +753,10 @@ SeepsScore *SeepsClimoGrid::get_record(int ix, int iy, seeps_record->score = score; } else if (~is_eq(p1, bad_data_double)) { - filtered_count++; - mlog << Debug(7) << method_name << " filtered by threshold p1=" << p1_buf[offset] <<"\n"; + increase_filtered_count(); + mlog << Debug(7) << method_name + << " filtered by threshold p1=" + << p1_buf[offset] << "\n"; } } @@ -735,86 +765,46 @@ SeepsScore *SeepsClimoGrid::get_record(int ix, int iy, //////////////////////////////////////////////////////////////////////// -double SeepsClimoGrid::get_score(int offset, int obs_cat, int fcst_cat) { +double SeepsClimoGrid::get_seeps_score(int offset, int obs_cat, int fcst_cat) { double score = bad_data_double; + const char 
*method_name = "SeepsClimoGrid::get_seeps_score() -> "; - if (offset >= (nx * ny)) { - mlog << Error << "\nSeepsClimoGrid::get_score() --> offset (" << offset - << " is too big (" << (nx*ny) << ")\n"; + if (offset < 0 || offset >= (nx * ny)) { + mlog << Error << method_name + << "offset (" << offset << ") is too big (" + << (nx*ny) << ")\n"; return score; } + // Place climate score depending on obs and forecast categories if (obs_cat == 0) { - if (fcst_cat == 1) score = s12_buf[offset]; - else if (fcst_cat == 2) score = s13_buf[offset]; + if (fcst_cat == 1) score = s_odfl_buf[offset]; + else if (fcst_cat == 2) score = s_odfh_buf[offset]; else score = 0.; } else if (obs_cat == 1) { - if (fcst_cat == 0) score = s21_buf[offset]; - else if (fcst_cat == 2) score = s23_buf[offset]; + if (fcst_cat == 0) score = s_olfd_buf[offset]; + else if (fcst_cat == 2) score = s_olfh_buf[offset]; else score = 0.; } else { - if (fcst_cat == 0) score = s31_buf[offset]; - else if (fcst_cat == 1) score = s32_buf[offset]; + if (fcst_cat == 0) score = s_ohfd_buf[offset]; + else if (fcst_cat == 1) score = s_ohfl_buf[offset]; else score = 0.; } + mlog << Debug(9) << method_name + << "obs_cat = " << obs_cat + << ", fcst_cat = " << fcst_cat + << ", score = " << score << "\n"; return score; } //////////////////////////////////////////////////////////////////////// -double SeepsClimoGrid::get_score(int ix, int iy, double p_fcst, double p_obs) { - double score = bad_data_double; - - if (!is_eq(p_fcst, -9999.0) && !is_eq(p_obs, -9999.0)) { - int offset = iy * nx + ix; - // Determine location in contingency table - int ic = (p_obs>t1_buf[offset])+(p_obs>t2_buf[offset]); - int jc = (p_fcst>t1_buf[offset])+(p_fcst>t2_buf[offset]); - score = get_score(offset, ic, jc); - } - - return score; -} - -//////////////////////////////////////////////////////////////////////// - -ConcatString SeepsClimoGrid::get_seeps_climo_filename() { - ConcatString seeps_filename; - const char *method_name = 
"SeepsClimoGrid::get_seeps_climo_filename() -> "; - - // Use the environment variable, if set. - bool use_env = get_env(MET_ENV_SEEPS_GRID_CLIMO_NAME, seeps_filename); - if(use_env) { - seeps_filename = replace_path(seeps_filename); - } - else seeps_filename = replace_path(def_seeps_grid_filename); - - if (seeps_ready = file_exists(seeps_filename.c_str())) { - mlog << Debug(7) << method_name << "SEEPS grid climo name=\"" - << seeps_filename.c_str() << "\"\n"; - } - else { - ConcatString message = ""; - if (use_env) { - message.add("from the env. name "); - message.add(MET_ENV_SEEPS_GRID_CLIMO_NAME); - } - mlog << Warning << "\n" << method_name - << "The SEEPS grid climo name \"" << seeps_filename.c_str() - << "\"" << message << " does not exist!\n\n"; - } - - return seeps_filename; -} - -//////////////////////////////////////////////////////////////////////// - -void SeepsClimoGrid::read_seeps_scores(ConcatString filename) { +void SeepsClimoGrid::read_seeps_climo_grid(const ConcatString &filename) { clock_t clock_time = clock(); - const char *method_name = "SeepsClimoGrid::read_seeps_scores() -> "; + const char *method_name = "SeepsClimoGrid::read_seeps_climo_grid() -> "; try { NcFile *nc_file = open_ncfile(filename.c_str()); @@ -823,26 +813,33 @@ void SeepsClimoGrid::read_seeps_scores(ConcatString filename) { if (!has_dim(nc_file, dim_name_lat) || !has_dim(nc_file, dim_name_lon)) { mlog << Error << "\n" << method_name << "\"" << filename << "\" is not valid SEEPS climo file\n\n"; - //exit(1); + exit(1); } get_dim(nc_file, dim_name_lat, ny, true); get_dim(nc_file, dim_name_lon, nx, true); - mlog << Debug(6) << method_name << "dimensions lon = " << nx << " lat = " << ny + mlog << Debug(6) << method_name + << "dimensions lon = " << nx << " lat = " << ny << " month=" << month << "\n"; - if (standalone_debug_seeps) cout << "dimensions lon = " << nx << " lat = " << ny - << " month=" << month << "\n";; - - p1_buf = new double[nx*ny]; - p2_buf = new double[nx*ny]; - 
t1_buf = new double[nx*ny]; - t2_buf = new double[nx*ny]; - s12_buf = new double[nx*ny]; - s13_buf = new double[nx*ny]; - s21_buf = new double[nx*ny]; - s23_buf = new double[nx*ny]; - s31_buf = new double[nx*ny]; - s32_buf = new double[nx*ny]; + if (standalone_debug_seeps) { + cout << method_name + << "dimensions lon = " << nx << " lat = " << ny + << " month=" << month << "\n"; + } + + // Variables in climo file named as s_odfl, s_odfh etc. These then stored + // into new convention s_odfl, s_odfh etc. + + p1_buf.resize(nx*ny); + p2_buf.resize(nx*ny); + t1_buf.resize(nx*ny); + t2_buf.resize(nx*ny); + s_odfl_buf.resize(nx*ny); + s_odfh_buf.resize(nx*ny); + s_olfd_buf.resize(nx*ny); + s_olfh_buf.resize(nx*ny); + s_ohfd_buf.resize(nx*ny); + s_ohfl_buf.resize(nx*ny); LongArray curs; // = { month-1, 0, 0 }; LongArray dims; // = { 1, ny, nx }; @@ -850,12 +847,12 @@ void SeepsClimoGrid::read_seeps_scores(ConcatString filename) { NcVar var_p2_00 = get_nc_var(nc_file, var_name_p2_00); NcVar var_t1_00 = get_nc_var(nc_file, var_name_t1_00); NcVar var_t2_00 = get_nc_var(nc_file, var_name_t2_00); - NcVar var_s12_00 = get_nc_var(nc_file, var_name_s12_00); - NcVar var_s13_00 = get_nc_var(nc_file, var_name_s13_00); - NcVar var_s21_00 = get_nc_var(nc_file, var_name_s21_00); - NcVar var_s23_00 = get_nc_var(nc_file, var_name_s23_00); - NcVar var_s31_00 = get_nc_var(nc_file, var_name_s31_00); - NcVar var_s32_00 = get_nc_var(nc_file, var_name_s32_00); + NcVar var_odfl_00 = get_nc_var(nc_file, var_name_odfl_00); + NcVar var_odfh_00 = get_nc_var(nc_file, var_name_odfh_00); + NcVar var_olfd_00 = get_nc_var(nc_file, var_name_olfd_00); + NcVar var_olfh_00 = get_nc_var(nc_file, var_name_olfh_00); + NcVar var_ohfd_00 = get_nc_var(nc_file, var_name_ohfd_00); + NcVar var_ohfl_00 = get_nc_var(nc_file, var_name_ohfl_00); curs.add(month-1); curs.add(0); @@ -864,73 +861,86 @@ void SeepsClimoGrid::read_seeps_scores(ConcatString filename) { dims.add(ny); dims.add(nx); - if (IS_INVALID_NC(var_p1_00) 
|| !get_nc_data(&var_p1_00, p1_buf, dims, curs)) { + mlog << Debug(9) << method_name + << "var_odfl_00 = " << &var_odfl_00 << "\n"; + + if (IS_INVALID_NC(var_p1_00) || !get_nc_data(&var_p1_00, p1_buf.data(), dims, curs)) { mlog << Error << "\n" << method_name << "Did not get p1_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_p2_00) || !get_nc_data(&var_p2_00, p2_buf, dims, curs)) { + if (IS_INVALID_NC(var_p2_00) || !get_nc_data(&var_p2_00, p2_buf.data(), dims, curs)) { mlog << Error << "\n" << method_name << "Did not get p2_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_t1_00) || !get_nc_data(&var_t1_00, t1_buf, dims, curs)) { + if (IS_INVALID_NC(var_t1_00) || !get_nc_data(&var_t1_00, t1_buf.data(), dims, curs)) { mlog << Error << "\n" << method_name << "Did not get t1_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_t2_00) || !get_nc_data(&var_t2_00, t2_buf, dims, curs)) { + if (IS_INVALID_NC(var_t2_00) || !get_nc_data(&var_t2_00, t2_buf.data(), dims, curs)) { mlog << Error << "\n" << method_name << "Did not get t2_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_s12_00) || !get_nc_data(&var_s12_00, s12_buf, dims, curs)) { + if (IS_INVALID_NC(var_odfl_00) || !get_nc_data(&var_odfl_00, s_odfl_buf.data(), dims, curs)) { mlog << Error << "\n" << method_name - << "Did not get s12_00\n\n"; + << "Did not get odfl_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_s13_00) || !get_nc_data(&var_s12_00, s13_buf, dims, curs)) { + if (IS_INVALID_NC(var_odfh_00) || !get_nc_data(&var_odfh_00, s_odfh_buf.data(), dims, curs)) { mlog << Error << "\n" << method_name - << "Did not get s13_00\n\n"; + << "Did not get odfh_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_s21_00) || !get_nc_data(&var_s21_00, s21_buf, dims, curs)) { + if (IS_INVALID_NC(var_olfd_00) || !get_nc_data(&var_olfd_00, s_olfd_buf.data(), dims, curs)) { mlog << Error << "\n" << method_name - << "Did not get s21_00\n\n"; + << "Did not get olfd_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_s23_00) || !get_nc_data(&var_s23_00, s23_buf, dims, 
curs)) { + if (IS_INVALID_NC(var_olfh_00) || !get_nc_data(&var_olfh_00, s_olfh_buf.data(), dims, curs)) { mlog << Error << "\n" << method_name - << "Did not get s23_00\n\n"; + << "Did not get olfh_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_s31_00) || !get_nc_data(&var_s31_00, s31_buf, dims, curs)) { + if (IS_INVALID_NC(var_ohfd_00) || !get_nc_data(&var_ohfd_00, s_ohfd_buf.data(), dims, curs)) { mlog << Error << "\n" << method_name - << "Did not get s31_00\n\n"; + << "Did not get ohfd_00\n\n"; exit(1); } - if (IS_INVALID_NC(var_s32_00) || !get_nc_data(&var_s32_00, s32_buf, dims, curs)) { + if (IS_INVALID_NC(var_ohfl_00) || !get_nc_data(&var_ohfl_00, s_ohfl_buf.data(), dims, curs)) { mlog << Error << "\n" << method_name - << "Did not get s32_00\n\n"; + << "Did not get ohfl_00\n\n"; exit(1); } nc_file->close(); + for(int i = 0; i < ny+3; i++) { + mlog << Debug(9) << method_name + << "s_odfl_buf[" << i << "] = " << s_odfl_buf[i] << "\n"; + } + float duration = (float)(clock() - clock_time)/CLOCKS_PER_SEC; - mlog << Debug(6) << method_name << "took " << duration << " seconds\n"; - if (standalone_debug_seeps) cout << method_name << "took " << duration << " seconds\n"; + mlog << Debug(6) << method_name + << "took " << duration << " seconds\n"; + if (standalone_debug_seeps) { + cout << method_name + << "took " << duration << " seconds\n"; + } } catch(...) 
{ - seeps_ready = false; + set_seeps_ready(false); mlog << Error << "\n" << method_name << "encountered an error on reading " << filename << ".\n\n"; - exit(-1); + } // end catch block } + //////////////////////////////////////////////////////////////////////// void SeepsClimoGrid::print_all() { @@ -942,36 +952,25 @@ void SeepsClimoGrid::print_all() { cout << method_name << " p2_buf[" << offset << "] = " << p2_buf[offset] << "\n"; cout << method_name << " t1_buf[" << offset << "] = " << t1_buf[offset] << "\n"; cout << method_name << " t2_buf[" << offset << "] = " << t2_buf[offset] << "\n"; - cout << method_name << "s12_buf[" << offset << "] = " << s12_buf[offset] << "\n"; - cout << method_name << "s13_buf[" << offset << "] = " << s13_buf[offset] << "\n"; - cout << method_name << "s21_buf[" << offset << "] = " << s21_buf[offset] << "\n"; - cout << method_name << "s23_buf[" << offset << "] = " << s23_buf[offset] << "\n"; - cout << method_name << "s31_buf[" << offset << "] = " << s31_buf[offset] << "\n"; - cout << method_name << "s32_buf[" << offset << "] = " << s32_buf[offset] << "\n"; - - offset = 400; - cout << method_name << " p1_buf[" << offset << "] = " << p1_buf[offset] << "\n"; - cout << method_name << " p2_buf[" << offset << "] = " << p2_buf[offset] << "\n"; - cout << method_name << " t1_buf[" << offset << "] = " << t1_buf[offset] << "\n"; - cout << method_name << " t2_buf[" << offset << "] = " << t2_buf[offset] << "\n"; - cout << method_name << "s12_buf[" << offset << "] = " << s12_buf[offset] << "\n"; - cout << method_name << "s13_buf[" << offset << "] = " << s13_buf[offset] << "\n"; - cout << method_name << "s21_buf[" << offset << "] = " << s21_buf[offset] << "\n"; - cout << method_name << "s23_buf[" << offset << "] = " << s23_buf[offset] << "\n"; - cout << method_name << "s31_buf[" << offset << "] = " << s31_buf[offset] << "\n"; - cout << method_name << "s32_buf[" << offset << "] = " << s32_buf[offset] << "\n"; - + cout << method_name << "s_odfl_buf[" << 
offset << "] = " << s_odfl_buf[offset] << "\n"; + cout << method_name << "s_odfh_buf[" << offset << "] = " << s_odfh_buf[offset] << "\n"; + cout << method_name << "s_olfd_buf[" << offset << "] = " << s_olfd_buf[offset] << "\n"; + cout << method_name << "s_olfh_buf[" << offset << "] = " << s_olfh_buf[offset] << "\n"; + cout << method_name << "s_ohfd_buf[" << offset << "] = " << s_ohfd_buf[offset] << "\n"; + cout << method_name << "s_ohfl_buf[" << offset << "] = " << s_ohfl_buf[offset] << "\n"; + offset = (nx*ny) - 1; cout << method_name << " p1_buf[" << offset << "] = " << p1_buf[offset] << "\n"; cout << method_name << " p2_buf[" << offset << "] = " << p2_buf[offset] << "\n"; cout << method_name << " t1_buf[" << offset << "] = " << t1_buf[offset] << "\n"; cout << method_name << " t2_buf[" << offset << "] = " << t2_buf[offset] << "\n"; - cout << method_name << "s12_buf[" << offset << "] = " << s12_buf[offset] << "\n"; - cout << method_name << "s13_buf[" << offset << "] = " << s13_buf[offset] << "\n"; - cout << method_name << "s21_buf[" << offset << "] = " << s21_buf[offset] << "\n"; - cout << method_name << "s23_buf[" << offset << "] = " << s23_buf[offset] << "\n"; - cout << method_name << "s31_buf[" << offset << "] = " << s31_buf[offset] << "\n"; - cout << method_name << "s32_buf[" << offset << "] = " << s32_buf[offset] << "\n"; + cout << method_name << "s_odfl_buf[" << offset << "] = " << s_odfl_buf[offset] << "\n"; + cout << method_name << "s_odfh_buf[" << offset << "] = " << s_odfh_buf[offset] << "\n"; + cout << method_name << "s_olfd_buf[" << offset << "] = " << s_olfd_buf[offset] << "\n"; + cout << method_name << "s_olfh_buf[" << offset << "] = " << s_olfh_buf[offset] << "\n"; + cout << method_name << "s_ohfd_buf[" << offset << "] = " << s_ohfd_buf[offset] << "\n"; + cout << method_name << "s_ohfl_buf[" << offset << "] = " << s_ohfl_buf[offset] << "\n"; + } } diff --git a/src/libcode/vx_seeps/seeps.h b/src/libcode/vx_seeps/seeps.h index 60bca4468b..6f93ddffcb 
100644 --- a/src/libcode/vx_seeps/seeps.h +++ b/src/libcode/vx_seeps/seeps.h @@ -17,51 +17,55 @@ //////////////////////////////////////////////////////////////////////// -#define SEEPS_MONTH 12 -#define SEEPS_MATRIX_SIZE 9 +constexpr int SEEPS_MONTH = 12; +constexpr int SEEPS_MATRIX_SIZE = 9; -#define SAMPLE_STATION_ID 11035 +constexpr int SAMPLE_STATION_ID = 11035; //////////////////////////////////////////////////////////////////////// -static const char *MET_ENV_SEEPS_POINT_CLIMO_NAME = "MET_SEEPS_POINT_CLIMO_NAME"; -static const char *MET_ENV_SEEPS_GRID_CLIMO_NAME = "MET_SEEPS_GRID_CLIMO_NAME"; - -static const char *dim_name_nstn = "nstn"; - -static const char *var_name_p1_00 = "p1_00"; -static const char *var_name_p2_00 = "p2_00"; -static const char *var_name_t1_00 = "t1_00"; -static const char *var_name_t2_00 = "t2_00"; -static const char *var_name_p1_12 = "p1_12"; -static const char *var_name_p2_12 = "p2_12"; -static const char *var_name_t1_12 = "t1_12"; -static const char *var_name_t2_12 = "t2_12"; -static const char *var_name_matrix_00 = "matrix_00"; -static const char *var_name_matrix_12 = "matrix_12"; -static const char *var_name_s12_00 = "s12_00"; -static const char *var_name_s13_00 = "s13_00"; -static const char *var_name_s21_00 = "s21_00"; -static const char *var_name_s23_00 = "s23_00"; -static const char *var_name_s31_00 = "s31_00"; -static const char *var_name_s32_00 = "s32_00"; -static const char *var_name_s12_12 = "s12_12"; -static const char *var_name_s13_12 = "s13_12"; -static const char *var_name_s21_12 = "s21_12"; -static const char *var_name_s23_12 = "s23_12"; -static const char *var_name_s31_12 = "s31_12"; -static const char *var_name_s32_12 = "s32_12"; +constexpr char MET_ENV_SEEPS_POINT_CLIMO_NAME[] = "MET_SEEPS_POINT_CLIMO_NAME"; +constexpr char MET_ENV_SEEPS_GRID_CLIMO_NAME[] = "MET_SEEPS_GRID_CLIMO_NAME"; + +constexpr char dim_name_nstn[] = "nstn"; + +constexpr char var_name_p1_00[] = "p1_00"; +constexpr char var_name_p2_00[] = "p2_00"; 
+constexpr char var_name_t1_00[] = "t1_00"; +constexpr char var_name_t2_00[] = "t2_00"; +constexpr char var_name_p1_12[] = "p1_12"; +constexpr char var_name_p2_12[] = "p2_12"; +constexpr char var_name_t1_12[] = "t1_12"; +constexpr char var_name_t2_12[] = "t2_12"; +constexpr char var_name_matrix_00[] = "matrix_00"; +constexpr char var_name_matrix_12[] = "matrix_12"; +constexpr char var_name_odfl_00[] = "odfl_00"; +constexpr char var_name_odfh_00[] = "odfh_00"; +constexpr char var_name_olfd_00[] = "olfd_00"; +constexpr char var_name_olfh_00[] = "olfh_00"; +constexpr char var_name_ohfd_00[] = "ohfd_00"; +constexpr char var_name_ohfl_00[] = "ohfl_00"; +constexpr char var_name_odfl_12[] = "odfl_12"; +constexpr char var_name_odfh_12[] = "odfh_12"; +constexpr char var_name_olfd_12[] = "olfd_12"; +constexpr char var_name_olfh_12[] = "olfh_12"; +constexpr char var_name_ohfd_12[] = "ohfd_12"; +constexpr char var_name_ohfl_12[] = "ohfl_12"; +constexpr char def_seeps_point_filename[] = + "MET_BASE/climo/seeps/PPT24_seepsweights.nc"; +constexpr char def_seeps_grid_filename[] = + "MET_BASE/climo/seeps/PPT24_seepsweights_grid.nc"; //density_radius = 0.75 degrees (83km; this is described as “the smallest possible // value that ensures approximately equal representation of all subregions of Europe”.) 
-static double density_radius = 0.75; +constexpr double density_radius = 0.75; const double density_radius_rad = density_radius * rad_per_deg; //////////////////////////////////////////////////////////////////////// struct SeepsScore { // For SEEPS_MPR - int obs_cat; // i = obs category 0,1,2 - int fcst_cat; // j = model category 0,1,2 + int obs_cat; // i = obs category 0,1,2 (dry, light, heavy) + int fcst_cat; // j = model category 0,1,2 (dry, light, heavy) int s_idx; // index for 3 by 3 matrix as 1 dimensional (fcst_cat*3)+obs_cat double p1; double p2; @@ -76,19 +80,19 @@ struct SeepsAggScore { // For SEEPS void clear(); SeepsAggScore & operator+=(const SeepsAggScore &); - int n_obs; - int c12; - int c13; - int c21; - int c23; - int c31; - int c32; - double s12; - double s13; - double s21; - double s23; - double s31; - double s32; + int n_obs; + int c_odfl; + int c_odfh; + int c_olfd; + int c_olfh; + int c_ohfd; + int c_ohfl; + double s_odfl; + double s_odfh; + double s_olfd; + double s_olfh; + double s_ohfd; + double s_ohfl; double pv1; // marginal probabilities of the observed values double pv2; double pv3; @@ -96,9 +100,11 @@ struct SeepsAggScore { // For SEEPS double pf2; double pf3; double mean_fcst; + double mean_fcst_wgt; double mean_obs; + double mean_obs_wgt; double score; - double weighted_score; + double score_wgt; }; //////////////////////////////////////////////////////////////////////// @@ -135,21 +141,31 @@ struct SeepsClimoRecord { class SeepsClimoBase { - protected: - + private: bool seeps_ready; int filtered_count; - SingleThresh seeps_p1_thresh; // Range of SEEPS p1 (probability of being dry)std::map seeps_score_00_map; + SingleThresh seeps_p1_thresh; // Range of SEEPS p1 (probability of being dry) + ConcatString climo_file_name; + + protected: + + bool is_seeps_ready() { return seeps_ready; }; + void increase_filtered_count() { filtered_count++; }; + bool check_seeps_p1_thresh(double p1) { return seeps_p1_thresh.check(p1); }; + ConcatString 
get_climo_filename(); virtual void clear(); + virtual ConcatString get_env_climo_name() { return "not defined"; }; + virtual char *get_def_climo_name() { return nullptr; }; + virtual void read_seeps_climo_grid(const ConcatString &filename) {}; + void set_seeps_ready(bool _seeps_ready) { seeps_ready = _seeps_ready; }; public: - SeepsClimoBase(); - ~SeepsClimoBase(); - + SeepsClimoBase(const ConcatString &seeps_climo_name); + virtual ~SeepsClimoBase(); void set_p1_thresh(const SingleThresh &p1_thresh); - int get_filtered_count(); + int get_filtered_count() const; }; @@ -167,20 +183,25 @@ class SeepsClimo : public SeepsClimoBase { double *p1, double *p2, double *t1, double *t2, double *scores); void print_record(SeepsClimoRecord *record, bool with_header=false); - void read_records(ConcatString filename); + void read_records(const ConcatString &filename); - ConcatString get_seeps_climo_filename(); - void read_seeps_scores(ConcatString filename); + protected: + void clear() override; + ConcatString get_env_climo_name() override { return MET_ENV_SEEPS_POINT_CLIMO_NAME; }; + char *get_def_climo_name() override { return (char *)def_seeps_point_filename; }; + void read_seeps_climo_grid(const ConcatString &filename) override; public: - SeepsClimo(); + SeepsClimo(const ConcatString &seeps_climo_name); ~SeepsClimo(); - void clear(); SeepsRecord *get_record(int sid, int month, int hour); - double get_score(int sid, double p_fcst, double p_obs, int month, int hour); - SeepsScore *get_seeps_score(int sid, double p_fcst, double p_obs, int month, int hour); + double get_seeps_category(int sid, double p_fcst, double p_obs, + int month, int hour); + SeepsScore *get_seeps_score(int sid, double p_fcst, double p_obs, + int month, int hour); + void print_all(); void print_record(SeepsRecord *record, bool with_header=false); @@ -188,8 +209,6 @@ class SeepsClimo : public SeepsClimoBase { // // - SeepsRecord get_seeps_record(int sid) const; - }; 
//////////////////////////////////////////////////////////////////////// @@ -199,32 +218,35 @@ class SeepsClimoGrid : public SeepsClimoBase { private: int month; - int hour; + int hour; // not implemented int nx; int ny; - double *p1_buf; - double *p2_buf; - double *t1_buf; - double *t2_buf; - double *s12_buf; - double *s13_buf; - double *s21_buf; - double *s23_buf; - double *s31_buf; - double *s32_buf; - - ConcatString get_seeps_climo_filename(); - void read_seeps_scores(ConcatString filename); + std::vector p1_buf; + std::vector p2_buf; + std::vector t1_buf; + std::vector t2_buf; + std::vector s_odfl_buf; + std::vector s_odfh_buf; + std::vector s_olfd_buf; + std::vector s_olfh_buf; + std::vector s_ohfd_buf; + std::vector s_ohfl_buf; + + void init_from_scratch(); + + protected: + void clear() override; + ConcatString get_env_climo_name() override { return MET_ENV_SEEPS_GRID_CLIMO_NAME; }; + char *get_def_climo_name() override { return (char *)def_seeps_grid_filename; }; + void read_seeps_climo_grid(const ConcatString &filename) override; public: - SeepsClimoGrid(int month, int hour); + SeepsClimoGrid(int month, int hour, const ConcatString &seeps_climo_name); ~SeepsClimoGrid(); - void clear(); SeepsScore *get_record(int ix, int iy, double p_fcst, double p_obs); - double get_score(int offset, int obs_cat, int fcst_cat); - double get_score(int ix, int iy, double p_fcst, double p_obs); + double get_seeps_score(int offset, int obs_cat, int fcst_cat); void print_all(); // @@ -236,12 +258,12 @@ class SeepsClimoGrid : public SeepsClimoBase { //////////////////////////////////////////////////////////////////////// -inline int SeepsClimoBase::get_filtered_count() { return filtered_count; } +inline int SeepsClimoBase::get_filtered_count() const { return filtered_count; } //////////////////////////////////////////////////////////////////////// -extern SeepsClimo *get_seeps_climo(); -extern SeepsClimoGrid *get_seeps_climo_grid(int month, int hour=0); +extern SeepsClimo 
*get_seeps_climo(const ConcatString &seeps_point_climo_name); +extern SeepsClimoGrid *get_seeps_climo_grid(int month, const ConcatString &seeps_grid_climo_name, int hour=0); extern void release_seeps_climo(); extern void release_seeps_climo_grid(); diff --git a/src/libcode/vx_series_data/Makefile.in b/src/libcode/vx_series_data/Makefile.in index 897d818054..6344467e44 100644 --- a/src/libcode/vx_series_data/Makefile.in +++ b/src/libcode/vx_series_data/Makefile.in @@ -237,6 +237,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_shapedata/Makefile.in b/src/libcode/vx_shapedata/Makefile.in index 290ee7582f..9702c46133 100644 --- a/src/libcode/vx_shapedata/Makefile.in +++ b/src/libcode/vx_shapedata/Makefile.in @@ -250,6 +250,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_shapedata/engine.cc b/src/libcode/vx_shapedata/engine.cc index dcc5840a47..ffaba59145 100644 --- a/src/libcode/vx_shapedata/engine.cc +++ b/src/libcode/vx_shapedata/engine.cc @@ -1752,20 +1752,20 @@ void ModeFuzzyEngine::do_fcst_merge_engine(const char *default_config, // this will capture any previous merging performed // in the current forecast field. 
// - *(fcst_engine->fcst_raw) = *(fcst_raw); - *(fcst_engine->obs_raw) = *(fcst_raw); + *(fcst_engine->fcst_raw) = *fcst_raw; + *(fcst_engine->obs_raw) = *fcst_raw; - *(fcst_engine->fcst_conv) = *(fcst_conv); - *(fcst_engine->obs_conv) = *(fcst_conv); + *(fcst_engine->fcst_conv) = *fcst_conv; + *(fcst_engine->obs_conv) = *fcst_conv; - *(fcst_engine->fcst_thresh) = *(fcst_thresh); - *(fcst_engine->obs_thresh) = *(fcst_thresh); + *(fcst_engine->fcst_thresh) = *fcst_thresh; + *(fcst_engine->obs_thresh) = *fcst_thresh; - *(fcst_engine->fcst_mask) = *(fcst_mask); - *(fcst_engine->obs_mask) = *(fcst_mask); + *(fcst_engine->fcst_mask) = *fcst_mask; + *(fcst_engine->obs_mask) = *fcst_mask; - *(fcst_engine->fcst_split) = *(fcst_split); - *(fcst_engine->obs_split) = *(fcst_split); + *(fcst_engine->fcst_split) = *fcst_split; + *(fcst_engine->obs_split) = *fcst_split; fcst_engine->n_fcst = n_fcst; fcst_engine->n_obs = n_fcst; @@ -1919,20 +1919,20 @@ void ModeFuzzyEngine::do_obs_merge_engine(const char *default_config, // this will capture any previous merging performed // in the current observation field. 
// - *(obs_engine->fcst_raw) = *(obs_raw); - *(obs_engine->obs_raw) = *(obs_raw); + *(obs_engine->fcst_raw) = *obs_raw; + *(obs_engine->obs_raw) = *obs_raw; - *(obs_engine->fcst_conv) = *(obs_conv); - *(obs_engine->obs_conv) = *(obs_conv); + *(obs_engine->fcst_conv) = *obs_conv; + *(obs_engine->obs_conv) = *obs_conv; - *(obs_engine->fcst_thresh) = *(obs_thresh); - *(obs_engine->obs_thresh) = *(obs_thresh); + *(obs_engine->fcst_thresh) = *obs_thresh; + *(obs_engine->obs_thresh) = *obs_thresh; - *(obs_engine->fcst_mask) = *(obs_mask); - *(obs_engine->obs_mask) = *(obs_mask); + *(obs_engine->fcst_mask) = *obs_mask; + *(obs_engine->obs_mask) = *obs_mask; - *(obs_engine->fcst_split) = *(obs_split); - *(obs_engine->obs_split) = *(obs_split); + *(obs_engine->fcst_split) = *obs_split; + *(obs_engine->obs_split) = *obs_split; obs_engine->n_fcst = n_obs; obs_engine->n_obs = n_obs; diff --git a/src/libcode/vx_shapedata/ihull.h b/src/libcode/vx_shapedata/ihull.h index 3206ed2883..c662ad2eb4 100644 --- a/src/libcode/vx_shapedata/ihull.h +++ b/src/libcode/vx_shapedata/ihull.h @@ -48,7 +48,7 @@ const int y2 = p2.y - p0.y; const int k = x1*y2 - x2*y1; -if ( k > 0 ) return ( 1 ); +if ( k > 0 ) return 1; return ( (k < 0) ? 
-1 : 0 ); diff --git a/src/libcode/vx_shapedata/mode_conf_info.cc b/src/libcode/vx_shapedata/mode_conf_info.cc index 642db14034..080365c757 100644 --- a/src/libcode/vx_shapedata/mode_conf_info.cc +++ b/src/libcode/vx_shapedata/mode_conf_info.cc @@ -1192,19 +1192,23 @@ void ModeConfInfo::set_perc_thresh(const DataPlane &f_dp, // // Compute percentiles // - Fcst->conv_thresh_array.set_perc(&fsort, &osort, (NumArray *) 0, + Fcst->conv_thresh_array.set_perc(&fsort, &osort, + nullptr, nullptr, &(Fcst->conv_thresh_array), &(Obs->conv_thresh_array)); - Obs->conv_thresh_array.set_perc(&fsort, &osort, (NumArray *) 0, + Obs->conv_thresh_array.set_perc(&fsort, &osort, + nullptr, nullptr, &(Fcst->conv_thresh_array), &(Obs->conv_thresh_array)); - Fcst->merge_thresh_array.set_perc(&fsort, &osort, (NumArray *) 0, + Fcst->merge_thresh_array.set_perc(&fsort, &osort, + nullptr, nullptr, &(Fcst->merge_thresh_array), &(Obs->merge_thresh_array)); - Obs->merge_thresh_array.set_perc(&fsort, &osort, (NumArray *) 0, + Obs->merge_thresh_array.set_perc(&fsort, &osort, + nullptr, nullptr, &(Fcst->merge_thresh_array), &(Obs->merge_thresh_array)); @@ -1253,10 +1257,12 @@ void ModeConfInfo::set_perc_thresh(const DataPlane &dp) // // Compute percentiles by hacking in the same input as if its two // - F->conv_thresh_array.set_perc(&sort, &sort, (NumArray *) 0, + F->conv_thresh_array.set_perc(&sort, &sort, + nullptr, nullptr, &(F->conv_thresh_array), &(F->conv_thresh_array)); - F->merge_thresh_array.set_perc(&sort, &sort, (NumArray *) 0, + F->merge_thresh_array.set_perc(&sort, &sort, + nullptr, nullptr, &(F->merge_thresh_array), &(F->merge_thresh_array)); return; @@ -1789,14 +1795,17 @@ PercThreshType ModeConfInfo::perctype(const Mode_Field_Info &f) const if (f.conv_thresh_array.n() > 0) { pc = f.conv_thresh_array[0].get_ptype(); } - if (pm == perc_thresh_sample_climo || pc == perc_thresh_sample_climo) { + if (pm == perc_thresh_sample_fcst_climo || pm == perc_thresh_sample_obs_climo || + pc == 
perc_thresh_sample_fcst_climo || pc == perc_thresh_sample_obs_climo) { mlog << Error << "\nModeConfInfo::perctype()\n" - << " Thresholding with 'SCP' in an input not implemented for multivariate mode\n\n"; + << " Thresholding with 'SFCP' or 'SOCP' in an input not implemented " + << "for multivariate mode\n\n"; exit ( 1 ); } - if (pm == perc_thresh_climo_dist || pc == perc_thresh_climo_dist) { + if (is_climo_dist_type(pm) || is_climo_dist_type(pc)) { mlog << Error << "\nModeConfInfo::perctype()\n" - << " Thresholding with 'CDP' in an input not implemented for multivariate mode\n\n"; + << " Thresholding with 'CDP', 'FCDP', or 'OCDP' in an " + << "input not implemented for multivariate mode\n\n"; exit ( 1 ); } if (pm == perc_thresh_freq_bias || diff --git a/src/libcode/vx_shapedata/node.cc b/src/libcode/vx_shapedata/node.cc index 74310ea451..a0ac3397e3 100644 --- a/src/libcode/vx_shapedata/node.cc +++ b/src/libcode/vx_shapedata/node.cc @@ -143,7 +143,7 @@ void Node::add_child(const Polyline * poly) { exit(1); } - child->p = *(poly); + child->p = *poly; child->child = nullptr; child->sibling = nullptr; @@ -171,7 +171,7 @@ void Node::add_child(const Polyline * poly) { exit(1); } - n_ptr->sibling->p = *(poly); + n_ptr->sibling->p = *poly; n_ptr->sibling->child = nullptr; n_ptr->sibling->sibling = nullptr; @@ -213,8 +213,8 @@ Node *Node::get_child(int n) const { if( n >= (children_count = n_children()) ) { mlog << Error << "\nNode::get_child(int) -> " - << "attempting to access child number " << n << " when only " - << children_count << " exist\n\n"; + << "attempting to access child number " << n << " when only " + << children_count << " exist\n\n"; exit(1); } @@ -295,7 +295,7 @@ double Node::angle() const { if(p.n_points < 3 && n_children() == 0) { mlog << Error << "\nNode::angle() -> " - << "not enough points!\n\n"; + << "not enough points!\n\n"; exit(1); } diff --git a/src/libcode/vx_shapedata/shapedata.cc b/src/libcode/vx_shapedata/shapedata.cc index 
3e4491e380..f3a8dc14f2 100644 --- a/src/libcode/vx_shapedata/shapedata.cc +++ b/src/libcode/vx_shapedata/shapedata.cc @@ -390,7 +390,6 @@ double ShapeData::complexity() const { double ShapeData::intensity_percentile(const ShapeData *raw_ptr, int perc, bool precip_flag) const { int n = 0; - double * val = (double *) nullptr; double v; double val_sum = 0.0; const int Nxy = data.nx()*data.ny(); @@ -401,7 +400,7 @@ double ShapeData::intensity_percentile(const ShapeData *raw_ptr, int perc, exit(1); } - val = new double [Nxy]; + vector val(Nxy); // Compute the requested percentile of intensity for(int i=0; i Index(2*data.ny()); - if ( !Index ) { + if ( Index.size() < 2*data.ny() ) { mlog << Error << "\nShapedata::convex_hull() -> " << "memory allocation error\n\n"; @@ -889,8 +884,6 @@ Polyline ShapeData::convex_hull_old() const // done // - delete [] Index; Index = (int *) nullptr; - return hull; } @@ -1898,7 +1891,7 @@ void ShapeData::threshold(SingleThresh t) { /////////////////////////////////////////////////////////////////////////////// -void ShapeData::set_to_1_or_0() +void ShapeData::set_to_1_or_0() { int j; double v; diff --git a/src/libcode/vx_shapedata/shapedata.h b/src/libcode/vx_shapedata/shapedata.h index 9654844047..5c1252dbc7 100644 --- a/src/libcode/vx_shapedata/shapedata.h +++ b/src/libcode/vx_shapedata/shapedata.h @@ -186,13 +186,13 @@ class ShapeData { /////////////////////////////////////////////////////////////////////////////// -inline Moments ShapeData::moments() const { return(mom); } +inline Moments ShapeData::moments() const { return mom; } -inline bool ShapeData::is_valid_xy (int x, int y) const { return ( ! ::is_bad_data(data(x, y)) ); } -inline bool ShapeData::is_bad_data (int x, int y) const { return ( ::is_bad_data(data(x, y)) ); } +inline bool ShapeData::is_valid_xy (int x, int y) const { return ! 
::is_bad_data(data(x, y) ); } +inline bool ShapeData::is_bad_data (int x, int y) const { return ::is_bad_data(data(x, y) ); } -inline bool ShapeData::is_zero (int x, int y) const { return ( is_eq(data(x, y), 0.0) ); } -inline bool ShapeData::is_nonzero (int x, int y) const { return ( ! is_eq(data(x, y), 0.0) ); } +inline bool ShapeData::is_zero (int x, int y) const { return is_eq(data(x, y), 0.0); } +inline bool ShapeData::is_nonzero (int x, int y) const { return !is_eq(data(x, y), 0.0); } inline void ShapeData::debug_examine() const { data.debug_examine(); } inline std::string ShapeData::sdebug_examine() const { return data.sdebug_examine(); } diff --git a/src/libcode/vx_solar/Makefile.in b/src/libcode/vx_solar/Makefile.in index 3d12890618..ada5952624 100644 --- a/src/libcode/vx_solar/Makefile.in +++ b/src/libcode/vx_solar/Makefile.in @@ -236,6 +236,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_stat_out/Makefile.in b/src/libcode/vx_stat_out/Makefile.in index d12b414cf1..77d31a4ea1 100644 --- a/src/libcode/vx_stat_out/Makefile.in +++ b/src/libcode/vx_stat_out/Makefile.in @@ -236,6 +236,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_stat_out/stat_columns.cc b/src/libcode/vx_stat_out/stat_columns.cc index 07c4df90d6..c805d447fd 100644 --- a/src/libcode/vx_stat_out/stat_columns.cc +++ b/src/libcode/vx_stat_out/stat_columns.cc @@ -39,7 +39,7 @@ void parse_row_col(const char *col_name, int &r, int &c) { mlog << Error << "\nparse_row_col() -> " << "unexpected column name specified: \"" << col_name << "\"\n\n"; - throw(1); + throw 1; } 
return; @@ -99,21 +99,21 @@ ConcatString append_climo_bin(const ConcatString &mask_name, //////////////////////////////////////////////////////////////////////// -void write_header_row(const char **cols, int n_cols, int hdr_flag, +void write_header_row(const char * const * cols, int n_cols, int hdr_flag, AsciiTable &at, int r, int c) { - int i; // Write the header column names if requested if(hdr_flag) { - for(i=0; i= 1) { - tmp_str.format("%s%i", pct_columns[2], n_thresh); - at.set_entry(r, col, tmp_str); // Threshold - } + StringArray sa(get_pct_columns(n_thresh)); + for(int i=0; i= 1) { + cs.format("%s%i", pct_columns[2], n_thresh); + sa.add(cs); + } + + return sa; +} + + //////////////////////////////////////////////////////////////////////// void write_fho_row(StatHdrColumns &shc, const CTSInfo &cts_info, @@ -605,7 +637,6 @@ void write_cts_row(StatHdrColumns &shc, const CTSInfo &cts_info, STATOutputType out_type, AsciiTable &stat_at, int &stat_row, AsciiTable &txt_at, int &txt_row) { - int i; // CTS line type shc.set_line_type(stat_cts_str); @@ -619,7 +650,7 @@ void write_cts_row(StatHdrColumns &shc, const CTSInfo &cts_info, shc.set_cov_thresh(na_str); // Write a line for each alpha value - for(i=0; in_obs; i++) { + for(int i=0; in_obs; i++) { + + // MET #2893 write individual obs message type + if(update_obtype) shc.set_obtype(pd_ptr->typ_sa[i].c_str()); // Set the observation valid time shc.set_obs_valid_beg(pd_ptr->vld_ta[i]); @@ -1622,6 +1649,7 @@ void write_seeps_mpr_row(StatHdrColumns &shc, const PairDataPoint *pd_ptr, STATOutputType out_type, AsciiTable &stat_at, int &stat_row, AsciiTable &txt_at, int &txt_row, + bool update_obtype, bool update_thresh) { // SEEPS line type @@ -1645,6 +1673,9 @@ void write_seeps_mpr_row(StatHdrColumns &shc, const PairDataPoint *pd_ptr, if(!pd_ptr->seeps_mpr[i] || is_bad_data(pd_ptr->seeps_mpr[i]->score)) continue; + // MET #2893 write individual obs message type + if(update_obtype) 
shc.set_obtype(pd_ptr->typ_sa[i].c_str()); + // Set the observation valid time shc.set_obs_valid_beg(pd_ptr->vld_ta[i]); shc.set_obs_valid_end(pd_ptr->vld_ta[i]); @@ -1676,7 +1707,6 @@ void write_isc_row(StatHdrColumns &shc, const ISCInfo &isc_info, STATOutputType out_type, AsciiTable &stat_at, int &stat_row, AsciiTable &txt_at, int &txt_row) { - int i; // ISC line type shc.set_line_type(stat_isc_str); @@ -1690,7 +1720,7 @@ void write_isc_row(StatHdrColumns &shc, const ISCInfo &isc_info, // Write a line for each scale plus one for the thresholded binary // field and one for the father wavelet - for(i=-1; i<=isc_info.n_scale; i++) { + for(int i=-1; i<=isc_info.n_scale; i++) { // Write the header columns write_header_cols(shc, stat_at, stat_row); @@ -1877,8 +1907,8 @@ void write_phist_row(StatHdrColumns &shc, const PairDataEnsemble *pd_ptr, void write_orank_row(StatHdrColumns &shc, const PairDataEnsemble *pd_ptr, STATOutputType out_type, AsciiTable &stat_at, int &stat_row, - AsciiTable &txt_at, int &txt_row) { - int i; + AsciiTable &txt_at, int &txt_row, + bool update_obtype) { // Observation Rank line type shc.set_line_type(stat_orank_str); @@ -1890,7 +1920,10 @@ void write_orank_row(StatHdrColumns &shc, const PairDataEnsemble *pd_ptr, shc.set_alpha(bad_data_double); // Write a line for each ensemble pair - for(i=0; in_obs; i++) { + for(int i=0; in_obs; i++) { + + // MET #2893 write individual obs message type + if(update_obtype) shc.set_obtype(pd_ptr->typ_sa[i].c_str()); // Set the observation valid time shc.set_obs_valid_beg(pd_ptr->vld_ta[i]); @@ -1923,7 +1956,6 @@ void write_ssvar_row(StatHdrColumns &shc, const PairDataEnsemble *pd_ptr, double alpha, STATOutputType out_type, AsciiTable &stat_at, int &stat_row, AsciiTable &txt_at, int &txt_row) { - int i; // SSVAR line type shc.set_line_type(stat_ssvar_str); @@ -1937,7 +1969,7 @@ void write_ssvar_row(StatHdrColumns &shc, const PairDataEnsemble *pd_ptr, shc.set_alpha(alpha); // Write a line for each ssvar bin - 
for(i=0; issvar_bins[0].n_bin; i++) { + for(int i=0; issvar_bins[0].n_bin; i++) { // Write the header columns write_header_cols(shc, stat_at, stat_row); @@ -2064,7 +2096,7 @@ void write_fho_cols(const CTSInfo &cts_info, // O_RATE // at.set_entry(r, c+0, // Total Count - cts_info.cts.n()); + cts_info.cts.n_pairs()); at.set_entry(r, c+1, // Forecast Rate = FY/N cts_info.cts.f_rate()); @@ -2090,7 +2122,7 @@ void write_ctc_cols(const CTSInfo &cts_info, // FN_OY, FN_ON, EC_VALUE // at.set_entry(r, c+0, // Total Count - cts_info.cts.n()); + cts_info.cts.n_pairs()); at.set_entry(r, c+1, // FY_OY cts_info.cts.fy_oy()); @@ -2143,7 +2175,7 @@ void write_cts_cols(const CTSInfo &cts_info, int i, // EC_VALUE // at.set_entry(r, c+0, // Total count - cts_info.cts.n()); + cts_info.cts.n_pairs()); at.set_entry(r, c+1, // Base Rate (oy_tp) cts_info.baser.v); @@ -2781,15 +2813,14 @@ void write_cnt_cols(const CNTInfo &cnt_info, int i, void write_mctc_cols(const MCTSInfo &mcts_info, AsciiTable &at, int r, int c) { - int i, j, col; // // Multi-Category Contingency Table Counts // Dump out the MCTC line: // TOTAL, N_CAT, Fi_Oj, EC_VALUE // - at.set_entry(r, c+0, // Total Count - mcts_info.cts.total()); + at.set_entry(r, c+0, // Total number of pairs + mcts_info.cts.n_pairs()); at.set_entry(r, c+1, // Number of categories mcts_info.cts.nrows()); @@ -2797,8 +2828,9 @@ void write_mctc_cols(const MCTSInfo &mcts_info, // // Loop through the contingency table rows and columns // - for(i=0, col=c+2; in_obs); @@ -4101,14 +4145,20 @@ void write_mpr_cols(const PairDataPoint *pd_ptr, int i, at.set_entry(r, c+9, // Observation Quality Control (string)pd_ptr->o_qc_sa[i]); - at.set_entry(r, c+10, // Climatological Mean Value - pd_ptr->cmn_na[i]); + at.set_entry(r, c+10, // Observation Climatological Mean Value + pd_ptr->ocmn_na[i]); - at.set_entry(r, c+11, // Climatological Standard Deviation Value - pd_ptr->csd_na[i]); + at.set_entry(r, c+11, // Observation Climatological Standard Deviation Value + 
pd_ptr->ocsd_na[i]); - at.set_entry(r, c+12, // Climatological CDF Value - pd_ptr->cdf_na[i]); + at.set_entry(r, c+12, // Observation Climatological CDF Value + pd_ptr->ocdf_na[i]); + + at.set_entry(r, c+13, // Forecast Climatological Mean Value + pd_ptr->fcmn_na[i]); + + at.set_entry(r, c+14, // Forecast Climatological Standard Deviation Value + pd_ptr->fcsd_na[i]); return; } @@ -4120,35 +4170,40 @@ void write_seeps_cols(const SeepsAggScore *seeps, // // Stable Equitable Error in Probability Space (SEEPS) // Dump out the SEEPS line: - // TOTAL S12, S13, - // S21, S23, S31, - // S32, PF1, PF2, - // PF3, PV1, PV2, - // PV3, MEAN_FCST, MEAN_OBS, - // SEEPS + // TOTAL, + // ODFL, ODFH, OLFD, + // OLFH, OHFD, OHFL, + // PF1, PF2, PF3, + // PV1, PV2, PV3, + // MEAN_FCST, MEAN_OBS, SEEPS // at.set_entry(r, c+0, seeps->n_obs); // Total Number of Pairs - at.set_entry(r, c+1, seeps->s12); // s12 - at.set_entry(r, c+2, seeps->s13); // s13 - at.set_entry(r, c+3, seeps->s21); // s21 - at.set_entry(r, c+4, seeps->s23); // s23 - at.set_entry(r, c+5, seeps->s31); // s31 - at.set_entry(r, c+6, seeps->s32); // s32 - - at.set_entry(r, c+7, seeps->pf1); // pf1 - at.set_entry(r, c+8, seeps->pf2); // pf2 - at.set_entry(r, c+9, seeps->pf3); // pf3 - - at.set_entry(r, c+10, seeps->pv1); // pv1 - at.set_entry(r, c+11, seeps->pv2); // pv2 - at.set_entry(r, c+12, seeps->pv3); // pv3 - - at.set_entry(r, c+13, seeps->mean_fcst); // mean_fcst - at.set_entry(r, c+14, seeps->mean_obs); // mean_obs - - at.set_entry(r, c+15, (use_weighted_seeps ? 
seeps->weighted_score : seeps->score)); // SEEPS score/weighted score + at.set_entry(r, c+1, seeps->s_odfl); // ODFL + at.set_entry(r, c+2, seeps->s_odfh); // ODFH + at.set_entry(r, c+3, seeps->s_olfd); // OLFD + at.set_entry(r, c+4, seeps->s_olfh); // OLFH + at.set_entry(r, c+5, seeps->s_ohfd); // OHFD + at.set_entry(r, c+6, seeps->s_ohfl); // OHFL + + at.set_entry(r, c+7, seeps->pf1); // PF1 + at.set_entry(r, c+8, seeps->pf2); // PF2 + at.set_entry(r, c+9, seeps->pf3); // PF3 + + at.set_entry(r, c+10, seeps->pv1); // PV1 + at.set_entry(r, c+11, seeps->pv2); // PV2 + at.set_entry(r, c+12, seeps->pv3); // PV3 + + at.set_entry(r, c+13, (use_weighted_seeps ? + seeps->mean_fcst_wgt : + seeps->mean_fcst)); // MEAN_FCST + at.set_entry(r, c+14, (use_weighted_seeps ? + seeps->mean_obs_wgt : + seeps->mean_obs)); // MEAN_OBS + at.set_entry(r, c+15, (use_weighted_seeps ? + seeps->score_wgt: + seeps->score)); // SEEPS return; } @@ -4177,18 +4232,18 @@ void write_seeps_mpr_cols(const PairDataPoint *pd_ptr, int i, at.set_entry(r, c+4, pd_ptr->o_na[i]); // Observation Value - at.set_entry(r, c+5, (string)pd_ptr->o_qc_sa[i]); // Observation Quality Control + at.set_entry(r, c+5, (string)pd_ptr->o_qc_sa[i]); // Observation Quality Control - at.set_entry(r, c+6, pd_ptr->seeps_mpr[i]->fcst_cat); // model category - at.set_entry(r, c+7, pd_ptr->seeps_mpr[i]->obs_cat); // observation category + at.set_entry(r, c+6, pd_ptr->seeps_mpr[i]->fcst_cat); // FCST_CAT + at.set_entry(r, c+7, pd_ptr->seeps_mpr[i]->obs_cat); // OBS_CAT - at.set_entry(r, c+8, pd_ptr->seeps_mpr[i]->p1); // p1 - at.set_entry(r, c+9, pd_ptr->seeps_mpr[i]->p2); // p2 + at.set_entry(r, c+8, pd_ptr->seeps_mpr[i]->p1); // P1 + at.set_entry(r, c+9, pd_ptr->seeps_mpr[i]->p2); // P2 - at.set_entry(r, c+10, pd_ptr->seeps_mpr[i]->t1); // t1 - at.set_entry(r, c+11, pd_ptr->seeps_mpr[i]->t2); // t2 + at.set_entry(r, c+10, pd_ptr->seeps_mpr[i]->t1); // T1 + at.set_entry(r, c+11, pd_ptr->seeps_mpr[i]->t2); // T2 - at.set_entry(r, 
c+12, pd_ptr->seeps_mpr[i]->score); // SEEPS score + at.set_entry(r, c+12, pd_ptr->seeps_mpr[i]->score); // SEEPS } @@ -4408,7 +4463,6 @@ void write_rps_cols(const RPSInfo &rps_info, void write_rhist_cols(const PairDataEnsemble *pd_ptr, AsciiTable &at, int r, int c) { - int i, col; // // Ensemble Ranked Histogram @@ -4425,7 +4479,8 @@ void write_rhist_cols(const PairDataEnsemble *pd_ptr, // // Write RANK_i count for each bin // - for(i=0, col=c+2; irhist_na.n_elements(); i++) { + int col = c+2; + for(int i=0; irhist_na.n_elements(); i++) { at.set_entry(r, col, // RANK_i nint(pd_ptr->rhist_na[i])); @@ -4439,7 +4494,6 @@ void write_rhist_cols(const PairDataEnsemble *pd_ptr, void write_phist_cols(const PairDataEnsemble *pd_ptr, AsciiTable &at, int r, int c) { - int i, col; // // Probability Integral Transform Histogram @@ -4458,7 +4512,8 @@ void write_phist_cols(const PairDataEnsemble *pd_ptr, // // Write BIN_i count for each bin // - for(i=0, col=c+3; iphist_na.n_elements(); i++) { + int col = c+3; + for(int i=0; iphist_na.n_elements(); i++) { at.set_entry(r, col, // BIN_i nint(pd_ptr->phist_na[i])); @@ -4472,7 +4527,6 @@ void write_phist_cols(const PairDataEnsemble *pd_ptr, void write_orank_cols(const PairDataEnsemble *pd_ptr, int i, AsciiTable &at, int r, int c) { - int j, col; // // Ensemble Observation Rank Matched Pairs @@ -4482,9 +4536,10 @@ void write_orank_cols(const PairDataEnsemble *pd_ptr, int i, // OBS_ELV, OBS, PIT, // RANK, N_ENS_VLD, N_ENS, // [ENS_] (for each ensemble member) - // OBS_QC, ENS_MEAN, CLIMO_MEAN, - // SPREAD, ENS_MEAN_OERR, SPREAD_OERR, - // SPREAD_PLUS_OERR, CLIMO_STDEV + // OBS_QC, ENS_MEAN, OBS_CLIMO_MEAN, + // SPREAD, ENS_MEAN_OERR, SPREAD_OERR, + // SPREAD_PLUS_OERR, OBS_CLIMO_STDEV, FCST_CLIMO_MEAN, + // FCST_CLIMO_STDEV // at.set_entry(r, c+0, // Total Number of Pairs pd_ptr->n_obs); // Use n_obs instead of n_pair to include missing data @@ -4525,7 +4580,8 @@ void write_orank_cols(const PairDataEnsemble *pd_ptr, int i, // // Write 
ENS_j for each ensemble member // - for(j=0, col=c+12; jn_ens; j++) { + int col = c+12; + for(int j=0; jn_ens; j++) { at.set_entry(r, col, // ENS_j pd_ptr->e_na[j][i]); @@ -4540,9 +4596,9 @@ void write_orank_cols(const PairDataEnsemble *pd_ptr, int i, at.set_entry(r, c+13+pd_ptr->n_ens, pd_ptr->mn_na[i]); - // Climatology mean values + // Observation climatology mean values at.set_entry(r, c+14+pd_ptr->n_ens, - pd_ptr->cmn_na[i]); + pd_ptr->ocmn_na[i]); // Unperturbed ensemble spread values at.set_entry(r, c+15+pd_ptr->n_ens, @@ -4560,9 +4616,17 @@ void write_orank_cols(const PairDataEnsemble *pd_ptr, int i, at.set_entry(r, c+18+pd_ptr->n_ens, square_root(pd_ptr->var_plus_oerr_na[i])); - // Climatology standard deviation values + // Observation climatology standard deviation values at.set_entry(r, c+19+pd_ptr->n_ens, - pd_ptr->csd_na[i]); + pd_ptr->ocsd_na[i]); + + // Forecast climatology mean values + at.set_entry(r, c+20+pd_ptr->n_ens, + pd_ptr->fcmn_na[i]); + + // Forecast climatology standard deviation values + at.set_entry(r, c+21+pd_ptr->n_ens, + pd_ptr->fcsd_na[i]); return; } @@ -4579,7 +4643,7 @@ void write_ssvar_cols(const PairDataEnsemble *pd_ptr, int i, // cnt_info.allocate_n_alpha(1); cnt_info.alpha[0] = alpha; - compute_cntinfo(pd_ptr->ssvar_bins[i].sl1l2_info, 0, cnt_info); + compute_cntinfo(pd_ptr->ssvar_bins[i].sl1l2_info, cnt_info); // // Ensemble spread/skill variance bins @@ -4711,7 +4775,6 @@ void write_ssvar_cols(const PairDataEnsemble *pd_ptr, int i, void write_relp_cols(const PairDataEnsemble *pd_ptr, AsciiTable &at, int r, int c) { - int i, col; // // Relative Position @@ -4727,7 +4790,8 @@ void write_relp_cols(const PairDataEnsemble *pd_ptr, // // Write RELP_i count for each bin // - for(i=0, col=c+2; irelp_na.n_elements(); i++) { + int col = c+2; + for(int i=0; irelp_na.n_elements(); i++) { at.set_entry(r, col, // RELP_i pd_ptr->relp_na[i]); diff --git a/src/libcode/vx_stat_out/stat_columns.h b/src/libcode/vx_stat_out/stat_columns.h index 
923425dabc..e8998e1fa4 100644 --- a/src/libcode/vx_stat_out/stat_columns.h +++ b/src/libcode/vx_stat_out/stat_columns.h @@ -35,7 +35,7 @@ extern void close_txt_file(std::ofstream *&, const char *); extern ConcatString append_climo_bin(const ConcatString &, int, int); // Write out the header row for fixed length line types -extern void write_header_row(const char **, int, int, AsciiTable &, int, int); +extern void write_header_row(const char * const *, int, int, AsciiTable &, int, int); // Write out the header row for variable length line types extern void write_mctc_header_row (int, int, AsciiTable &, int, int); @@ -49,6 +49,9 @@ extern void write_phist_header_row (int, int, AsciiTable &, int, int); extern void write_orank_header_row (int, int, AsciiTable &, int, int); extern void write_relp_header_row (int, int, AsciiTable &, int, int); +extern StringArray get_mctc_columns (int); +extern StringArray get_pct_columns (int); + extern void write_fho_row (StatHdrColumns &, const CTSInfo &, STATOutputType, AsciiTable &, int &, AsciiTable &, int &); extern void write_ctc_row (StatHdrColumns &, const CTSInfo &, STATOutputType, @@ -103,13 +106,13 @@ extern void write_dmap_row (StatHdrColumns &, const DMAPInfo &, STATOutputType, AsciiTable &, int &, AsciiTable &, int &); extern void write_mpr_row (StatHdrColumns &, const PairDataPoint *, STATOutputType, AsciiTable &, int &, AsciiTable &, int &, - bool update_thresh = true); + bool update_obtype, bool update_thresh = true); extern void write_seeps_row (StatHdrColumns &, const SeepsAggScore *, STATOutputType, AsciiTable &, int &, AsciiTable &, int &, bool update_thresh = true); extern void write_seeps_mpr_row (StatHdrColumns &, const PairDataPoint *, STATOutputType, AsciiTable &, int &, AsciiTable &, int &, - bool update_thresh = true); + bool update_obtype, bool update_thresh = true); extern void write_isc_row (StatHdrColumns &, const ISCInfo &, STATOutputType, AsciiTable &, int &, AsciiTable &, int &); extern void 
write_ecnt_row (StatHdrColumns &, const ECNTInfo &, STATOutputType, @@ -121,7 +124,8 @@ extern void write_rhist_row (StatHdrColumns &, const PairDataEnsemble *, STATOut extern void write_phist_row (StatHdrColumns &, const PairDataEnsemble *, STATOutputType, AsciiTable &, int &, AsciiTable &, int &); extern void write_orank_row (StatHdrColumns &, const PairDataEnsemble *, STATOutputType, - AsciiTable &, int &, AsciiTable &, int &); + AsciiTable &, int &, AsciiTable &, int &, + bool update_obtype = false); extern void write_ssvar_row (StatHdrColumns &, const PairDataEnsemble *, double, STATOutputType, AsciiTable &, int &, AsciiTable &, int &); extern void write_relp_row (StatHdrColumns &, const PairDataEnsemble *, STATOutputType, diff --git a/src/libcode/vx_stat_out/stat_hdr_columns.cc b/src/libcode/vx_stat_out/stat_hdr_columns.cc index 005499d8ed..3c3560b587 100644 --- a/src/libcode/vx_stat_out/stat_hdr_columns.cc +++ b/src/libcode/vx_stat_out/stat_hdr_columns.cc @@ -15,6 +15,9 @@ using namespace std; +//////////////////////////////////////////////////////////////////////// + +static const string case_str = "CASE"; //////////////////////////////////////////////////////////////////////// // @@ -337,6 +340,217 @@ void StatHdrColumns::set_alpha(const double a) { //////////////////////////////////////////////////////////////////////// +void StatHdrColumns::apply_set_hdr_opts( + const StringArray &hdr_cols, const StringArray &hdr_vals) { + StringArray case_cols; + StringArray case_vals; + + // Call other implementation without case information + apply_set_hdr_opts(hdr_cols, hdr_vals, case_cols, case_vals); + + return; +} + +//////////////////////////////////////////////////////////////////////// +// +// Use the current -set_hdr options to populate the STAT header columns, +// substituting in case-specific values, as needed. 
+// +//////////////////////////////////////////////////////////////////////// + +void StatHdrColumns::apply_set_hdr_opts( + const StringArray &hdr_cols, const StringArray &hdr_vals, + const StringArray &case_cols, const StringArray &case_vals) { + + // No updates needed + if(hdr_cols.n() == 0) return; + + // Sanity check lengths + if(hdr_cols.n() != hdr_vals.n()) { + mlog << Error << "\nStatHdrColumns::apply_set_hdr_opts() -> " + << "the number of -set_hdr columns names (" << hdr_cols.n() + << " and values (" << hdr_vals.n() << " must match!\n\n"; + exit(1); + } + if(case_cols.n() != case_vals.n()) { + mlog << Error << "\nStatHdrColumns::apply_set_hdr_opts() -> " + << "the number of case columns names (" << case_cols.n() + << " and values (" << case_vals.n() << " must match!\n\n"; + exit(1); + } + + int index; + ConcatString cs; + SingleThresh st; + + // MODEL + if(hdr_cols.has("MODEL", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_model(cs.c_str()); + } + + // DESC + if(hdr_cols.has("DESC", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_desc(cs.c_str()); + } + + // FCST_LEAD + if(hdr_cols.has("FCST_LEAD", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_fcst_lead_sec(timestring_to_sec(cs.c_str())); + } + + // FCST_VALID_BEG, FCST_VALID_END + if(hdr_cols.has("FCST_VALID_BEG", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_fcst_valid_beg(timestring_to_unix(cs.c_str())); + } + if(hdr_cols.has("FCST_VALID_END", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_fcst_valid_end(timestring_to_unix(cs.c_str())); + } + + // OBS_LEAD + if(hdr_cols.has("OBS_LEAD", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_obs_lead_sec(timestring_to_sec(cs.c_str())); + } + + // OBS_VALID_BEG, OBS_VALID_END + if(hdr_cols.has("OBS_VALID_BEG", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, 
case_vals); + set_obs_valid_beg(timestring_to_unix(cs.c_str())); + } + if(hdr_cols.has("OBS_VALID_END", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_obs_valid_end(timestring_to_unix(cs.c_str())); + } + + // FCST_VAR + if(hdr_cols.has("FCST_VAR", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_fcst_var(cs.c_str()); + } + + // FCST_UNITS + if(hdr_cols.has("FCST_UNITS", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_fcst_units(cs.c_str()); + } + + // FCST_LEV + if(hdr_cols.has("FCST_LEV", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_fcst_lev(cs.c_str()); + } + + // OBS_VAR + if(hdr_cols.has("OBS_VAR", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_obs_var(cs.c_str()); + } + + // OBS_UNITS + if(hdr_cols.has("OBS_UNITS", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_obs_units(cs.c_str()); + } + + // OBS_LEV + if(hdr_cols.has("OBS_LEV", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_obs_lev(cs.c_str()); + } + + // OBTYPE + if(hdr_cols.has("OBTYPE", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_obtype(cs.c_str()); + } + + // VX_MASK + if(hdr_cols.has("VX_MASK", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_mask(cs.c_str()); + } + + // INTERP_MTHD + if(hdr_cols.has("INTERP_MTHD", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_interp_mthd(cs.c_str()); + } + + // INTERP_PNTS + if(hdr_cols.has("INTERP_PNTS", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_interp_wdth(nint(sqrt(atof(cs.c_str())))); + } + + // FCST_THRESH + if(hdr_cols.has("FCST_THRESH", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + st.set(cs.c_str()); + set_fcst_thresh(st); + } + + // OBS_THRESH + if(hdr_cols.has("OBS_THRESH", 
index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + st.set(cs.c_str()); + set_obs_thresh(st); + } + + // COV_THRESH + if(hdr_cols.has("COV_THRESH", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + st.set(cs.c_str()); + set_cov_thresh(st); + } + + // ALPHA + if(hdr_cols.has("ALPHA", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_alpha(atof(cs.c_str())); + } + + // LINE_TYPE + if(hdr_cols.has("LINE_TYPE", index)) { + cs = get_set_hdr_str(hdr_vals[index], case_cols, case_vals); + set_line_type(cs.c_str()); + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +ConcatString StatHdrColumns::get_set_hdr_str(const std::string &hdr_val, + const StringArray &case_cols, const StringArray &case_vals) const { + ConcatString cs; + int index; + + // Check for the full CASE string + if(case_str.compare(hdr_val) == 0) { + cs = case_vals.serialize(":"); + } + // Check for one of the case columns + else if(case_cols.has(hdr_val, index)) { + cs = case_vals[index]; + } + // Otherwise, use the current header value + else { + cs = hdr_val; + } + + // Sanity check for empty strings + if(cs.empty()) cs = na_str; + + return cs; +} + +//////////////////////////////////////////////////////////////////////// + ConcatString StatHdrColumns::get_fcst_thresh_str() const { ConcatString cs; diff --git a/src/libcode/vx_stat_out/stat_hdr_columns.h b/src/libcode/vx_stat_out/stat_hdr_columns.h index 6a1c5da0fb..0eab56d6bd 100644 --- a/src/libcode/vx_stat_out/stat_hdr_columns.h +++ b/src/libcode/vx_stat_out/stat_hdr_columns.h @@ -151,6 +151,14 @@ class StatHdrColumns { void set_cov_thresh (const ThreshArray); void set_alpha (const double); + // Apply -set_hdr overrides + void apply_set_hdr_opts(const StringArray &, const StringArray &); + void apply_set_hdr_opts(const StringArray &, const StringArray &, + const StringArray &, const StringArray &); + + ConcatString 
get_set_hdr_str(const std::string &, + const StringArray &, const StringArray &) const; + // Get functions ConcatString get_model () const; ConcatString get_desc () const; @@ -206,55 +214,55 @@ class StatHdrColumns { //////////////////////////////////////////////////////////////////////// -inline ConcatString StatHdrColumns::get_model () const { return(model.contents(na_str)); } -inline ConcatString StatHdrColumns::get_desc () const { return(desc.contents(na_str)); } +inline ConcatString StatHdrColumns::get_model () const { return model.contents(na_str); } +inline ConcatString StatHdrColumns::get_desc () const { return desc.contents(na_str); } -inline int StatHdrColumns::get_fcst_lead_sec () const { return(fcst_lead_sec); } -inline ConcatString StatHdrColumns::get_fcst_lead_str () const { return(fcst_lead_str.contents(na_str)); } +inline int StatHdrColumns::get_fcst_lead_sec () const { return fcst_lead_sec; } +inline ConcatString StatHdrColumns::get_fcst_lead_str () const { return fcst_lead_str.contents(na_str); } -inline unixtime StatHdrColumns::get_fcst_valid_beg () const { return(fcst_valid_beg); } -inline ConcatString StatHdrColumns::get_fcst_valid_beg_str() const { return(fcst_valid_beg_str.contents(na_str)); } +inline unixtime StatHdrColumns::get_fcst_valid_beg () const { return fcst_valid_beg; } +inline ConcatString StatHdrColumns::get_fcst_valid_beg_str() const { return fcst_valid_beg_str.contents(na_str); } -inline unixtime StatHdrColumns::get_fcst_valid_end () const { return(fcst_valid_end); } -inline ConcatString StatHdrColumns::get_fcst_valid_end_str() const { return(fcst_valid_end_str.contents(na_str)); } +inline unixtime StatHdrColumns::get_fcst_valid_end () const { return fcst_valid_end; } +inline ConcatString StatHdrColumns::get_fcst_valid_end_str() const { return fcst_valid_end_str.contents(na_str); } -inline int StatHdrColumns::get_obs_lead_sec () const { return(obs_lead_sec); } -inline ConcatString StatHdrColumns::get_obs_lead_str () const { 
return(obs_lead_str.contents(na_str)); } +inline int StatHdrColumns::get_obs_lead_sec () const { return obs_lead_sec; } +inline ConcatString StatHdrColumns::get_obs_lead_str () const { return obs_lead_str.contents(na_str); } -inline unixtime StatHdrColumns::get_obs_valid_beg () const { return(obs_valid_beg); } -inline ConcatString StatHdrColumns::get_obs_valid_beg_str () const { return(obs_valid_beg_str.contents(na_str)); } +inline unixtime StatHdrColumns::get_obs_valid_beg () const { return obs_valid_beg; } +inline ConcatString StatHdrColumns::get_obs_valid_beg_str () const { return obs_valid_beg_str.contents(na_str); } -inline unixtime StatHdrColumns::get_obs_valid_end () const { return(obs_valid_end); } -inline ConcatString StatHdrColumns::get_obs_valid_end_str () const { return(obs_valid_end_str.contents(na_str)); } +inline unixtime StatHdrColumns::get_obs_valid_end () const { return obs_valid_end; } +inline ConcatString StatHdrColumns::get_obs_valid_end_str () const { return obs_valid_end_str.contents(na_str); } -inline ConcatString StatHdrColumns::get_fcst_var () const { return(fcst_var.contents(na_str)); } -inline ConcatString StatHdrColumns::get_fcst_units () const { return(fcst_units.contents(na_str)); } -inline ConcatString StatHdrColumns::get_fcst_lev () const { return(fcst_lev.contents(na_str)); } +inline ConcatString StatHdrColumns::get_fcst_var () const { return fcst_var.contents(na_str); } +inline ConcatString StatHdrColumns::get_fcst_units () const { return fcst_units.contents(na_str); } +inline ConcatString StatHdrColumns::get_fcst_lev () const { return fcst_lev.contents(na_str); } -inline ConcatString StatHdrColumns::get_obs_var () const { return(obs_var.contents(na_str)); } -inline ConcatString StatHdrColumns::get_obs_units () const { return(obs_units.contents(na_str)); } -inline ConcatString StatHdrColumns::get_obs_lev () const { return(obs_lev.contents(na_str)); } +inline ConcatString StatHdrColumns::get_obs_var () const { return 
obs_var.contents(na_str); } +inline ConcatString StatHdrColumns::get_obs_units () const { return obs_units.contents(na_str); } +inline ConcatString StatHdrColumns::get_obs_lev () const { return obs_lev.contents(na_str); } -inline ConcatString StatHdrColumns::get_obtype () const { return(obtype.contents(na_str)); } -inline ConcatString StatHdrColumns::get_mask () const { return(mask.contents(na_str)); } +inline ConcatString StatHdrColumns::get_obtype () const { return obtype.contents(na_str); } +inline ConcatString StatHdrColumns::get_mask () const { return mask.contents(na_str); } -inline ConcatString StatHdrColumns::get_interp_mthd () const { return(interp_mthd.contents(na_str)); } -inline int StatHdrColumns::get_interp_pnts () const { return(interp_pnts); } -inline ConcatString StatHdrColumns::get_interp_pnts_str () const { return(interp_pnts_str.contents(na_str)); } +inline ConcatString StatHdrColumns::get_interp_mthd () const { return interp_mthd.contents(na_str); } +inline int StatHdrColumns::get_interp_pnts () const { return interp_pnts; } +inline ConcatString StatHdrColumns::get_interp_pnts_str () const { return interp_pnts_str.contents(na_str); } -inline ConcatString StatHdrColumns::get_line_type () const { return(line_type.contents(na_str)); } +inline ConcatString StatHdrColumns::get_line_type () const { return line_type.contents(na_str); } -inline ThreshArray StatHdrColumns::get_fcst_thresh () const { return(fcst_thresh); } +inline ThreshArray StatHdrColumns::get_fcst_thresh () const { return fcst_thresh; } -inline ThreshArray StatHdrColumns::get_obs_thresh () const { return(obs_thresh); } -inline ConcatString StatHdrColumns::get_obs_thresh_str () const { return(obs_thresh.get_str()); } +inline ThreshArray StatHdrColumns::get_obs_thresh () const { return obs_thresh; } +inline ConcatString StatHdrColumns::get_obs_thresh_str () const { return obs_thresh.get_str(); } -inline SetLogic StatHdrColumns::get_thresh_logic () const { return(thresh_logic); } +inline 
SetLogic StatHdrColumns::get_thresh_logic () const { return thresh_logic; } -inline ThreshArray StatHdrColumns::get_cov_thresh () const { return(cov_thresh); } -inline ConcatString StatHdrColumns::get_cov_thresh_str () const { return(prob_thresh_to_string(cov_thresh)); } +inline ThreshArray StatHdrColumns::get_cov_thresh () const { return cov_thresh; } +inline ConcatString StatHdrColumns::get_cov_thresh_str () const { return prob_thresh_to_string(cov_thresh); } -inline double StatHdrColumns::get_alpha () const { return(alpha); } +inline double StatHdrColumns::get_alpha () const { return alpha; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_statistics/Makefile.in b/src/libcode/vx_statistics/Makefile.in index dbd2500d6a..ef3c09e5e9 100644 --- a/src/libcode/vx_statistics/Makefile.in +++ b/src/libcode/vx_statistics/Makefile.in @@ -261,6 +261,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_statistics/apply_mask.cc b/src/libcode/vx_statistics/apply_mask.cc index 03e31e97c7..3777adc5f0 100644 --- a/src/libcode/vx_statistics/apply_mask.cc +++ b/src/libcode/vx_statistics/apply_mask.cc @@ -60,19 +60,19 @@ Grid parse_vx_grid(const RegridInfo info, const Grid *fgrid, const Grid *ogrid) // Verify on the forecast grid if(info.field == FieldType::Fcst) { - mlog << Debug(2) << "Using the forecast grid as the verification grid\n"; + mlog << Debug(2) << "Using the forecast grid as the verification grid.\n"; vx_grid = *fgrid; } // Verify on the observation grid else if(info.field == FieldType::Obs) { - mlog << Debug(2) << "Using the observation grid as the verification grid\n"; + mlog << Debug(2) << "Using the observation grid as the verification grid.\n"; vx_grid = *ogrid; } // Parse a named grid, grid 
specification string, // or gridded data file else { - mlog << Debug(2) << "Using named grid as the verification grid. " - << "Name=" << info.name << "\n"; + mlog << Debug(2) << "Using named grid (" << info.name + << ") as the verification grid.\n"; parse_grid_mask(info.name, vx_grid); } } @@ -89,21 +89,8 @@ Grid parse_grid_string(const char *grid_str) { Grid grid; StringArray sa; - // Parse as a white-space separated string - sa.parse_wsss(grid_str); - - // Search for a named grid - if(sa.n() == 1 && find_grid_by_name(sa[0].c_str(), grid)) { - mlog << Debug(3) << "Use the grid named \"" - << grid_str << "\".\n"; - } - // Parse grid definition - else if(sa.n() > 1 && parse_grid_def(sa, grid)) { - mlog << Debug(3) << "Use the grid defined by string \"" - << grid_str << "\".\n"; - } - // Extract the grid from a gridded data file - else { + if (!build_grid_by_grid_string(grid_str, grid, "parse_grid_strin", false)) { + // Extract the grid from a gridded data file mlog << Debug(3) << "Use the grid defined by file \"" << grid_str << "\".\n"; @@ -151,7 +138,7 @@ void parse_grid_weight(const Grid &grid, const GridWeightType t, w = grid.calc_area(x, y); } else { - w = default_grid_weight; + w = default_weight; } // Store the current weight @@ -228,24 +215,8 @@ void parse_grid_mask(const ConcatString &mask_grid_str, Grid &grid) { // Check for empty input string if(mask_grid_str.empty()) return; - // Parse mask_grid_str as a white-space separated string - StringArray sa; - sa.parse_wsss(mask_grid_str); - - // Named grid - if(sa.n() == 1 && find_grid_by_name(mask_grid_str.c_str(), grid)) { - mlog << Debug(3) - << "Use the grid named \"" << mask_grid_str << "\".\n"; - } - // Grid specification string - else if(sa.n() > 1 && parse_grid_def(sa, grid)) { - mlog << Debug(3) - << "Use the grid defined by string \"" << mask_grid_str - << "\".\n"; - } - // Extract the grid from a gridded data file - else { - + if (!build_grid_by_grid_string(mask_grid_str, grid, "parse_grid_mask", 
false)) { + // Extract the grid from a gridded data file mlog << Debug(3) << "Use the grid defined by file \"" << mask_grid_str << "\".\n"; @@ -662,7 +633,8 @@ DataPlane parse_geog_data(Dictionary *dict, const Grid &vx_grid, regrid_info = parse_conf_regrid(dict); mlog << Debug(2) << "Regridding geography mask data " << info->magic_str() - << " to the verification grid.\n"; + << " to the verification grid using " + << regrid_info.get_str() << ".\n"; dp = met_regrid(dp, mtddf->grid(), vx_grid, regrid_info); } } diff --git a/src/libcode/vx_statistics/compute_ci.cc b/src/libcode/vx_statistics/compute_ci.cc index e444ab59e5..3633d61acb 100644 --- a/src/libcode/vx_statistics/compute_ci.cc +++ b/src/libcode/vx_statistics/compute_ci.cc @@ -61,14 +61,14 @@ void compute_normal_ci(double v, double alpha, double se, // //////////////////////////////////////////////////////////////////////// -void compute_proportion_ci(double p, int n, double alpha, double vif, +void compute_proportion_ci(double p, int n_pairs, double alpha, double vif, double &p_cl, double &p_cu) { // // Compute the confidence interval using the Wilson method for all // sizes of n, since it provides a better approximation // - compute_wilson_ci(p, n, alpha, vif, p_cl, p_cu); + compute_wilson_ci(p, n_pairs, alpha, vif, p_cl, p_cu); return; } @@ -81,7 +81,7 @@ void compute_proportion_ci(double p, int n, double alpha, double vif, // //////////////////////////////////////////////////////////////////////// -void compute_wald_ci(double p, int n, double alpha, double vif, +void compute_wald_ci(double p, int n_pairs, double alpha, double vif, double &p_cl, double &p_cu) { double v, cv_normal_l, cv_normal_u; @@ -100,7 +100,7 @@ void compute_wald_ci(double p, int n, double alpha, double vif, // // Compute the upper and lower bounds of the confidence interval // - v = vif*p*(1.0-p)/n; + v = vif*p*(1.0-p)/n_pairs; if(v < 0.0) { p_cl = bad_data_double; @@ -122,10 +122,10 @@ void compute_wald_ci(double p, int n, double 
alpha, double vif, // //////////////////////////////////////////////////////////////////////// -void compute_wilson_ci(double p, int n_int, double alpha, double vif, +void compute_wilson_ci(double p, int n_pairs, double alpha, double vif, double &p_cl, double &p_cu) { double v, cv_normal_l, cv_normal_u; - long long n = n_int; + long long n = n_pairs; if(is_bad_data(p)) { p_cl = p_cu = bad_data_double; @@ -171,9 +171,8 @@ void compute_wilson_ci(double p, int n_int, double alpha, double vif, //////////////////////////////////////////////////////////////////////// void compute_woolf_ci(double odds, double alpha, - int fy_oy, int fy_on, int fn_oy, int fn_on, + double fy_oy, double fy_on, double fn_oy, double fn_on, double &odds_cl, double &odds_cu) { - double cv_normal_l, cv_normal_u, a, b; if(is_bad_data(odds) || fy_oy == 0 || fy_on == 0 || fn_oy == 0 || fn_on == 0) { @@ -185,14 +184,14 @@ void compute_woolf_ci(double odds, double alpha, // Compute the upper and lower critical values from the // normal distribution. 
// - cv_normal_l = normal_cdf_inv(alpha/2.0, 0.0, 1.0); - cv_normal_u = normal_cdf_inv(1.0 - (alpha/2.0), 0.0, 1.0); + double cv_normal_l = normal_cdf_inv(alpha/2.0, 0.0, 1.0); + double cv_normal_u = normal_cdf_inv(1.0 - (alpha/2.0), 0.0, 1.0); // // Compute the upper and lower bounds of the confidence interval // - a = exp(cv_normal_l*sqrt(1.0/fy_oy + 1.0/fy_on + 1.0/fn_oy + 1.0/fn_on)); - b = exp(cv_normal_u*sqrt(1.0/fy_oy + 1.0/fy_on + 1.0/fn_oy + 1.0/fn_on)); + double a = exp(cv_normal_l*sqrt(1.0/fy_oy + 1.0/fy_on + 1.0/fn_oy + 1.0/fn_on)); + double b = exp(cv_normal_u*sqrt(1.0/fy_oy + 1.0/fy_on + 1.0/fn_oy + 1.0/fn_on)); odds_cl = odds * a; odds_cu = odds * b; @@ -210,19 +209,16 @@ void compute_woolf_ci(double odds, double alpha, //////////////////////////////////////////////////////////////////////// void compute_hk_ci(double hk, double alpha, double vif, - int fy_oy, int fy_on, int fn_oy, int fn_on, + double fy_oy, double fy_on, double fn_oy, double fn_on, double &hk_cl, double &hk_cu) { - double cv_normal, stdev; - double h, h_var, f_var; - int h_n, f_n; // // Get the counts // - h_n = fy_oy + fn_oy; - f_n = fn_on + fy_on; + double h_n = fy_oy + fn_oy; + double f_n = fn_on + fy_on; - if(is_bad_data(hk) || h_n == 0 || f_n == 0) { + if(is_bad_data(hk) || is_eq(h_n, 0.0) || is_eq(f_n, 0.0)) { hk_cl = hk_cu = bad_data_double; return; } @@ -231,26 +227,26 @@ void compute_hk_ci(double hk, double alpha, double vif, // Compute the critical value for the normal distribution based // on the sample size // - cv_normal = normal_cdf_inv(alpha/2.0, 0.0, 1.0); + double cv_normal = normal_cdf_inv(alpha/2.0, 0.0, 1.0); // // Compute the hit rate and false alarm rate // - h = (double) fy_oy/h_n; + double h = fy_oy/h_n; // // Compute a variance for H and F // - h_var = sqrt(h*(1.0-h)/h_n + cv_normal*cv_normal/(4.0*h_n*h_n)) - / (1.0 + cv_normal*cv_normal/h_n); + double h_var = sqrt(h*(1.0-h)/h_n + cv_normal*cv_normal/(4.0*h_n*h_n)) + / (1.0 + cv_normal*cv_normal/h_n); - f_var 
= sqrt(h*(1.0-h)/f_n + cv_normal*cv_normal/(4.0*f_n*f_n)) - / (1.0 + cv_normal*cv_normal/f_n); + double f_var = sqrt(h*(1.0-h)/f_n + cv_normal*cv_normal/(4.0*f_n*f_n)) + / (1.0 + cv_normal*cv_normal/f_n); // // Compute the standard deviation for HK // - stdev = sqrt(vif*(h_var*h_var + f_var*f_var)); + double stdev = sqrt(vif*(h_var*h_var + f_var*f_var)); // // Compute the upper and lower bounds of the confidence interval @@ -384,7 +380,7 @@ void compute_cts_stats_ci_bca(const gsl_rng *rng_ptr, if(cts_r_out) { delete [] cts_r_out; cts_r_out = (ofstream *) nullptr; } if(cts_i_file) { delete [] cts_i_file; cts_i_file = (ConcatString *) nullptr; } if(cts_r_file) { delete [] cts_r_file; cts_r_file = (ConcatString *) nullptr; } - throw(1); + throw 1; } } @@ -812,7 +808,7 @@ void compute_mcts_stats_ci_bca(const gsl_rng *rng_ptr, << "can't open one or more temporary files for writing:\n" << mcts_i_file << "\n" << mcts_r_file << "\n\n"; - throw(1); + throw 1; } // @@ -1024,7 +1020,7 @@ void compute_cnt_stats_ci_bca(const gsl_rng *rng_ptr, << "can't open one or more temporary files for writing:\n" << cnt_i_file << "\n" << cnt_r_file << "\n\n"; - throw(1); + throw 1; } // @@ -1512,7 +1508,7 @@ void compute_cts_stats_ci_perc(const gsl_rng *rng_ptr, if(cts_r_out) { delete [] cts_r_out; cts_r_out = (ofstream *) nullptr; } if(cts_r_file) { delete [] cts_r_file; cts_r_file = (ConcatString *) nullptr; } - throw(1); + throw 1; } } @@ -1897,7 +1893,7 @@ void compute_mcts_stats_ci_perc(const gsl_rng *rng_ptr, mlog << Error << "\ncompute_mcts_stats_ci_perc() -> " << "can't open the temporary file for writing:\n" << mcts_r_file << "\n\n"; - throw(1); + throw 1; } // @@ -2086,7 +2082,7 @@ void compute_cnt_stats_ci_perc(const gsl_rng *rng_ptr, mlog << Error << "\ncompute_cnt_stats_ci_perc() -> " << "can't open the temporary file for writing:\n" << cnt_r_file << "\n\n"; - throw(1); + throw 1; } // @@ -2543,7 +2539,7 @@ void compute_nbrcts_stats_ci_bca(const gsl_rng *rng_ptr, 
if(nbrcts_i_file) { delete [] nbrcts_i_file; nbrcts_i_file = (ConcatString *) nullptr; } if(nbrcts_r_file) { delete [] nbrcts_r_file; nbrcts_r_file = (ConcatString *) nullptr; } - throw(1); + throw 1; } } @@ -2943,7 +2939,7 @@ void compute_nbrcnt_stats_ci_bca(const gsl_rng *rng_ptr, << "can't open one or more temporary files for writing:\n" << nbrcnt_i_file << "\n" << nbrcnt_r_file << "\n\n"; - throw(1); + throw 1; } // @@ -3184,7 +3180,7 @@ void compute_nbrcts_stats_ci_perc(const gsl_rng *rng_ptr, if(nbrcts_r_out) { delete [] nbrcts_r_out; nbrcts_r_out = (ofstream *) nullptr; } if(nbrcts_r_file) { delete [] nbrcts_r_file; nbrcts_r_file = (ConcatString *) nullptr; } - throw(1); + throw 1; } } @@ -3543,7 +3539,7 @@ void compute_nbrcnt_stats_ci_perc(const gsl_rng *rng_ptr, mlog << Error << "\ncompute_nbrcnt_stats_ci_perc() -> " << "can't open the temporary file for writing:\n" << nbrcnt_r_file << "\n\n"; - throw(1); + throw 1; } // @@ -4009,7 +4005,7 @@ void read_ldf(const ConcatString file_name, int col, NumArray &na) { mlog << Error << "\nread_ldf() -> " << "can't open file: " << file_name << "\n\n" ; - throw(1); + throw 1; } // diff --git a/src/libcode/vx_statistics/compute_ci.h b/src/libcode/vx_statistics/compute_ci.h index 03dc64d8cf..5617ced8f3 100644 --- a/src/libcode/vx_statistics/compute_ci.h +++ b/src/libcode/vx_statistics/compute_ci.h @@ -28,21 +28,21 @@ static const int wald_sample_threshold = 100; extern void compute_normal_ci(double x, double alpha, double se, double &cl, double &cu); -extern void compute_proportion_ci(double p, int n, double alpha, +extern void compute_proportion_ci(double p, int n_pairs, double alpha, double vif, double &p_cl, double &p_cu); -extern void compute_wald_ci(double p, int n, double alpha, +extern void compute_wald_ci(double p, int n_pairs, double alpha, double vif, double &p_cl, double &p_cu); -extern void compute_wilson_ci(double p, int n, double alpha, +extern void compute_wilson_ci(double p, int n_pairs, double alpha, 
double vif, double &p_cl, double &p_cu); extern void compute_woolf_ci(double odds, double alpha, - int fy_oy, int fy_on, int fn_oy, int fn_on, + double fy_oy, double fy_on, double fn_oy, double fn_on, double &odds_cl, double &odds_cu); extern void compute_hk_ci(double hk, double alpha, double vif, - int fy_oy, int fy_on, int fn_oy, int fn_on, + double fy_oy, double fy_on, double fn_oy, double fn_on, double &hk_cl, double &hk_cu); extern void compute_cts_stats_ci_bca(const gsl_rng *, diff --git a/src/libcode/vx_statistics/compute_stats.cc b/src/libcode/vx_statistics/compute_stats.cc index e4fe518558..3ebd4b9058 100644 --- a/src/libcode/vx_statistics/compute_stats.cc +++ b/src/libcode/vx_statistics/compute_stats.cc @@ -27,112 +27,97 @@ using namespace std; const int detailed_debug_level = 5; - //////////////////////////////////////////////////////////////////////// -void compute_cntinfo(const SL1L2Info &s, bool aflag, CNTInfo &cnt_info) { - double fbar, obar, ffbar, fobar, oobar, den; - int n; +void compute_cntinfo(const SL1L2Info &s, CNTInfo &cnt_info) { + + // Initialize statistics + cnt_info.zero_out(); - // Set the quantities that can't be derived from SL1L2Info to bad data - cnt_info.sp_corr.set_bad_data(); - cnt_info.kt_corr.set_bad_data(); - cnt_info.e10.set_bad_data(); - cnt_info.e25.set_bad_data(); - cnt_info.e50.set_bad_data(); - cnt_info.e75.set_bad_data(); - cnt_info.e90.set_bad_data(); - cnt_info.eiqr.set_bad_data(); - cnt_info.mad.set_bad_data(); - cnt_info.n_ranks = 0; - cnt_info.frank_ties = 0; - cnt_info.orank_ties = 0; - - // Get partial sums - n = (aflag ? s.sacount : s.scount); - fbar = (aflag ? s.fabar : s.fbar); - obar = (aflag ? s.oabar : s.obar); - fobar = (aflag ? s.foabar : s.fobar); - ffbar = (aflag ? s.ffabar : s.ffbar); - oobar = (aflag ? 
s.ooabar : s.oobar); + // Check for consistent counts + if(s.scount > 0 && s.sacount > 0 && + s.scount != s.sacount) { + mlog << Error << "\ncompute_cntinfo() -> " + << "the scalar partial sum and scalar anomaly partial sum " + << "counts are both non-zero but do not match (" + << s.scount << " != " << s.sacount << ").\n\n"; + exit(1); + } // Number of matched pairs + int n = max(s.scount, s.sacount); cnt_info.n = n; - // Forecast mean and standard deviation - cnt_info.fbar.v = fbar; - cnt_info.fstdev.v = compute_stdev(fbar*n, ffbar*n, n); - - // Observation mean and standard deviation - cnt_info.obar.v = obar; - cnt_info.ostdev.v = compute_stdev(obar*n, oobar*n, n); - - // Multiplicative bias - cnt_info.mbias.v = (is_eq(obar, 0.0) ? bad_data_double : fbar/obar); - - // Correlation coefficient - - // Handle SAL1L2 data - if(aflag) { - cnt_info.pr_corr.v = bad_data_double; - cnt_info.anom_corr.v = compute_corr( fbar*n, obar*n, - ffbar*n, oobar*n, - fobar*n, n); - cnt_info.rmsfa.v = sqrt(ffbar); - cnt_info.rmsoa.v = sqrt(oobar); - cnt_info.anom_corr_uncntr.v = compute_anom_corr_uncntr(ffbar, oobar, - fobar); - } - // Handle SL1L2 data - else { - cnt_info.pr_corr.v = compute_corr( fbar*n, obar*n, - ffbar*n, oobar*n, - fobar*n, n); - cnt_info.anom_corr.v = bad_data_double; - cnt_info.rmsfa.v = bad_data_double; - cnt_info.rmsoa.v = bad_data_double; - cnt_info.anom_corr_uncntr.v = bad_data_double; - } + // Process scalar partial sum statistics + if(s.scount > 0) { - // Compute mean error - cnt_info.me.v = fbar - obar; + // Forecast mean and standard deviation + cnt_info.fbar.v = s.fbar; + cnt_info.fstdev.v = compute_stdev(s.fbar*n, s.ffbar*n, n); - // Compute mean error squared - cnt_info.me2.v = cnt_info.me.v * cnt_info.me.v; + // Observation mean and standard deviation + cnt_info.obar.v = s.obar; + cnt_info.ostdev.v = compute_stdev(s.obar*n, s.oobar*n, n); - // Compute mean absolute error - cnt_info.mae.v = s.mae; + // Multiplicative bias + cnt_info.mbias.v = 
(is_eq(s.obar, 0.0) ? bad_data_double : s.fbar/s.obar); - // Compute mean squared error - cnt_info.mse.v = ffbar + oobar - 2.0*fobar; + // Correlation coefficient + cnt_info.pr_corr.v = compute_corr( s.fbar*n, s.obar*n, + s.ffbar*n, s.oobar*n, + s.fobar*n, n); - // Compute mean squared error skill score - den = cnt_info.ostdev.v * cnt_info.ostdev.v; - if(!is_eq(den, 0.0)) { - cnt_info.msess.v = 1.0 - cnt_info.mse.v / den; - } - else { - cnt_info.msess.v = bad_data_double; - } + // Compute mean error + cnt_info.me.v = s.fbar - s.obar; - // Compute standard deviation of the mean error - cnt_info.estdev.v = compute_stdev(cnt_info.me.v*n, - cnt_info.mse.v*n, n); + // Compute mean error squared + cnt_info.me2.v = cnt_info.me.v * cnt_info.me.v; - // Compute bias corrected mean squared error (decomposition of MSE) - cnt_info.bcmse.v = cnt_info.mse.v - (fbar - obar)*(fbar - obar); + // Compute mean absolute error + cnt_info.mae.v = s.smae; - // Compute root mean squared error - cnt_info.rmse.v = sqrt(cnt_info.mse.v); + // Compute mean squared error + cnt_info.mse.v = s.ffbar + s.oobar - 2.0*s.fobar; - // Compute Scatter Index (SI) - if(!is_eq(cnt_info.obar.v, 0.0)) { - cnt_info.si.v = cnt_info.rmse.v / cnt_info.obar.v; + // Compute mean squared error skill score + double den = cnt_info.ostdev.v * cnt_info.ostdev.v; + if(!is_eq(den, 0.0)) { + cnt_info.msess.v = 1.0 - cnt_info.mse.v / den; + } + else { + cnt_info.msess.v = bad_data_double; + } + + // Compute standard deviation of the mean error + cnt_info.estdev.v = compute_stdev(cnt_info.me.v*n, + cnt_info.mse.v*n, n); + + // Compute bias corrected mean squared error (decomposition of MSE) + cnt_info.bcmse.v = cnt_info.mse.v - (s.fbar - s.obar)*(s.fbar - s.obar); + + // Compute root mean squared error + cnt_info.rmse.v = sqrt(cnt_info.mse.v); + + // Compute Scatter Index (SI) + if(!is_eq(cnt_info.obar.v, 0.0)) { + cnt_info.si.v = cnt_info.rmse.v / cnt_info.obar.v; + } + else { + cnt_info.si.v = bad_data_double; + } } - else 
{ - cnt_info.si.v = bad_data_double; + + // Process scalar anomaly partial sum statistics + if(s.sacount > 0) { + cnt_info.anom_corr.v = compute_corr( s.fabar*n, s.oabar*n, + s.ffabar*n, s.ooabar*n, + s.foabar*n, n); + cnt_info.rmsfa.v = sqrt(s.ffabar); + cnt_info.rmsoa.v = sqrt(s.ooabar); + cnt_info.anom_corr_uncntr.v = compute_anom_corr_uncntr(s.ffabar, s.ooabar, + s.foabar); } - + // Compute normal confidence intervals cnt_info.compute_ci(); @@ -150,11 +135,10 @@ void compute_cntinfo(const PairDataPoint &pd, const NumArray &i_na, bool precip_flag, bool rank_flag, bool normal_ci_flag, CNTInfo &cnt_info) { int i, j, n; - double f, o, c, wgt, wgt_sum; + double f, o, fc, oc, wgt, wgt_sum; double f_bar, o_bar, ff_bar, oo_bar, fo_bar; double fa_bar, oa_bar, ffa_bar, ooa_bar, foa_bar; double err, err_bar, abs_err_bar, err_sq_bar, den; - bool cmn_flag; // // Allocate memory to store the differences @@ -176,7 +160,8 @@ void compute_cntinfo(const PairDataPoint &pd, const NumArray &i_na, // // Flag to process climo // - cmn_flag = set_climo_flag(pd.f_na, pd.cmn_na); + bool cmn_flag = set_climo_flag(pd.f_na, pd.fcmn_na) && + set_climo_flag(pd.f_na, pd.ocmn_na); // // Get the sum of the weights @@ -199,7 +184,8 @@ void compute_cntinfo(const PairDataPoint &pd, const NumArray &i_na, f = pd.f_na[j]; o = pd.o_na[j]; - c = (cmn_flag ? pd.cmn_na[j] : bad_data_double); + fc = (cmn_flag ? pd.fcmn_na[j] : bad_data_double); + oc = (cmn_flag ? 
pd.ocmn_na[j] : bad_data_double); wgt = pd.wgt_na[i]/wgt_sum; // @@ -207,7 +193,8 @@ void compute_cntinfo(const PairDataPoint &pd, const NumArray &i_na, // if(is_bad_data(f) || is_bad_data(o) || - (cmn_flag && is_bad_data(c))) continue; + (cmn_flag && is_bad_data(fc)) || + (cmn_flag && is_bad_data(oc))) continue; // // Compute the error @@ -226,11 +213,11 @@ void compute_cntinfo(const PairDataPoint &pd, const NumArray &i_na, n++; if(cmn_flag) { - fa_bar += wgt*(f-c); - oa_bar += wgt*(o-c); - foa_bar += wgt*(f-c)*(o-c); - ffa_bar += wgt*(f-c)*(f-c); - ooa_bar += wgt*(o-c)*(o-c); + fa_bar += wgt*(f-fc); + oa_bar += wgt*(o-oc); + foa_bar += wgt*(f-fc)*(o-oc); + ffa_bar += wgt*(f-fc)*(f-fc); + ooa_bar += wgt*(o-oc)*(o-oc); } } // end for i @@ -587,7 +574,9 @@ void compute_ctsinfo(const PairDataPoint &pd, const NumArray &i_na, // // Add this pair to the contingency table // - cts_info.add(pd.f_na[j], pd.o_na[j], pd.cmn_na[j], pd.csd_na[j]); + ClimoPntInfo cpi(pd.fcmn_na[j], pd.fcsd_na[j], + pd.ocmn_na[j], pd.ocsd_na[j]); + cts_info.add(pd.f_na[j], pd.o_na[j], pd.wgt_na[j], &cpi); } // end for i @@ -684,7 +673,9 @@ void compute_mctsinfo(const PairDataPoint &pd, const NumArray &i_na, // // Add this pair to the contingency table // - mcts_info.add(pd.f_na[j], pd.o_na[j], pd.cmn_na[j], pd.csd_na[j]); + ClimoPntInfo cpi(pd.fcmn_na[j], pd.fcsd_na[j], + pd.ocmn_na[j], pd.ocsd_na[j]); + mcts_info.add(pd.f_na[j], pd.o_na[j], pd.wgt_na[j], &cpi); } // end for i @@ -761,15 +752,28 @@ void compute_pctinfo(const PairDataPoint &pd, bool pstd_flag, n_pair = pd.f_na.n(); // Flag to process climo - cmn_flag = (set_climo_flag(pd.f_na, pd.cmn_na) || - (cprob_in && cprob_in->n() > 0)); + cmn_flag = (set_climo_flag(pd.f_na, pd.ocmn_na) || + (cprob_in && cprob_in->n() > 0)); // Use input climatological probabilities or derive them if(cmn_flag) { - if(cprob_in) climo_prob = *cprob_in; - else climo_prob = derive_climo_prob(pd.cdf_info_ptr, - pd.cmn_na, pd.csd_na, - pct_info.othresh); + + // 
Use climatological probabilities direclty, if supplied + if(cprob_in) { + climo_prob = *cprob_in; + } + // Use observation climatology data, if available + else if(pd.ocmn_na.n() > 0) { + climo_prob = derive_climo_prob(pd.cdf_info_ptr, + pd.ocmn_na, pd.ocsd_na, + pct_info.othresh); + } + // Otherwise, try using forecast climatology data + else { + climo_prob = derive_climo_prob(pd.cdf_info_ptr, + pd.fcmn_na, pd.fcsd_na, + pct_info.othresh); + } } // @@ -797,16 +801,22 @@ void compute_pctinfo(const PairDataPoint &pd, bool pstd_flag, // for(i=0; i 0) { @@ -1109,6 +1119,7 @@ void compute_sl1l2_mean(const SL1L2Info *sl1l2_info, int n, sl1l2_mean.oabar += sl1l2_info[i].oabar; sl1l2_mean.ffabar += sl1l2_info[i].ffabar; sl1l2_mean.ooabar += sl1l2_info[i].ooabar; + sl1l2_mean.samae += sl1l2_info[i].samae; } } // end for i @@ -1118,13 +1129,14 @@ void compute_sl1l2_mean(const SL1L2Info *sl1l2_info, int n, sl1l2_mean.obar /= n_sl1l2; sl1l2_mean.ffbar /= n_sl1l2; sl1l2_mean.oobar /= n_sl1l2; - sl1l2_mean.mae /= n_sl1l2; + sl1l2_mean.smae /= n_sl1l2; } if(sl1l2_mean.sacount > 0) { sl1l2_mean.fabar /= n_sal1l2; sl1l2_mean.oabar /= n_sal1l2; sl1l2_mean.ffabar /= n_sal1l2; sl1l2_mean.ooabar /= n_sal1l2; + sl1l2_mean.samae /= n_sal1l2; } return; @@ -1401,7 +1413,7 @@ void compute_ecnt_mean(const ECNTInfo *ecnt_info, int n, // //////////////////////////////////////////////////////////////////////// -void compute_aggregated_seeps(const PairDataPoint *pd, SeepsAggScore *seeps) { +void compute_aggregated_seeps(const PairDataPoint *pd, SeepsAggScore *seeps_agg) { static const char *method_name = "compute_seeps_agg() -> "; // @@ -1414,14 +1426,22 @@ void compute_aggregated_seeps(const PairDataPoint *pd, SeepsAggScore *seeps) { throw(1); } - SeepsScore *seeps_mpr; - int count, count_diagonal; - int c12, c13, c21, c23, c31, c32; - double score_sum, obs_sum, fcst_sum; + SeepsScore *seeps_mpr = nullptr; + int count = 0; + int count_diagonal = 0; + int c_odfl = 0; + int c_odfh = 0; + int 
c_olfd = 0; + int c_olfh = 0; + int c_ohfd = 0; + int c_ohfl = 0; + double score_sum = 0.0; + double obs_sum_wgt = 0.0; + double fcst_sum_wgt = 0.0; + double obs_sum = 0.0; + double fcst_sum = 0.0; vector seeps_mprs; - score_sum = obs_sum = fcst_sum = 0.0; - count = count_diagonal = c12 = c13 = c21 = c23 = c31 = c32 = 0; for(int i=0; in_obs; i++) { if (i >= pd->seeps_mpr.size()) break; seeps_mpr = pd->seeps_mpr[i]; @@ -1432,49 +1452,64 @@ void compute_aggregated_seeps(const PairDataPoint *pd, SeepsAggScore *seeps) { fcst_sum += pd->f_na[i]; // Forecast Value score_sum += seeps_mpr->score; if (seeps_mpr->fcst_cat == 0) { - if (seeps_mpr->obs_cat == 1) c12++; - else if(seeps_mpr->obs_cat == 2) c13++; + if (seeps_mpr->obs_cat == 1) c_olfd++; + else if(seeps_mpr->obs_cat == 2) c_ohfd++; else count_diagonal++; } else if (seeps_mpr->fcst_cat == 1) { - if (seeps_mpr->obs_cat == 0) c21++; - else if(seeps_mpr->obs_cat == 2) c23++; + if (seeps_mpr->obs_cat == 0) c_odfl++; + else if(seeps_mpr->obs_cat == 2) c_ohfl++; else count_diagonal++; } else if (seeps_mpr->fcst_cat == 2) { - if (seeps_mpr->obs_cat == 0) c31++; - else if (seeps_mpr->obs_cat == 1) c32++; + if (seeps_mpr->obs_cat == 0) c_odfh++; + else if (seeps_mpr->obs_cat == 1) c_olfh++; else count_diagonal++; } seeps_mprs.push_back(seeps_mpr); } if (count > 0) { - vector density_vector; - double pvf[SEEPS_MATRIX_SIZE]; - double weighted_score, weight_sum, weight[count]; - seeps->n_obs = count; - seeps->mean_fcst = fcst_sum / count; - seeps->mean_obs = obs_sum / count; - seeps->score = score_sum / count; + mlog << Debug(9) << method_name + << "Categories c_odfl, c_odfh, c_olfd, c_olfh, c_ohfd, c_ohfl => " + << c_odfl << " " << c_odfh << " " << c_olfd << " " + << c_olfh << " " << c_ohfd << " " << c_ohfl << "\n"; + + // Unweighted means + seeps_agg->n_obs = count; + seeps_agg->mean_fcst = fcst_sum / count; + seeps_agg->mean_obs = obs_sum / count; + seeps_agg->score = score_sum / count; - weighted_score = 0.; - for (int 
i=0; i " + << seeps_agg->mean_fcst << " " + << seeps_agg->mean_obs << " " + << seeps_agg->score << "\n"; - compute_seeps_density_vector(pd, seeps, density_vector); + double score_sum_wgt = 0.0; + vector pvf(SEEPS_MATRIX_SIZE, 0.0); + vector svf(SEEPS_MATRIX_SIZE, 0.0); + + vector density_vector; + compute_seeps_density_vector(pd, seeps_agg, density_vector); int density_cnt = density_vector.size(); if(density_cnt > count) density_cnt = count; //IDL: w = 1/d - weight_sum = 0.; - for (int i=0; i weight(count, 0.0); for (int i=0; i " + << i << " " << density_vector[i] << " " + << weight[i] << " " << weight_sum << "\n"; } } - if (!is_eq(weight_sum, 0)) { + if (!is_eq(weight_sum, 0.0)) { //IDL: w = w/sum(w) for (int i=0; iscore * weight[i]; + mlog << Debug(9) << method_name + << "i, seeps_mpr, weight(i), s_idx => " + << i << " " << seeps_mpr->score + << " " << weight[i] << " " << seeps_mpr->s_idx << "\n"; + score_sum_wgt += seeps_mpr->score * weight[i]; + obs_sum_wgt += pd->o_na[i] * weight[i]; + fcst_sum_wgt += pd->f_na[i] * weight[i]; + mlog << Debug(9) << method_name + << "score_sum_wgt (seeps_mpr*weight) => " + << score_sum_wgt << "\n"; //IDL: svf(cat{i)) = svf(cat{i)) + c(4+cat(i) * w{i) //IDL: pvf(cat{i)) = pvf(cat{i)) + w{i) pvf[seeps_mpr->s_idx] += weight[i]; + svf[seeps_mpr->s_idx] += seeps_mpr->score * weight[i]; } else { mlog << Debug(1) << method_name @@ -1498,45 +1543,49 @@ void compute_aggregated_seeps(const PairDataPoint *pd, SeepsAggScore *seeps) { } density_vector.clear(); - seeps_mprs.clear(); - // The weight for s12 to s32 should come from climo file, but not available yet - seeps->pv1 = pvf[0] + pvf[3] + pvf[6]; // sum by column for obs - seeps->pv2 = pvf[1] + pvf[4] + pvf[7]; // sum by column for obs - seeps->pv3 = pvf[2] + pvf[5] + pvf[8]; // sum by column for obs - seeps->pf1 = pvf[0] + pvf[1] + pvf[2]; // sum by row for forecast - seeps->pf2 = pvf[3] + pvf[4] + pvf[5]; // sum by row for forecast - seeps->pf3 = pvf[6] + pvf[7] + pvf[8]; // sum by 
row for forecast - seeps->s12 = c12 * seeps->pf1 * seeps->pv2; - seeps->s13 = c13 * seeps->pf1 * seeps->pv3; - seeps->s21 = c21 * seeps->pf2 * seeps->pv1; - seeps->s23 = c23 * seeps->pf2 * seeps->pv3; - seeps->s31 = c31 * seeps->pf3 * seeps->pv1; - seeps->s32 = c32 * seeps->pf3 * seeps->pv2; - seeps->weighted_score = weighted_score; + // The weight for odfl to ohfl come from climo file + seeps_agg->pv1 = pvf[0] + pvf[3] + pvf[6]; // sum by column for obs + seeps_agg->pv2 = pvf[1] + pvf[4] + pvf[7]; // sum by column for obs + seeps_agg->pv3 = pvf[2] + pvf[5] + pvf[8]; // sum by column for obs + seeps_agg->pf1 = pvf[0] + pvf[1] + pvf[2]; // sum by row for forecast + seeps_agg->pf2 = pvf[3] + pvf[4] + pvf[5]; // sum by row for forecast + seeps_agg->pf3 = pvf[6] + pvf[7] + pvf[8]; // sum by row for forecast + seeps_agg->s_odfl = (is_eq(svf[3], 0.0) ? 0.0 : svf[3]); + seeps_agg->s_odfh = (is_eq(svf[6], 0.0) ? 0.0 : svf[6]); + seeps_agg->s_olfd = (is_eq(svf[1], 0.0) ? 0.0 : svf[1]); + seeps_agg->s_olfh = (is_eq(svf[7], 0.0) ? 0.0 : svf[7]); + seeps_agg->s_ohfd = (is_eq(svf[2], 0.0) ? 0.0 : svf[2]); + seeps_agg->s_ohfl = (is_eq(svf[5], 0.0) ? 
0.0 : svf[5]); + seeps_agg->mean_fcst_wgt = fcst_sum_wgt; + seeps_agg->mean_obs_wgt = obs_sum_wgt; + seeps_agg->score_wgt = score_sum_wgt; mlog << Debug(7) << method_name - << "SEEPS score=" << seeps->score << " weighted_score=" << weighted_score - << " pv1=" << seeps->pv1 << " pv2=" << seeps->pv2 << " pv3=" << seeps->pv3 - << " pf1=" << seeps->pf1 << " pf2=" << seeps->pf2 << " pf3=" << seeps->pf3 << "\n"; + << "SEEPS score=" << seeps_agg->score << " score_wgt=" << seeps_agg->score_wgt + << " pv1=" << seeps_agg->pv1 << " pv2=" << seeps_agg->pv2 << " pv3=" << seeps_agg->pv3 + << " pf1=" << seeps_agg->pf1 << " pf2=" << seeps_agg->pf2 << " pf3=" << seeps_agg->pf3 + << "\n"; } else { mlog << Debug(5) << method_name << "no SEEPS_MPR available\n"; } - seeps->c12 = c12; - seeps->c13 = c13; - seeps->c21 = c21; - seeps->c23 = c23; - seeps->c31 = c31; - seeps->c32 = c32; - - if (count != (c12+c13+c21+c23+c31+c32+count_diagonal)){ + + seeps_agg->c_odfl = c_odfl; + seeps_agg->c_odfh = c_odfh; + seeps_agg->c_olfd = c_olfd; + seeps_agg->c_olfh = c_olfh; + seeps_agg->c_ohfd = c_ohfd; + seeps_agg->c_ohfl = c_ohfl; + + if (count != (c_odfl+c_odfh+c_olfd+c_olfh+c_ohfd+c_ohfl+count_diagonal)){ mlog << Debug(6) << method_name - << "INFO check count: all=" << count << " s12=" << c12<< " s13=" << c13 - << " s21=" << c21 << " s23=" << c23 - << " s31=" << c31 << " s32=" << c32 << "\n"; + << "INFO check count: all=" << count + << " s_odfl=" << c_odfl << " s_odfh=" << c_odfh + << " s_olfd=" << c_olfd << " s_olfh=" << c_olfh + << " s_ohfd=" << c_ohfd << " s_ohfl=" << c_ohfl << "\n"; } return; @@ -1546,35 +1595,47 @@ void compute_aggregated_seeps(const PairDataPoint *pd, SeepsAggScore *seeps) { void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &obs_dp, DataPlane &seeps_dp, DataPlane &seeps_dp_fcat, - DataPlane &seeps_dp_ocat,SeepsAggScore *seeps, - int month, int hour, const SingleThresh &seeps_p1_thresh) { - int fcst_cat, obs_cat; - int seeps_count, count_diagonal, 
nan_count, bad_count; + DataPlane &seeps_dp_ocat, SeepsAggScore *seeps_agg, + int month, int hour, const SingleThresh &seeps_p1_thresh, + const ConcatString &seeps_climo_name) { int nx = fcst_dp.nx(); int ny = fcst_dp.ny(); int dp_size = (nx * ny); - int pvf_cnt[SEEPS_MATRIX_SIZE]; - double pvf[SEEPS_MATRIX_SIZE]; - int c12, c13, c21, c23, c31, c32; - double obs_sum, fcst_sum; - double seeps_score, seeps_score_sum, seeps_score_partial_sum; - SeepsScore *seeps_mpr; static const char *method_name = "compute_aggregated_seeps_grid() -> "; + int fcst_cat = bad_data_int; + int obs_cat = bad_data_int; + int seeps_count = 0; + int count_diagonal = 0; + int nan_count = 0; + int bad_count = 0; + int c_odfl = 0; + int c_odfh = 0; + int c_olfd = 0; + int c_olfh = 0; + int c_ohfd = 0; + int c_ohfl = 0; + double seeps_score = 0.0; + double seeps_score_sum = 0.0; + double seeps_score_partial_sum = 0.0; + double obs_sum = 0.0; + double fcst_sum = 0.0; + seeps_dp.set_size(nx, ny); seeps_dp_fcat.set_size(nx, ny); seeps_dp_ocat.set_size(nx, ny); - obs_sum = fcst_sum = seeps_score_sum = 0.; - seeps_count = count_diagonal = nan_count = bad_count = 0; - c12 = c13 = c21 = c23 = c31 = c32 = 0; - seeps->clear(); - SeepsClimoGrid *seeps_climo = get_seeps_climo_grid(month); + seeps_agg->clear(); + mlog << Debug(9) << method_name + << "month is " << month << "\n"; + + SeepsClimoGrid *seeps_climo = get_seeps_climo_grid(month, seeps_climo_name); seeps_climo->set_p1_thresh(seeps_p1_thresh); - for (int i=0; i pvf_cnt(SEEPS_MATRIX_SIZE, 0); + vector pvf(SEEPS_MATRIX_SIZE, 0.0); + vector svf(SEEPS_MATRIX_SIZE, 0.0); + for (int ix=0; ixget_record(ix, iy, fcst_value, obs_value); + + mlog << Debug(9) << method_name + << "obs_value, fcst_value: " + << obs_value << " " << fcst_value << "\n"; + + if (!is_bad_data(fcst_value) && !is_bad_data(obs_value)) { + SeepsScore *seeps_mpr = seeps_climo->get_record(ix, iy, fcst_value, obs_value); if (seeps_mpr != nullptr) { fcst_cat = seeps_mpr->fcst_cat; obs_cat = 
seeps_mpr->obs_cat; if (fcst_cat == 0) { - if (obs_cat == 1) c12++; - else if(obs_cat == 2) c13++; + if (obs_cat == 1) c_olfd++; + else if(obs_cat == 2) c_odfh++; else count_diagonal++; } else if (fcst_cat == 1) { - if (obs_cat == 0) c21++; - else if(obs_cat == 2) c23++; + if (obs_cat == 0) c_odfl++; + else if(obs_cat == 2) c_ohfd++; else count_diagonal++; } else if (fcst_cat == 2) { - if (obs_cat == 0) c31++; - else if (obs_cat == 1) c32++; + if (obs_cat == 0) c_odfh++; + else if (obs_cat == 1) c_olfh++; else count_diagonal++; } seeps_score = seeps_mpr->score; + mlog << Debug(9) << method_name + << "ix, iy, obs_cat, fcst_cat, seeps_score:" + << ix << " " << iy << " " << obs_cat << " " << fcst_cat + << " " << seeps_score << "\n"; + if (std::isnan(seeps_score)) { nan_count++; seeps_score = bad_data_double; @@ -1618,9 +1689,13 @@ void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &ob //IDL: pvf(cat{i)) = pvf(cat{i)) + w{i) //pvf[seeps_mpr->s_idx] += weight; pvf_cnt[seeps_mpr->s_idx] += 1; + mlog << Debug(9) << method_name + << "obs_sum, fcst_sum, seeps_score_partial_sum, category: " + << obs_sum << " " << fcst_sum << " " + << seeps_score_partial_sum << " " << seeps_mpr->s_idx << "\n"; } - delete seeps_mpr; + if(seeps_mpr) { delete seeps_mpr; seeps_mpr = nullptr; } } } seeps_dp.set(seeps_score, ix, iy); @@ -1629,43 +1704,52 @@ void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &ob } seeps_score_sum += seeps_score_partial_sum; } + mlog << Debug(9) << method_name + << "dp_size, nan_count, bad_count: " + << dp_size << " " << nan_count << " " << bad_count << "\n"; int cell_count = dp_size - nan_count - bad_count; if (cell_count > 0) { - seeps->weighted_score = seeps_score_sum/cell_count; + seeps_agg->score_wgt = seeps_score_sum/cell_count; for (int i=0; in_obs = seeps_count; - seeps->c12 = c12; - seeps->c13 = c13; - seeps->c21 = c21; - seeps->c23 = c23; - seeps->c31 = c31; - seeps->c32 = c32; + seeps_agg->n_obs = 
seeps_count; + seeps_agg->c_odfl = c_odfl; + seeps_agg->c_odfh = c_odfh; + seeps_agg->c_olfd = c_olfd; + seeps_agg->c_olfh = c_olfh; + seeps_agg->c_ohfd = c_ohfd; + seeps_agg->c_ohfl = c_ohfl; + if (seeps_count > 0) { - seeps->mean_fcst = fcst_sum / seeps_count; - seeps->mean_obs = obs_sum / seeps_count; - - seeps->pv1 = pvf[0] + pvf[3] + pvf[6]; // sum by column for obs - seeps->pv2 = pvf[1] + pvf[4] + pvf[7]; // sum by column for obs - seeps->pv3 = pvf[2] + pvf[5] + pvf[8]; // sum by column for obs - seeps->pf1 = pvf[0] + pvf[1] + pvf[2]; // sum by row for forecast - seeps->pf2 = pvf[3] + pvf[4] + pvf[5]; // sum by row for forecast - seeps->pf3 = pvf[6] + pvf[7] + pvf[8]; // sum by row for forecast - seeps->s12 = c12 * seeps->pf1 * seeps->pv2; - seeps->s13 = c13 * seeps->pf1 * seeps->pv3; - seeps->s21 = c21 * seeps->pf2 * seeps->pv1; - seeps->s23 = c23 * seeps->pf2 * seeps->pv3; - seeps->s31 = c31 * seeps->pf3 * seeps->pv1; - seeps->s32 = c32 * seeps->pf3 * seeps->pv2; - seeps->score = seeps_score_sum / seeps_count; + seeps_agg->pv1 = pvf[0] + pvf[3] + pvf[6]; // sum by column for obs + seeps_agg->pv2 = pvf[1] + pvf[4] + pvf[7]; // sum by column for obs + seeps_agg->pv3 = pvf[2] + pvf[5] + pvf[8]; // sum by column for obs + seeps_agg->pf1 = pvf[0] + pvf[1] + pvf[2]; // sum by row for forecast + seeps_agg->pf2 = pvf[3] + pvf[4] + pvf[5]; // sum by row for forecast + seeps_agg->pf3 = pvf[6] + pvf[7] + pvf[8]; // sum by row for forecast + + seeps_agg->s_odfl = (is_eq(svf[3], 0.0) ? 0.0 : svf[3]); + seeps_agg->s_odfh = (is_eq(svf[6], 0.0) ? 0.0 : svf[6]); + seeps_agg->s_olfd = (is_eq(svf[1], 0.0) ? 0.0 : svf[1]); + seeps_agg->s_olfh = (is_eq(svf[7], 0.0) ? 0.0 : svf[7]); + seeps_agg->s_ohfd = (is_eq(svf[2], 0.0) ? 0.0 : svf[2]); + seeps_agg->s_ohfl = (is_eq(svf[5], 0.0) ? 
0.0 : svf[5]); + + seeps_agg->mean_fcst = fcst_sum / seeps_count; + seeps_agg->mean_obs = obs_sum / seeps_count; + seeps_agg->score = seeps_score_sum / seeps_count; } + mlog << Debug(6) << method_name - << "SEEPS score=" << seeps->score << " weighted_score=" << seeps->weighted_score - << " pv1=" << seeps->pv1 << " pv2=" << seeps->pv2 << " pv3=" << seeps->pv3 - << " pf1=" << seeps->pf1 << " pf2=" << seeps->pf2 << " pf3=" << seeps->pf3 << "\n"; + << "SEEPS score=" << seeps_agg->score + << " score_wgt=" << seeps_agg->score_wgt + << " pv1=" << seeps_agg->pv1 << " pv2=" << seeps_agg->pv2 << " pv3=" << seeps_agg->pv3 + << " pf1=" << seeps_agg->pf1 << " pf2=" << seeps_agg->pf2 << " pf3=" << seeps_agg->pf3 + << "\n"; + if(mlog.verbosity_level() >= detailed_debug_level) { char buffer[100]; ConcatString log_message; @@ -1677,8 +1761,9 @@ void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &ob snprintf ( buffer, 100, " nan: %d ", nan_count); log_message.add(buffer); } - mlog << Debug(7) << method_name << "pvf = " << log_message - << " weight=" << (1. / cell_count) << " (1/" << cell_count << ")" << "\n"; + mlog << Debug(7) << method_name + << "pvf = " << log_message << " weight=" << (1. 
/ cell_count) + << " (1/" << cell_count << ")" << "\n"; } } @@ -1714,7 +1799,8 @@ void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &ob // ; PV-WAVE prints: 2.00000 4.00000 //////////////////////////////////////////////////////////////////////// -void compute_seeps_density_vector(const PairDataPoint *pd, SeepsAggScore *seeps, vector &density_vector) { +void compute_seeps_density_vector(const PairDataPoint *pd, SeepsAggScore *seeps, + vector &density_vector) { int seeps_idx; SeepsScore *seeps_mpr; int seeps_cnt = seeps->n_obs; @@ -1740,7 +1826,14 @@ void compute_seeps_density_vector(const PairDataPoint *pd, SeepsAggScore *seeps, for(int i=0; in_obs; i++) { if (i >= pd->seeps_mpr.size()) break; seeps_mpr = pd->seeps_mpr[i]; + mlog << Debug(9) << method_name + << "seeps_idx, seeps_mpr => " + << seeps_idx << " " << seeps_mpr << "\n"; + if (!seeps_mpr || is_eq(seeps_mpr->score, bad_data_double)) continue; + mlog << Debug(4) << method_name + << "lat, long => " << pd->lat_na[i] << " " + << fmod((pd->lon_na[i] + 360.), 360.) << "\n"; rlat[seeps_idx] = pd->lat_na[i] * rad_per_deg; // radian of lat rlon[seeps_idx] = fmod((pd->lon_na[i] + 360.), 360.) 
* rad_per_deg; // radian of long @@ -1748,15 +1841,22 @@ void compute_seeps_density_vector(const PairDataPoint *pd, SeepsAggScore *seeps, clon[seeps_idx] = cos(rlon[seeps_idx]); slat[seeps_idx] = sin(rlat[seeps_idx]); slon[seeps_idx] = sin(rlon[seeps_idx]); + mlog << Debug(9) << method_name + << "clat, clon, slat, slon => " + << clat[seeps_idx] << " " << clon[seeps_idx] + << " " << slat[seeps_idx] << " " << slon[seeps_idx] << "\n"; seeps_idx++; } - // prooducs n by n matrix by multipling transposed vector - int v_count; - double density, mask1, mask2, mask3, temp; + // produces n by n matrix by multiplying transposed vector + // Initialize - v_count = 0; + int v_count = 0; + mlog << Debug(9) << method_name + << "seeps_idx, seeps_cnt => " + << seeps_idx << " " << seeps_cnt << "\n"; + if (seeps_idx < seeps_cnt) seeps_cnt = seeps_idx; density_vector.reserve(seeps_cnt); for(int j=0; j " + << j << " " << i << " " << clat_m[i][j] << " " << slat_m[i][j] << " " + << clon_m[i][j] << " " << slon_m[i][j] << "\n"; + //IDL: r=(clat#transpose(clat))*(slon#transpose(slon)) + (clon#transpose(clon))*(slat#transpose(slat)) - density = clat_m[i][j] * slon_m[i][j] + clon_m[i][j] * slat_m[i][j]; + double density = clat_m[i][j] * slon_m[i][j] + clon_m[i][j] * slat_m[i][j]; + double density2 = (clat_m[i][j] * (slon_m[i][j] + clon_m[i][j])) + slat_m[i][j]; + mlog << Debug(9) << method_name + << "density, density2 => " << density << " " << density2 << "\n"; + //IDL: r * ((r lt 1.) and (r gt -1.)) + (r ge 1.) - (r le -1.) - mask1 = (density < 1.0 && density > -1.0) ? 1. : 0.; - mask2 = (density >= 1.0 ) ? 1. : 0.; - mask3 = (density <= -1.0) ? 1. : 0.; + double mask1 = (density < 1.0 && density > -1.0) ? 1. : 0.; + double mask2 = (density >= 1.0 ) ? 1. : 0.; + double mask3 = (density <= -1.0) ? 1. : 0.; + double mask5 = (density2 < 1.0 && density > -1.0) ? 1. : 0.; + double mask6 = (density2 >= 1.0 ) ? 1. : 0.; + double mask7 = (density2 <= -1.0) ? 1. 
: 0.; density = density * mask1 + mask2 - mask3; + density2 = density2 * mask5 + mask6 - mask7; + mlog << Debug(9) << method_name + << "density, density2 => " << density << " " << density2 << "\n"; + //IDL: r = acos(r) density = acos(density); //IDL: if r0 gt 0.0 then r = exp(-(r/r0)^2) * (r le 4. * r0) else r = (r*0.)+1. - if (density_radius_rad <= 0.) density = 1.0; - else { - mask3 = (density <= 4.0) ? 1. : 0.; - temp = density / density_radius_rad; - density = exp(-(temp * temp)) * mask3 * density_radius_rad; + if (density_radius_rad > 0.) { + if (density < 4.0 * density_radius_rad) { + mask3 = (density <= 4.0) ? 1. : 0.; + double temp = density / density_radius_rad; + density = exp(-(temp * temp)) * mask3 * density_radius_rad; + } + else { + density = 0.; + } + if (density2 < 4.0 * density_radius_rad) { + density2 = exp(-(pow(density2 / density_radius_rad,2))); + } + else { + density2 = 0.; + } + mlog << Debug(4) << method_name + << "final density, density2 => " + << density << " " << density2 << "\n"; + } else { + density = 1.; } - density_vector[j] += density; + mlog << Debug(4) << method_name + << "For Info - Feeding density2 (not density) back as vector " + << "as density all zeros in final output" << "\n"; + density_vector[j] += density2; } if (!is_eq(density_vector[j], 0.)) v_count++; } @@ -1792,8 +1926,8 @@ void compute_seeps_density_vector(const PairDataPoint *pd, SeepsAggScore *seeps, << "no non-zero values at density_vector\n"; } if (seeps_cnt > 0) { - mlog << Debug(10) << method_name - << " non zero count=" << v_count + mlog << Debug(9) << method_name + << " non zero count=" << v_count << " seeps_cnt=" << seeps_cnt << " density_vector[0]=" << density_vector[0] << " density_vector[" << (seeps_cnt-1) << "]=" << density_vector[seeps_cnt-1] << "\n"; } diff --git a/src/libcode/vx_statistics/compute_stats.h b/src/libcode/vx_statistics/compute_stats.h index 0fec8c360a..1649cdcec2 100644 --- a/src/libcode/vx_statistics/compute_stats.h +++ 
b/src/libcode/vx_statistics/compute_stats.h @@ -24,8 +24,7 @@ // //////////////////////////////////////////////////////////////////////// -extern void compute_cntinfo(const SL1L2Info &, bool, CNTInfo &); - +extern void compute_cntinfo(const SL1L2Info &, CNTInfo &); extern void compute_cntinfo(const PairDataPoint &, const NumArray &, bool, bool, bool, CNTInfo &); extern void compute_i_cntinfo(const PairDataPoint &, int, @@ -64,7 +63,8 @@ extern void compute_aggregated_seeps(const PairDataPoint *pd, SeepsAggScore *see extern void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &obs_dp, DataPlane &seeps_dp, DataPlane &seeps_dp_fcat, DataPlane &seeps_dp_ocat,SeepsAggScore *seeps, - int month, int hour, const SingleThresh &seeps_p1_thresh); + int month, int hour, const SingleThresh &seeps_p1_thresh, + const ConcatString &seeps_climo_name); extern void compute_seeps_density_vector(const PairDataPoint *pd, SeepsAggScore *seeps, std::vector &density_vector); diff --git a/src/libcode/vx_statistics/contable.cc b/src/libcode/vx_statistics/contable.cc index 8c6ba55c03..b0e2e2d5e1 100644 --- a/src/libcode/vx_statistics/contable.cc +++ b/src/libcode/vx_statistics/contable.cc @@ -6,11 +6,8 @@ // ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - - //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -26,1725 +23,676 @@ using namespace std; - -//////////////////////////////////////////////////////////////////////// - - -static int table_rc_to_n(int r_table, int c_table, int w, int h); - - //////////////////////////////////////////////////////////////////////// - - - // - // Code for class ContingencyTable - // - - +// +// Code for class ContingencyTable +// //////////////////////////////////////////////////////////////////////// - -ContingencyTable::ContingencyTable() -{ - -init_from_scratch(); - +ContingencyTable::ContingencyTable() { + 
init_from_scratch(); } - //////////////////////////////////////////////////////////////////////// - -ContingencyTable::~ContingencyTable() - -{ - -clear(); - +ContingencyTable::~ContingencyTable() { + clear(); } - //////////////////////////////////////////////////////////////////////// - -ContingencyTable::ContingencyTable(const ContingencyTable & t) -{ - -init_from_scratch(); - -assign(t); - +ContingencyTable::ContingencyTable(const ContingencyTable & t) { + init_from_scratch(); + assign(t); } - //////////////////////////////////////////////////////////////////////// - -ContingencyTable & ContingencyTable::operator=(const ContingencyTable & t) - -{ - -if ( this == &t ) return *this; - -assign(t); - -return *this; - +ContingencyTable & ContingencyTable::operator=(const ContingencyTable & t) { + if(this == &t) return *this; + assign(t); + return *this; } - //////////////////////////////////////////////////////////////////////// +ContingencyTable & ContingencyTable::operator+=(const ContingencyTable & t) { -ContingencyTable & ContingencyTable::operator+=(const ContingencyTable & t) - -{ - -if ( Nrows != t.Nrows || Ncols != t.Ncols ) { - - mlog << Error << "\nContingencyTable::operator+=() -> " - << "table dimensions do not match: (" << Nrows << ", " << Ncols - << ") != (" << t.Nrows << ", " << t.Ncols << ")\n\n"; - - exit ( 1 ); - -} - -if ( !is_eq(ECvalue, t.ECvalue) ) { - - mlog << Error << "\nContingencyTable::operator+=() -> " - << "the expected correct values do not match: " - << ECvalue << " != " << t.ECvalue << "\n\n"; - - exit ( 1 ); + // Check consistent dimensions + if(Nrows != t.Nrows || Ncols != t.Ncols) { + mlog << Error << "\nContingencyTable::operator+=() -> " + << "table dimensions do not match: (" << Nrows << ", " << Ncols + << ") != (" << t.Nrows << ", " << t.Ncols << ")\n\n"; + exit(1); + } -} + // Check consistent expected correct + if(!is_eq(ECvalue, t.ECvalue)) { + mlog << Error << "\nContingencyTable::operator+=() -> " + << "the expected 
correct values do not match: " + << ECvalue << " != " << t.ECvalue << "\n\n"; + exit(1); + } -if ( E ) { - for ( int i=0; isize(); ++i ) (*E)[i] += (*t.E)[i]; -} + // Increment the number of pairs + Npairs += t.Npairs; -return *this; + // Increment table entries + for(int i=0; i(); - Name.clear(); - Nrows = Ncols = 0; +void ContingencyTable::init_from_scratch() { + // No pointers to initialize } - //////////////////////////////////////////////////////////////////////// +void ContingencyTable::clear() { -void ContingencyTable::clear() -{ - if (E) delete E; - E = new vector(); - + E.clear(); + Nrows = Ncols = 0; + Npairs = 0; ECvalue = bad_data_double; Name.clear(); - Nrows = Ncols = 0; - + return; - } - //////////////////////////////////////////////////////////////////////// +void ContingencyTable::assign(const ContingencyTable & t) { -void ContingencyTable::assign(const ContingencyTable & t) -{ - - clear(); - - if(t.E->size() == 0) return; - - ContingencyTable::set_size(t.Nrows, t.Ncols); - - if (E) delete E; - E = new vector(*(t.E)); - ECvalue = t.ECvalue; - Name = t.Name; - - // - // done - // - - return; - -} + clear(); + E = t.E; + Nrows = t.Nrows; + Ncols = t.Ncols; + Npairs = t.Npairs; + ECvalue = t.ECvalue; + Name = t.Name; + + return; +} //////////////////////////////////////////////////////////////////////// +void ContingencyTable::zero_out() { -void ContingencyTable::zero_out() -{ - - int n = Nrows*Ncols; - - if ( n == 0 ) return; - - E->assign(n, 0); - - return; - + fill(E.begin(), E.end(), 0.0); + Npairs = 0; + + return; } //////////////////////////////////////////////////////////////////////// +void ContingencyTable::dump(ostream & out, int depth) const { + Indent prefix(depth); + ConcatString msg; -void ContingencyTable::dump(ostream & out, int depth) const + out << prefix << "Name = "; -{ + if(Name.nonempty()) out << R"(")" << Name << R"(")" << "\n"; + else out << "(nul)\n"; -int r, c; -Indent prefix(depth); -ConcatString junk; + out << prefix << 
"Nrows = " << Nrows << "\n"; + out << prefix << "Ncols = " << Ncols << "\n"; + out << prefix << "Npairs = " << Npairs << "\n"; + out << prefix << "ECvalue = " << ECvalue << "\n"; + out << prefix << "\n"; -out << prefix << "Name = "; + if(!E.empty()) { -if ( Name.length() > 0 ) out << '\"' << Name << "\"\n"; -else out << "(nul)\n"; + for(int r=0; rempty() ) { out.flush(); return; } + for(int c=0; c col_width(Ncols); + clear(); -for (c=0; c " + << "# rows (" << NR << ") and # cols (" << NC + << ") must be at least 2!\n\n"; + exit(1); + } - comma_string(c, junk); + Nrows = NR; + Ncols = NC; - col_width[c] = junk.length(); + E.resize(NR*NC, 0.0); - junk.format("%d", (int) col_total(c)); + // Set default expected correct value for square tables + if(Nrows == Ncols) ECvalue = 1.0 / Nrows; - k = junk.length(); + return; +} - if ( k > col_width[c] ) col_width[c] = k; +//////////////////////////////////////////////////////////////////////// - for (r=0; r col_width[c] ) col_width[c] = k; +void ContingencyTable::set_ec_value(double v) { - } + // Do not override the default value with bad data + if(!is_bad_data(v)) ECvalue = v; + return; } -w = 2*hpad*Ncols + Ncols + 1; +//////////////////////////////////////////////////////////////////////// -for (c=0; c table(w*h, ' '); + return; +} - // - // top, bottom - // +//////////////////////////////////////////////////////////////////////// -for (c_table=0; c_table= Nrows || c < 0 || c >= Ncols) { + mlog << Error << "\nContingencyTable::rc_to_n() -> " + << "range check error requesting (" << r << ", " + << c << ") from table with dimension (" << Nrows + << ", " << Ncols << ")!\n\n"; + exit(1); + } + return r*Ncols + c; +} - table[n] = '='; +//////////////////////////////////////////////////////////////////////// - n = table_rc_to_n(h - 1, c_table, w, h); +void ContingencyTable::set_entry(int row, int col, double value) { + E[(rc_to_n(row, col))] = value; - table[n] = '='; + // Number of pairs defined by set_n_pairs(int) + return; } - 
// - // left, right - // - -for (r_table=1; r_table<(h - 1); ++r_table) { - - n = table_rc_to_n(r_table, 0, w, h); +//////////////////////////////////////////////////////////////////////// - table[n] = v_sep; +void ContingencyTable::inc_entry(int row, int col, double weight) { - n = table_rc_to_n(r_table, w - 1, w, h); + E[(rc_to_n(row, col))] += weight; - table[n] = v_sep; + // Increment pair counter + Npairs++; + return; } - // - // col separators - // - -for (c=1; c " - << "c_table (" << c_table << ") is greater then w (" << w << ")\n\n"; + for(int col=0; col " - << "# rows (" << NR << ") and # cols (" << NC - << ") must be at least 2!\n\n"; - - exit ( 1 ); - +TTContingencyTable::TTContingencyTable(const TTContingencyTable & t) { + assign(t); } -int n; - -n = NR*NC; - -E->resize(n, 0); - -Nrows = NR; -Ncols = NC; - - // - // if square, set default expected correct value - // - -if ( Nrows == Ncols ) { - - ECvalue = 1.0 / Nrows; +//////////////////////////////////////////////////////////////////////// -} +TTContingencyTable & TTContingencyTable::operator=(const TTContingencyTable & t) { - // - // done - // + if(this == &t) return *this; -return; + assign(t); + return *this; } - //////////////////////////////////////////////////////////////////////// +void TTContingencyTable::set_fn_on(double k) { -void ContingencyTable::set_ec_value(double v) - -{ - - // - // do not override the default value with bad data - // - -if ( !is_bad_data(v) ) ECvalue = v; - -return; + set_entry(FN_row, ON_col, k); + return; } - //////////////////////////////////////////////////////////////////////// +void TTContingencyTable::set_fy_on(double k) { -void ContingencyTable::set_name(const char * text) - -{ - -Name = text; - -return; + set_entry(FY_row, ON_col, k); + return; } - //////////////////////////////////////////////////////////////////////// +void TTContingencyTable::set_fn_oy(double k) { -int ContingencyTable::rc_to_n(int r, int c) const - -{ - -if ( (r < 0) || (r >= Nrows) || (c 
< 0) || (c >= Ncols) ) { - - mlog << Error << "\nContingencyTable::rc_to_n() -> " - << "range check error!\n\n"; - - exit ( 1 ); + set_entry(FN_row, OY_col, k); + return; } -int n; - - -n = r*Ncols + c; - +//////////////////////////////////////////////////////////////////////// +void TTContingencyTable::set_fy_oy(double k) { -return n; + set_entry(FY_row, OY_col, k); + return; } - //////////////////////////////////////////////////////////////////////// +void TTContingencyTable::inc_fn_on(double weight) { -void ContingencyTable::set_entry(int row, int col, int value) - -{ - -int n; - -n = rc_to_n(row, col); - -(*E)[n] = value; - - -return; + inc_entry(FN_row, ON_col, weight); + return; } - //////////////////////////////////////////////////////////////////////// +void TTContingencyTable::inc_fy_on(double weight) { -void ContingencyTable::inc_entry(int row, int col) - -{ - -int n; - -n = rc_to_n(row, col); - -++((*E)[n]); - - - -return; + inc_entry(FY_row, ON_col, weight); + return; } - //////////////////////////////////////////////////////////////////////// +void TTContingencyTable::inc_fn_oy(double weight) { -int ContingencyTable::total() const - -{ - -const int n = Nrows*Ncols; - -if ( n == 0 ) return 0; - -int j, sum; + inc_entry(FN_row, OY_col, weight); -sum = 0; - -for (j=0; j= Nrows) ) { +//////////////////////////////////////////////////////////////////////// - mlog << Error << "\nContingencyTable::row_total() -> " - << "range check error!\n\n"; +double TTContingencyTable::fn_oy() const { + return entry(FN_row, OY_col); +} - exit ( 1 ); +//////////////////////////////////////////////////////////////////////// +double TTContingencyTable::fn_on() const { + return entry(FN_row, ON_col); } -int n, col, sum; - +//////////////////////////////////////////////////////////////////////// -sum = 0; +double TTContingencyTable::fy() const { + return row_total(FY_row); +} -for (col=0; col= Ncols) ) { +//////////////////////////////////////////////////////////////////////// 
- mlog << Error << "\nContingencyTable::col_total() -> " - << "range check error!\n\n"; +double TTContingencyTable::fy_oy_tp() const { + return compute_proportion(fy_oy(), total()); +} - exit ( 1 ); +//////////////////////////////////////////////////////////////////////// +double TTContingencyTable::fy_on_tp() const { + return compute_proportion(fy_on(), total()); } -int n, row, sum; - +//////////////////////////////////////////////////////////////////////// -sum = 0; +double TTContingencyTable::fn_oy_tp() const { + return compute_proportion(fn_oy(), total()); +} -for (row=0; row a ) a = (*E)[n]; +//////////////////////////////////////////////////////////////////////// +double TTContingencyTable::fn_on_op() const { + return compute_proportion(fn_on(), on()); } +//////////////////////////////////////////////////////////////////////// -return a; - +void TTContingencyTable::set_size(int N) { + mlog << Error << "\nTTContingencyTable::set_size(int) -> " + << "2 x 2 tables cannot be resized!\n\n"; + exit(1); } - //////////////////////////////////////////////////////////////////////// +void TTContingencyTable::set_size(int NR, int NC) { + mlog << Error << "\nTTContingencyTable::set_size(int, int) -> " + << "2 x 2 tables cannot be resized!\n\n"; + exit(1); +} -int ContingencyTable::smallest_entry() const +//////////////////////////////////////////////////////////////////////// +// +// Code for misc functions +// +//////////////////////////////////////////////////////////////////////// -{ +// +// Reference table 7.1a, page 242 in wilks +// -int n = Nrows*Ncols; +TTContingencyTable finley() { + TTContingencyTable t; -if ( n == 0 ) return 0; + t.set_n_pairs(2803); + t.set_fy_oy(28); + t.set_fn_oy(23); + t.set_fy_on(72); + t.set_fn_on(2680); -int j, a; + t.set_name("Finley Tornado Forecasts (1884)"); -a = (*E)[0]; + return t; +} -for (j=1; j " - << "table not square!\n\n"; - - exit ( 1 ); - -} - -if ( (k < 0) || (k >= Nrows) ) { - - mlog << Error << 
"\nContingencyTable::condition_on() -> " - << "range check error\n\n"; - - exit ( 1 ); - -} - -int r, c; -int n, sum; -TTContingencyTable t; - - // - // - // - -t.set_entry(0, 0, entry(k, k)); - - // - // - // - -sum = 0; - -for (c=0; c " - << "2 x 2 tables cannot be resized!\n\n"; - -exit ( 1 ); - -} - - -//////////////////////////////////////////////////////////////////////// - - -void TTContingencyTable::set_size(int NR, int NC) - -{ - -mlog << Error << "\nTTContingencyTable::set_size(int, int) -> " - << "2 x 2 tables cannot be resized!\n\n"; - -exit ( 1 ); - -} - - -//////////////////////////////////////////////////////////////////////// - - - // - // Code for misc functions - // - - -//////////////////////////////////////////////////////////////////////// - - // - // see table 7.1a, page 242 in wilks - // - -TTContingencyTable finley() - -{ - -TTContingencyTable t; - - -t.set_fy_oy(28); -t.set_fn_oy(23); - -t.set_fy_on(72); -t.set_fn_on(2680); - - -t.set_name("Finley Tornado Forecasts (1884)"); - - -return t; - -} - - -//////////////////////////////////////////////////////////////////////// - - // - // see table 7.1b, page 242 in wilks - // - -TTContingencyTable finley_always_no() - -{ - -TTContingencyTable t; - - -t.set_fy_oy(0); -t.set_fn_oy(51); - -t.set_fy_on(0); -t.set_fn_on(2752); - - -t.set_name("Finley Tornado Forecasts (Always No) (1884)"); - - -return t; - -} - - -//////////////////////////////////////////////////////////////////////// -// r_table < h -// c_table < w - -int table_rc_to_n(int r_table, int c_table, int w, int h) - -{ - -int n; - -n = r_table*w + c_table; - -return n; + // Check for bad data and divide by zero + if(is_bad_data(num) || + is_bad_data(den) || + is_eq(den, 0.0)) { + prop = bad_data_double; + } + else { + prop = num/den; + } + return prop; } - //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_statistics/contable.h b/src/libcode/vx_statistics/contable.h index 
a605300e08..0227e90146 100644 --- a/src/libcode/vx_statistics/contable.h +++ b/src/libcode/vx_statistics/contable.h @@ -6,56 +6,50 @@ // ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - - //////////////////////////////////////////////////////////////////////// - #ifndef __CONTINGENCY_TABLE_H__ #define __CONTINGENCY_TABLE_H__ - //////////////////////////////////////////////////////////////////////// - #include #include "vx_util.h" #include "vx_math.h" - //////////////////////////////////////////////////////////////////////// - -class TTContingencyTable; // forward reference - +// Forward reference +class TTContingencyTable; //////////////////////////////////////////////////////////////////////// - - - // - // general contingency table - // - +// +// General contingency table +// +//////////////////////////////////////////////////////////////////////// class ContingencyTable { - protected: + friend class TTContingencyTable; + friend class Nx2ContingencyTable; - void init_from_scratch(); + void init_from_scratch(); - void assign(const ContingencyTable &); + void assign(const ContingencyTable &); - int rc_to_n(int r, int c) const; + int rc_to_n(int r, int c) const; - std::vector *E; // this is really a two-dimensional array + // This is really a two-dimensional array (Nrows, Ncols) + std::vector E; - int Nrows; - int Ncols; + int Nrows; + int Ncols; - double ECvalue; + int Npairs; + double ECvalue; - ConcatString Name; + ConcatString Name; public: @@ -71,101 +65,76 @@ class ContingencyTable { virtual void dump(std::ostream & out, int depth = 0) const; - // - // condition on an event - // - - TTContingencyTable condition_on(int) const; - - // - // set attributes - // - + // Set attributes virtual void set_size(int); virtual void set_size(int NR, int NC); + void set_n_pairs(int); void set_ec_value(double); void set_name(const char *); - // - // get attributes - // - + // Get attributes int nrows() const; 
int ncols() const; + int n_pairs() const; double ec_value() const; ConcatString name() const; - // - // set counts - // - - void set_entry(int row, int col, int value); - - // - // increment counts - // - - void inc_entry(int row, int col); + // Set table entries + void set_entry(int row, int col, double value); - // - // get counts - // + // Increment table entries + void inc_entry(int row, int col, double weight=default_weight); - int total() const; + // Get values + double total() const; - int row_total(int row) const; - int col_total(int col) const; + double row_total(int row) const; + double col_total(int col) const; - int entry(int row, int col) const; + double entry(int row, int col) const; - int largest_entry() const; - int smallest_entry() const; + double max() const; + double min() const; - // - // statistics - // + bool is_integer() const; + // Statistics virtual double gaccuracy () const; virtual double gheidke () const; virtual double gheidke_ec(double) const; virtual double gkuiper () const; virtual double gerrity () const; - }; //////////////////////////////////////////////////////////////////////// +inline int ContingencyTable::nrows() const { return Nrows; } +inline int ContingencyTable::ncols() const { return Ncols; } -inline int ContingencyTable::nrows() const { return ( Nrows ); } -inline int ContingencyTable::ncols() const { return ( Ncols ); } - -inline double ContingencyTable::ec_value() const { return ( ECvalue ); } -inline ConcatString ContingencyTable::name() const { return ( Name ); } - +inline int ContingencyTable::n_pairs() const { return Npairs; } +inline double ContingencyTable::ec_value() const { return ECvalue; } +inline ConcatString ContingencyTable::name() const { return Name; } //////////////////////////////////////////////////////////////////////// - static const int nx2_event_column = 0; static const int nx2_nonevent_column = 1; - //////////////////////////////////////////////////////////////////////// - - - // - // N x 2 
contingency table - // - +// +// N x 2 contingency table +// +//////////////////////////////////////////////////////////////////////// class Nx2ContingencyTable : public ContingencyTable { private: - double * Thresholds; // N + 1 count, increasing + // N + 1 count, parametrically increasing or decreasing + std::vector Thresholds; int value_to_row(double) const; @@ -182,47 +151,34 @@ class Nx2ContingencyTable : public ContingencyTable { void clear(); - void set_size(int NR); - void set_size(int NR, int NC); // NC had better be 2 - - void set_thresholds(const double *); - - // - // get thresholds - // - - double threshold(int index) const; // 0 <= index <= Nrows + void set_size(int NR) override; + void set_size(int NR, int NC) override; // NC must be 2 - // - // increment counts - // + void set_thresholds(const std::vector &); - void inc_event (double); - void inc_nonevent (double); + // Get thresholds + double threshold(int index) const; // 0 <= index <= Nrows - // - // get counts - // + // Increment table entries + void inc_event (double value, double weight=default_weight); + void inc_nonevent (double value, double weight=default_weight); - int event_count_by_thresh(double) const; - int nonevent_count_by_thresh(double) const; + // Get table entries + double event_total_by_thresh(double) const; + double nonevent_total_by_thresh(double) const; - int event_count_by_row(int row) const; - int nonevent_count_by_row(int row) const; + double event_total_by_row(int row) const; + double nonevent_total_by_row(int row) const; - int n() const; + // Set counts + void set_event(int row, double); + void set_nonevent(int row, double); - // - // column totals - // - - int event_col_total() const; - int nonevent_col_total() const; - - // - // statistics - // + // Column totals + double event_col_total() const; + double nonevent_col_total() const; + // Statistics double baser () const; double baser_ci (double alpha, double &cl, double &cu) const; double brier_score () const; @@ 
-245,27 +201,21 @@ class Nx2ContingencyTable : public ContingencyTable { TTContingencyTable ctc_by_row (int row) const; double roc_auc() const; - }; - //////////////////////////////////////////////////////////////////////// +inline double Nx2ContingencyTable::event_total_by_row (int row) const { return entry(row, nx2_event_column); } +inline double Nx2ContingencyTable::nonevent_total_by_row (int row) const { return entry(row, nx2_nonevent_column); } -inline int Nx2ContingencyTable::event_col_total () const { return ( col_total(nx2_event_column) ); } -inline int Nx2ContingencyTable::nonevent_col_total () const { return ( col_total(nx2_nonevent_column) ); } - -inline int Nx2ContingencyTable::event_count_by_row (int row) const { return ( entry(row, nx2_event_column) ); } -inline int Nx2ContingencyTable::nonevent_count_by_row (int row) const { return ( entry(row, nx2_nonevent_column) ); } - +inline double Nx2ContingencyTable::event_col_total () const { return col_total(nx2_event_column); } +inline double Nx2ContingencyTable::nonevent_col_total () const { return col_total(nx2_nonevent_column); } //////////////////////////////////////////////////////////////////////// - - - // - // 2 x 2 contingency table - // - +// +// 2 x 2 contingency table +// +//////////////////////////////////////////////////////////////////////// class TTContingencyTable : public ContingencyTable { @@ -276,63 +226,45 @@ class TTContingencyTable : public ContingencyTable { TTContingencyTable(const TTContingencyTable &); TTContingencyTable & operator=(const TTContingencyTable &); - void set_size(int); - void set_size(int NR, int NC); - - // - // set counts - // - - void set_fn_on(int); - void set_fy_on(int); - - void set_fn_oy(int); - void set_fy_oy(int); - - // - // increment counts - // - - void inc_fn_on(); - void inc_fy_on(); - - void inc_fn_oy(); - void inc_fy_oy(); + void set_size(int) override; + void set_size(int NR, int NC) override; - // - // get counts - // + // Set table entries + void 
set_fn_on(double); + void set_fy_on(double); - int fn_on() const; - int fy_on() const; + void set_fn_oy(double); + void set_fy_oy(double); - int fn_oy() const; - int fy_oy() const; + // Increment table entries + void inc_fn_on(double weight=default_weight); + void inc_fy_on(double weight=default_weight); - int on() const; - int oy() const; + void inc_fn_oy(double weight=default_weight); + void inc_fy_oy(double weight=default_weight); - int fn() const; - int fy() const; + // Get table entries + double fn_on() const; + double fy_on() const; - int n() const; + double fn_oy() const; + double fy_oy() const; - // - // FHO rates where: - // f_rate = FY/N - // h_rate = fy_oy/N - // o_rate = OY/N - // + double on() const; + double oy() const; - double f_rate () const; - double h_rate () const; - double o_rate () const; + double fn() const; + double fy() const; - // - // Raw counts as proportions of the - // total count. - // + // FHO rates where: + // f_rate = FY/N + // h_rate = fy_oy/N + // o_rate = OY/N + double f_rate() const; + double h_rate() const; + double o_rate() const; + // Proportions of the total double fy_oy_tp () const; double fy_on_tp () const; double fn_oy_tp () const; @@ -344,31 +276,19 @@ class TTContingencyTable : public ContingencyTable { double oy_tp () const; double on_tp () const; - // - // Raw counts as proportions of the - // total forecast yes count. - // - + // Proportions of forecast double fy_oy_fp () const; double fy_on_fp () const; double fn_oy_fp () const; double fn_on_fp () const; - // - // Raw counts as proportions of the - // total observation yes count. - // - + // Proportions of observation double fy_oy_op () const; double fy_on_op () const; double fn_oy_op () const; double fn_on_op () const; - // - // Contingency Table Statistics with confidence intervals - // when applicable. 
- // - + // Contingency Table Statistics and confidence intervals double baser () const; double baser_ci (double alpha, double &cl, double &cu) const; double fmean () const; @@ -409,26 +329,19 @@ class TTContingencyTable : public ContingencyTable { double cost_loss (double) const; }; - //////////////////////////////////////////////////////////////////////// - extern TTContingencyTable finley(); extern TTContingencyTable finley_always_no(); - //////////////////////////////////////////////////////////////////////// - - - // - // this is the layout on page 239 of - // - // "Statistical Methods in the Atmospheric Sciences" (1st ed) - // - // by Daniel S. Wilks - // - +// +// Reference page 239 of +// "Statistical Methods in the Atmospheric Sciences" (1st ed) +// by Daniel S. Wilks +// +//////////////////////////////////////////////////////////////////////// static const int OY_col = 0; static const int ON_col = 1; @@ -436,19 +349,15 @@ static const int ON_col = 1; static const int FY_row = 0; static const int FN_row = 1; - //////////////////////////////////////////////////////////////////////// +extern void calc_gerrity_scoring_matrix(int N, const std::vector &p, + std::vector &s); -extern void calc_gerrity_scoring_matrix(int N, const double * p, double * s); - +extern double compute_proportion(double, double); //////////////////////////////////////////////////////////////////////// - #endif // __CONTINGENCY_TABLE_H__ - //////////////////////////////////////////////////////////////////////// - - diff --git a/src/libcode/vx_statistics/contable_nx2.cc b/src/libcode/vx_statistics/contable_nx2.cc index e94cf60dc4..b41ec7a798 100644 --- a/src/libcode/vx_statistics/contable_nx2.cc +++ b/src/libcode/vx_statistics/contable_nx2.cc @@ -6,11 +6,8 @@ // ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - - //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -26,947 
+23,585 @@ using namespace std; - //////////////////////////////////////////////////////////////////////// - static const int use_center = 1; - -//////////////////////////////////////////////////////////////////////// - - - // - // Code for class Nx2ContingencyTable - // - - //////////////////////////////////////////////////////////////////////// - - -Nx2ContingencyTable::Nx2ContingencyTable() - -{ - -init_from_scratch(); - -} - - +// +// Code for class Nx2ContingencyTable +// //////////////////////////////////////////////////////////////////////// - -Nx2ContingencyTable::~Nx2ContingencyTable() - -{ - -clear(); - +Nx2ContingencyTable::Nx2ContingencyTable() { + init_from_scratch(); } - //////////////////////////////////////////////////////////////////////// - -Nx2ContingencyTable::Nx2ContingencyTable(const Nx2ContingencyTable & t) - -{ - -init_from_scratch(); - -assign(t); - +Nx2ContingencyTable::~Nx2ContingencyTable() { + clear(); } - - //////////////////////////////////////////////////////////////////////// - -void Nx2ContingencyTable::init_from_scratch() - -{ - -ContingencyTable::init_from_scratch(); - -Thresholds = (double *) nullptr; - -clear(); - -return; - +Nx2ContingencyTable::Nx2ContingencyTable(const Nx2ContingencyTable & t) { + init_from_scratch(); + assign(t); } - //////////////////////////////////////////////////////////////////////// - -void Nx2ContingencyTable::clear() - -{ - -ContingencyTable::clear(); - -if ( Thresholds ) { delete [] Thresholds; Thresholds = (double *) nullptr; } - -return; - +void Nx2ContingencyTable::init_from_scratch() { + ContingencyTable::init_from_scratch(); } - //////////////////////////////////////////////////////////////////////// +void Nx2ContingencyTable::clear() { -Nx2ContingencyTable & Nx2ContingencyTable::operator=(const Nx2ContingencyTable & t) - -{ - -if ( this == &t ) return *this; - -assign(t); - -return *this; + ContingencyTable::clear(); + Thresholds.clear(); + return; } - 
//////////////////////////////////////////////////////////////////////// +Nx2ContingencyTable & Nx2ContingencyTable::operator=(const Nx2ContingencyTable &t) { -void Nx2ContingencyTable::assign(const Nx2ContingencyTable & t) - -{ - -clear(); - -ContingencyTable::assign(t); - -if(t.Thresholds) set_thresholds(t.Thresholds); - -return; + if(this == &t) return *this; + assign(t); + return *this; } - //////////////////////////////////////////////////////////////////////// +void Nx2ContingencyTable::assign(const Nx2ContingencyTable & t) { -int Nx2ContingencyTable::n() const - -{ - -int k; + clear(); -k = total(); - -return k; + ContingencyTable::assign(t); + Thresholds = t.Thresholds; + return; } - //////////////////////////////////////////////////////////////////////// - -void Nx2ContingencyTable::set_size(int N) - -{ - -ContingencyTable::set_size(N, 2); - -return; - +void Nx2ContingencyTable::set_size(int N) { + ContingencyTable::set_size(N, 2); + return; } - //////////////////////////////////////////////////////////////////////// +void Nx2ContingencyTable::set_size(int NR, int NC) { -void Nx2ContingencyTable::set_size(int NR, int NC) - -{ - -if ( NC != 2 ) { - - mlog << Error << "\nNx2ContingencyTable::set_size(int, int) -> must have 2 columns!\n\n"; + if(NC != 2) { + mlog << Error << "\nNx2ContingencyTable::set_size(int, int) -> " + << "must have 2 columns, not " << NC << "!\n\n"; + exit(1); + } - exit ( 1 ); + set_size(NR); + return; } -set_size(NR); - -return; - -} - - //////////////////////////////////////////////////////////////////////// +int Nx2ContingencyTable::value_to_row(double t) const { -int Nx2ContingencyTable::value_to_row(double t) const - -{ - -if ( !Thresholds ) { - - mlog << Error << "\nNx2ContingencyTable::value_to_row(double) const -> thresholds array not set!\n\n"; - - exit ( 1 ); - -} - -if ( t < Thresholds[0] && !is_eq(t, Thresholds[0]) ) return -1; - -if ( t > Thresholds[Nrows] && !is_eq(t, Thresholds[Nrows]) ) return -1; - // Thresholds array 
is of size Nrows + 1, so - // the last element has index Nrows, not Nrows - 1 - -int j; - -for (j=0; j Thresholds[j ] || is_eq(t, Thresholds[j ]) ) && - ( t < Thresholds[j + 1] && !is_eq(t, Thresholds[j + 1]) ) ) return j; + if(Thresholds.empty()) { + mlog << Error << "\nNx2ContingencyTable::value_to_row(double) const -> " + << "thresholds array not set!\n\n"; + exit(1); + } -} + // Thresholds array is of size Nrows + 1, so + // the last element has index Nrows, not Nrows - 1 + if(t < Thresholds[0] && !is_eq(t, Thresholds[0])) return -1; + if(t > Thresholds[Nrows] && !is_eq(t, Thresholds[Nrows]) ) return -1; -if ( is_eq(t, Thresholds[Nrows]) ) return ( Nrows - 1 ); + for(int j=0; j Thresholds[j] || is_eq(t, Thresholds[j]) ) && + (t < Thresholds[j + 1] && !is_eq(t, Thresholds[j + 1]))) return j; + } -return -1; + if(is_eq(t, Thresholds[Nrows])) return (Nrows - 1); + return -1; } - //////////////////////////////////////////////////////////////////////// +void Nx2ContingencyTable::set_thresholds(const vector &Values) { -void Nx2ContingencyTable::set_thresholds(const double * Values) - -{ - -if ( E->empty() ) { - - mlog << Error << "\nNx2ContingencyTable::set_thresholds(const double *) -> table empty!\n\n"; - - exit ( 1 ); - -} - -if ( Thresholds ) { delete [] Thresholds; Thresholds = (double *) nullptr; } - -Thresholds = new double [Nrows + 1]; - -memcpy(Thresholds, Values, (Nrows + 1)*sizeof(double)); + if(Values.size() != Nrows + 1) { + mlog << Error << "\nNx2ContingencyTable::set_thresholds(const double *) -> " + << "expected " << Nrows + 1 << " thresholds but only received " + << Values.size() << "!\n\n"; + exit(1); + } -return; + Thresholds = Values; + return; } - //////////////////////////////////////////////////////////////////////// +double Nx2ContingencyTable::threshold(int k) const { -double Nx2ContingencyTable::threshold(int k) const - -{ - -if ( !Thresholds ) { - - mlog << Error << "\nNx2ContingencyTable::threshold(int) const -> no thresholds set!\n\n"; - 
- exit ( 1 ); - -} - -if ( (k < 0) || (k > Nrows) ) { // there are Nrows + 1 thresholds - - mlog << Error << "\nNx2ContingencyTable::threshold(int) const -> range check error\n\n"; - - exit ( 1 ); - -} - -return Thresholds[k]; + // There are Nrows + 1 thresholds + if(k < 0 || k > Thresholds.size()) { + mlog << Error << "\nNx2ContingencyTable::threshold(int) const -> " + << "range check error!\n\n"; + exit(1); + } + return Thresholds[k]; } - //////////////////////////////////////////////////////////////////////// +void Nx2ContingencyTable::inc_event(double t, double weight) { + int r = value_to_row(t); -void Nx2ContingencyTable::inc_event(double t) - -{ - -int r; - -r = value_to_row(t); - -if ( r < 0 ) { - - mlog << Error << "\nNx2ContingencyTable::inc_event(double) -> bad value ... " << t << "\n\n"; - - exit ( 1 ); - -} - -inc_entry(r, nx2_event_column); + if(r < 0) { + mlog << Error << "\nNx2ContingencyTable::inc_event(double) -> " + << "bad value ... " << t << "\n\n"; + exit(1); + } -return; + inc_entry(r, nx2_event_column, weight); + return; } - //////////////////////////////////////////////////////////////////////// +void Nx2ContingencyTable::inc_nonevent(double t, double weight) { + int r = value_to_row(t); -void Nx2ContingencyTable::inc_nonevent(double t) - -{ - -int r; - -r = value_to_row(t); - -if ( r < 0 ) { - - mlog << Error << "\nNx2ContingencyTable::inc_nonevent(double) -> bad value ... " << t << "\n\n"; - - exit ( 1 ); - -} - -inc_entry(r, nx2_nonevent_column); + if(r < 0) { + mlog << Error << "\nNx2ContingencyTable::inc_nonevent(double) -> " + << "bad value ... 
" << t << "\n\n"; + exit(1); + } -return; + inc_entry(r, nx2_nonevent_column, weight); + return; } - //////////////////////////////////////////////////////////////////////// +double Nx2ContingencyTable::event_total_by_thresh(double t) const { + int r = value_to_row(t); -int Nx2ContingencyTable::event_count_by_thresh(double t) const - -{ - -int r; - -r = value_to_row(t); - -if ( r < 0 ) { - - mlog << Error << "\nNx2ContingencyTable::event_count_by_thresh(double) -> bad value ... " << t << "\n\n"; - - exit ( 1 ); - -} - -int k; - -k = entry(r, nx2_event_column); - -return k; + if(r < 0) { + mlog << Error << "\nNx2ContingencyTable::event_total_by_thresh(double) -> " + << "bad value ... " << t << "\n\n"; + exit(1); + } + return entry(r, nx2_event_column); } - //////////////////////////////////////////////////////////////////////// +double Nx2ContingencyTable::nonevent_total_by_thresh(double t) const { + int r = value_to_row(t); -int Nx2ContingencyTable::nonevent_count_by_thresh(double t) const - -{ - -int r; - -r = value_to_row(t); - -if ( r < 0 ) { - - mlog << Error << "\nNx2ContingencyTable::nonevent_count_by_thresh(double) -> bad value ... " << t << "\n\n"; - - exit ( 1 ); - -} - -int k; - -k = entry(r, nx2_nonevent_column); - -return k; + if(r < 0) { + mlog << Error << "\nNx2ContingencyTable::nonevent_total_by_thresh(double) -> " + << "bad value ... " << t << "\n\n"; + exit(1); + } + return entry(r, nx2_nonevent_column); } - //////////////////////////////////////////////////////////////////////// +void Nx2ContingencyTable::set_event(int row, double value) { -double Nx2ContingencyTable::row_obar(int row) const + if(row < 0 || row >= Nrows) { + mlog << Error << "\nNx2ContingencyTable::set_event(int, double) -> " + << "bad row index ... 
" << row << "\n\n"; + exit(1); + } -{ + // Number of pairs defined by set_n_pairs(int) -const int obs_count = event_count_by_row(row); -const int Ni = row_total(row); -double x; - -if(Ni == 0) x = bad_data_double; -else x = ((double) obs_count)/((double) Ni); - -return x; + set_entry(row, nx2_event_column, value); + return; } - //////////////////////////////////////////////////////////////////////// +void Nx2ContingencyTable::set_nonevent(int row, double value) { -double Nx2ContingencyTable::obar() const + if(row < 0 || row >= Nrows) { + mlog << Error << "\nNx2ContingencyTable::set_nonevent(int, double) -> " + << "bad row index ... " << row << "\n\n"; + exit(1); + } -{ + // Number of pairs defined by set_n_pairs(int) -const int obs_count = event_col_total(); -const int N = n(); -double x; - -if (N == 0) x = bad_data_double; -else x = ((double) obs_count)/((double) N); - -return x; + set_entry(row, nx2_nonevent_column, value); + return; } - //////////////////////////////////////////////////////////////////////// - -double Nx2ContingencyTable::row_proby(int row) const - -{ - -if ( (row < 0) || (row >= Nrows) ) { - - mlog << Error << "\nNx2ContingencyTable::row_proby(int) const -> range check error\n\n"; - - exit ( 1 ); - -} - -double x; - -if ( use_center ) x = 0.5*(Thresholds[row] + Thresholds[row + 1]); -else x = Thresholds[row]; - -return x; - -} - - -//////////////////////////////////////////////////////////////////////// - - double Nx2ContingencyTable::baser() const { - - return (double) event_col_total()/n(); + return compute_proportion(event_col_total(), total()); } - //////////////////////////////////////////////////////////////////////// - double Nx2ContingencyTable::baser_ci(double alpha, - double &cl, double &cu) const { - double v; - - v = baser(); + double &cl, double &cu) const { + double v = baser(); - compute_proportion_ci(v, n(), alpha, 1.0, cl, cu); + compute_proportion_ci(v, Npairs, alpha, 1.0, cl, cu); return v; } - 
//////////////////////////////////////////////////////////////////////// +double Nx2ContingencyTable::brier_score() const { - // - // Reference: Equation 7.40, page 286 in Wilks, 2nd Ed. - // - -double Nx2ContingencyTable::reliability() const - -{ - -int row; -double sum; -const int N = n(); -int Ni; -double yi, obari, t; - - -sum = 0.0; - -for (row=0; row 1 so that degf > 0 in the call to gsl_cdf_tdist_Pinv() + if(is_bad_data(bs) || N <= 1.0) return bad_data_double; - obari = row_obar(row); + double degf = N - 1.0; + double t = gsl_cdf_tdist_Pinv(1.0 - 0.5*alpha, degf); + double ob = obar(); - // When obari is not defined, don't include it in the sum - if(is_bad_data(obari)) continue; + double af1 = 0.0; + double sf2 = 0.0; + double sf3 = 0.0; + double af4 = 0.0; - t = obari - Obar; + for(int j=0; j= Nrows) ) { - - mlog << Error << "\nNx2ContingencyTable::row_calibration(int) const -> range check error\n\n"; - - exit ( 1 ); - -} - -double num, denom; -double x; - -num = (double) event_count_by_row(row); - -denom = num + nonevent_count_by_row(row); - -if(is_eq(denom, 0.0)) x = bad_data_double; -else x = num/denom; +double Nx2ContingencyTable::uncertainty() const { + double a = obar(); + double v; -return x; + if(is_bad_data(a)) v = bad_data_double; + else v = a*(1.0 - a); + return v; } - +//////////////////////////////////////////////////////////////////////// +// +// Reference: Equation 8.43, page 340 in Wilks, 3rd Ed. 
+// //////////////////////////////////////////////////////////////////////// +double Nx2ContingencyTable::bss_smpl() const { + double res = resolution(); + double rel = reliability(); + double unc = uncertainty(); + double bss; -double Nx2ContingencyTable::row_refinement(int row) const - -{ - -if ( (row < 0) || (row >= Nrows) ) { - - mlog << Error << "\nNx2ContingencyTable::row_refinement(int) const -> range check error\n\n"; - - exit ( 1 ); - -} - -int py_o1, py_o2; -const int N = n(); -double x; - -py_o1 = event_count_by_row(row); -py_o2 = nonevent_count_by_row(row); - -x = (double) (py_o1 + py_o2); - -if (N == 0) x = bad_data_double; -else x /= N; - -return x; + if(is_bad_data(res) || is_bad_data(rel) || + is_bad_data(unc) || is_eq(unc, 0.0)) { + bss = bad_data_double; + } + else { + bss = (res - rel)/unc; + } + return bss; } - //////////////////////////////////////////////////////////////////////// - -double Nx2ContingencyTable::row_event_likelihood(int row) const - -{ - -if ( (row < 0) || (row >= Nrows) ) { - - mlog << Error << "\nNx2ContingencyTable::row_event_likelihood(int) const -> range check error\n\n"; - - exit ( 1 ); - +double Nx2ContingencyTable::row_obar(int row) const { + return compute_proportion(event_total_by_row(row), row_total(row)); } -double x, num, denom; - -denom = (double) event_col_total(); - -num = (double) event_count_by_row(row); - -if(is_eq(denom, 0.0)) x = bad_data_double; -else x = num/denom; - -return x; +//////////////////////////////////////////////////////////////////////// +double Nx2ContingencyTable::obar() const { + return compute_proportion(event_col_total(), total()); } - //////////////////////////////////////////////////////////////////////// +double Nx2ContingencyTable::row_proby(int row) const { -double Nx2ContingencyTable::row_nonevent_likelihood(int row) const - -{ - -if ( (row < 0) || (row >= Nrows) ) { + if(row < 0 || row >= Nrows) { + mlog << Error << "\nNx2ContingencyTable::row_proby(int) const -> " + << "range 
check error\n\n"; + exit(1); + } - mlog << Error << "\nNx2ContingencyTable::row_nonevent_likelihood(int) const -> range check error\n\n"; + double v; - exit ( 1 ); + if(use_center) v = 0.5*(Thresholds[row] + Thresholds[row + 1]); + else v = Thresholds[row]; + return v; } -double x, num, denom; - -denom = (double) nonevent_col_total(); +//////////////////////////////////////////////////////////////////////// -num = (double) nonevent_count_by_row(row); +double Nx2ContingencyTable::row_calibration(int row) const { -if(is_eq(denom, 0.0)) x = bad_data_double; -else x = num/denom; + if(row < 0 || row >= Nrows) { + mlog << Error << "\nNx2ContingencyTable::row_calibration(int) const -> " + << "range check error\n\n"; + exit(1); + } -return x; + double num = event_total_by_row(row); + double den = num + nonevent_total_by_row(row); + return compute_proportion(num, den); } - //////////////////////////////////////////////////////////////////////// +double Nx2ContingencyTable::row_refinement(int row) const { -TTContingencyTable Nx2ContingencyTable::ctc_by_row(int row) const - -{ - -TTContingencyTable tt; - -if ( (row < 0) || (row >= Nrows) ) { - - mlog << Error << "\nNx2ContingencyTable::ctc_by_row(int) const -> range check error\n\n"; - - exit ( 1 ); + if(row < 0 || row >= Nrows) { + mlog << Error << "\nNx2ContingencyTable::row_refinement(int) const -> " + << "range check error\n\n"; + exit(1); + } + return compute_proportion( event_total_by_row(row) + + nonevent_total_by_row(row), total()); } -int j; -int sy, sn; - - /////////////////// - -sy = sn = 0; +//////////////////////////////////////////////////////////////////////// -for (j=(row + 1); j= Nrows) { + mlog << Error << "\nNx2ContingencyTable::row_event_likelihood(int) const -> " + << "range check error\n\n"; + exit(1); + } + return compute_proportion(event_total_by_row(row), + event_col_total()); } -tt.set_fy_oy(sy); -tt.set_fy_on(sn); - - /////////////////// - -sy = sn = 0; - -for (j=0; j<=row; ++j) { - - sy += 
event_count_by_row(j); - sn += nonevent_count_by_row(j); - -} +//////////////////////////////////////////////////////////////////////// -tt.set_fn_oy(sy); -tt.set_fn_on(sn); - /////////////////// +double Nx2ContingencyTable::row_nonevent_likelihood(int row) const { -return tt; + if(row < 0 || row >= Nrows) { + mlog << Error << "\nNx2ContingencyTable::row_nonevent_likelihood(int) const -> " + << "range check error\n\n"; + exit(1); + } + return compute_proportion(nonevent_total_by_row(row), + nonevent_col_total()); } - //////////////////////////////////////////////////////////////////////// +TTContingencyTable Nx2ContingencyTable::ctc_by_row(int row) const { + TTContingencyTable tt; -double Nx2ContingencyTable::roc_auc() const - -{ + if(row < 0 || row >= Nrows) { + mlog << Error << "\nNx2ContingencyTable::ctc_by_row(int) const -> " + << "range check error\n\n"; + exit(1); + } -int j; -TTContingencyTable ct; -double area, x_prev, y_prev, x, y; + // Store the number of pairs + tt.set_n_pairs(Npairs); -x_prev = y_prev = 1.0; + double sy = 0.0; + double sn = 0.0; -for(j=0, area=bad_data_double; j 1 so that degf > 0 in the call to gsl_cdf_tdist_Pinv() - -if(is_bad_data(bs = brier_score()) || N <= 1) return bad_data_double; - -degf = N - 1.0; - -t = gsl_cdf_tdist_Pinv(1.0 - 0.5*alpha, degf); - -ob = obar(); - -af1 = sf2 = sf3 = af4 = 0.0; -for (j=0; j " << "table not square!\n\n"; - - exit ( 1 ); - + exit(1); } - for(i=0, num=0.0; i " - << "table not square!\n\n"; - - exit ( 1 ); - -} - -const int N = total(); - - // - // MET #2542: return bad data for empty tables rather than erroring out - // - -if ( N == 0 ) return bad_data_double; - -const double DN = (double) N; -int j, k, m, n; -double num, denom, sum, ans; - - // - // first term in numerator - // - -sum = 0.0; - -for (j=0; j " + << "table not square!\n\n"; + exit(1); + } - n = rc_to_n(j, j); + // MET #2542: return bad data for empty tables rather than erroring out + if(E.empty()) return bad_data_double; - sum += 
((*E)[n])/DN; + // First term in numerator + const double DN = total(); + double sum = 0.0; -} + for(int j=0; j " - << "table not square!\n\n"; - - exit ( 1 ); - -} - -if ( ec_value < 0.0 || ec_value >= 1.0 ) { - - mlog << Error << "\nContingencyTable::gheidke_ec(double) -> " - << "ec_value (" << ec_value << ") must be >=0 and <1.0!\n\n"; - - exit ( 1 ); - -} - -const int N = total(); - - // - // MET #2542: return bad data for empty tables rather than erroring out - // - -if ( N == 0 ) return bad_data_double; - -int j, sum; -double num, denom, ans; - - // - // sum counts on the diagonal - // - -for (j=0, sum=0; j " + << "table not square!\n\n"; + exit(1); + } -num = (double) sum - ec; -denom = (double) N - ec; + if(ec_value < 0.0 || ec_value >= 1.0) { + mlog << Error << "\nContingencyTable::gheidke_ec() -> " + << "ec_value (" << ec_value << ") must be >=0 and <1.0!\n\n"; + exit(1); + } - // - // result - // + // MET #2542: return bad data for empty tables rather than erroring out + if(E.empty()) return bad_data_double; -if (is_eq(denom, 0.0)) ans = bad_data_double; -else ans = num/denom; + // Sum entries on the diagonal + double sum = 0.0; + for(int j=0; j " - << "table not square!\n\n"; - - exit ( 1 ); - -} - -const int N = total(); - - // - // MET #2542: return bad data for empty tables rather than erroring out - // - -if ( N == 0 ) return bad_data_double; - -const double DN = (double) N; -int j, k, m, n; -double num, denom, sum, t, ans; - - // - // first term in numerator - // - -sum = 0.0; - -for (j=0; j " - << "table not square!\n\n"; - - exit ( 1 ); - -} - -const int N = total(); - - // - // MET #2542: return bad data for empty tables rather than erroring out - // - -if ( N == 0 ) return bad_data_double; - -int j, k, m, n; -const double DN = (double) N; -double t, sum; -double * p = (double *) nullptr; -double * s = (double *) nullptr; + if(Nrows != Ncols) { + mlog << Error << "\nContingencyTable::gkuiper() -> " + << "table not square!\n\n"; + exit(1); + } - 
// - // can't compute gerrity when the first column contains all zeros - // + // MET #2542: return bad data for empty tables rather than erroring out + if(E.empty()) return bad_data_double; -if ( col_total(0) == 0 ) return bad_data_double; + const double DN = total(); -p = new double [Nrows]; + // First term in numerator + double sum = 0.0; + for(int j=0; j " + << "table not square!\n\n"; + exit(1); } -} + // MET #2542: return bad data for empty tables rather than erroring out + if(E.empty()) return bad_data_double; - // - // replace nan with bad data - // + double DN = total(); - if (std::isnan(sum)) sum = bad_data_double; + // Can't compute gerrity when the first column contains all zeros + if(is_eq(col_total(0), 0.0)) return bad_data_double; - // - // done - // + // the p array + vector p(Nrows); + for(int j=0; j s(Nrows*Nrows); + calc_gerrity_scoring_matrix(Nrows, p, s); + + // Calculate score + double sum = 0.0; + for(int j=0; j &p, + vector &s) { + double b = 1.0/(N - 1.0); -void calc_gerrity_scoring_matrix(int N, const double * p, double * s) - -{ - -int j, k, n; -double b, t, sum; -double * a = (double *) nullptr; -double * recip_sum = (double *) nullptr; -double * direct_sum = (double *) nullptr; - - -a = new double [N]; -recip_sum = new double [N]; -direct_sum = new double [N]; - -b = 1.0/(N - 1.0); - - - // - // the a array - // - -sum = 0.0; - -for (j=0; j a(N); + double sum = 0.0; + for(int j=0; j recip_sum(N); + recip_sum[0] = 0.0; + sum = 0.0; + for(int j=1; j=0; --j) { - - sum += a[j]; - - direct_sum[j] = sum; - -} - - // - // entries of the scoring matrix - // - -for (j=0; j direct_sum(N); + direct_sum[N - 1] = 0.0; + sum = 0.0; + for(int j=(N - 2); j>=0; --j) { + sum += a[j]; + direct_sum[j] = sum; } -} + // Entries of the scoring matrix + for(int j=0; j " + << "unexpected line type \"" << statlinetype_to_string(lt) + << "\"!\n\n"; + exit(1); + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double 
CTSInfo::get_stat_fho(const string &stat_name) const { + double v = bad_data_double; + + // Find the statistic by name + if(stat_name == "TOTAL" ) v = (double) cts.n_pairs(); + else if(stat_name == "F_RATE") v = cts.f_rate(); + else if(stat_name == "H_RATE") v = cts.h_rate(); + else if(stat_name == "O_RATE") v = cts.o_rate(); + else { + mlog << Error << "\nCTSInfo::get_stat_fho() -> " + << "unknown categorical statistic name \"" << stat_name + << "\"!\n\n"; + exit(1); + } + + // Return bad data for 0 pairs + if(cts.n_pairs() == 0 && stat_name != "TOTAL") { + v = bad_data_double; + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double CTSInfo::get_stat_ctc(const string &stat_name) const { + double v = bad_data_double; + + // Find the statistic by name + if(stat_name == "TOTAL" ) v = (double) cts.n_pairs(); + else if(stat_name == "FY_OY" ) v = cts.fy_oy(); + else if(stat_name == "FY_ON" ) v = cts.fy_on(); + else if(stat_name == "FN_OY" ) v = cts.fn_oy(); + else if(stat_name == "FN_ON" ) v = cts.fn_on(); + else if(stat_name == "EC_VALUE") v = cts.ec_value(); + else { + mlog << Error << "\nCTSInfo::get_stat_ctc() -> " + << "unknown categorical statistic name \"" << stat_name + << "\"!\n\n"; + exit(1); + } + + // Return bad data for 0 pairs + if(cts.n_pairs() == 0 && stat_name != "TOTAL") { + v = bad_data_double; + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double CTSInfo::get_stat_cts(const string &stat_name, int i_alpha) const { + double v = bad_data_double; + + // Range check alpha index + if(i_alpha >= n_alpha && is_ci_stat_name(stat_name)) { + mlog << Error << "\nCTSInfo::get_stat_cts() -> " + << "alpha index out of range (" << i_alpha << " >= " + << n_alpha << ")!\n\n"; + exit(1); + } + + // Find the statistic by name + if(stat_name == "TOTAL" ) v = (double) cts.n_pairs(); + else if(stat_name == "BASER" ) v = baser.v; + else if(stat_name == "BASER_NCL" ) v = 
baser.v_ncl[i_alpha]; + else if(stat_name == "BASER_NCU" ) v = baser.v_ncu[i_alpha]; + else if(stat_name == "BASER_BCL" ) v = baser.v_bcl[i_alpha]; + else if(stat_name == "BASER_BCU" ) v = baser.v_bcu[i_alpha]; + else if(stat_name == "FMEAN" ) v = fmean.v; + else if(stat_name == "FMEAN_NCL" ) v = fmean.v_ncl[i_alpha]; + else if(stat_name == "FMEAN_NCU" ) v = fmean.v_ncu[i_alpha]; + else if(stat_name == "FMEAN_BCL" ) v = fmean.v_bcl[i_alpha]; + else if(stat_name == "FMEAN_BCU" ) v = fmean.v_bcu[i_alpha]; + else if(stat_name == "ACC" ) v = acc.v; + else if(stat_name == "ACC_NCL" ) v = acc.v_ncl[i_alpha]; + else if(stat_name == "ACC_NCU" ) v = acc.v_ncu[i_alpha]; + else if(stat_name == "ACC_BCL" ) v = acc.v_bcl[i_alpha]; + else if(stat_name == "ACC_BCU" ) v = acc.v_bcu[i_alpha]; + else if(stat_name == "FBIAS" ) v = fbias.v; + else if(stat_name == "FBIAS_BCL" ) v = fbias.v_bcl[i_alpha]; + else if(stat_name == "FBIAS_BCU" ) v = fbias.v_bcu[i_alpha]; + else if(stat_name == "PODY" ) v = pody.v; + else if(stat_name == "PODY_NCL" ) v = pody.v_ncl[i_alpha]; + else if(stat_name == "PODY_NCU" ) v = pody.v_ncu[i_alpha]; + else if(stat_name == "PODY_BCL" ) v = pody.v_bcl[i_alpha]; + else if(stat_name == "PODY_BCU" ) v = pody.v_bcu[i_alpha]; + else if(stat_name == "PODN" ) v = podn.v; + else if(stat_name == "PODN_NCL" ) v = podn.v_ncl[i_alpha]; + else if(stat_name == "PODN_NCU" ) v = podn.v_ncu[i_alpha]; + else if(stat_name == "PODN_BCL" ) v = podn.v_bcl[i_alpha]; + else if(stat_name == "PODN_BCU" ) v = podn.v_bcu[i_alpha]; + else if(stat_name == "POFD" ) v = pofd.v; + else if(stat_name == "POFD_NCL" ) v = pofd.v_ncl[i_alpha]; + else if(stat_name == "POFD_NCU" ) v = pofd.v_ncu[i_alpha]; + else if(stat_name == "POFD_BCL" ) v = pofd.v_bcl[i_alpha]; + else if(stat_name == "POFD_BCU" ) v = pofd.v_bcu[i_alpha]; + else if(stat_name == "FAR" ) v = far.v; + else if(stat_name == "FAR_NCL" ) v = far.v_ncl[i_alpha]; + else if(stat_name == "FAR_NCU" ) v = far.v_ncu[i_alpha]; + else 
if(stat_name == "FAR_BCL" ) v = far.v_bcl[i_alpha]; + else if(stat_name == "FAR_BCU" ) v = far.v_bcu[i_alpha]; + else if(stat_name == "CSI" ) v = csi.v; + else if(stat_name == "CSI_NCL" ) v = csi.v_ncl[i_alpha]; + else if(stat_name == "CSI_NCU" ) v = csi.v_ncu[i_alpha]; + else if(stat_name == "CSI_BCL" ) v = csi.v_bcl[i_alpha]; + else if(stat_name == "CSI_BCU" ) v = csi.v_bcu[i_alpha]; + else if(stat_name == "GSS" ) v = gss.v; + else if(stat_name == "GSS_BCL" ) v = gss.v_bcl[i_alpha]; + else if(stat_name == "GSS_BCU" ) v = gss.v_bcu[i_alpha]; + else if(stat_name == "HK" ) v = hk.v; + else if(stat_name == "HK_NCL" ) v = hk.v_ncl[i_alpha]; + else if(stat_name == "HK_NCU" ) v = hk.v_ncu[i_alpha]; + else if(stat_name == "HK_BCL" ) v = hk.v_bcl[i_alpha]; + else if(stat_name == "HK_BCU" ) v = hk.v_bcu[i_alpha]; + else if(stat_name == "HSS" ) v = hss.v; + else if(stat_name == "HSS_BCL" ) v = hss.v_bcl[i_alpha]; + else if(stat_name == "HSS_BCU" ) v = hss.v_bcu[i_alpha]; + else if(stat_name == "ODDS" ) v = odds.v; + else if(stat_name == "ODDS_NCL" ) v = odds.v_ncl[i_alpha]; + else if(stat_name == "ODDS_NCU" ) v = odds.v_ncu[i_alpha]; + else if(stat_name == "ODDS_BCL" ) v = odds.v_bcl[i_alpha]; + else if(stat_name == "ODDS_BCU" ) v = odds.v_bcu[i_alpha]; + else if(stat_name == "LODDS" ) v = lodds.v; + else if(stat_name == "LODDS_NCL" ) v = lodds.v_ncl[i_alpha]; + else if(stat_name == "LODDS_NCU" ) v = lodds.v_ncu[i_alpha]; + else if(stat_name == "LODDS_BCL" ) v = lodds.v_bcl[i_alpha]; + else if(stat_name == "LODDS_BCU" ) v = lodds.v_bcu[i_alpha]; + else if(stat_name == "ORSS" ) v = orss.v; + else if(stat_name == "ORSS_NCL" ) v = orss.v_ncl[i_alpha]; + else if(stat_name == "ORSS_NCU" ) v = orss.v_ncu[i_alpha]; + else if(stat_name == "ORSS_BCL" ) v = orss.v_bcl[i_alpha]; + else if(stat_name == "ORSS_BCU" ) v = orss.v_bcu[i_alpha]; + else if(stat_name == "EDS" ) v = eds.v; + else if(stat_name == "EDS_NCL" ) v = eds.v_ncl[i_alpha]; + else if(stat_name == "EDS_NCU" ) v = 
eds.v_ncu[i_alpha]; + else if(stat_name == "EDS_BCL" ) v = eds.v_bcl[i_alpha]; + else if(stat_name == "EDS_BCU" ) v = eds.v_bcu[i_alpha]; + else if(stat_name == "SEDS" ) v = seds.v; + else if(stat_name == "SEDS_NCL" ) v = seds.v_ncl[i_alpha]; + else if(stat_name == "SEDS_NCU" ) v = seds.v_ncu[i_alpha]; + else if(stat_name == "SEDS_BCL" ) v = seds.v_bcl[i_alpha]; + else if(stat_name == "SEDS_BCU" ) v = seds.v_bcu[i_alpha]; + else if(stat_name == "EDI" ) v = edi.v; + else if(stat_name == "EDI_NCL" ) v = edi.v_ncl[i_alpha]; + else if(stat_name == "EDI_NCU" ) v = edi.v_ncu[i_alpha]; + else if(stat_name == "EDI_BCL" ) v = edi.v_bcl[i_alpha]; + else if(stat_name == "EDI_BCU" ) v = edi.v_bcu[i_alpha]; + else if(stat_name == "SEDI" ) v = sedi.v; + else if(stat_name == "SEDI_NCL" ) v = sedi.v_ncl[i_alpha]; + else if(stat_name == "SEDI_NCU" ) v = sedi.v_ncu[i_alpha]; + else if(stat_name == "SEDI_BCL" ) v = sedi.v_bcl[i_alpha]; + else if(stat_name == "SEDI_BCU" ) v = sedi.v_bcu[i_alpha]; + else if(stat_name == "BAGSS" ) v = bagss.v; + else if(stat_name == "BAGSS_BCL" ) v = bagss.v_bcl[i_alpha]; + else if(stat_name == "BAGSS_BCU" ) v = bagss.v_bcu[i_alpha]; + else if(stat_name == "HSS_EC" ) v = hss_ec.v; + else if(stat_name == "HSS_EC_BCL") v = hss_ec.v_bcl[i_alpha]; + else if(stat_name == "HSS_EC_BCU") v = hss_ec.v_bcu[i_alpha]; + else if(stat_name == "EC_VALUE" ) v = cts.ec_value(); + else { + mlog << Error << "\nCTSInfo::get_stat_cts() -> " << "unknown categorical statistic name \"" << stat_name << "\"!\n\n"; exit(1); } // Return bad data for 0 pairs - if(cts.n() == 0 && strcmp(stat_name, "TOTAL") != 0) { + if(cts.n_pairs() == 0 && stat_name != "TOTAL") { v = bad_data_double; } @@ -613,22 +774,15 @@ void MCTSInfo::set_othresh(const ThreshArray &ta) { //////////////////////////////////////////////////////////////////////// -void MCTSInfo::add(double f, double o) { - add(f, o, bad_data_double, bad_data_double); - return; -} - 
-//////////////////////////////////////////////////////////////////////// - -void MCTSInfo::add(double f, double o, double cmn, double csd) { +void MCTSInfo::add(double f, double o, double wgt, const ClimoPntInfo *cpi) { int r, c; // Find the row and column for the forecast and observation values. - r = fthresh.check_bins(f, cmn, csd); - c = othresh.check_bins(o, cmn, csd); + r = fthresh.check_bins(f, cpi); + c = othresh.check_bins(o, cpi); // Increment the corresponding contingency table entry. - cts.inc_entry(r, c); + cts.inc_entry(r, c, wgt); return; } @@ -660,13 +814,129 @@ void MCTSInfo::compute_ci() { // Compute confidence intervals for the scores based on // proportions // - compute_proportion_ci(acc.v, cts.total(), alpha[i], acc.vif, + compute_proportion_ci(acc.v, cts.n_pairs(), alpha[i], acc.vif, acc.v_ncl[i], acc.v_ncu[i]); } // end for i return; } +//////////////////////////////////////////////////////////////////////// + +double MCTSInfo::get_stat(STATLineType lt, + const string &stat_name, + ConcatString &col_name, + int i_alpha) const { + double v = bad_data_double; + + // Initialize + col_name = stat_name; + + // Get statistic by line type + if(lt == STATLineType::mctc) v = get_stat_mctc(stat_name, col_name); + else if(lt == STATLineType::mcts) v = get_stat_mcts(stat_name, i_alpha); + else { + mlog << Error << "\nMCTSInfo::get_stat() -> " + << "unexpected line type \"" << statlinetype_to_string(lt) + << "\"!\n\n"; + exit(1); + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double MCTSInfo::get_stat_mctc(const string &stat_name, + ConcatString &col_name) const { + double v = bad_data_double; + col_name = stat_name; + + // Find the statistic by name + if(stat_name == "TOTAL" ) v = (double) cts.n_pairs(); + else if(stat_name == "N_CAT" ) v = (double) cts.nrows(); + else if(stat_name == "EC_VALUE") v = cts.ec_value(); + else if(check_reg_exp("F[0-9]*_O[0-9]*", stat_name.c_str())) { + + col_name = 
"FI_OJ"; + + // Parse column name to retrieve index values + ConcatString cs(stat_name); + StringArray sa = cs.split("_"); + int i = atoi(sa[0].c_str()+1) - 1; + int j = atoi(sa[1].c_str()+1) - 1; + + // Range check + if(i < 0 || i >= cts.nrows() || + j < 0 || j >= cts.ncols()) { + mlog << Error << "\nget_stat_mctc() -> " + << "range check error for column name requested \"" << stat_name + << "\"\n\n"; + exit(1); + } + + // Retrieve the value + v = (double) cts.entry(i, j); + } + else { + mlog << Error << "\nMCTSInfo::get_stat_mctc() -> " + << "unknown multi-category statistic name \"" << stat_name + << "\"!\n\n"; + exit(1); + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double MCTSInfo::get_stat_mcts(const string &stat_name, int i_alpha) const { + double v = bad_data_double; + + // Range check alpha index + if(i_alpha >= n_alpha && is_ci_stat_name(stat_name)) { + mlog << Error << "\nMCTSInfo::get_stat_mcts() -> " + << "alpha index out of range (" << i_alpha << " >= " + << n_alpha << ")!\n\n"; + exit(1); + } + + // Find the statistic by name + if(stat_name == "TOTAL" ) v = (double) cts.n_pairs(); + else if(stat_name == "N_CAT" ) v = (double) cts.nrows(); + else if(stat_name == "ACC" ) v = acc.v; + else if(stat_name == "ACC_NCL" ) v = acc.v_ncl[i_alpha]; + else if(stat_name == "ACC_NCU" ) v = acc.v_ncu[i_alpha]; + else if(stat_name == "ACC_BCL" ) v = acc.v_bcl[i_alpha]; + else if(stat_name == "ACC_BCU" ) v = acc.v_bcu[i_alpha]; + else if(stat_name == "HK" ) v = hk.v; + else if(stat_name == "HK_BCL" ) v = hk.v_bcl[i_alpha]; + else if(stat_name == "HK_BCU" ) v = hk.v_bcu[i_alpha]; + else if(stat_name == "HSS" ) v = hss.v; + else if(stat_name == "HSS_BCL" ) v = hss.v_bcl[i_alpha]; + else if(stat_name == "HSS_BCU" ) v = hss.v_bcu[i_alpha]; + else if(stat_name == "GER" ) v = ger.v; + else if(stat_name == "GER_BCL" ) v = ger.v_bcl[i_alpha]; + else if(stat_name == "GER_BCU" ) v = ger.v_bcu[i_alpha]; + else if(stat_name 
== "HSS_EC" ) v = hss_ec.v; + else if(stat_name == "HSS_EC_BCL") v = hss_ec.v_bcl[i_alpha]; + else if(stat_name == "HSS_EC_BCU") v = hss_ec.v_bcu[i_alpha]; + else if(stat_name == "EC_VALUE" ) v = cts.ec_value(); + else { + mlog << Error << "\nMCTSInfo::get_stat_mcts() -> " + << "unknown multi-category statistic name \"" << stat_name + << "\"!\n\n"; + exit(1); + } + + // Return bad data for 0 pairs + if(cts.n_pairs() == 0 && stat_name != "TOTAL") { + v = bad_data_double; + } + + return v; +} + //////////////////////////////////////////////////////////////////////// // // Code for class CNTInfo @@ -716,6 +986,44 @@ void CNTInfo::init_from_scratch() { //////////////////////////////////////////////////////////////////////// +void CNTInfo::zero_out() { + + fbar.set_bad_data(); + fstdev.set_bad_data(); + obar.set_bad_data(); + ostdev.set_bad_data(); + pr_corr.set_bad_data(); + sp_corr.set_bad_data(); + kt_corr.set_bad_data(); + anom_corr.set_bad_data(); + rmsfa.set_bad_data(); + rmsoa.set_bad_data(); + anom_corr_uncntr.set_bad_data(); + me.set_bad_data(); + me2.set_bad_data(); + estdev.set_bad_data(); + mbias.set_bad_data(); + mae.set_bad_data(); + mse.set_bad_data(); + msess.set_bad_data(); + bcmse.set_bad_data(); + rmse.set_bad_data(); + si.set_bad_data(); + e10.set_bad_data(); + e25.set_bad_data(); + e50.set_bad_data(); + e75.set_bad_data(); + e90.set_bad_data(); + eiqr.set_bad_data(); + mad.set_bad_data(); + + n_ranks = frank_ties = orank_ties = 0; + + return; +} + +//////////////////////////////////////////////////////////////////////// + void CNTInfo::clear() { n = 0; @@ -1037,42 +1345,121 @@ void CNTInfo::compute_ci() { //////////////////////////////////////////////////////////////////////// -double CNTInfo::get_stat(const char *stat_name) { +double CNTInfo::get_stat(const string &stat_name, int i_alpha) const { double v = bad_data_double; + // Range check alpha index + if(i_alpha >= n_alpha && is_ci_stat_name(stat_name)) { + mlog << Error << 
"\nCNTInfo::get_stat() -> " + << "alpha index out of range (" << i_alpha << " >= " + << n_alpha << ")!\n\n"; + exit(1); + } + // Find the statistic by name - if(strcmp(stat_name, "TOTAL" ) == 0) v = n; - else if(strcmp(stat_name, "FBAR" ) == 0) v = fbar.v; - else if(strcmp(stat_name, "FSTDEV" ) == 0) v = fstdev.v; - else if(strcmp(stat_name, "OBAR" ) == 0) v = obar.v; - else if(strcmp(stat_name, "OSTDEV" ) == 0) v = ostdev.v; - else if(strcmp(stat_name, "PR_CORR" ) == 0) v = pr_corr.v; - else if(strcmp(stat_name, "SP_CORR" ) == 0) v = sp_corr.v; - else if(strcmp(stat_name, "KT_CORR" ) == 0) v = kt_corr.v; - else if(strcmp(stat_name, "RANKS" ) == 0) v = n_ranks; - else if(strcmp(stat_name, "FRANK_TIES" ) == 0) v = frank_ties; - else if(strcmp(stat_name, "ORANK_TIES" ) == 0) v = orank_ties; - else if(strcmp(stat_name, "ME" ) == 0) v = me.v; - else if(strcmp(stat_name, "ESTDEV" ) == 0) v = estdev.v; - else if(strcmp(stat_name, "MBIAS" ) == 0) v = mbias.v; - else if(strcmp(stat_name, "MAE" ) == 0) v = mae.v; - else if(strcmp(stat_name, "MSE" ) == 0) v = mse.v; - else if(strcmp(stat_name, "BCMSE" ) == 0) v = bcmse.v; - else if(strcmp(stat_name, "RMSE" ) == 0) v = rmse.v; - else if(strcmp(stat_name, "SI" ) == 0) v = si.v; - else if(strcmp(stat_name, "E10" ) == 0) v = e10.v; - else if(strcmp(stat_name, "E25" ) == 0) v = e25.v; - else if(strcmp(stat_name, "E50" ) == 0) v = e50.v; - else if(strcmp(stat_name, "E75" ) == 0) v = e75.v; - else if(strcmp(stat_name, "E90" ) == 0) v = e90.v; - else if(strcmp(stat_name, "EIQR" ) == 0) v = eiqr.v; - else if(strcmp(stat_name, "MAD " ) == 0) v = mad.v; - else if(strcmp(stat_name, "ANOM_CORR" ) == 0) v = anom_corr.v; - else if(strcmp(stat_name, "ME2" ) == 0) v = me2.v; - else if(strcmp(stat_name, "MSESS" ) == 0) v = msess.v; - else if(strcmp(stat_name, "RMSFA" ) == 0) v = rmsfa.v; - else if(strcmp(stat_name, "RMSOA" ) == 0) v = rmsoa.v; - else if(strcmp(stat_name, "ANOM_CORR_UNCNTR") == 0) v = anom_corr_uncntr.v; + if(stat_name == 
"TOTAL" ) v = (double) n; + else if(stat_name == "FBAR" ) v = fbar.v; + else if(stat_name == "FBAR_NCL" ) v = fbar.v_ncl[i_alpha]; + else if(stat_name == "FBAR_NCU" ) v = fbar.v_ncu[i_alpha]; + else if(stat_name == "FBAR_BCL" ) v = fbar.v_bcl[i_alpha]; + else if(stat_name == "FBAR_BCU" ) v = fbar.v_bcu[i_alpha]; + else if(stat_name == "FSTDEV" ) v = fstdev.v; + else if(stat_name == "FSTDEV_NCL" ) v = fstdev.v_ncl[i_alpha]; + else if(stat_name == "FSTDEV_NCU" ) v = fstdev.v_ncu[i_alpha]; + else if(stat_name == "FSTDEV_BCL" ) v = fstdev.v_bcl[i_alpha]; + else if(stat_name == "FSTDEV_BCU" ) v = fstdev.v_bcu[i_alpha]; + else if(stat_name == "OBAR" ) v = obar.v; + else if(stat_name == "OBAR_NCL" ) v = obar.v_ncl[i_alpha]; + else if(stat_name == "OBAR_NCU" ) v = obar.v_ncu[i_alpha]; + else if(stat_name == "OBAR_BCL" ) v = obar.v_bcl[i_alpha]; + else if(stat_name == "OBAR_BCU" ) v = obar.v_bcu[i_alpha]; + else if(stat_name == "OSTDEV" ) v = ostdev.v; + else if(stat_name == "OSTDEV_NCL" ) v = ostdev.v_ncl[i_alpha]; + else if(stat_name == "OSTDEV_NCU" ) v = ostdev.v_ncu[i_alpha]; + else if(stat_name == "OSTDEV_BCL" ) v = ostdev.v_bcl[i_alpha]; + else if(stat_name == "OSTDEV_BCU" ) v = ostdev.v_bcu[i_alpha]; + else if(stat_name == "PR_CORR" ) v = pr_corr.v; + else if(stat_name == "PR_CORR_NCL" ) v = pr_corr.v_ncl[i_alpha]; + else if(stat_name == "PR_CORR_NCU" ) v = pr_corr.v_ncu[i_alpha]; + else if(stat_name == "PR_CORR_BCL" ) v = pr_corr.v_bcl[i_alpha]; + else if(stat_name == "PR_CORR_BCU" ) v = pr_corr.v_bcu[i_alpha]; + else if(stat_name == "SP_CORR" ) v = sp_corr.v; + else if(stat_name == "KT_CORR" ) v = kt_corr.v; + else if(stat_name == "RANKS" ) v = n_ranks; + else if(stat_name == "FRANK_TIES" ) v = frank_ties; + else if(stat_name == "ORANK_TIES" ) v = orank_ties; + else if(stat_name == "ME" ) v = me.v; + else if(stat_name == "ME_NCL" ) v = me.v_ncl[i_alpha]; + else if(stat_name == "ME_NCU" ) v = me.v_ncu[i_alpha]; + else if(stat_name == "ME_BCL" ) v = 
me.v_bcl[i_alpha]; + else if(stat_name == "ME_BCU" ) v = me.v_bcu[i_alpha]; + else if(stat_name == "ESTDEV" ) v = estdev.v; + else if(stat_name == "ESTDEV_NCL" ) v = estdev.v_ncl[i_alpha]; + else if(stat_name == "ESTDEV_NCU" ) v = estdev.v_ncu[i_alpha]; + else if(stat_name == "ESTDEV_BCL" ) v = estdev.v_bcl[i_alpha]; + else if(stat_name == "ESTDEV_BCU" ) v = estdev.v_bcu[i_alpha]; + else if(stat_name == "MBIAS" ) v = mbias.v; + else if(stat_name == "MBIAS_BCL" ) v = mbias.v_bcl[i_alpha]; + else if(stat_name == "MBIAS_BCU" ) v = mbias.v_bcu[i_alpha]; + else if(stat_name == "MAE" ) v = mae.v; + else if(stat_name == "MAE_BCL" ) v = mae.v_bcl[i_alpha]; + else if(stat_name == "MAE_BCU" ) v = mae.v_bcu[i_alpha]; + else if(stat_name == "MSE" ) v = mse.v; + else if(stat_name == "MSE_BCL" ) v = mse.v_bcl[i_alpha]; + else if(stat_name == "MSE_BCU" ) v = mse.v_bcu[i_alpha]; + else if(stat_name == "BCMSE" ) v = bcmse.v; + else if(stat_name == "BCMSE_BCL" ) v = bcmse.v_bcl[i_alpha]; + else if(stat_name == "BCMSE_BCU" ) v = bcmse.v_bcu[i_alpha]; + else if(stat_name == "RMSE" ) v = rmse.v; + else if(stat_name == "RMSE_BCL" ) v = rmse.v_bcl[i_alpha]; + else if(stat_name == "RMSE_BCU" ) v = rmse.v_bcu[i_alpha]; + else if(stat_name == "SI" ) v = si.v; + else if(stat_name == "SI_BCL" ) v = si.v_bcl[i_alpha]; + else if(stat_name == "SI_BCU" ) v = si.v_bcu[i_alpha]; + else if(stat_name == "E10" ) v = e10.v; + else if(stat_name == "E10_BCL" ) v = e10.v_bcl[i_alpha]; + else if(stat_name == "E10_BCU" ) v = e10.v_bcu[i_alpha]; + else if(stat_name == "E25" ) v = e25.v; + else if(stat_name == "E25_BCL" ) v = e25.v_bcl[i_alpha]; + else if(stat_name == "E25_BCU" ) v = e25.v_bcu[i_alpha]; + else if(stat_name == "E50" ) v = e50.v; + else if(stat_name == "E50_BCL" ) v = e50.v_bcl[i_alpha]; + else if(stat_name == "E50_BCU" ) v = e50.v_bcu[i_alpha]; + else if(stat_name == "E75" ) v = e75.v; + else if(stat_name == "E75_BCL" ) v = e75.v_bcl[i_alpha]; + else if(stat_name == "E75_BCU" ) v = 
e75.v_bcu[i_alpha]; + else if(stat_name == "E90" ) v = e90.v; + else if(stat_name == "E90_BCL" ) v = e90.v_bcl[i_alpha]; + else if(stat_name == "E90_BCU" ) v = e90.v_bcu[i_alpha]; + else if(stat_name == "EIQR" ) v = eiqr.v; + else if(stat_name == "EIQR_BCL" ) v = eiqr.v_bcl[i_alpha]; + else if(stat_name == "EIQR_BCU" ) v = eiqr.v_bcu[i_alpha]; + else if(stat_name == "MAD" ) v = mad.v; + else if(stat_name == "MAD_BCL" ) v = mad.v_bcl[i_alpha]; + else if(stat_name == "MAD_BCU" ) v = mad.v_bcu[i_alpha]; + else if(stat_name == "ANOM_CORR" ) v = anom_corr.v; + else if(stat_name == "ANOM_CORR_NCL" ) v = anom_corr.v_ncl[i_alpha]; + else if(stat_name == "ANOM_CORR_NCU" ) v = anom_corr.v_ncu[i_alpha]; + else if(stat_name == "ANOM_CORR_BCL" ) v = anom_corr.v_bcl[i_alpha]; + else if(stat_name == "ANOM_CORR_BCU" ) v = anom_corr.v_bcu[i_alpha]; + else if(stat_name == "ME2" ) v = me2.v; + else if(stat_name == "ME2_BCL" ) v = me2.v_bcl[i_alpha]; + else if(stat_name == "ME2_BCU" ) v = me2.v_bcu[i_alpha]; + else if(stat_name == "MSESS" ) v = msess.v; + else if(stat_name == "MSESS_BCL" ) v = msess.v_bcl[i_alpha]; + else if(stat_name == "MSESS_BCU" ) v = msess.v_bcu[i_alpha]; + else if(stat_name == "RMSFA" ) v = rmsfa.v; + else if(stat_name == "RMSFA_BCL" ) v = rmsfa.v_bcl[i_alpha]; + else if(stat_name == "RMSFA_BCU" ) v = rmsfa.v_bcu[i_alpha]; + else if(stat_name == "RMSOA" ) v = rmsoa.v; + else if(stat_name == "RMSOA_BCL" ) v = rmsoa.v_bcl[i_alpha]; + else if(stat_name == "RMSOA_BCU" ) v = rmsoa.v_bcu[i_alpha]; + else if(stat_name == "ANOM_CORR_UNCNTR" ) v = anom_corr_uncntr.v; + else if(stat_name == "ANOM_CORR_UNCNTR_BCL") v = anom_corr_uncntr.v_bcl[i_alpha]; + else if(stat_name == "ANOM_CORR_UNCNTR_BCU") v = anom_corr_uncntr.v_bcu[i_alpha]; + else if(stat_name == "SI" ) v = si.v; + else if(stat_name == "SI_BCL" ) v = si.v_bcl[i_alpha]; + else if(stat_name == "SI_BCU" ) v = si.v_bcu[i_alpha]; else { mlog << Error << "\nCNTInfo::get_stat() -> " << "unknown continuous statistic name 
\"" << stat_name @@ -1081,7 +1468,7 @@ double CNTInfo::get_stat(const char *stat_name) { } // Return bad data for 0 pairs - if(n == 0 && strcmp(stat_name, "TOTAL") != 0) { + if(n == 0 && stat_name != "TOTAL") { v = bad_data_double; } @@ -1127,7 +1514,8 @@ SL1L2Info & SL1L2Info::operator=(const SL1L2Info &c) { //////////////////////////////////////////////////////////////////////// SL1L2Info & SL1L2Info::operator+=(const SL1L2Info &c) { - SL1L2Info s_info; + SL1L2Info s_info = *this; + s_info.zero_out(); s_info.scount = scount + c.scount; @@ -1138,11 +1526,11 @@ SL1L2Info & SL1L2Info::operator+=(const SL1L2Info &c) { s_info.ffbar = (ffbar*scount + c.ffbar*c.scount)/s_info.scount; s_info.oobar = (oobar*scount + c.oobar*c.scount)/s_info.scount; - if(is_bad_data(mae) || is_bad_data(c.mae)) { - s_info.mae = bad_data_double; + if(is_bad_data(smae) || is_bad_data(c.smae)) { + s_info.smae = bad_data_double; } else { - s_info.mae = (mae*scount + c.mae*c.scount)/s_info.scount; + s_info.smae = (smae*scount + c.smae*c.scount)/s_info.scount; } } @@ -1155,11 +1543,11 @@ SL1L2Info & SL1L2Info::operator+=(const SL1L2Info &c) { s_info.ffabar = (ffabar*sacount + c.ffabar*c.sacount)/s_info.sacount; s_info.ooabar = (ooabar*sacount + c.ooabar*c.sacount)/s_info.sacount; - if(is_bad_data(mae) || is_bad_data(c.mae)) { - s_info.mae = bad_data_double; + if(is_bad_data(samae) || is_bad_data(c.samae)) { + s_info.samae = bad_data_double; } else { - s_info.mae = (mae*sacount + c.mae*c.sacount)/s_info.sacount; + s_info.samae = (samae*sacount + c.samae*c.sacount)/s_info.sacount; } } @@ -1184,15 +1572,15 @@ void SL1L2Info::zero_out() { // SL1L2 Quantities fbar = obar = 0.0; fobar = ffbar = oobar = 0.0; + smae = 0.0; scount = 0; // SAL1L2 Quantities fabar = oabar = 0.0; foabar = ffabar = ooabar = 0.0; + samae = 0.0; sacount = 0; - mae = 0.0; - return; } @@ -1225,6 +1613,7 @@ void SL1L2Info::assign(const SL1L2Info &c) { fobar = c.fobar; ffbar = c.ffbar; oobar = c.oobar; + smae = c.smae; scount = 
c.scount; // SAL1L2 Quantities @@ -1233,10 +1622,9 @@ void SL1L2Info::assign(const SL1L2Info &c) { foabar = c.foabar; ffabar = c.ffabar; ooabar = c.ooabar; + samae = c.samae; sacount = c.sacount; - mae = c.mae; - return; } @@ -1244,7 +1632,7 @@ void SL1L2Info::assign(const SL1L2Info &c) { void SL1L2Info::set(const PairDataPoint &pd_all) { int i; - double f, o, c, wgt, wgt_sum; + double f, o, fc, oc, wgt, wgt_sum; PairDataPoint pd; // Check for mismatch @@ -1273,7 +1661,8 @@ void SL1L2Info::set(const PairDataPoint &pd_all) { f = pd.f_na[i]; o = pd.o_na[i]; - c = pd.cmn_na[i]; + fc = pd.fcmn_na[i]; + oc = pd.ocmn_na[i]; wgt = pd.wgt_na[i]/wgt_sum; // Skip bad data values in the forecast or observation fields @@ -1285,16 +1674,17 @@ void SL1L2Info::set(const PairDataPoint &pd_all) { fobar += wgt*f*o; ffbar += wgt*f*f; oobar += wgt*o*o; - mae += wgt*fabs(f-o); + smae += wgt*fabs(f-o); scount++; // SAL1L2 sums - if(!is_bad_data(c)) { - fabar += wgt*(f-c); - oabar += wgt*(o-c); - foabar += wgt*(f-c)*(o-c); - ffabar += wgt*(f-c)*(f-c); - ooabar += wgt*(o-c)*(o-c); + if(!is_bad_data(fc) && !is_bad_data(oc)) { + fabar += wgt*(f-fc); + oabar += wgt*(o-oc); + foabar += wgt*(f-fc)*(o-oc); + ffabar += wgt*(f-fc)*(f-fc); + ooabar += wgt*(o-oc)*(o-oc); + samae += wgt*fabs((f-fc)-(o-oc)); sacount++; } } @@ -1317,6 +1707,110 @@ void SL1L2Info::set(const PairDataPoint &pd_all) { return; } +//////////////////////////////////////////////////////////////////////// + +void SL1L2Info::set_stat_sl1l2(const string &stat_name, double v) { + + if(stat_name == "TOTAL") scount = nint(v); + else if(stat_name == "FBAR" ) fbar = v; + else if(stat_name == "OBAR" ) obar = v; + else if(stat_name == "FOBAR") fobar = v; + else if(stat_name == "FFBAR") ffbar = v; + else if(stat_name == "OOBAR") oobar = v; + else if(stat_name == "MAE" ) smae = v; + else { + mlog << Error << "\nSL1L2Info::set_stat_sl1l2() -> " + << "unknown scalar partial sum statistic name \"" << stat_name + << "\"!\n\n"; + exit(1); + } 
+ + return; +} + +//////////////////////////////////////////////////////////////////////// + +void SL1L2Info::set_stat_sal1l2(const string &stat_name, double v) { + + if(stat_name == "TOTAL" ) sacount = nint(v); + else if(stat_name == "FABAR" ) fabar = v; + else if(stat_name == "OABAR" ) oabar = v; + else if(stat_name == "FOABAR") foabar = v; + else if(stat_name == "FFABAR") ffabar = v; + else if(stat_name == "OOABAR") ooabar = v; + else if(stat_name == "MAE" ) samae = v; + else { + mlog << Error << "\nSL1L2Info::set_stat_sal1l2() -> " + << "unknown scalar anomaly partial sum statistic name \"" << stat_name + << "\"!\n\n"; + exit(1); + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +double SL1L2Info::get_stat(STATLineType lt, const string &stat_name) const { + double v = bad_data_double; + + // Get statistic by line type + if(lt == STATLineType::sl1l2) v = get_stat_sl1l2(stat_name); + else if(lt == STATLineType::sal1l2) v = get_stat_sal1l2(stat_name); + else { + mlog << Error << "\nSL1L2Info::get_stat() -> " + << "unexpected line type \"" << statlinetype_to_string(lt) + << "\"!\n\n"; + exit(1); + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double SL1L2Info::get_stat_sl1l2(const string &stat_name) const { + double v = bad_data_double; + + if(stat_name == "TOTAL") v = (double) scount; + else if(stat_name == "FBAR" ) v = fbar; + else if(stat_name == "OBAR" ) v = obar; + else if(stat_name == "FOBAR") v = fobar; + else if(stat_name == "FFBAR") v = ffbar; + else if(stat_name == "OOBAR") v = oobar; + else if(stat_name == "MAE" ) v = smae; + else { + mlog << Error << "\nSL1L2Info::get_stat_sl1l2() -> " + << "unknown scalar partial sum statistic name \"" << stat_name + << "\"!\n\n"; + exit(1); + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double SL1L2Info::get_stat_sal1l2(const string &stat_name) const { + double v = 
bad_data_double; + + if(stat_name == "TOTAL" ) v = (double) sacount; + else if(stat_name == "FABAR" ) v = fabar; + else if(stat_name == "OABAR" ) v = oabar; + else if(stat_name == "FOABAR") v = foabar; + else if(stat_name == "FFABAR") v = ffabar; + else if(stat_name == "OOABAR") v = ooabar; + else if(stat_name == "MAE" ) v = samae; + else { + mlog << Error << "\nSL1L2Info::get_stat_sal1l2() -> " + << "unknown scalar anomaly partial sum statistic name \"" << stat_name + << "\"!\n\n"; + exit(1); + } + + return v; +} + //////////////////////////////////////////////////////////////////////// // // Code for class VL1L2Info @@ -1356,13 +1850,11 @@ VL1L2Info & VL1L2Info::operator=(const VL1L2Info &c) { //////////////////////////////////////////////////////////////////////// VL1L2Info & VL1L2Info::operator+=(const VL1L2Info &c) { - VL1L2Info v_info; - - // Store alpha values - v_info.allocate_n_alpha(n_alpha); - for(int i=0; i 0) { v_info.uf_bar = (uf_bar*vcount + c.uf_bar*c.vcount) /v_info.vcount; @@ -1374,12 +1866,15 @@ VL1L2Info & VL1L2Info::operator+=(const VL1L2Info &c) { v_info.uvoo_bar = (uvoo_bar*vcount + c.uvoo_bar*c.vcount) /v_info.vcount; v_info.f_speed_bar = (f_speed_bar*vcount + c.f_speed_bar*c.vcount)/v_info.vcount; v_info.o_speed_bar = (o_speed_bar*vcount + c.o_speed_bar*c.vcount)/v_info.vcount; - v_info.dir_bar = (dir_bar*vcount + c.dir_bar*c.vcount) /v_info.vcount; - v_info.absdir_bar = (absdir_bar*vcount + c.absdir_bar*c.vcount) /v_info.vcount; - v_info.dir2_bar = (dir2_bar*vcount + c.dir2_bar*c.vcount) /v_info.vcount; + } + if(v_info.dcount > 0) { + v_info.dir_bar = (dir_bar*dcount + c.dir_bar*c.dcount) /v_info.dcount; + v_info.absdir_bar = (absdir_bar*dcount + c.absdir_bar*c.dcount) /v_info.dcount; + v_info.dir2_bar = (dir2_bar*dcount + c.dir2_bar*c.dcount) /v_info.dcount; } v_info.vacount = vacount + c.vacount; + v_info.dacount = dacount + c.dacount; if(v_info.vacount > 0) { v_info.ufa_bar = (ufa_bar*vacount + c.ufa_bar*c.vacount) /v_info.vacount; @@ 
-1391,9 +1886,11 @@ VL1L2Info & VL1L2Info::operator+=(const VL1L2Info &c) { v_info.uvooa_bar = (uvooa_bar*vacount + c.uvooa_bar*c.vacount) /v_info.vacount; v_info.fa_speed_bar = (fa_speed_bar*vacount + c.fa_speed_bar*c.vacount)/v_info.vacount; v_info.oa_speed_bar = (oa_speed_bar*vacount + c.oa_speed_bar*c.vacount)/v_info.vacount; - v_info.dira_bar = (dira_bar*vacount + c.dira_bar*c.vacount) /v_info.vacount; - v_info.absdira_bar = (absdira_bar*vacount + c.absdira_bar*c.vacount) /v_info.vacount; - v_info.dira2_bar = (dira2_bar*vacount + c.dira2_bar*c.vacount) /v_info.vacount; + } + if(v_info.dacount > 0) { + v_info.dira_bar = (dira_bar*dacount + c.dira_bar*c.dacount) /v_info.dacount; + v_info.absdira_bar = (absdira_bar*dacount + c.absdira_bar*c.dacount) /v_info.dacount; + v_info.dira2_bar = (dira2_bar*dacount + c.dira2_bar*c.dacount) /v_info.dacount; } v_info.compute_stats(); @@ -1436,6 +1933,7 @@ void VL1L2Info::zero_out() { dir2_bar = 0.0; vcount = 0; + dcount = 0; // // VAL1L2 Quantities @@ -1455,6 +1953,7 @@ void VL1L2Info::zero_out() { dira2_bar = 0.0; vacount = 0; + dacount = 0; return; } @@ -1464,8 +1963,6 @@ void VL1L2Info::zero_out() { void VL1L2Info::clear() { n = 0; - n_dir_undef = 0; - n_dira_undef = 0; n_alpha = 0; if(alpha) { delete [] alpha; alpha = (double *) nullptr; } @@ -1515,8 +2012,6 @@ void VL1L2Info::assign(const VL1L2Info &c) { logic = c.logic; n = c.n; - n_dir_undef = c.n_dir_undef; - n_dira_undef = c.n_dira_undef; allocate_n_alpha(c.n_alpha); for(i=0; i 0) { - mlog << Warning << "\nVL1L2Info::compute_stats() -> " - << "Skipping " << n_dir_undef << " of " << vcount - << " vector pairs for which the direction difference is undefined.\n" - << "Set the \"wind_thresh\" and \"wind_logic\" configuration options " - << "to exclude zero vectors.\n\n"; - } - DIR_ME.v = dir_bar; DIR_MAE.v = absdir_bar; DIR_MSE.v = dir2_bar; @@ -1912,15 +2401,6 @@ void VL1L2Info::compute_stats() { } ANOM_CORR_UNCNTR.v = compute_anom_corr_uncntr(uvffa_bar, uvooa_bar, 
uvfoa_bar); - - // Print undefined wind direction warning message - if(n_dira_undef > 0) { - mlog << Warning << "\nVL1L2Info::compute_stats() -> " - << "Skipping " << n_dira_undef << " of " << vacount - << " anomaly vector pairs for which the direction difference is undefined.\n" - << "Set the \"wind_thresh\" and \"wind_logic\" configuration options " - << "to exclude zero vectors.\n\n"; - } } // Compute parametric confidence intervals @@ -1989,40 +2469,99 @@ void VL1L2Info::compute_ci() { return; } +//////////////////////////////////////////////////////////////////////// + +double VL1L2Info::get_stat_vl1l2(const string &stat_name) const { + double v = bad_data_double; + + // Find the statistic by name + if(stat_name == "TOTAL" ) v = vcount; + else if(stat_name == "UFBAR" ) v = uf_bar; + else if(stat_name == "VFBAR" ) v = vf_bar; + else if(stat_name == "UOBAR" ) v = uo_bar; + else if(stat_name == "VOBAR" ) v = vo_bar; + else if(stat_name == "UVFOBAR" ) v = uvfo_bar; + else if(stat_name == "UVFFBAR" ) v = uvff_bar; + else if(stat_name == "UVOOBAR" ) v = uvoo_bar; + else if(stat_name == "F_SPEED_BAR") v = f_speed_bar; + else if(stat_name == "O_SPEED_BAR") v = o_speed_bar; + else if(stat_name == "TOTAL_DIR" ) v = dcount; + else if(stat_name == "DIR_ME" ) v = dir_bar; + else if(stat_name == "DIR_MAE" ) v = absdir_bar; + else if(stat_name == "DIR_MSE" ) v = dir2_bar; + else { + mlog << Error << "\nVL1L2Info::get_stat_vl1l2() -> " + << "unknown vector partial sums statistic name \"" << stat_name + << "\"!\n\n"; + exit(1); + } + + return v; +} //////////////////////////////////////////////////////////////////////// -double VL1L2Info::get_stat(const char *stat_name) { +double VL1L2Info::get_stat_val1l2(const string &stat_name) const { double v = bad_data_double; - if(strcmp(stat_name, "TOTAL" ) == 0) v = vcount; - else if(strcmp(stat_name, "FBAR" ) == 0) v = FBAR.v; - else if(strcmp(stat_name, "OBAR" ) == 0) v = OBAR.v; - else if(strcmp(stat_name, "FS_RMS" ) == 0) v = 
FS_RMS.v; - else if(strcmp(stat_name, "OS_RMS" ) == 0) v = OS_RMS.v; - else if(strcmp(stat_name, "MSVE" ) == 0) v = MSVE.v; - else if(strcmp(stat_name, "RMSVE" ) == 0) v = RMSVE.v; - else if(strcmp(stat_name, "FSTDEV" ) == 0) v = FSTDEV.v; - else if(strcmp(stat_name, "OSTDEV" ) == 0) v = OSTDEV.v; - else if(strcmp(stat_name, "FDIR" ) == 0) v = FDIR.v; - else if(strcmp(stat_name, "ODIR" ) == 0) v = ODIR.v; - else if(strcmp(stat_name, "FBAR_SPEED" ) == 0) v = FBAR_SPEED.v; - else if(strcmp(stat_name, "OBAR_SPEED" ) == 0) v = OBAR_SPEED.v; - else if(strcmp(stat_name, "VDIFF_SPEED" ) == 0) v = VDIFF_SPEED.v; - else if(strcmp(stat_name, "VDIFF_DIR" ) == 0) v = VDIFF_DIR.v; - else if(strcmp(stat_name, "SPEED_ERR" ) == 0) v = SPEED_ERR.v; - else if(strcmp(stat_name, "SPEED_ABSERR" ) == 0) v = SPEED_ABSERR.v; - else if(strcmp(stat_name, "DIR_ERR" ) == 0) v = DIR_ERR.v; - else if(strcmp(stat_name, "DIR_ABSERR" ) == 0) v = DIR_ABSERR.v; - else if(strcmp(stat_name, "ANOM_CORR" ) == 0) v = ANOM_CORR.v; - else if(strcmp(stat_name, "ANOM_CORR_UNCNTR") == 0) v = ANOM_CORR_UNCNTR.v; - else if(strcmp(stat_name, "DIR_ME" ) == 0) v = DIR_ME.v; - else if(strcmp(stat_name, "DIR_MAE" ) == 0) v = DIR_MAE.v; - else if(strcmp(stat_name, "DIR_MSE" ) == 0) v = DIR_MSE.v; - else if(strcmp(stat_name, "DIR_RMSE" ) == 0) v = DIR_RMSE.v; + // Find the statistic by name + if(stat_name == "TOTAL" ) v = vacount; + else if(stat_name == "UFABAR" ) v = ufa_bar; + else if(stat_name == "VFABAR" ) v = vfa_bar; + else if(stat_name == "UOABAR" ) v = uoa_bar; + else if(stat_name == "VOABAR" ) v = voa_bar; + else if(stat_name == "UVFOABAR" ) v = uvfoa_bar; + else if(stat_name == "UVFFABAR" ) v = uvffa_bar; + else if(stat_name == "UVOOABAR" ) v = uvooa_bar; + else if(stat_name == "FA_SPEED_BAR") v = fa_speed_bar; + else if(stat_name == "OA_SPEED_BAR") v = oa_speed_bar; + else if(stat_name == "TOTAL_DIR" ) v = dacount; + else if(stat_name == "DIRA_ME" ) v = dira_bar; + else if(stat_name == "DIRA_MAE" ) v = 
absdira_bar; + else if(stat_name == "DIRA_MSE" ) v = dira2_bar; else { mlog << Error << "\nVL1L2Info::get_stat() -> " - << "unknown continuous statistic name \"" << stat_name + << "unknown vector anomaly partial sums statistic name \"" << stat_name + << "\"!\n\n"; + exit(1); + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double VL1L2Info::get_stat_vcnt(const string &stat_name) const { + double v = bad_data_double; + + if(stat_name == "TOTAL" ) v = vcount; + else if(stat_name == "FBAR" ) v = FBAR.v; + else if(stat_name == "OBAR" ) v = OBAR.v; + else if(stat_name == "FS_RMS" ) v = FS_RMS.v; + else if(stat_name == "OS_RMS" ) v = OS_RMS.v; + else if(stat_name == "MSVE" ) v = MSVE.v; + else if(stat_name == "RMSVE" ) v = RMSVE.v; + else if(stat_name == "FSTDEV" ) v = FSTDEV.v; + else if(stat_name == "OSTDEV" ) v = OSTDEV.v; + else if(stat_name == "FDIR" ) v = FDIR.v; + else if(stat_name == "ODIR" ) v = ODIR.v; + else if(stat_name == "FBAR_SPEED" ) v = FBAR_SPEED.v; + else if(stat_name == "OBAR_SPEED" ) v = OBAR_SPEED.v; + else if(stat_name == "VDIFF_SPEED" ) v = VDIFF_SPEED.v; + else if(stat_name == "VDIFF_DIR" ) v = VDIFF_DIR.v; + else if(stat_name == "SPEED_ERR" ) v = SPEED_ERR.v; + else if(stat_name == "SPEED_ABSERR" ) v = SPEED_ABSERR.v; + else if(stat_name == "DIR_ERR" ) v = DIR_ERR.v; + else if(stat_name == "DIR_ABSERR" ) v = DIR_ABSERR.v; + else if(stat_name == "ANOM_CORR" ) v = ANOM_CORR.v; + else if(stat_name == "ANOM_CORR_UNCNTR") v = ANOM_CORR_UNCNTR.v; + else if(stat_name == "DIR_ME" ) v = DIR_ME.v; + else if(stat_name == "DIR_MAE" ) v = DIR_MAE.v; + else if(stat_name == "DIR_MSE" ) v = DIR_MSE.v; + else if(stat_name == "DIR_RMSE" ) v = DIR_RMSE.v; + else { + mlog << Error << "\nVL1L2Info::get_stat_vcnt() -> " + << "unknown vector continuous statistic name \"" << stat_name << "\"!\n\n"; exit(1); } @@ -2151,7 +2690,8 @@ NBRCNTInfo & NBRCNTInfo::operator=(const NBRCNTInfo &c) { 
//////////////////////////////////////////////////////////////////////// NBRCNTInfo & NBRCNTInfo::operator+=(const NBRCNTInfo &c) { - NBRCNTInfo n_info; + NBRCNTInfo n_info = *this; + n_info.sl1l2_info.zero_out(); double den; n_info.sl1l2_info.scount = sl1l2_info.scount + c.sl1l2_info.scount; @@ -2503,7 +3043,7 @@ void ISCInfo::compute_isc() { int i; // Get the Total, Base Rate, and Frequency Bias - total = cts.n(); + total = cts.n_pairs(); fbias = cts.fbias(); baser = cts.baser(); @@ -2537,7 +3077,7 @@ void ISCInfo::compute_isc(int i) { double den; // Get the Total, Base Rate, and Frequency Bias - total = cts.n(); + total = cts.n_pairs(); fbias = cts.fbias(); baser = cts.baser(); @@ -2621,8 +3161,8 @@ void PCTInfo::clear() { n_alpha = 0; if(alpha) { delete [] alpha; alpha = (double *) nullptr; } - pct.zero_out(); - climo_pct.zero_out(); + pct.clear(); + climo_pct.clear(); fthresh.clear(); othresh.clear(); @@ -2718,7 +3258,7 @@ void PCTInfo::set_fthresh(const ThreshArray &ta) { void PCTInfo::compute_stats() { - total = pct.n(); + total = pct.n_pairs(); baser.v = pct.baser(); reliability = pct.reliability(); resolution = pct.resolution(); @@ -2753,7 +3293,7 @@ void PCTInfo::compute_ci() { // for(i=0; i " + << "unexpected line type \"" << statlinetype_to_string(lt) + << "\"!\n\n"; + exit(1); + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double PCTInfo::get_stat_pct(const string &stat_name, + ConcatString &col_name) const { + int i = 0; + double v = bad_data_double; + col_name = stat_name; + + // Get index value for variable column numbers + if(check_reg_exp("_[0-9]", stat_name.c_str())) { + + // Parse the index value from the column name + i = atoi(strrchr(stat_name.c_str(), '_') + 1) - 1; + + // Range check (allow THRESH_N for N == nrows) + if(i < 0 || i > pct.nrows()) { + mlog << Error << "\nPCTInfo::get_stat_pct() -> " + << "range check error for column name requested \"" << stat_name + << "\"\n\n"; + exit(1); 
+ } + } // end if + + // Find the statistic by name + if(stat_name == "TOTAL") { + v = (double) pct.n_pairs(); + } + else if(stat_name == "N_THRESH") { + v = (double) pct.nrows() + 1; + } + else if(check_reg_exp("THRESH_[0-9]", stat_name.c_str())) { + v = pct.threshold(i); + col_name = "THRESH_I"; + } + else if(check_reg_exp("OY_[0-9]", stat_name.c_str())){ + v = pct.event_total_by_row(i); + col_name = "OY_I"; + } + else if(check_reg_exp("ON_[0-9]", stat_name.c_str())) { + v = pct.nonevent_total_by_row(i); + col_name = "ON_I"; + } + else { + mlog << Error << "\nPCTInfo::get_stat_pct() -> " + << "unsupported column name requested \"" << stat_name + << "\"\n\n"; + exit(1); + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double PCTInfo::get_stat_pjc(const string &stat_name, + ConcatString &col_name) const { + int i = 0; + double v = bad_data_double; + col_name = stat_name; + + // Get index value for variable column numbers + if(check_reg_exp("_[0-9]", stat_name.c_str())) { + + // Parse the index value from the column name + i = atoi(strrchr(stat_name.c_str(), '_') + 1) - 1; + + // Range check + if(i < 0 || i >= pct.nrows()) { + mlog << Error << "\nPCTInfo::get_stat_pjc() -> " + << "range check error for column name requested \"" << stat_name + << "\"\n\n"; + exit(1); + } + } // end if + + // Find the statistic by name + if(stat_name == "TOTAL") { + v = (double) pct.n_pairs(); + } + else if(stat_name == "N_THRESH") { + v = (double) pct.nrows() + 1; + } + else if(check_reg_exp("THRESH_[0-9]", stat_name.c_str())) { + v = pct.threshold(i); + col_name = "THRESH_I"; + } + else if(check_reg_exp("OY_TP_[0-9]", stat_name.c_str())) { + v = pct.event_total_by_row(i)/pct.total(); + col_name = "OY_TP_I"; + } + else if(check_reg_exp("ON_TP_[0-9]", stat_name.c_str())) { + v = pct.nonevent_total_by_row(i)/pct.total(); + col_name = "ON_TP_I"; + } + else if(check_reg_exp("CALIBRATION_[0-9]", stat_name.c_str())) { + v = 
pct.row_calibration(i); + col_name = "CALIBRATION_I"; + } + else if(check_reg_exp("REFINEMENT_[0-9]", stat_name.c_str())) { + v = pct.row_refinement(i); + col_name = "REFINEMENT_I"; + } + else if(check_reg_exp("LIKELIHOOD_[0-9]", stat_name.c_str())) { + v = pct.row_event_likelihood(i); + col_name = "LIKELIHOOD_I"; + } + else if(check_reg_exp("BASER_[0-9]", stat_name.c_str())) { + v = pct.row_obar(i); + col_name = "BASER_I"; + } + else { + mlog << Error << "\nPCTInfo::get_stat_pjc() -> " + << "unsupported column name requested \"" << stat_name + << "\"\n\n"; + exit(1); + } + + // Return bad data for 0 pairs + if(pct.n_pairs() == 0 && stat_name != "TOTAL") { + v = bad_data_double; + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double PCTInfo::get_stat_prc(const string &stat_name, + ConcatString &col_name) const { + int i = bad_data_int; + double v = bad_data_double; + col_name = stat_name; + TTContingencyTable ct; + + // Get index value for variable column numbers + if(check_reg_exp("_[0-9]", stat_name.c_str())) { + + // Parse the index value from the column name + i = atoi(strrchr(stat_name.c_str(), '_') + 1) - 1; + + // Range check + if(i < 0 || i >= pct.nrows()) { + mlog << Error << "\nPCTInfo::get_stat_prc() -> " + << "range check error for column name requested \"" << stat_name + << "\"\n\n"; + exit(1); + } + + // Get the 2x2 contingency table for this row + ct = pct.ctc_by_row(i); + + } // end if + + // Find the statistic by name + if(stat_name == "TOTAL") { + v = (double) pct.n_pairs(); + } + else if(stat_name == "N_THRESH") { + v = (double) pct.nrows() + 1; + } + else if(check_reg_exp("THRESH_[0-9]", stat_name.c_str())) { + v = pct.threshold(i); + col_name = "THRESH_I"; + } + else if(check_reg_exp("PODY_[0-9]", stat_name.c_str())) { + v = ct.pod_yes(); + col_name = "PODY_I"; + } + else if(check_reg_exp("POFD_[0-9]", stat_name.c_str())) { + v = ct.pofd(); + col_name = "POFD_I"; + } + else { + mlog << Error 
<< "\nPCTInfo::get_stat_prc() -> " + << "unsupported column name requested \"" << stat_name + << "\"\n\n"; + exit(1); + } + + // Return bad data for 0 pairs + if(pct.n_pairs() == 0 && stat_name != "TOTAL") { + v = bad_data_double; + } + + return v; +} + +//////////////////////////////////////////////////////////////////////// + +double PCTInfo::get_stat_pstd(const string &stat_name, + ConcatString &col_name, + int i_alpha) const { + int i = 0; + double v = bad_data_double; + col_name = stat_name; + + // Range check alpha index + if(i_alpha >= n_alpha && is_ci_stat_name(stat_name)) { + mlog << Error << "\nPCTInfo::get_stat_pstd() -> " + << "alpha index out of range (" << i_alpha << " >= " + << n_alpha << ")!\n\n"; + exit(1); + } + + // Get index value for variable column numbers + if(check_reg_exp("_[0-9]", stat_name.c_str())) { + + // Parse the index value from the column name + i = atoi(strrchr(stat_name.c_str(), '_') + 1) - 1; + + // Range check + if(i < 0 || i >= pct.nrows()) { + mlog << Error << "\nPCTInfo::get_stat_pstd() -> " + << "range check error for column name requested \"" << stat_name + << "\"\n\n"; + exit(1); + } + } // end if + + // Find the statistic by name + if(stat_name == "TOTAL" ) v = (double) pct.n_pairs(); + else if(stat_name == "N_THRESH" ) v = (double) pct.nrows() + 1; + else if(stat_name == "BASER" ) v = baser.v; + else if(stat_name == "BASER_NCL" ) v = baser.v_ncl[i_alpha]; + else if(stat_name == "BASER_NCU" ) v = baser.v_ncu[i_alpha]; + else if(stat_name == "RELIABILITY") v = pct.reliability(); + else if(stat_name == "RESOLUTION" ) v = pct.resolution(); + else if(stat_name == "UNCERTAINTY") v = pct.uncertainty(); + else if(stat_name == "ROC_AUC" ) v = pct.roc_auc(); + else if(stat_name == "BRIER" ) v = brier.v; + else if(stat_name == "BRIER_NCL" ) v = brier.v_ncl[i_alpha]; + else if(stat_name == "BRIER_NCU" ) v = brier.v_ncu[i_alpha]; + else if(stat_name == "BRIERCL" ) v = briercl.v; + else if(stat_name == "BRIERCL_NCL") v = 
briercl.v_ncl[i_alpha]; + else if(stat_name == "BRIERCL_NCU") v = briercl.v_ncu[i_alpha]; + else if(stat_name == "BSS" ) v = bss; + else if(stat_name == "BSS_SMPL" ) v = bss_smpl; + else if(check_reg_exp("THRESH_[0-9]", stat_name.c_str())) { + v = pct.threshold(i); + col_name = "THRESH_I"; + } + else { + mlog << Error << "\nPCTInfo::get_stat_pstd() -> " + << "unsupported column name requested \"" << stat_name + << "\"\n\n"; + exit(1); + } + + // Return bad data for 0 pairs + if(pct.n_pairs() == 0 && stat_name != "TOTAL") { + v = bad_data_double; + } + + return v; +} + //////////////////////////////////////////////////////////////////////// // // Code for class GRADInfo @@ -2948,7 +3767,7 @@ void GRADInfo::set(int grad_dx, int grad_dy, << "count mismatch (" << fgx_na.n() << ", " << fgy_na.n() << ", " << ogx_na.n() << ", " << ogy_na.n() << ", " - << wgt_na.n() << ")\n\n"; + << wgt_na.n() << ")\n\n"; exit(1); } @@ -3472,7 +4291,7 @@ double compute_mean(double sum, int n) { v = sum / n; } - return(v); + return v; } //////////////////////////////////////////////////////////////////////// @@ -3593,7 +4412,7 @@ double compute_ufss(double o_rate) { if(is_bad_data(o_rate)) ufss = bad_data_double; else ufss = 0.5 + o_rate/2.0; - return(ufss); + return ufss; } /////////////////////////////////////////////////////////////////////////////// @@ -3650,3 +4469,10 @@ int compute_rank(const DataPlane &dp, DataPlane &dp_rank, double *data_rank, int } //////////////////////////////////////////////////////////////////////// + +bool is_ci_stat_name(const string &stat_name) { + return (stat_name.find("_NC") != string::npos || + stat_name.find("_BC") != string::npos); +} + +//////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_statistics/met_stats.h b/src/libcode/vx_statistics/met_stats.h index 1d5def71a7..1b1d69ec7d 100644 --- a/src/libcode/vx_statistics/met_stats.h +++ b/src/libcode/vx_statistics/met_stats.h @@ -93,12 +93,17 @@ class 
CTSInfo { void clear(); void allocate_n_alpha(int); - void add(double, double); - void add(double, double, double, double); + void add(double f, double o, double wgt, + const ClimoPntInfo *cpi = nullptr); void compute_stats(); void compute_ci(); - double get_stat(const char *); + void set_stat_ctc(const std::string &, double); + + double get_stat(STATLineType, const std::string &, int i_alpha=0) const; + double get_stat_fho(const std::string &) const; + double get_stat_ctc(const std::string &) const; + double get_stat_cts(const std::string &, int i_alpha=0) const; }; //////////////////////////////////////////////////////////////////////// @@ -134,10 +139,14 @@ class MCTSInfo { void allocate_n_alpha(int); void set_fthresh(const ThreshArray &); void set_othresh(const ThreshArray &); - void add(double, double); - void add(double, double, double, double); + void add(double f, double o, double wgt, + const ClimoPntInfo *cpi = nullptr); void compute_stats(); void compute_ci(); + + double get_stat(STATLineType, const std::string &, ConcatString &, int i_alpha=0) const; + double get_stat_mctc(const std::string &, ConcatString &) const; + double get_stat_mcts(const std::string &, int i_alpha=0) const; }; //////////////////////////////////////////////////////////////////////// @@ -190,11 +199,13 @@ class CNTInfo { int n_ranks, frank_ties, orank_ties; + void zero_out(); void clear(); + void allocate_n_alpha(int); void compute_ci(); - double get_stat(const char *); + double get_stat(const std::string &, int i_alpha=0) const; }; //////////////////////////////////////////////////////////////////////// @@ -226,22 +237,28 @@ class SL1L2Info { double fbar, obar; double fobar; double ffbar, oobar; + double smae; int scount; // SAL1L2 Quantities double fabar, oabar; double foabar; double ffabar, ooabar; + double samae; int sacount; - // Mean absolute error - double mae; - // Compute sums void set(const PairDataPoint &); void zero_out(); void clear(); + + void set_stat_sl1l2(const 
std::string &, double); + void set_stat_sal1l2(const std::string &, double); + + double get_stat(STATLineType, const std::string &) const; + double get_stat_sl1l2(const std::string &) const; + double get_stat_sal1l2(const std::string &) const; }; //////////////////////////////////////////////////////////////////////// @@ -277,10 +294,6 @@ class VL1L2Info { // Number of points int n; - // Number of points for which the wind direction difference is undefined - int n_dir_undef; - int n_dira_undef; - // VL1L2 Quantities double uf_bar; @@ -336,7 +349,8 @@ class VL1L2Info { CIInfo DIR_MSE; CIInfo DIR_RMSE; - int vcount; + int vcount; // Vector count + int dcount; // Direction count // VAL1L2 Quantities double ufa_bar; @@ -355,19 +369,22 @@ class VL1L2Info { double absdira_bar; // Average anomalous absolute direction difference double dira2_bar; // Average anomalous squared direction difference - int vacount; + int vacount; // Vector anomaly count + int dacount; // Direction anomaly count // Compute sums void set(const PairDataPoint &, const PairDataPoint &); - - void clear(); + void zero_out(); + void clear(); void allocate_n_alpha(int); void compute_stats(); void compute_ci(); - double get_stat(const char *); + double get_stat_vl1l2(const std::string &) const; + double get_stat_val1l2(const std::string &) const; + double get_stat_vcnt(const std::string &) const; }; //////////////////////////////////////////////////////////////////////// @@ -507,8 +524,9 @@ class ISCInfo { double baser; double fbias; - void clear(); void zero_out(); + void clear(); + void allocate_n_scale(int); void compute_isc(); void compute_isc(int); @@ -563,6 +581,12 @@ class PCTInfo { void set_fthresh(const ThreshArray &); void compute_stats(); void compute_ci(); + + double get_stat(STATLineType, const std::string &, ConcatString &, int i_alpha=0) const; + double get_stat_pct(const std::string &, ConcatString &) const; + double get_stat_pjc(const std::string &, ConcatString &) const; + double 
get_stat_prc(const std::string &, ConcatString &) const; + double get_stat_pstd(const std::string &, ConcatString &, int i_alpha=0) const; }; //////////////////////////////////////////////////////////////////////// @@ -677,7 +701,7 @@ class DMAPInfo { //////////////////////////////////////////////////////////////////////// -inline double DMAPInfo::get_beta_value() const { return(beta_value); } +inline double DMAPInfo::get_beta_value() const { return beta_value; } //////////////////////////////////////////////////////////////////////// // @@ -742,6 +766,8 @@ extern double compute_ufss(double); extern int compute_rank(const DataPlane &, DataPlane &, double *, int &); +extern bool is_ci_stat_name(const std::string &); + //////////////////////////////////////////////////////////////////////// #endif // __MET_STATS_H__ diff --git a/src/libcode/vx_statistics/obs_error.h b/src/libcode/vx_statistics/obs_error.h index 11cce141dc..ec6d3878b5 100644 --- a/src/libcode/vx_statistics/obs_error.h +++ b/src/libcode/vx_statistics/obs_error.h @@ -149,8 +149,8 @@ class ObsErrorTable { //////////////////////////////////////////////////////////////////////// -inline int ObsErrorTable::n() const { return(N_elements); } -inline bool ObsErrorTable::is_set() const { return(IsSet); } +inline int ObsErrorTable::n() const { return N_elements; } +inline bool ObsErrorTable::is_set() const { return IsSet; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_statistics/pair_base.cc b/src/libcode/vx_statistics/pair_base.cc index f3ffaed3fb..a3b383f9fa 100644 --- a/src/libcode/vx_statistics/pair_base.cc +++ b/src/libcode/vx_statistics/pair_base.cc @@ -18,6 +18,7 @@ #include "pair_base.h" +#include "vx_data2d_factory.h" #include "vx_util.h" #include "vx_grid.h" #include "vx_gsl_prob.h" @@ -32,6 +33,22 @@ using namespace std; static bool sort_obs(ob_val_t a, ob_val_t b) { return a.val " + // The o_na, ocmn_na, and ocsd_na have already been populated + 
if(o_na.n() != ocmn_na.n() || o_na.n() != ocsd_na.n()) { + mlog << Error << "\nPairBase::compute_climo_cdf() -> " << "the observation, climo mean, and climo stdev arrays " << "must all have the same length (" << o_na.n() << ").\n\n"; exit(1); } - cdf_na.extend(o_na.n()); + ocdf_na.extend(o_na.n()); for(i=0; i= n_obs) { mlog << Error << "\nPairBase::set_point_obs() -> " @@ -501,6 +538,7 @@ void PairBase::set_point_obs(int i_obs, const char *sid, exit(1); } + typ_sa.set(i_obs, typ); sid_sa.set(i_obs, sid); lat_na.set(i_obs, lat); lon_na.set(i_obs, lon); @@ -512,7 +550,7 @@ void PairBase::set_point_obs(int i_obs, const char *sid, elv_na.set(i_obs, elv); o_na.set(i_obs, o); o_qc_sa.set(i_obs, qc); - set_climo(i_obs, o, cmn, csd); + set_climo(i_obs, o, cpi); return; } @@ -626,7 +664,7 @@ ob_val_t PairBase::compute_percentile(string obs_key, int perc) { //////////////////////////////////////////////////////////////////////// -void PairBase::print_obs_summary(){ +void PairBase::print_obs_summary() const { if(!IsPointVx) return; @@ -637,7 +675,7 @@ void PairBase::print_obs_summary(){ // iterate over ordered list map keys in the station id map for(int i=0; iname() << "\" station ID masking region.\n"; + + // Print warning if no weights are provided + if(!mask_sid_ptr->has_weights()) { + mlog << Warning << "\n" << method_name + << "station ID point weighting requested but no weights " + << "were defined in the \"" << mask_sid_ptr->name() + << "\" station ID mask. 
Using default weights of " + << default_weight << ".\n\n"; + } + + // Loop through the point observations + for(int i_obs=0; i_obshas_sid(sid_sa[i_obs], wgt)) { + wgt_na.set(i_obs, wgt); + } + else { + mlog << Warning << "\n" << method_name + << "no match found for station id: " + << sid_sa[i_obs] << "\n\n"; + } + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void PairBase::add_grid_obs(double o, + const ClimoPntInfo &cpi, double wgt) { // @@ -758,7 +842,7 @@ void PairBase::add_grid_obs(double o, double cmn, double csd, o_na.add(o); wgt_na.add(wgt); - add_climo(o, cmn, csd); + add_climo(o, cpi); // Increment the number of observations n_obs += 1; @@ -768,11 +852,11 @@ void PairBase::add_grid_obs(double o, double cmn, double csd, //////////////////////////////////////////////////////////////////////// -void PairBase::add_grid_obs(double x, double y, - double o, double cmn, double csd, +void PairBase::add_grid_obs(double x, double y, double o, + const ClimoPntInfo &cpi, double wgt) { - add_grid_obs(o, cmn, csd, wgt); + add_grid_obs(o, cpi, wgt); x_na.add(x); y_na.add(y); @@ -782,7 +866,7 @@ void PairBase::add_grid_obs(double x, double y, //////////////////////////////////////////////////////////////////////// -double PairBase::process_obs(VarInfo *vinfo, double v) { +double PairBase::process_obs(const VarInfo *vinfo, double v) const { if(!vinfo) return v; @@ -808,281 +892,1614 @@ double PairBase::process_obs(VarInfo *vinfo, double v) { //////////////////////////////////////////////////////////////////////// // -// Begin miscellaneous utility functions +// Code for class VxPairBase // //////////////////////////////////////////////////////////////////////// -void find_vert_lvl(const DataPlaneArray &dpa, const double obs_lvl, - int &i_blw, int &i_abv) { - int i; - double dist, dist_blw, dist_abv; +VxPairBase::VxPairBase() { + init_from_scratch(); +} - // Check for no data - if(dpa.n_planes() == 0) { - i_blw = i_abv = 
bad_data_int; - return; - } +//////////////////////////////////////////////////////////////////////// - // Find the closest levels above and below the observation - dist_blw = dist_abv = 1.0e30; - for(i=0; i= 0 && fabs(dist) < dist_abv) { - dist_abv = fabs(dist); - i_abv = i; - } - } + init_from_scratch(); - // Check if the observation is above the forecast range - if(is_eq(dist_blw, 1.0e30) && !is_eq(dist_abv, 1.0e30)) { + assign(vx_pb); +} - // Set the index below to the index above and perform no vertical - // interpolation - i_blw = i_abv; - } - // Check if the observation is below the forecast range - else if(!is_eq(dist_blw, 1.0e30) && is_eq(dist_abv, 1.0e30)) { +//////////////////////////////////////////////////////////////////////// - // Set the index above to the index below and perform no vertical - // interpolation - i_abv = i_blw; - } - // Check if an error occurred - else if(is_eq(dist_blw, 1.0e30) && is_eq(dist_abv, 1.0e30)) { +VxPairBase & VxPairBase::operator=(const VxPairBase &vx_pb) { - mlog << Error << "\nfind_vert_lvl() -> " - << "could not find a level above and/or below the " - << "observation level of " << obs_lvl << ".\n\n"; - exit(1); - } + if(this == &vx_pb) return *this; - return; + assign(vx_pb); + + return *this; } //////////////////////////////////////////////////////////////////////// -double compute_interp(const DataPlaneArray &dpa, - const double obs_x, const double obs_y, - const double obs_v, const double cmn, const double csd, - const InterpMthd method, const int width, - const GridTemplateFactory::GridTemplates shape, - const bool wrap_lon, - const double thresh, - const bool spfh_flag, const LevelType lvl_typ, - const double to_lvl, const int i_blw, const int i_abv, - const SingleThresh *cat_thresh) { - double v, v_blw, v_abv, t; - - // Check for no data - if(dpa.n_planes() == 0) return bad_data_double; - - v_blw = compute_horz_interp(dpa[i_blw], obs_x, obs_y, obs_v, cmn, csd, - method, width, shape, wrap_lon, - thresh, 
cat_thresh); +void VxPairBase::init_from_scratch() { - if(i_blw == i_abv) { - v = v_blw; - } - else { - v_abv = compute_horz_interp(dpa[i_abv], obs_x, obs_y, obs_v, cmn, csd, - method, width, shape, wrap_lon, - thresh, cat_thresh); + fcst_info = (VarInfo *) nullptr; + obs_info = (VarInfo *) nullptr; - // Check for bad data prior to vertical interpolation - if(is_bad_data(v_blw) || is_bad_data(v_abv)) { - return bad_data_double; - } + fclm_info = (VarInfo *) nullptr; + oclm_info = (VarInfo *) nullptr; - // If verifying specific humidity, do vertical interpolation in - // the natural log of q - if(spfh_flag) { - t = compute_vert_pinterp(log(v_blw), dpa.lower(i_blw), - log(v_abv), dpa.lower(i_abv), - to_lvl); - v = exp(t); - } - // Vertically interpolate to the observation pressure level - else if(lvl_typ == LevelType_Pres) { - v = compute_vert_pinterp(v_blw, dpa.lower(i_blw), - v_abv, dpa.lower(i_abv), - to_lvl); - } - // Vertically interpolate to the observation height - else { - v = compute_vert_zinterp(v_blw, dpa.lower(i_blw), - v_abv, dpa.lower(i_abv), - to_lvl); - } - } + clear(); - return v; + return; } - //////////////////////////////////////////////////////////////////////// -void get_interp_points(const DataPlaneArray &dpa, - const double obs_x, const double obs_y, - const InterpMthd method, const int width, - const GridTemplateFactory::GridTemplates shape, - const bool wrap_lon, - const double thresh, const bool spfh_flag, - const LevelType lvl_typ, const double to_lvl, - const int i_blw, const int i_abv, - NumArray &interp_pnts) { +void VxPairBase::clear() { - // Initialize - interp_pnts.erase(); + if(fcst_info) { delete fcst_info; fcst_info = (VarInfo *) nullptr; } + if(obs_info) { delete obs_info; obs_info = (VarInfo *) nullptr; } - // Check for no data - if(dpa.n_planes() == 0) return; + if(fclm_info) { delete fclm_info; fclm_info = (VarInfo *) nullptr; } + if(oclm_info) { delete oclm_info; oclm_info = (VarInfo *) nullptr; } - double v; - int i, n_vld; 
- NumArray pts_blw, pts_abv; - GridTemplateFactory gtf; - const GridTemplate* gt = gtf.buildGT(shape, width, wrap_lon); + desc.clear(); - // Get interpolation points below the observation - pts_blw = interp_points(dpa[i_blw], *gt, obs_x, obs_y); + interp_thresh = 0; - // For multiple levels, get interpolation points above - if(i_blw != i_abv) { - pts_abv = interp_points(dpa[i_abv], *gt, obs_x, obs_y); + fcst_dpa.clear(); + fcmn_dpa.clear(); + fcsd_dpa.clear(); + ocmn_dpa.clear(); + ocsd_dpa.clear(); - if(pts_abv.n() != pts_blw.n()) { - mlog << Error << "\nget_interp_points() -> " - << "the number of interpolation points above (" - << pts_abv.n() << ") and below (" << pts_blw.n() - << ") should match!\n\n"; - exit(1); - } - } + sid_inc_filt.clear(); + sid_exc_filt.clear(); + obs_qty_inc_filt.clear(); + obs_qty_exc_filt.clear(); - // Interpolate each point vertically - for(i=0, n_vld=0; isize()) < thresh) { - interp_pnts.erase(); - } + n_msg_typ = 0; + n_mask = 0; + n_interp = 0; + n_vx = 0; - if ( gt ) { delete gt; gt = (const GridTemplate *) nullptr; } + pb_ptr.clear(); + + n_try = 0; + rej_sid = 0; + rej_var = 0; + rej_vld = 0; + rej_obs = 0; + rej_grd = 0; + rej_lvl = 0; + rej_topo = 0; + rej_qty = 0; + + rej_typ.clear(); + rej_mask.clear(); + rej_fcst.clear(); + rej_cmn.clear(); + rej_csd.clear(); + rej_mpr.clear(); + rej_dup.clear(); return; } //////////////////////////////////////////////////////////////////////// -bool set_climo_flag(const NumArray &f_na, const NumArray &c_na) { +void VxPairBase::assign(const VxPairBase &vx_pb) { - // The climo values must have non-zero, consistent length and - // cannot all be bad data - if(c_na.n() != f_na.n() || c_na.n() < 1 || is_bad_data(c_na.max())) { - return false; - } + clear(); - return true; -} + set_fcst_info(vx_pb.fcst_info); + set_obs_info(vx_pb.obs_info); -//////////////////////////////////////////////////////////////////////// + set_fcst_climo_info(vx_pb.fclm_info); + set_obs_climo_info(vx_pb.oclm_info); -void 
derive_climo_vals(const ClimoCDFInfo *cdf_info_ptr, - double m, double s, - NumArray &climo_vals) { + desc = vx_pb.desc; - // Initialize - climo_vals.erase(); + interp_thresh = vx_pb.interp_thresh; - // Check for no work to do - if(!cdf_info_ptr) return; + fcst_dpa = vx_pb.fcst_dpa; + fcmn_dpa = vx_pb.fcmn_dpa; + fcsd_dpa = vx_pb.fcsd_dpa; + ocmn_dpa = vx_pb.ocmn_dpa; + ocsd_dpa = vx_pb.ocsd_dpa; - // cdf_info_ptr->cdf_ta starts with >=0.0 and ends with >=1.0. - // The number of bins is the number of thresholds minus 1. + fcst_ut = vx_pb.fcst_ut; + beg_ut = vx_pb.beg_ut; + end_ut = vx_pb.end_ut; - // Check for bad mean value - if(is_bad_data(m) || cdf_info_ptr->cdf_ta.n() < 2) { - return; - } - // Single climo bin - else if(cdf_info_ptr->cdf_ta.n() == 2) { - climo_vals.add(m); - } - // Check for bad standard deviation value - else if(is_bad_data(s)) { - return; - } - // Extract climo distribution values - else { + sid_inc_filt = vx_pb.sid_inc_filt; + sid_exc_filt = vx_pb.sid_exc_filt; + obs_qty_inc_filt = vx_pb.obs_qty_inc_filt; + obs_qty_exc_filt = vx_pb.obs_qty_exc_filt; - // Skip the first and last thresholds - for(int i=1; icdf_ta.n()-1; i++) { - climo_vals.add( - normal_cdf_inv(cdf_info_ptr->cdf_ta[i].get_value(), m, s)); - } - } + mpr_column = vx_pb.mpr_column; + mpr_thresh = vx_pb.mpr_thresh; + + msg_typ_sfc = vx_pb.msg_typ_sfc; + msg_typ_lnd = vx_pb.msg_typ_lnd; + msg_typ_wtr = vx_pb.msg_typ_wtr; + + sfc_info = vx_pb.sfc_info; + + set_size(vx_pb.n_msg_typ, vx_pb.n_mask, vx_pb.n_interp); + + pb_ptr = vx_pb.pb_ptr; + + n_try = vx_pb.n_try; + rej_typ = vx_pb.rej_typ; + rej_mask = vx_pb.rej_mask; + rej_fcst = vx_pb.rej_fcst; + rej_cmn = vx_pb.rej_cmn; + rej_csd = vx_pb.rej_csd; + rej_mpr = vx_pb.rej_mpr; + rej_dup = vx_pb.rej_dup; + rej_typ = vx_pb.rej_typ; + rej_mask = vx_pb.rej_mask; + rej_fcst = vx_pb.rej_fcst; + rej_cmn = vx_pb.rej_cmn; + rej_csd = vx_pb.rej_csd; + rej_mpr = vx_pb.rej_mpr; + rej_dup = vx_pb.rej_dup; return; } 
//////////////////////////////////////////////////////////////////////// -NumArray derive_climo_prob(const ClimoCDFInfo *cdf_info_ptr, - const NumArray &mn_na, const NumArray &sd_na, - const SingleThresh &othresh) { - int i, n_mn, n_sd; - NumArray climo_prob, climo_vals; - double prob; +void VxPairBase::copy_var_info(const VarInfo *info, VarInfo *©) { + VarInfoFactory f; - // Number of valid climo mean and standard deviation - n_mn = mn_na.n_valid(); - n_sd = sd_na.n_valid(); + // Deallocate, if necessary + if(copy) { delete copy; copy = (VarInfo *) nullptr; } - // Check for constant climo probability - if(!is_bad_data(prob = othresh.get_climo_prob())) { + // Perform a deep copy + copy = f.new_var_info(info->file_type()); + *copy = *info; - mlog << Debug(4) - << "For threshold " << othresh.get_str() + return; +} + +//////////////////////////////////////////////////////////////////////// + +int VxPairBase::three_to_one(int i_msg_typ, int i_mask, int i_interp) const { + + int n = (i_interp * n_mask + i_mask)*n_msg_typ + i_msg_typ; + + if(n < 0 || n >= n_vx) { + mlog << Error << "\nVxPairBase::three_to_one() -> " + << "range check error for n (" << n << " < 0 or n >= " << n_vx + << ") for i_msg_typ (" << i_msg_typ << "), i_mask (" + << i_mask << "), i_interp (" << i_interp << "), and n_msg_typ (" + << n_msg_typ << "), n_mask (" << n_mask << "), n_interp (" + << n_interp << ")!\n\n"; + exit(1); + } + + return n; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_info(const VarInfo *info) { + + copy_var_info(info, fcst_info); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_info(const VarInfo *info) { + + copy_var_info(info, obs_info); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_climo_info(const VarInfo *info) { + + copy_var_info(info, fclm_info); + + return; +} + 
+//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_climo_info(const VarInfo *info) { + + copy_var_info(info, oclm_info); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_desc(const char *s) { + + desc = s; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_interp_thresh(double t) { + + interp_thresh = t; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_dpa(const DataPlaneArray &dpa) { + + fcst_dpa = dpa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_climo_mn_dpa(const DataPlaneArray &dpa) { + + fcmn_dpa = dpa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_climo_sd_dpa(const DataPlaneArray &dpa) { + + fcsd_dpa = dpa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_climo_mn_dpa(const DataPlaneArray &dpa) { + + ocmn_dpa = dpa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_climo_sd_dpa(const DataPlaneArray &dpa) { + + ocsd_dpa = dpa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_ut(const unixtime ut) { + + fcst_ut = ut; + + // Set for all PairBase instances, used for duplicate logic + for(auto &x : pb_ptr) x->set_fcst_ut(ut); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_beg_ut(const unixtime ut) { + + beg_ut = ut; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_end_ut(const unixtime ut) { + + end_ut = ut; + + return; +} + 
+//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_sid_inc_filt(const StringArray &sa) { + + sid_inc_filt = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_sid_exc_filt(const StringArray &sa) { + + sid_exc_filt = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_qty_inc_filt(const StringArray &sa) { + + obs_qty_inc_filt = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_qty_exc_filt(const StringArray &sa) { + + obs_qty_exc_filt = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_size(int types, int masks, int interps) { + + // Store the dimensions for the PairBase array + n_msg_typ = types; + n_mask = masks; + n_interp = interps; + n_vx = types * masks * interps; + + // Resize the PairBase pointer vector + pb_ptr.resize(n_vx); + + // Initialize 3-D rejection count vectors + vector rej_counts(n_vx, 0); + rej_typ = rej_counts; + rej_mask = rej_counts; + rej_fcst = rej_counts; + rej_cmn = rej_counts; + rej_csd = rej_counts; + rej_mpr = rej_counts; + rej_dup = rej_counts; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_msg_typ(int i_msg_typ, const char *name) { + + for(int i_mask=0; i_maskset_msg_typ(name); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_msg_typ_vals(int i_msg_typ, const StringArray &sa) { + + for(int i_mask=0; i_maskset_msg_typ_vals(sa); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_mask_area(int i_mask, const char *name, + MaskPlane *mp_ptr) { + + for(int i_msg_typ=0; i_msg_typset_mask_name(name); + 
pb_ptr[n]->set_mask_area_ptr(mp_ptr); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_mask_sid(int i_mask, const char *name, + MaskSID *ms_ptr) { + + if(!ms_ptr) return; + + for(int i_msg_typ=0; i_msg_typset_mask_name(name); + pb_ptr[n]->set_mask_sid_ptr(ms_ptr); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_mask_llpnt(int i_mask, const char *name, + MaskLatLon *llpnt_ptr) { + + for(int i_msg_typ=0; i_msg_typset_mask_name(name); + pb_ptr[n]->set_mask_llpnt_ptr(llpnt_ptr); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_interp(int i_interp, + const char *interp_mthd_str, int width, + GridTemplateFactory::GridTemplates shape) { + + for(int i_msg_typ=0; i_msg_typset_interp_mthd(interp_mthd_str); + pb_ptr[n]->set_interp_wdth(width); + pb_ptr[n]->set_interp_shape(shape); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_interp(int i_interp, + InterpMthd mthd, int width, + GridTemplateFactory::GridTemplates shape) { + + for(int i_msg_typ=0; i_msg_typset_interp_mthd(mthd); + pb_ptr[n]->set_interp_wdth(width); + pb_ptr[n]->set_interp_shape(shape); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_mpr_thresh(const StringArray &sa, const ThreshArray &ta) { + + // Check for constant length + if(sa.n() != ta.n()) { + mlog << Error << "\nVxPairBase::set_mpr_thresh() -> " + << "the \"" << conf_key_mpr_column << "\" (" + << write_css(sa) << ") and \"" << conf_key_mpr_thresh + << "\" (" << write_css(ta) + << ") config file entries must have the same length!\n\n"; + exit(1); + } + + mpr_column = sa; + mpr_thresh = ta; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void 
VxPairBase::set_climo_cdf_info_ptr(const ClimoCDFInfo *info) { + + for(auto &x : pb_ptr) x->set_climo_cdf_info_ptr(info); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_msg_typ_sfc(const StringArray &sa) { + + msg_typ_sfc = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_msg_typ_lnd(const StringArray &sa) { + + msg_typ_lnd = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_msg_typ_wtr(const StringArray &sa) { + + msg_typ_wtr = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_sfc_info(const SurfaceInfo &si) { + + sfc_info = si; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +int VxPairBase::get_n_pair() const { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::get_n_pair() -> " + << "set_size() has not been called yet!\n\n"; + } + + int n = 0; + + for(auto &x : pb_ptr) n += x->n_obs; + + return n; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_duplicate_flag(DuplicateType duplicate_flag) { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::set_duplicate_flag() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->set_check_unique(duplicate_flag == DuplicateType::Unique); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_summary(ObsSummary s) { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::set_obs_summary() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->set_obs_summary(s); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_perc_value(int percentile) { + + if(n_vx == 0) 
{ + mlog << Warning << "\nVxPairBase::set_obs_perc_value() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->set_obs_perc_value(percentile); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::print_obs_summary() const { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::print_obs_summary() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->print_obs_summary(); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::calc_obs_summary() { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::calc_obs_summary() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->calc_obs_summary(); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_point_weight(const PointWeightType wgt_flag) { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::set_point_weight() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->set_point_weight(wgt_flag); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_sid( + const char *pnt_obs_str, const char *hdr_sid_str) { + bool keep = true; + + // Check the station ID inclusion and exclusion lists + if((sid_inc_filt.n() && !sid_inc_filt.has(hdr_sid_str)) || + (sid_exc_filt.n() && sid_exc_filt.has(hdr_sid_str))) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "station id:\n" + << pnt_obs_str << "\n"; + } + + rej_sid++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_var( + const char *pnt_obs_str, const char 
*var_name, int grib_code) { + bool keep = true; + + const auto obs_info_grib = (VarInfoGrib *) obs_info; + + // Check for matching variable name or GRIB code + if((var_name != nullptr) && (m_strlen(var_name) > 0)) { + + if(var_name != obs_info->name()) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "variable name:\n" + << pnt_obs_str << "\n"; + } + + rej_var++; + keep = false; + } + } + else if(obs_info_grib && obs_info_grib->code() != nint(grib_code)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "GRIB code:\n" + << pnt_obs_str << "\n"; + } + + rej_var++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_qty( + const char *pnt_obs_str, const char *obs_qty) { + bool keep = true; + + // Check the observation quality include and exclude options + if((obs_qty_inc_filt.n() > 0 && !obs_qty_inc_filt.has(obs_qty)) || + (obs_qty_exc_filt.n() > 0 && obs_qty_exc_filt.has(obs_qty))) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "quality control string:\n" + << pnt_obs_str << "\n"; + } + + rej_qty++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_vld( + const char *pnt_obs_str, unixtime hdr_ut) { + bool keep = true; + + // Check the observation valid time + if(hdr_ut < beg_ut || hdr_ut > end_ut) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << 
fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "valid time:\n" + << pnt_obs_str << "\n"; + } + + rej_vld++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_obs( + const char *pnt_obs_str, double &obs_v) { + bool keep = true; + + // Apply observation processing logic + obs_v = pb_ptr[0]->process_obs(obs_info, obs_v); + + // Check whether the observation value contains valid data + if(is_bad_data(obs_v)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "with bad data value:\n" + << pnt_obs_str << "\n"; + } + + rej_obs++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_grd( + const char *pnt_obs_str, const Grid &gr, + double hdr_lat, double hdr_lon, + double &obs_x, double &obs_y) { + bool keep = true; + + // Convert the lat/lon value to x/y + gr.latlon_to_xy(hdr_lat, -1.0*hdr_lon, obs_x, obs_y); + int x = nint(obs_x); + int y = nint(obs_y); + + // Check if the observation's lat/lon is on the grid + if(((x < 0 || x >= gr.nx()) && !gr.wrap_lon()) || + y < 0 || y >= gr.ny()) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "off the grid where (x, y) = (" << x << ", " << y + << ") and grid (nx, ny) = (" << gr.nx() << ", " << gr.ny() << "):\n" + << pnt_obs_str << "\n"; + } + + rej_grd++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_topo( + const char *pnt_obs_str, const Grid &gr, + double obs_x, double obs_y, + const char 
*hdr_typ_str, double hdr_elv) { + bool keep = true; + + // Check for a large topography difference + if(sfc_info.topo_ptr && msg_typ_sfc.reg_exp_match(hdr_typ_str)) { + + // Interpolate model topography to observation location + double topo = compute_horz_interp( + *sfc_info.topo_ptr, obs_x, obs_y, hdr_elv, + InterpMthd::Bilin, 2, + GridTemplateFactory::GridTemplates::Square, + gr.wrap_lon(), 1.0); + + // Skip bad topography values + if(is_bad_data(hdr_elv) || is_bad_data(topo)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "due to bad topography values where observation elevation = " + << hdr_elv << " and model topography = " << topo << ":\n" + << pnt_obs_str << "\n"; + } + + rej_topo++; + keep = false; + } + + // Check the topography difference threshold + else if(!sfc_info.topo_use_obs_thresh.check(topo - hdr_elv)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "due to topography difference where observation elevation (" + << hdr_elv << ") minus model topography (" << topo << ") = " + << topo - hdr_elv << " is not " + << sfc_info.topo_use_obs_thresh.get_str() << ":\n" + << pnt_obs_str << "\n"; + } + + rej_topo++; + keep = false; + } + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_lvl( + const char *pnt_obs_str, const char *hdr_typ_str, + double obs_lvl, double obs_hgt) { + bool keep = true; + + // For pressure levels, check if the observation pressure level + // falls in the requested range. 
+ if(obs_info->level().type() == LevelType_Pres) { + + if(obs_lvl < obs_info->level().lower() || + obs_lvl > obs_info->level().upper()) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "pressure level value:\n" + << pnt_obs_str << "\n"; + } + + rej_lvl++; + keep = false; + } + } + // For accumulations, check if the observation accumulation interval + // matches the requested interval. + else if(obs_info->level().type() == LevelType_Accum) { + + if(obs_lvl < obs_info->level().lower() || + obs_lvl > obs_info->level().upper()) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "accumulation interval:\n" + << pnt_obs_str << "\n"; + } + + rej_lvl++; + keep = false; + } + } + // For all other level types (VertLevel, RecNumber, NoLevel), + // check for a surface message type or if the observation height + // falls within the requested range. 
+ else { + + if(!msg_typ_sfc.reg_exp_match(hdr_typ_str) && + (obs_hgt < obs_info->level().lower() || + obs_hgt > obs_info->level().upper())) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "level value:\n" + << pnt_obs_str << "\n"; + } + + rej_lvl++; + keep = false; + } + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_typ( + const char *pnt_obs_str, int i_msg_typ, + const char *hdr_typ_str) { + bool keep = true; + + int n = three_to_one(i_msg_typ, 0, 0); + + // Check for a matching message type + if(!pb_ptr[n]->msg_typ_vals.has(hdr_typ_str)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "message type:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_typ, i_msg_typ); + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_mask( + const char *pnt_obs_str, int i_msg_typ, int i_mask, int x, int y, + const char *hdr_sid_str, double hdr_lat, double hdr_lon) { + bool keep = true; + + int n = three_to_one(i_msg_typ, i_mask, 0); + + // Check for the obs falling within the masking region + if( pb_ptr[n]->mask_area_ptr != nullptr && + !pb_ptr[n]->mask_area_ptr->s_is_on(x, y)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on spatial masking region:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_mask, i_msg_typ, i_mask); + keep = false; + } + // Otherwise, check for the masking SID list + else if( pb_ptr[n]->mask_sid_ptr != 
nullptr && + !pb_ptr[n]->mask_sid_ptr->has_sid(hdr_sid_str)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on masking station id list:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_mask, i_msg_typ, i_mask); + keep = false; + } + // Otherwise, check observation lat/lon thresholds + else if( pb_ptr[n]->mask_llpnt_ptr != nullptr && + (!pb_ptr[n]->mask_llpnt_ptr->lat_thresh.check(hdr_lat) || + !pb_ptr[n]->mask_llpnt_ptr->lon_thresh.check(hdr_lon))) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on latitude/longitude thesholds:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_mask, i_msg_typ, i_mask); + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_climo( + const char *pnt_obs_str, + int i_msg_typ, int i_mask, int i_interp, + const Grid &gr, double obs_x, double obs_y, + double obs_v, double obs_lvl, double obs_hgt, + ClimoPntInfo &cpi) { + bool keep = true; + + int n = three_to_one(i_msg_typ, i_mask, i_interp); + + bool spfh_flag = fcst_info->is_specific_humidity() && + obs_info->is_specific_humidity(); + + // Compute the interpolated forecast value using the + // observation pressure level or height + double to_lvl = (fcst_info->level().type() == LevelType_Pres ? 
+ obs_lvl : obs_hgt); + int lvl_blw, lvl_abv; + + // Initialize + cpi.clear(); + + // Forecast climatology mean + if(keep && fcmn_dpa.n_planes() > 0) { + + find_vert_lvl(fcmn_dpa, to_lvl, lvl_blw, lvl_abv); + + cpi.fcmn = compute_interp(fcmn_dpa, obs_x, obs_y, obs_v, nullptr, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv); + + // Check for bad data + if(is_bad_data(cpi.fcmn)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on bad forecast climatological mean value:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_cmn, i_msg_typ, i_mask, i_interp); + keep = false; + } + } + + // Observation climatology mean + if(keep && ocmn_dpa.n_planes() > 0) { + + find_vert_lvl(ocmn_dpa, to_lvl, lvl_blw, lvl_abv); + + cpi.ocmn = compute_interp(ocmn_dpa, obs_x, obs_y, obs_v, nullptr, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv); + + // Check for bad data + if(is_bad_data(cpi.ocmn)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on bad observation climatological mean value:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_cmn, i_msg_typ, i_mask, i_interp); + keep = false; + } + } + + // Check for valid interpolation options + if((fcsd_dpa.n_planes() > 0 || + ocsd_dpa.n_planes() > 0) && + (pb_ptr[n]->interp_mthd == InterpMthd::Min || + pb_ptr[n]->interp_mthd == InterpMthd::Max || + pb_ptr[n]->interp_mthd == InterpMthd::Median || + pb_ptr[n]->interp_mthd == InterpMthd::Best)) { + mlog << Warning << 
"\nVxPairBase::add_point_obs() -> " + << "applying the " << interpmthd_to_string(pb_ptr[n]->interp_mthd) + << " interpolation method to climatological spread " + << "may cause unexpected results.\n\n"; + } + + // Forecast climatology spread + if(keep && fcsd_dpa.n_planes() > 0) { + + find_vert_lvl(fcsd_dpa, to_lvl, lvl_blw, lvl_abv); + + cpi.fcsd = compute_interp(fcsd_dpa, obs_x, obs_y, obs_v, nullptr, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv); + + // Check for bad data + if(is_bad_data(cpi.fcsd)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on bad forecast climatological spread value:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_csd, i_msg_typ, i_mask, i_interp); + keep = false; + } + } + + // Observation climatology spread + if(keep && ocsd_dpa.n_planes() > 0) { + + find_vert_lvl(ocsd_dpa, to_lvl, lvl_blw, lvl_abv); + + cpi.ocsd = compute_interp(ocsd_dpa, obs_x, obs_y, obs_v, nullptr, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv); + + // Check for bad data + if(is_bad_data(cpi.ocsd)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on bad observation climatological spread value:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_csd, i_msg_typ, i_mask, i_interp); + keep = false; + } + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_fcst( + const char *pnt_obs_str, + int i_msg_typ, int i_mask, 
int i_interp, + const char *hdr_typ_str, const Grid &gr, + double obs_x, double obs_y, double hdr_elv, + double obs_v, double obs_lvl, double obs_hgt, + const ClimoPntInfo &cpi, double &fcst_v) { + bool keep = true; + + int n = three_to_one(i_msg_typ, i_mask, i_interp); + + // For surface verification, apply land/sea and topo masks + if((sfc_info.land_ptr || sfc_info.topo_ptr) && + (msg_typ_sfc.reg_exp_match(hdr_typ_str))) { + + bool is_land = msg_typ_lnd.has(hdr_typ_str); + + // Check for a single forecast DataPlane + if(fcst_dpa.n_planes() != 1) { + mlog << Error << "\nVxPairBase::add_point_obs() -> " + << "unexpected number of forecast levels (" + << fcst_dpa.n_planes() + << ") for surface verification! Set \"land_mask.flag\" and " + << "\"topo_mask.flag\" to false to disable this check.\n\n"; + exit(1); + } + + fcst_v = compute_sfc_interp(fcst_dpa[0], obs_x, obs_y, hdr_elv, obs_v, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, sfc_info, is_land); + } + // Otherwise, compute interpolated value + else { + + bool spfh_flag = fcst_info->is_specific_humidity() && + obs_info->is_specific_humidity(); + + // Compute the interpolated forecast value using the + // observation pressure level or height + double to_lvl = (fcst_info->level().type() == LevelType_Pres ? 
+ obs_lvl : obs_hgt); + int lvl_blw, lvl_abv; + + find_vert_lvl(fcst_dpa, to_lvl, lvl_blw, lvl_abv); + + fcst_v = compute_interp(fcst_dpa, obs_x, obs_y, obs_v, &cpi, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv); + } + + // Check for bad data + if(is_bad_data(fcst_v)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on bad data in the " + << interpmthd_to_string(pb_ptr[n]->interp_mthd) << "(" + << pb_ptr[n]->interp_wdth * pb_ptr[n]->interp_wdth + << ") interpolated forecast value:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_fcst, i_msg_typ, i_mask, i_interp); + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::inc_count(vector &rej, int i_msg_typ) { + + for(int i_mask=0; i_mask &rej, int i_msg_typ, int i_mask) { + + for(int i_interp=0; i_interp &rej, int i_msg_typ, int i_mask, int i_interp) { + + rej[three_to_one(i_msg_typ, i_mask, i_interp)]++; + + return; +} + +//////////////////////////////////////////////////////////////////////// +// +// Begin miscellaneous utility functions +// +//////////////////////////////////////////////////////////////////////// + +void find_vert_lvl(const DataPlaneArray &dpa, const double obs_lvl, + int &i_blw, int &i_abv) { + int i; + double dist, dist_blw, dist_abv; + + // Initialize + i_blw = i_abv = bad_data_int; + + // Check for no data + if(dpa.n_planes() == 0) return; + + // Find the closest levels above and below the observation + dist_blw = dist_abv = 1.0e30; + for(i=0; i= 0 && fabs(dist) < dist_abv) { + dist_abv = fabs(dist); + i_abv = i; + } + } + + // Check if the observation is above the forecast range + if(is_eq(dist_blw, 1.0e30) && 
!is_eq(dist_abv, 1.0e30)) { + + // Set the index below to the index above and perform no vertical + // interpolation + i_blw = i_abv; + } + // Check if the observation is below the forecast range + else if(!is_eq(dist_blw, 1.0e30) && is_eq(dist_abv, 1.0e30)) { + + // Set the index above to the index below and perform no vertical + // interpolation + i_abv = i_blw; + } + // Check if an error occurred + else if(is_eq(dist_blw, 1.0e30) && is_eq(dist_abv, 1.0e30)) { + + mlog << Error << "\nfind_vert_lvl() -> " + << "could not find a level above and/or below the " + << "observation level of " << obs_lvl << ".\n\n"; + exit(1); + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +double compute_interp(const DataPlaneArray &dpa, + const double obs_x, const double obs_y, + const double obs_v, const ClimoPntInfo *cpi, + const InterpMthd method, const int width, + const GridTemplateFactory::GridTemplates shape, + const bool wrap_lon, + const double thresh, + const bool spfh_flag, const LevelType lvl_typ, + const double to_lvl, const int i_blw, const int i_abv, + const SingleThresh *cat_thresh) { + double v, v_blw, v_abv, t; + + // Check for no data + if(dpa.n_planes() == 0) return bad_data_double; + + v_blw = compute_horz_interp(dpa[i_blw], obs_x, obs_y, obs_v, cpi, + method, width, shape, wrap_lon, + thresh, cat_thresh); + + if(i_blw == i_abv) { + v = v_blw; + } + else { + v_abv = compute_horz_interp(dpa[i_abv], obs_x, obs_y, obs_v, cpi, + method, width, shape, wrap_lon, + thresh, cat_thresh); + + // Check for bad data prior to vertical interpolation + if(is_bad_data(v_blw) || is_bad_data(v_abv)) { + return bad_data_double; + } + + // If verifying specific humidity, do vertical interpolation in + // the natural log of q + if(spfh_flag) { + t = compute_vert_pinterp(log(v_blw), dpa.lower(i_blw), + log(v_abv), dpa.lower(i_abv), + to_lvl); + v = exp(t); + } + // Vertically interpolate to the observation pressure level + else 
if(lvl_typ == LevelType_Pres) { + v = compute_vert_pinterp(v_blw, dpa.lower(i_blw), + v_abv, dpa.lower(i_abv), + to_lvl); + } + // Vertically interpolate to the observation height + else { + v = compute_vert_zinterp(v_blw, dpa.lower(i_blw), + v_abv, dpa.lower(i_abv), + to_lvl); + } + } + + return v; +} + + +//////////////////////////////////////////////////////////////////////// + +void get_interp_points(const DataPlaneArray &dpa, + const double obs_x, const double obs_y, + const InterpMthd method, const int width, + const GridTemplateFactory::GridTemplates shape, + const bool wrap_lon, + const double thresh, const bool spfh_flag, + const LevelType lvl_typ, const double to_lvl, + const int i_blw, const int i_abv, + NumArray &interp_pnts) { + + // Initialize + interp_pnts.erase(); + + // Check for no data + if(dpa.n_planes() == 0) return; + + double v; + int i, n_vld; + NumArray pts_blw, pts_abv; + GridTemplateFactory gtf; + const GridTemplate* gt = gtf.buildGT(shape, width, wrap_lon); + + // Get interpolation points below the observation + pts_blw = interp_points(dpa[i_blw], *gt, obs_x, obs_y); + + // For multiple levels, get interpolation points above + if(i_blw != i_abv) { + pts_abv = interp_points(dpa[i_abv], *gt, obs_x, obs_y); + + if(pts_abv.n() != pts_blw.n()) { + mlog << Error << "\nget_interp_points() -> " + << "the number of interpolation points above (" + << pts_abv.n() << ") and below (" << pts_blw.n() + << ") should match!\n\n"; + exit(1); + } + } + + // Interpolate each point vertically + for(i=0, n_vld=0; isize()) < thresh) { + interp_pnts.erase(); + } + + if ( gt ) { delete gt; gt = (const GridTemplate *) nullptr; } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +bool set_climo_flag(const NumArray &f_na, const NumArray &c_na) { + + // The climo values must have non-zero, consistent length and + // cannot all be bad data + if(c_na.n() != f_na.n() || c_na.n() < 1 || is_bad_data(c_na.max())) { + return 
false; + } + + return true; +} + +//////////////////////////////////////////////////////////////////////// + +void derive_climo_vals(const ClimoCDFInfo *cdf_info_ptr, + double m, double s, + NumArray &climo_vals) { + + // Initialize + climo_vals.erase(); + + // Check for no work to do + if(!cdf_info_ptr) return; + + // cdf_info_ptr->cdf_ta starts with >=0.0 and ends with >=1.0. + // The number of bins is the number of thresholds minus 1. + + // Check for bad mean value + if(is_bad_data(m) || cdf_info_ptr->cdf_ta.n() < 2) { + return; + } + // Single climo bin + else if(cdf_info_ptr->cdf_ta.n() == 2) { + climo_vals.add(m); + } + // Check for bad standard deviation value + else if(is_bad_data(s)) { + return; + } + // Extract climo distribution values + else { + + // Skip the first and last thresholds + for(int i=1; icdf_ta.n()-1; i++) { + climo_vals.add( + normal_cdf_inv(cdf_info_ptr->cdf_ta[i].get_value(), m, s)); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +NumArray derive_climo_prob(const ClimoCDFInfo *cdf_info_ptr, + const NumArray &mn_na, const NumArray &sd_na, + const SingleThresh &othresh) { + int i, n_mn, n_sd; + NumArray climo_prob, climo_vals; + double prob; + + // Number of valid climo mean and standard deviation + n_mn = mn_na.n_valid(); + n_sd = sd_na.n_valid(); + + // Check for constant climo probability + prob = othresh.get_obs_climo_prob(); + if(!is_bad_data(prob)) { + + mlog << Debug(4) + << "For threshold " << othresh.get_str() << ", using a constant climatological probability value of " << prob << ".\n"; @@ -1206,3 +2623,33 @@ double derive_prob(const NumArray &na, const SingleThresh &st) { } //////////////////////////////////////////////////////////////////////// + +// Write the point observation in the MET point format for logging +ConcatString point_obs_to_string(const float *hdr_arr, const char *hdr_typ_str, + const char *hdr_sid_str, unixtime hdr_ut, + const char *obs_qty, const float 
*obs_arr, + const char *var_name) { + ConcatString obs_cs, name; + + if((var_name != nullptr) && (0 < m_strlen(var_name))) name << var_name; + else name << nint(obs_arr[1]); + + // + // Write the 11-column MET point format: + // Message_Type Station_ID Valid_Time(YYYYMMDD_HHMMSS) + // Lat(Deg North) Lon(Deg East) Elevation(msl) + // Var_Name(or GRIB_Code) Level Height(msl or agl) + // QC_String Observation_Value + // + obs_cs << " " + << hdr_typ_str << " " << hdr_sid_str << " " + << unix_to_yyyymmdd_hhmmss(hdr_ut) << " " + << hdr_arr[0] << " " << -1.0*hdr_arr[1] << " " + << hdr_arr[2] << " " << name << " " + << obs_arr[2] << " " << obs_arr[3] << " " + << obs_qty << " " << obs_arr[4]; + + return obs_cs; +} + +//////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_statistics/pair_base.h b/src/libcode/vx_statistics/pair_base.h index 3a0f869edb..a734d1af31 100644 --- a/src/libcode/vx_statistics/pair_base.h +++ b/src/libcode/vx_statistics/pair_base.h @@ -18,27 +18,40 @@ #include "vx_util.h" #include "vx_data2d.h" +#include "vx_data2d_grib.h" + +//////////////////////////////////////////////////////////////////////// + +static const int REJECT_DEBUG_LEVEL = 9; + +//////////////////////////////////////////////////////////////////////// struct ob_val_t { - unixtime ut; - double val; - std::string qc; + unixtime ut; + double val; + std::string qc; }; struct station_values_t { - std::string sid; - double lat; - double lon; - double x; - double y; - double wgt; - unixtime ut; - double lvl; - double elv; - double cmn; - double csd; - double summary_val; - std::vector obs; + + void clear(); + + std::string typ; + std::string sid; + double lat; + double lon; + double x; + double y; + double wgt; + unixtime ut; + double lvl; + double elv; + double fcmn; + double fcsd; + double ocmn; + double ocsd; + double summary_val; + std::vector obs; }; //////////////////////////////////////////////////////////////////////// @@ -63,10 +76,10 @@ 
class PairBase { ////////////////////////////////////////////////////////////////// // Masking area applied to the forecast and climo fields - ConcatString mask_name; + ConcatString mask_name; MaskPlane *mask_area_ptr; // Pointer to the masking MaskPlane // which is not allocated - StringArray *mask_sid_ptr; // Pointer to masking station ID list + MaskSID *mask_sid_ptr; // Pointer to masking station ID list // which is not allocated MaskLatLon *mask_llpnt_ptr; // Pointer to Lat/Lon thresholds // which is not allocated @@ -91,11 +104,14 @@ class PairBase { NumArray wgt_na; // Weight [n_obs] // Point and Grid Climatology Information - NumArray cmn_na; // Climatology mean [n_obs] - NumArray csd_na; // Climatology standard deviation [n_obs] - NumArray cdf_na; // Climatology cumulative distribution function [n_obs] + NumArray fcmn_na; // Forecast climatology mean [n_obs] + NumArray fcsd_na; // Forecast climatology standard deviation [n_obs] + NumArray ocmn_na; // Observation climatology mean [n_obs] + NumArray ocsd_na; // Observation climatology standard deviation [n_obs] + NumArray ocdf_na; // Observation climatology cumulative distribution function [n_obs] // Point Observation Information + StringArray typ_sa; // Message type [n_obs] StringArray sid_sa; // Station ID [n_obs] NumArray lat_na; // Latitude [n_obs] NumArray lon_na; // Longitude [n_obs] @@ -123,9 +139,9 @@ class PairBase { bool is_point_vx() const; - void set_mask_name(const char *); + void set_mask_name(const std::string &); void set_mask_area_ptr(MaskPlane *); - void set_mask_sid_ptr(StringArray *); + void set_mask_sid_ptr(MaskSID *); void set_mask_llpnt_ptr(MaskLatLon *); void set_climo_cdf_info_ptr(const ClimoCDFInfo *); @@ -154,34 +170,250 @@ class PairBase { ob_val_t compute_median(std::string sng_key); ob_val_t compute_percentile(std::string sng_key, int perc); - bool add_point_obs(const char *, double, double, double, double, + bool add_point_obs(const char *, const char *, + double, double, 
double, double, unixtime, double, double, double, const char *, - double, double, double); + const ClimoPntInfo &, double); - void set_point_obs(int, const char *, double, double, double, double, + void set_point_obs(int, const char *, const char *, + double, double, double, double, unixtime, double, double, double, - const char *, double, double, double); + const char *, const ClimoPntInfo &, double); - void add_grid_obs(double, double, double, double); + void add_grid_obs(double, + const ClimoPntInfo &, double); void add_grid_obs(double, double, double, - double, double, double); - - void add_climo(double, double, double); - void set_climo(int, double, double, double); - void add_climo_cdf(); + const ClimoPntInfo &, double); + + void add_climo(double, const ClimoPntInfo &); + + void set_climo(int, double, const ClimoPntInfo &); - double process_obs(VarInfo *, double); + void compute_climo_cdf(); - void print_obs_summary(); + double process_obs(const VarInfo *, double) const; + + void print_obs_summary() const; void calc_obs_summary(); + void set_point_weight(const PointWeightType); }; //////////////////////////////////////////////////////////////////////// -inline bool PairBase::is_point_vx() const { return(IsPointVx); } +inline bool PairBase::is_point_vx() const { return IsPointVx; } + +//////////////////////////////////////////////////////////////////////// +// +// Base class for verification tasks +// +//////////////////////////////////////////////////////////////////////// + +class VxPairBase { + + protected: + + void init_from_scratch(); + void assign(const VxPairBase &); + void copy_var_info(const VarInfo *info, VarInfo *©); + + public: + + VxPairBase(); + ~VxPairBase(); + VxPairBase(const VxPairBase &); + VxPairBase & operator=(const VxPairBase &); + + ////////////////////////////////////////////////////////////////// + // + // Information about the fields to be compared + // + ////////////////////////////////////////////////////////////////// + + VarInfo 
*fcst_info; // Forecast field, allocated by VarInfoFactory + VarInfo *obs_info; // Observation field, allocated by VarInfoFactory + + VarInfo *fclm_info; // Forecast climatology field, allocated by VarInfoFactory + VarInfo *oclm_info; // Observation climatology field, allocated by VarInfoFactory + + ConcatString desc; // User description from config file + + double interp_thresh; // Threshold between 0 and 1 used when + // interpolating the forecasts to the + // observation location. + + ////////////////////////////////////////////////////////////////// + // + // Forecast and climatology fields falling between the requested + // levels. Store the fields in a data plane array. + // + ////////////////////////////////////////////////////////////////// + + DataPlaneArray fcst_dpa; // Forecast data plane array + DataPlaneArray fcmn_dpa; // Forecast climatology mean data plane array + DataPlaneArray fcsd_dpa; // Forecast climatology standard deviation data plane array + DataPlaneArray ocmn_dpa; // Observation climatology mean data plane array + DataPlaneArray ocsd_dpa; // Observation climatology standard deviation data plane array + + ////////////////////////////////////////////////////////////////// + + unixtime fcst_ut; // Forecast valid time + unixtime beg_ut; // Beginning of valid time window + unixtime end_ut; // End of valid time window + + ////////////////////////////////////////////////////////////////// + + StringArray sid_inc_filt; // Station ID inclusion list + StringArray sid_exc_filt; // Station ID exclusion list + StringArray obs_qty_inc_filt; // Observation quality include markers + StringArray obs_qty_exc_filt; // Observation quality exclude markers + + ////////////////////////////////////////////////////////////////// + + StringArray mpr_column; // Names of MPR columns or diffs of columns + ThreshArray mpr_thresh; // Filtering thresholds for the MPR columns + + ////////////////////////////////////////////////////////////////// + + StringArray 
msg_typ_sfc; // List of surface message types + StringArray msg_typ_lnd; // List of surface land message types + StringArray msg_typ_wtr; // List of surface water message types + + SurfaceInfo sfc_info; // Land/sea mask and topography info + + ////////////////////////////////////////////////////////////////// + + int n_msg_typ; // Number of verifying message types + + int n_mask; // Total number of masking regions + // of masking DataPlane fields or SIDs + + int n_interp; // Number of interpolation techniques + + int n_vx; // n_msg_typ * n_mask * n_interp + + ////////////////////////////////////////////////////////////////// + + // 3-Dim vector of PairBase pointers [n_msg_typ][n_mask][n_interp] + std::vector pb_ptr; + + // Counts for observation rejection reason codes + int n_try; // Number of observations processed + int rej_sid; // Reject based on SID inclusion and exclusion lists + int rej_var; // Reject based on observation variable name + int rej_vld; // Reject based on valid time + int rej_obs; // Reject observation bad data + int rej_grd; // Reject based on location + int rej_topo; // Reject based on topography + int rej_lvl; // Reject based on vertical level + int rej_qty; // Reject based on obs quality + + // 3-Dim vectors for observation rejection reason codes [n_msg_typ][n_mask][n_interp] + std::vector rej_typ; // Reject based on message type + std::vector rej_mask; // Reject based on masking region + std::vector rej_fcst; // Reject forecast bad data + std::vector rej_cmn; // Reject fcst or obs climo mean bad data + std::vector rej_csd; // Reject fcst or obs climo stdev bad data + std::vector rej_mpr; // Reject based on MPR filtering logic + std::vector rej_dup; // Reject based on duplicates logic + + ////////////////////////////////////////////////////////////////// + + void clear(); + + int three_to_one(int, int, int) const; + + void set_fcst_info(const VarInfo *); + void set_obs_info(const VarInfo *); + + void set_fcst_climo_info(const VarInfo *); + 
void set_obs_climo_info(const VarInfo *); + + void set_desc(const char *); + + void set_interp_thresh(double); + + void set_fcst_dpa(const DataPlaneArray &); + void set_fcst_climo_mn_dpa(const DataPlaneArray &); + void set_fcst_climo_sd_dpa(const DataPlaneArray &); + void set_obs_climo_mn_dpa(const DataPlaneArray &); + void set_obs_climo_sd_dpa(const DataPlaneArray &); + + void set_fcst_ut(const unixtime); + void set_beg_ut(const unixtime); + void set_end_ut(const unixtime); + + void set_sid_inc_filt(const StringArray &); + void set_sid_exc_filt(const StringArray &); + void set_obs_qty_inc_filt(const StringArray &); + void set_obs_qty_exc_filt(const StringArray &); + + // Call set_size before set_msg_typ, set_mask_area, and set_interp + void set_size(int, int, int); + + void set_msg_typ(int, const char *); + void set_msg_typ_vals(int, const StringArray &); + void set_mask_area(int, const char *, MaskPlane *); + void set_mask_sid(int, const char *, MaskSID *); + void set_mask_llpnt(int, const char *, MaskLatLon *); + + void set_interp(int i_interp, const char *interp_mthd_str, int width, + GridTemplateFactory::GridTemplates shape); + void set_interp(int i_interp, InterpMthd mthd, + int width, GridTemplateFactory::GridTemplates shape); + + void set_mpr_thresh(const StringArray &, const ThreshArray &); + + void set_climo_cdf_info_ptr(const ClimoCDFInfo *); + + void set_msg_typ_sfc(const StringArray &); + void set_msg_typ_lnd(const StringArray &); + void set_msg_typ_wtr(const StringArray &); + + void set_sfc_info(const SurfaceInfo &); + + int get_n_pair() const; + + void set_duplicate_flag(DuplicateType duplicate_flag); + void set_obs_summary(ObsSummary obs_summary); + void set_obs_perc_value(int percentile); + + void print_obs_summary() const; + void calc_obs_summary(); + void set_point_weight(const PointWeightType); + + bool is_keeper_sid(const char *, const char *); + bool is_keeper_var(const char *, const char *, int); + bool is_keeper_qty(const char *, const char 
*); + bool is_keeper_vld(const char *, unixtime); + bool is_keeper_obs(const char *, double &); + bool is_keeper_grd(const char *, const Grid &, + double, double, + double &, double &); + bool is_keeper_topo(const char *, const Grid &, + double, double, + const char *, double); + bool is_keeper_lvl(const char *, const char *, double, double); + bool is_keeper_typ(const char *, int, const char *); + bool is_keeper_mask(const char *, int, int, int, int, + const char *, double, double); + bool is_keeper_climo(const char *, int, int, int, + const Grid &gr, double, double, + double, double, double, + ClimoPntInfo &); + bool is_keeper_fcst(const char *, int, int, int, + const char *, const Grid &gr, + double, double, double, + double, double, double, + const ClimoPntInfo &, double &); + + // Member functions for incrementing the counts + void inc_count(std::vector &, int); + void inc_count(std::vector &, int, int); + void inc_count(std::vector &, int, int, int); +}; //////////////////////////////////////////////////////////////////////// // @@ -194,7 +426,7 @@ extern void find_vert_lvl(const DataPlaneArray &, const double, extern double compute_interp(const DataPlaneArray &dpa, const double obs_x, const double obs_y, - const double obs_v, const double cmn, const double csd, + const double obs_v, const ClimoPntInfo *cpi, const InterpMthd method, const int width, const GridTemplateFactory::GridTemplates shape, const bool wrap_lon, @@ -224,6 +456,13 @@ extern NumArray derive_climo_prob(const ClimoCDFInfo *, extern double derive_prob(const NumArray &, const SingleThresh &); +// Write the point observation in the MET point format for logging +extern ConcatString point_obs_to_string( + const float *hdr_arr, const char *hdr_typ_str, + const char *hdr_sid_str, unixtime hdr_ut, + const char *obs_qty, const float *obs_arr, + const char *var_name); + //////////////////////////////////////////////////////////////////////// #endif // __PAIR_BASE_H__ diff --git 
a/src/libcode/vx_statistics/pair_data_ensemble.cc b/src/libcode/vx_statistics/pair_data_ensemble.cc index 2cf41de138..0d80abce1a 100644 --- a/src/libcode/vx_statistics/pair_data_ensemble.cc +++ b/src/libcode/vx_statistics/pair_data_ensemble.cc @@ -8,7 +8,6 @@ //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -32,7 +31,6 @@ using namespace std; - //////////////////////////////////////////////////////////////////////// // // Code for class PairDataEnsemble @@ -233,9 +231,11 @@ void PairDataEnsemble::assign(const PairDataEnsemble &pd) { cdf_info_ptr = pd.cdf_info_ptr; - cmn_na = pd.cmn_na; - csd_na = pd.csd_na; - cdf_na = pd.cdf_na; + fcmn_na = pd.fcmn_na; + fcsd_na = pd.fcsd_na; + ocmn_na = pd.ocmn_na; + ocsd_na = pd.ocsd_na; + ocdf_na = pd.ocdf_na; // PairDataEnsemble v_na = pd.v_na; @@ -380,24 +380,27 @@ void PairDataEnsemble::compute_pair_vals(const gsl_rng *rng_ptr) { // Check if the ranks have already been computed if(r_na.n() == o_na.n()) return; - // Print the climo data being used - bool cmn_flag = set_climo_flag(o_na, cmn_na); - bool csd_flag = set_climo_flag(o_na, csd_na); + // Print the observation climo data being used + bool ocmn_flag = set_climo_flag(o_na, ocmn_na); + bool ocsd_flag = set_climo_flag(o_na, ocsd_na); - if(cmn_flag && cdf_info_ptr && cdf_info_ptr->cdf_ta.n() == 2) { + if(ocmn_flag && cdf_info_ptr && cdf_info_ptr->cdf_ta.n() == 2) { mlog << Debug(3) << "Computing ensemble statistics relative to the " - << "climatological mean.\n"; + << "observation climatological mean.\n"; } - else if(cmn_flag && csd_flag && cdf_info_ptr && cdf_info_ptr->cdf_ta.n() > 2) { + else if(ocmn_flag && + ocsd_flag && + cdf_info_ptr && + cdf_info_ptr->cdf_ta.n() > 2) { mlog << Debug(3) << "Computing ensemble statistics relative to a " << cdf_info_ptr->cdf_ta.n() - 2 - << "-member climatological ensemble.\n"; + << "-member observation climatological ensemble.\n"; } else { mlog << Debug(3) - << "No reference 
climatology data provided.\n"; + << "No reference observation climatology data provided.\n"; } // Compute the rank for each observation @@ -533,8 +536,8 @@ void PairDataEnsemble::compute_pair_vals(const gsl_rng *rng_ptr) { r_na.add(nint(dest_na[0])); } - // Derive ensemble from climo mean and standard deviation - derive_climo_vals(cdf_info_ptr, cmn_na[i], csd_na[i], cur_clm); + // Derive ensemble from observation climo mean and standard deviation + derive_climo_vals(cdf_info_ptr, ocmn_na[i], ocsd_na[i], cur_clm); // Store empirical CRPS stats // For crps_emp use temporary, local variable so we can use it @@ -552,7 +555,7 @@ void PairDataEnsemble::compute_pair_vals(const gsl_rng *rng_ptr) { // Store Gaussian CRPS stats crps_gaus_na.add(compute_crps_gaus(o_na[i], mean, stdev)); - crpscl_gaus_na.add(compute_crps_gaus(o_na[i], cmn_na[i], csd_na[i])); + crpscl_gaus_na.add(compute_crps_gaus(o_na[i], ocmn_na[i], ocsd_na[i])); ign_na.add(compute_ens_ign(o_na[i], mean, stdev)); pit_na.add(compute_ens_pit(o_na[i], mean, stdev)); @@ -783,10 +786,7 @@ void PairDataEnsemble::compute_ssvar() { // Sort the bins set sorted_bins; - for( ssvar_bin_map::iterator map_it = bins.begin(); - map_it != bins.end(); map_it++ ){ - sorted_bins.insert( (*map_it).first ); - } + for(auto &x : bins) sorted_bins.insert(x.first); // Report the number of bins built int n_bin = sorted_bins.size(); @@ -876,26 +876,33 @@ PairDataEnsemble PairDataEnsemble::subset_pairs_obs_thresh(const SingleThresh &o pd.obs_error_flag = obs_error_flag; pd.cdf_info_ptr = cdf_info_ptr; - bool cmn_flag = set_climo_flag(o_na, cmn_na); - bool csd_flag = set_climo_flag(o_na, csd_na); - bool wgt_flag = set_climo_flag(o_na, wgt_na); + bool fcmn_flag = set_climo_flag(o_na, fcmn_na); + bool fcsd_flag = set_climo_flag(o_na, fcsd_na); + bool ocmn_flag = set_climo_flag(o_na, ocmn_na); + bool ocsd_flag = set_climo_flag(o_na, ocsd_na); + bool wgt_flag = set_climo_flag(o_na, wgt_na); // Loop over the pairs for(i=0; ifile_type()); - 
*climo_info = *info; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_obs_info(VarInfo *info) { - VarInfoFactory f; - - // Deallocate, if necessary - if(obs_info) { delete obs_info; obs_info = (VarInfo *) nullptr; } - - // Perform a deep copy - obs_info = f.new_var_info(info->file_type()); - *obs_info = *info; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_desc(const char *s) { - - desc = s; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_interp_thresh(double t) { - - interp_thresh = t; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_msg_typ_sfc(const StringArray &sa) { - - msg_typ_sfc = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_fcst_dpa(const DataPlaneArray &dpa) { + ens_info = new EnsVarInfo(*info); - fcst_dpa = dpa; + // Set the base pointer + if(!fcst_info) set_fcst_info(ens_info->get_var_info()); return; } //////////////////////////////////////////////////////////////////////// -void VxPairDataEnsemble::set_climo_mn_dpa(const DataPlaneArray &dpa) { +void VxPairDataEnsemble::set_size(int types, int masks, int interps) { - climo_mn_dpa = dpa; + VxPairBase::set_size(types, masks, interps); - return; -} - -//////////////////////////////////////////////////////////////////////// + // Resize the PairDataPoint vector + pd.resize(n_vx); -void VxPairDataEnsemble::set_climo_sd_dpa(const DataPlaneArray &dpa) { - - climo_sd_dpa = dpa; + // Set PairBase pointers to the PairDataEnsemble objects + for(int i=0; i " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_beg_ut(const 
unixtime ut) { + for(auto it = pd.begin(); it != pd.end(); it++) { - beg_ut = ut; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_end_ut(const unixtime ut) { - - end_ut = ut; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_sid_inc_filt(const StringArray sa) { - - sid_inc_filt = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_sid_exc_filt(const StringArray sa) { - - sid_exc_filt = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_obs_qty_inc_filt(const StringArray q) { - - obs_qty_inc_filt = q; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_obs_qty_exc_filt(const StringArray q) { - - obs_qty_exc_filt = q; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_pd_size(int types, int masks, int interps) { - - // Store the dimensions for the PairData array - n_msg_typ = types; - n_mask = masks; - n_interp = interps; - - // Allocate space for the PairData array - pd = new PairDataEnsemble ** [n_msg_typ]; - - for(int i=0; iinterp_mthd == InterpMthd::HiRA) { + GridTemplateFactory gtf; + GridTemplate* gt = gtf.buildGT(it->interp_shape, + it->interp_wdth, + false); + it->set_ens_size(n*gt->size()); } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_msg_typ(int i_msg_typ, const char *name) { - - for(int i=0; iset_ens_size(n); } } @@ -1300,58 +1108,28 @@ void VxPairDataEnsemble::set_msg_typ(int i_msg_typ, const char *name) { //////////////////////////////////////////////////////////////////////// -void VxPairDataEnsemble::set_msg_typ_vals(int i_msg_typ, 
const StringArray &sa) { +void VxPairDataEnsemble::set_ssvar_bin_size(double ssvar_bin_size) { - for(int i=0; i " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_mask_area(int i_mask, const char *name, - MaskPlane *mp_ptr) { - - for(int i=0; i " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_mask_llpnt(int i_mask, const char *name, - MaskLatLon *llpnt_ptr) { - - for(int i=0; isize()); - } - else { - pd[i][j][k].set_ens_size(n); - } - } - } + if(n_vx == 0) { + mlog << Warning << "\nVxPairDataEnsemble::set_ctrl_index() -> " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_climo_cdf_info_ptr(const ClimoCDFInfo *info) { - - for(int i=0; i " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_phist_bin_size(double phist_bin_size) { - - for(int i=0; ifile_type() != FileType_Gb1) { @@ -1489,104 +1183,49 @@ void VxPairDataEnsemble::add_point_obs(float *hdr_arr, int *hdr_typ_arr, // Create VarInfoGrib pointer VarInfoGrib *obs_info_grib = (VarInfoGrib *) obs_info; - // Check the station ID inclusion and exclusion lists - if((sid_inc_filt.n() && !sid_inc_filt.has(hdr_sid_str)) || - (sid_exc_filt.n() && sid_exc_filt.has(hdr_sid_str))) return; + // Increment the number of tries count + n_try++; - // Check whether the observation variable name matches (rej_var) - if ((var_name != 0) && (0 < m_strlen(var_name))) { - if ( var_name != obs_info->name() ) { - return; - } - } - else if(obs_info_grib->code() != nint(obs_arr[1])) { - return; - } - - // Check the observation quality include and exclude 
options - if((obs_qty_inc_filt.n() > 0 && !obs_qty_inc_filt.has(obs_qty)) || - (obs_qty_exc_filt.n() > 0 && obs_qty_exc_filt.has(obs_qty))) { - return; + // Point observation summary string for rejection log messages + ConcatString pnt_obs_str; + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + pnt_obs_str = point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, + hdr_ut, obs_qty, obs_arr, var_name); } - - // Check whether the observation time falls within the valid time - // window - if(hdr_ut < beg_ut || hdr_ut > end_ut) return; - - hdr_lat = hdr_arr[0]; - hdr_lon = hdr_arr[1]; - obs_lvl = obs_arr[2]; - obs_hgt = obs_arr[3]; + // Check the station ID + if(!is_keeper_sid(pnt_obs_str.c_str(), hdr_sid_str)) return; - // Apply observation processing logic - obs_v = pd[0][0][0].process_obs(obs_info, obs_arr[4]); + // Check observation variable + if(!is_keeper_var(pnt_obs_str.c_str(), var_name, nint(obs_arr[1]))) return; - // Check whether the observation value contains valid data - if(is_bad_data(obs_v)) return; + // Check observation quality + if(!is_keeper_qty(pnt_obs_str.c_str(), obs_qty)) return; - // Convert the lat/lon value to x/y - gr.latlon_to_xy(hdr_lat, -1.0*hdr_lon, obs_x, obs_y); - x = nint(obs_x); - y = nint(obs_y); + // Check valid time + if(!is_keeper_vld(pnt_obs_str.c_str(), hdr_ut)) return; - // Check if the observation's lat/lon is on the grid - if(((x < 0 || x >= gr.nx()) && !gr.wrap_lon()) || - y < 0 || y >= gr.ny()) return; + // Check observation value + double obs_v = obs_arr[4]; + if(!is_keeper_obs(pnt_obs_str.c_str(), obs_v)) return; - // For pressure levels, check if the observation pressure level - // falls in the requested range. 
- if(obs_info_grib->level().type() == LevelType_Pres) { + // Check location + double hdr_lat = hdr_arr[0]; + double hdr_lon = hdr_arr[1]; + double obs_x, obs_y; + if(!is_keeper_grd(pnt_obs_str.c_str(), gr, hdr_lat, hdr_lon, obs_x, obs_y)) return; - if(obs_lvl < obs_info_grib->level().lower() || - obs_lvl > obs_info_grib->level().upper()) return; - } - // For accumulations, check if the observation accumulation interval - // matches the requested interval. - else if(obs_info_grib->level().type() == LevelType_Accum) { + // TODO: Add topography filtering to Ensemble-Stat - if(obs_lvl < obs_info_grib->level().lower() || - obs_lvl > obs_info_grib->level().upper()) return; - } - // For all other level types (VertLevel, RecNumber, NoLevel), - // check for a surface message type or if the observation height - // falls within the requested range. - else { + // Check topo + double hdr_elv = hdr_arr[2]; + if(!is_keeper_topo(pnt_obs_str.c_str(), gr, obs_x, obs_y, + hdr_typ_str, hdr_elv)) return; - if(!msg_typ_sfc.reg_exp_match(hdr_typ_str) && - (obs_hgt < obs_info_grib->level().lower() || - obs_hgt > obs_info_grib->level().upper())) { - return; - } - } - - // For a single climatology mean field - if(climo_mn_dpa.n_planes() == 1) { - cmn_lvl_blw = 0; - cmn_lvl_abv = 0; - } - // For multiple climatology mean fields, find the levels above and - // below the observation point. - else { - // Interpolate using the observation pressure level or height - to_lvl = (fcst_info->get_var_info()->level().type() == LevelType_Pres ? - obs_lvl : obs_hgt); - find_vert_lvl(climo_mn_dpa, to_lvl, cmn_lvl_blw, cmn_lvl_abv); - } - - // For a single climatology standard deviation field - if(climo_sd_dpa.n_planes() == 1) { - csd_lvl_blw = 0; - csd_lvl_abv = 0; - } - // For multiple climatology standard deviation fields, find the - // levels above and below the observation point. 
- else { - // Interpolate using the observation pressure level or height - to_lvl = (fcst_info->get_var_info()->level().type() == LevelType_Pres ? - obs_lvl : obs_hgt); - find_vert_lvl(climo_sd_dpa, to_lvl, csd_lvl_blw, csd_lvl_abv); - } + // Check level + double obs_lvl = obs_arr[2]; + double obs_hgt = obs_arr[3]; + if(!is_keeper_lvl(pnt_obs_str.c_str(), hdr_typ_str, obs_lvl, obs_hgt)) return; // When verifying a vertical level forecast against a surface message type, // set the observation level value to bad data so that it's not used in the @@ -1596,11 +1235,12 @@ void VxPairDataEnsemble::add_point_obs(float *hdr_arr, int *hdr_typ_arr, obs_lvl = bad_data_double; } - // Set flag for specific humidity - bool spfh_flag = fcst_info->get_var_info()->is_specific_humidity() && - obs_info->is_specific_humidity(); + // Set flags + bool spfh_flag = fcst_info->is_specific_humidity() && + obs_info->is_specific_humidity(); // Store pointer to ObsErrorEntry + ObsErrorEntry *oerr_ptr = (ObsErrorEntry *) nullptr; if(obs_error_info->flag) { // Use config file setting, if specified @@ -1637,96 +1277,54 @@ void VxPairDataEnsemble::add_point_obs(float *hdr_arr, int *hdr_typ_arr, FieldType::Obs, oerr_ptr, obs_v); } - // Look through all of the PairData objects to see if the observation - // should be added. 
+ // Loop through the message types + for(int i_msg_typ=0; i_msg_typs_is_on(x, y)) continue; - } - // Otherwise, check for the obs Station ID's presence in the - // masking SID list - else if(pd[i][j][0].mask_sid_ptr != (StringArray *) 0) { - if(!pd[i][j][0].mask_sid_ptr->has(hdr_sid_str)) continue; - } - // Otherwise, check observation Lat/Lon thresholds - else if(pd[i][j][0].mask_llpnt_ptr != (MaskLatLon *) 0) { - if(!pd[i][j][0].mask_llpnt_ptr->lat_thresh.check(hdr_lat) || - !pd[i][j][0].mask_llpnt_ptr->lon_thresh.check(hdr_lon)) { - continue; - } - } + // Check masking region + if(!is_keeper_mask(pnt_obs_str.c_str(), i_msg_typ, i_mask, x, y, + hdr_sid_str, hdr_lat, hdr_lon)) continue; - // Add the observation for each interpolation method - for(k=0; kget_var_info()->level().type() == LevelType_Pres ? - obs_lvl : obs_hgt); - - // Compute the interpolated climatology mean - cmn_v = compute_interp(climo_mn_dpa, obs_x, obs_y, obs_v, - bad_data_double, bad_data_double, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->get_var_info()->level().type(), - to_lvl, cmn_lvl_blw, cmn_lvl_abv); - - // Check for bad data - if(climo_mn_dpa.n_planes() > 0 && is_bad_data(cmn_v)) { - continue; - } + // Loop through the interpolation methods + for(int i_interp=0; i_interp 0 && - (pd[0][0][k].interp_mthd == InterpMthd::Min || - pd[0][0][k].interp_mthd == InterpMthd::Max || - pd[0][0][k].interp_mthd == InterpMthd::Median || - pd[0][0][k].interp_mthd == InterpMthd::Best)) { - mlog << Warning << "\nVxPairDataEnsemble::add_point_obs() -> " - << "applying the " - << interpmthd_to_string(pd[0][0][k].interp_mthd) - << " interpolation method to climatological spread " - << "may cause unexpected results.\n\n"; - } + // Check climatology values + ClimoPntInfo cpi; + if(!is_keeper_climo(pnt_obs_str.c_str(), i_msg_typ, i_mask, i_interp, + gr, obs_x, obs_y, obs_v, obs_lvl, obs_hgt, + cpi)) continue; + + // Add 
the observation value + // Weight is from the nearest grid point + int n = three_to_one(i_msg_typ, i_mask, i_interp); + if(!pd[n].add_point_obs(hdr_typ_str, hdr_sid_str, + hdr_lat, hdr_lon, obs_x, obs_y, + hdr_ut, obs_lvl, obs_hgt, + obs_v, obs_qty, cpi, default_weight)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() + << " versus " << obs_info->magic_str() + << ", skipping observation since it is a duplicate:\n" + << pnt_obs_str << "\n"; + } - // Compute the interpolated climatology standard deviation - csd_v = compute_interp(climo_sd_dpa, obs_x, obs_y, obs_v, - bad_data_double, bad_data_double, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->get_var_info()->level().type(), - to_lvl, csd_lvl_blw, csd_lvl_abv); - - // Check for bad data - if(climo_sd_dpa.n_planes() > 0 && is_bad_data(csd_v)) { + inc_count(rej_dup, i_msg_typ, i_mask, i_interp); continue; } - // Compute weight for current point - wgt_v = (wgt_dp == (DataPlane *) 0 ? 
- default_grid_weight : wgt_dp->get(x, y)); + // Store the observation error pointer + pd[n].add_obs_error_entry(oerr_ptr); - // Add the observation value - // Weight is from the nearest grid point - pd[i][j][k].add_point_obs(hdr_sid_str, hdr_lat, hdr_lon, - obs_x, obs_y, hdr_ut, obs_lvl, obs_hgt, - obs_v, obs_qty, cmn_v, csd_v, wgt_v); - pd[i][j][k].add_obs_error_entry(oerr_ptr); } // end for k } // end for j } // end for i @@ -1737,254 +1335,120 @@ void VxPairDataEnsemble::add_point_obs(float *hdr_arr, int *hdr_typ_arr, //////////////////////////////////////////////////////////////////////// void VxPairDataEnsemble::add_ens(int member, bool mn, Grid &gr) { - int i, j, k, l, m; - int f_lvl_blw, f_lvl_abv, i_mem; - double to_lvl, fcst_v; - NumArray fcst_na; // Set flag for specific humidity - bool spfh_flag = fcst_info->get_var_info()->is_specific_humidity() && - obs_info->is_specific_humidity(); + bool spfh_flag = fcst_info->is_specific_humidity() && + obs_info->is_specific_humidity(); // Loop through all the PairDataEnsemble objects and interpolate - for(i=0; i " - << "the \"" << interpmthd_to_string(pd[0][0][k].interp_mthd) - << "\" interpolation method only applies when verifying a " - << "single level, not " << fcst_dpa.n_planes() - << " levels.\n\n"; - continue; - } - - // Process each of the observations - for(l=0; lget_var_info()->level().type() == LevelType_Pres ? - pd[i][j][k].lvl_na[l] : pd[i][j][k].elv_na[l]); - - // For a single forecast field - if(fcst_dpa.n_planes() == 1) { - f_lvl_blw = 0; - f_lvl_abv = 0; - } - // For multiple forecast fields, find the levels above - // and below the observation point. 
- else { - find_vert_lvl(fcst_dpa, to_lvl, f_lvl_blw, f_lvl_abv); - } - - // Extract the HiRA neighborhood of values - if(pd[0][0][k].interp_mthd == InterpMthd::HiRA) { - - // For HiRA, set the ensemble mean to bad data - if(mn) { - fcst_na.erase(); - fcst_na.add(bad_data_double); - } - // Otherwise, retrieve all the neighborhood values - // using a valid threshold of 0 - else { - get_interp_points(fcst_dpa, - pd[i][j][k].x_na[l], - pd[i][j][k].y_na[l], - pd[0][0][k].interp_mthd, - pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, - gr.wrap_lon(), - 0, spfh_flag, - fcst_info->get_var_info()->level().type(), - to_lvl, f_lvl_blw, f_lvl_abv, - fcst_na); - } - } - // Otherwise, get a single interpolated ensemble value - else { - fcst_na.add(compute_interp(fcst_dpa, - pd[i][j][k].x_na[l], - pd[i][j][k].y_na[l], - pd[i][j][k].o_na[l], - pd[i][j][k].cmn_na[l], - pd[i][j][k].csd_na[l], - pd[0][0][k].interp_mthd, - pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, - gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->get_var_info()->level().type(), - to_lvl, f_lvl_blw, f_lvl_abv)); - } - - // Store the single ensemble value or HiRA neighborhood - for(m=0; mflag) { - fcst_v = add_obs_error_inc( - obs_error_info->rng_ptr, FieldType::Fcst, - pd[i][j][k].obs_error_entry[l], - pd[i][j][k].o_na[l], fcst_na[m]); - } - else { - fcst_v = fcst_na[m]; - } - - // Determine index of ensemble member - i_mem = member * fcst_na.n() + m; - - // Store perturbed ensemble member value - pd[i][j][k].add_ens(i_mem, fcst_v); - } - - } // end for m - fcst_na - } // end for l - n_obs - } // end for k - n_interp - } // end for j - n_mask - } // end for i - n_msg_typ - - return; -} - -//////////////////////////////////////////////////////////////////////// - -int VxPairDataEnsemble::get_n_pair() const { - int n, i, j, k; - - for(i=0, n=0; iinterp_mthd == InterpMthd::HiRA && + fcst_dpa.n_planes() != 1 ) { + + mlog << Warning << "\nVxPairDataEnsemble::add_ens() -> " + << "the \"" << 
interpmthd_to_string(it->interp_mthd) + << "\" interpolation method only applies when verifying a " + << "single level, not " << fcst_dpa.n_planes() + << " levels.\n\n"; + continue; } - } - return; -} + // Process each of the observations + NumArray fcst_na; + for(int i_obs=0; i_obsn_obs; i_obs++) { -//////////////////////////////////////////////////////////////////////// + // Initialize + fcst_na.erase(); -void VxPairDataEnsemble::set_obs_summary(ObsSummary s) { + // Interpolate using the observation pressure level or height + double to_lvl = (fcst_info->level().type() == LevelType_Pres ? + it->lvl_na[i_obs] : it->elv_na[i_obs]); + int lvl_blw, lvl_abv; - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_obs_summary(s); + // For a single forecast field + if(fcst_dpa.n_planes() == 1) { + lvl_blw = 0; + lvl_abv = 0; } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_obs_perc_value(int percentile) { - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_obs_perc_value(percentile); + // For multiple forecast fields, find the levels above + // and below the observation point. 
+ else { + find_vert_lvl(fcst_dpa, to_lvl, lvl_blw, lvl_abv); } - } - } - return; -} + // Extract the HiRA neighborhood of values + if(it->interp_mthd == InterpMthd::HiRA) { -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::print_obs_summary() { - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].print_obs_summary(); + // For HiRA, set the ensemble mean to bad data + if(mn) { + fcst_na.erase(); + fcst_na.add(bad_data_double); + } + // Otherwise, retrieve all the neighborhood values + // using a valid threshold of 0 + else { + get_interp_points(fcst_dpa, + it->x_na[i_obs], it->y_na[i_obs], + it->interp_mthd, it->interp_wdth, it->interp_shape, + gr.wrap_lon(), 0, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv, + fcst_na); + } } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::calc_obs_summary() { - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].calc_obs_summary(); + // Otherwise, get a single interpolated ensemble value + else { + ClimoPntInfo cpi(it->fcmn_na[i_obs], it->fcsd_na[i_obs], + it->ocmn_na[i_obs], it->ocsd_na[i_obs]); + + fcst_na.add(compute_interp(fcst_dpa, + it->x_na[i_obs], it->y_na[i_obs], it->o_na[i_obs], &cpi, + it->interp_mthd, it->interp_wdth, it->interp_shape, + gr.wrap_lon(), interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv)); } - } - } - return; -} + // Store the single ensemble value or HiRA neighborhood + for(int i_fcst=0; i_fcstmn_na.add(fcst_na[i_fcst]); + } + // Store the ensemble member values + else { - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].ctrl_index = index; - } - } - } + // Track unperturbed ensemble variance sums + // Exclude the control 
member from the variance + if(member != it->ctrl_index) { + it->add_ens_var_sums(i_obs, fcst_na[i_fcst]); + } - return; -} + // Apply observation error perturbation, if requested + double fcst_v; + if(obs_error_info->flag) { + fcst_v = add_obs_error_inc( + obs_error_info->rng_ptr, FieldType::Fcst, + it->obs_error_entry[i_obs], + it->o_na[i_obs], fcst_na[i_fcst]); + } + else { + fcst_v = fcst_na[i_fcst]; + } -//////////////////////////////////////////////////////////////////////// + // Determine index of ensemble member + int i_mem = member * fcst_na.n() + i_fcst; -void VxPairDataEnsemble::set_skip_const(bool tf) { + // Store perturbed ensemble member value + it->add_ens(i_mem, fcst_v); + } - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].skip_const = tf; - } - } - } + } // end for i_fcst + } // end for i_obs + } // end for PairDataEnsemble iterator return; } diff --git a/src/libcode/vx_statistics/pair_data_ensemble.h b/src/libcode/vx_statistics/pair_data_ensemble.h index 9be2eec3c8..6f14825b11 100644 --- a/src/libcode/vx_statistics/pair_data_ensemble.h +++ b/src/libcode/vx_statistics/pair_data_ensemble.h @@ -164,12 +164,11 @@ class PairDataEnsemble : public PairBase { //////////////////////////////////////////////////////////////////////// // -// Class to store a variety of PairDataEnsemble objects for each -// verification task +// Class to store PairDataEnsemble objects for ensemble verification // //////////////////////////////////////////////////////////////////////// -class VxPairDataEnsemble { +class VxPairDataEnsemble : public VxPairBase { private: @@ -189,130 +188,37 @@ class VxPairDataEnsemble { // ////////////////////////////////////////////////////////////////// - EnsVarInfo *fcst_info; // Forecast field, allocated by EnsVarInfo - VarInfo *climo_info; // Climatology field, allocated by VarInfoFactory - VarInfo *obs_info; // Observation field, allocated by VarInfoFactory - - ConcatString 
desc; // User description from config file - - double interp_thresh; // Threshold between 0 and 1 used when - // interpolating the forecasts to the - // observation location. - - StringArray msg_typ_sfc; // List of surface message types - - ////////////////////////////////////////////////////////////////// - // - // Forecast and climotology fields falling between the requested - // levels. Store the fields in a data plane array. - // - ////////////////////////////////////////////////////////////////// - - DataPlaneArray fcst_dpa; // Forecast data plane array - DataPlaneArray climo_mn_dpa; // Climatology mean data plane array - DataPlaneArray climo_sd_dpa; // Climatology standard deviation data plane array - - ////////////////////////////////////////////////////////////////// - - unixtime fcst_ut; // Ensemble valid time - unixtime beg_ut; // Beginning of valid time window - unixtime end_ut; // End of valid time window + EnsVarInfo *ens_info; // Ensemble data, allocated by EnsVarInfo ////////////////////////////////////////////////////////////////// - StringArray sid_inc_filt; // Station ID inclusion list - StringArray sid_exc_filt; // Station ID exclusion list - StringArray obs_qty_inc_filt; // Observation quality include markers - StringArray obs_qty_exc_filt; // Observation quality exclude markers - - ////////////////////////////////////////////////////////////////// - ObsErrorInfo *obs_error_info; // Pointer for observation error // Not allocated ////////////////////////////////////////////////////////////////// - int n_msg_typ; // Number of verifying message types - - int n_mask; // Total number of masking regions - // of masking DataPlane fields or SIDs - - int n_interp; // Number of interpolation techniques - - ////////////////////////////////////////////////////////////////// - - PairDataEnsemble ***pd; // 3-Dim Array of PairDataEnsemble objects - // as [n_msg_typ][n_mask][n_interp] + // 3-Dim vector of PairDataEnsemble objects [n_msg_typ][n_mask][n_interp] + 
std::vector pd; ////////////////////////////////////////////////////////////////// void clear(); - void set_fcst_info(EnsVarInfo *); - void set_climo_info(VarInfo *); - void set_obs_info(VarInfo *); - - void set_desc(const char *); - - void set_interp_thresh(double); - void set_msg_typ_sfc(const StringArray &); - - void set_fcst_dpa(const DataPlaneArray &); - void set_climo_mn_dpa(const DataPlaneArray &); - void set_climo_sd_dpa(const DataPlaneArray &); - - void set_fcst_ut(const unixtime); - void set_beg_ut(const unixtime); - void set_end_ut(const unixtime); - - void set_sid_inc_filt(const StringArray); - void set_sid_exc_filt(const StringArray); - void set_obs_qty_inc_filt(const StringArray); - void set_obs_qty_exc_filt(const StringArray); - - // Call set_pd_size before set_msg_typ, set_mask_area, and set_interp - void set_pd_size(int, int, int); - - void set_msg_typ(int, const char *); - void set_msg_typ_vals(int, const StringArray &); - void set_mask_area(int, const char *, MaskPlane *); - void set_mask_sid(int, const char *, StringArray *); - void set_mask_llpnt(int, const char *, MaskLatLon *); - - void set_interp(int i_interp, const char *interp_mthd_str, int width, - GridTemplateFactory::GridTemplates shape); - void set_interp(int i_interp, InterpMthd mthd, int width, - GridTemplateFactory::GridTemplates shape); + void set_ens_info(const EnsVarInfo *); + void set_size(int, int, int); // Call set_ens_size before add_ens void set_ens_size(int n); - void set_climo_cdf_info_ptr(const ClimoCDFInfo *); - void set_ssvar_bin_size(double); void set_phist_bin_size(double); + void set_ctrl_index(int); + void set_skip_const(bool); void add_point_obs(float *, int *, const char *, const char *, - unixtime, const char *, float *, Grid &, - const char * = 0, const DataPlane * = 0); - + unixtime, const char *, float *, const Grid &, + const char *); void add_ens(int, bool mn, Grid &); - - int get_n_pair() const; - - void set_duplicate_flag(DuplicateType duplicate_flag); - - 
void set_obs_summary(ObsSummary obs_summary); - - void set_obs_perc_value(int percentile); - - void print_obs_summary(); - - void calc_obs_summary(); - - void set_ctrl_index(int); - - void set_skip_const(bool); }; //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_statistics/pair_data_point.cc b/src/libcode/vx_statistics/pair_data_point.cc index cac91ea6c1..85595d69d3 100644 --- a/src/libcode/vx_statistics/pair_data_point.cc +++ b/src/libcode/vx_statistics/pair_data_point.cc @@ -69,9 +69,8 @@ PairDataPoint & PairDataPoint::operator=(const PairDataPoint &pd) { void PairDataPoint::init_from_scratch() { - seeps_mpr.clear(); - seeps.clear(); seeps_climo = nullptr; + clear(); return; @@ -85,10 +84,13 @@ void PairDataPoint::clear() { f_na.clear(); for (int idx=0; idxset_p1_thresh(p1_thresh); - else mlog << Warning << "\nPairDataPoint::set_seeps_thresh() ignored t1_threshold." - << " Load SEEPS climo first\n\n"; + else mlog << Warning << "\nPairDataPoint::set_seeps_thresh() -> " + << "ignored t1_threshold. 
Load SEEPS climo first\n\n"; } //////////////////////////////////////////////////////////////////////// @@ -211,21 +225,24 @@ void PairDataPoint::set_seeps_score(SeepsScore *seeps, int index) { } } } - else mlog << Warning << "\nPairDataPoint::set_seeps_score(" - << index << ") is out of range " << seeps_count << "\n\n"; + else { + mlog << Warning << "\nPairDataPoint::set_seeps_score(" + << index << ") is out of range " << seeps_count << "\n\n"; + } } } //////////////////////////////////////////////////////////////////////// -void PairDataPoint::set_point_pair(int i_obs, const char *sid, +void PairDataPoint::set_point_pair(int i_obs, + const char *typ, const char *sid, double lat, double lon, double x, double y, unixtime ut, double lvl, double elv, double f, double o, const char *qc, - double cmn, double csd, double wgt, - SeepsScore *seeps) { + const ClimoPntInfo &cpi, + double wgt, const SeepsScore *seeps) { if(i_obs < 0 || i_obs >= n_obs) { mlog << Error << "\nPairDataPoint::set_point_pair() -> " @@ -234,8 +251,8 @@ void PairDataPoint::set_point_pair(int i_obs, const char *sid, exit(1); } - set_point_obs(i_obs, sid, lat, lon, x, y, ut, lvl, elv, - o, qc, cmn, csd, wgt); + set_point_obs(i_obs, typ, sid, lat, lon, x, y, ut, lvl, elv, + o, qc, cpi, wgt); f_na.set(i_obs, f); *seeps_mpr[i_obs] = *seeps; @@ -246,9 +263,10 @@ void PairDataPoint::set_point_pair(int i_obs, const char *sid, //////////////////////////////////////////////////////////////////////// bool PairDataPoint::add_grid_pair(double f, double o, - double cmn, double csd, double wgt) { + const ClimoPntInfo &cpi, + double wgt) { - add_grid_obs(o, cmn, csd, wgt); + add_grid_obs(o, cpi, wgt); f_na.add(f); seeps_mpr.push_back(nullptr); @@ -259,13 +277,16 @@ bool PairDataPoint::add_grid_pair(double f, double o, //////////////////////////////////////////////////////////////////////// bool PairDataPoint::add_grid_pair(const NumArray &f_in, const NumArray &o_in, - const NumArray &cmn_in, const NumArray &csd_in, 
+ const NumArray &fcmn_in, const NumArray &fcsd_in, + const NumArray &ocmn_in, const NumArray &ocsd_in, const NumArray &wgt_in) { // Check for constant length - if(o_in.n() != f_in.n() || - o_in.n() != cmn_in.n() || - o_in.n() != csd_in.n() || + if(o_in.n() != f_in.n() || + o_in.n() != fcmn_in.n() || + o_in.n() != fcsd_in.n() || + o_in.n() != ocmn_in.n() || + o_in.n() != ocsd_in.n() || o_in.n() != wgt_in.n()) { mlog << Error << "\nPairDataPoint::add_grid_pair() -> " << "arrays must all have the same length!\n\n"; @@ -280,7 +301,8 @@ bool PairDataPoint::add_grid_pair(const NumArray &f_in, const NumArray &o_in, wgt_na.add(wgt_in); for(int i=0; iget_seeps_score(sid_no, f, o, month, hour); - if (mlog.verbosity_level() >= seeps_debug_level - && seeps && !is_eq(seeps->score, bad_data_double) - && !is_eq(seeps->score, 0) && seeps_record_count < 10) { + if (mlog.verbosity_level() >= seeps_debug_level && + seeps && + !is_bad_data(seeps->score) && + !is_eq(seeps->score, 0) && + seeps_record_count < 10) { mlog << Debug(seeps_debug_level) - << "PairDataPoint::compute_seeps() score = " << seeps->score << "\n"; + << "PairDataPoint::compute_seeps() score = " + << seeps->score << "\n"; seeps_record_count++; } } @@ -332,38 +357,43 @@ PairDataPoint PairDataPoint::subset_pairs_cnt_thresh( out_pd.extend(n_obs); out_pd.set_climo_cdf_info_ptr(cdf_info_ptr); - bool cmn_flag = set_climo_flag(f_na, cmn_na); - bool csd_flag = set_climo_flag(f_na, csd_na); - bool wgt_flag = set_climo_flag(f_na, wgt_na); + bool fcmn_flag = set_climo_flag(f_na, fcmn_na); + bool fcsd_flag = set_climo_flag(f_na, fcsd_na); + bool ocmn_flag = set_climo_flag(f_na, ocmn_na); + bool ocsd_flag = set_climo_flag(f_na, ocsd_na); + bool wgt_flag = set_climo_flag(f_na, wgt_na); // Loop over the pairs for(i=0; ifile_type()); - *fcst_info = *info; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_climo_info(VarInfo *info) { - VarInfoFactory f; - - // 
Deallocate, if necessary - if(climo_info) { delete climo_info; climo_info = (VarInfo *) nullptr; } - - // Perform a deep copy - climo_info = f.new_var_info(info->file_type()); - *climo_info = *info; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_obs_info(VarInfoGrib *info) { - - // Deallocate, if necessary - if(obs_info) { delete obs_info; obs_info = (VarInfoGrib *) nullptr; } - - // Perform a deep copy - obs_info = new VarInfoGrib; - *obs_info = *info; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_desc(const char *s) { + VxPairBase::assign(vx_pd); - desc = s; + set_size(vx_pd.n_msg_typ, vx_pd.n_mask, vx_pd.n_interp); - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_interp_thresh(double t) { - - interp_thresh = t; + pd = vx_pd.pd; return; } //////////////////////////////////////////////////////////////////////// -void VxPairDataPoint::set_fcst_dpa(const DataPlaneArray &dpa) { - - fcst_dpa = dpa; +void VxPairDataPoint::set_size(int types, int masks, int interps) { - return; -} - -//////////////////////////////////////////////////////////////////////// + VxPairBase::set_size(types, masks, interps); -void VxPairDataPoint::set_climo_mn_dpa(const DataPlaneArray &dpa) { + // Resize the PairDataPoint vector + pd.resize(n_vx); - climo_mn_dpa = dpa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_climo_sd_dpa(const DataPlaneArray &dpa) { - - climo_sd_dpa = dpa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_fcst_ut(const unixtime ut) { - - fcst_ut = ut; - - // set the fcst_ut for all PairBase instances, used for duplicate logic - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < 
n_interp; k++){ - pd[i][j][k].set_fcst_ut(ut); - } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_beg_ut(const unixtime ut) { - - beg_ut = ut; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_end_ut(const unixtime ut) { - - end_ut = ut; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_sid_inc_filt(const StringArray &sa) { - - sid_inc_filt = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_sid_exc_filt(const StringArray &sa) { - - sid_exc_filt = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_obs_qty_inc_filt(const StringArray &sa) { - - obs_qty_inc_filt = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_obs_qty_exc_filt(const StringArray &sa) { - - obs_qty_exc_filt = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_pd_size(int types, int masks, int interps) { - int i, j, k; - - // Store the dimensions for the PairDataPoint array - n_msg_typ = types; - n_mask = masks; - n_interp = interps; - - // Allocate space for the PairDataPoint array - pd = new PairDataPoint ** [n_msg_typ]; - rej_typ = new int ** [n_msg_typ]; - rej_mask = new int ** [n_msg_typ]; - rej_fcst = new int ** [n_msg_typ]; - rej_cmn = new int ** [n_msg_typ]; - rej_csd = new int ** [n_msg_typ]; - rej_mpr = new int ** [n_msg_typ]; - rej_dup = new int ** [n_msg_typ]; - - for(i=0; i " - << "the \"" << conf_key_mpr_column << "\" (" - << write_css(sa) << ") and \"" << conf_key_mpr_thresh - << "\" (" << write_css(ta) - << ") config file entries must have the same length!\n\n"; - exit(1); - } - - 
mpr_column = sa; - mpr_thresh = ta; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_climo_cdf_info_ptr(const ClimoCDFInfo *info) { - - for(int i=0; iname()) { - rej_var++; - return; - } - } - else if(obs_info->code() != nint(obs_arr[1])) { - rej_var++; - return; - } - - // Check the observation quality include and exclude options - if((obs_qty_inc_filt.n() > 0 && !obs_qty_inc_filt.has(obs_qty)) || - (obs_qty_exc_filt.n() > 0 && obs_qty_exc_filt.has(obs_qty))) { - rej_qty++; - return; - } - - // Check whether the observation time falls within the valid time - // window - if(hdr_ut < beg_ut || hdr_ut > end_ut) { - rej_vld++; - return; + // Point observation summary string for rejection log messages + ConcatString pnt_obs_str; + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + pnt_obs_str = point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, + hdr_ut, obs_qty, obs_arr, var_name); } - bool precip_flag = fcst_info->is_precipitation() && - obs_info->is_precipitation(); - int precip_interval = bad_data_int; - if (precip_flag) { - if (wgt_dp) precip_interval = wgt_dp->accum(); - else precip_interval = fcst_dpa[0].accum(); - } + // Check the station ID + if(!is_keeper_sid(pnt_obs_str.c_str(), hdr_sid_str)) return; - hdr_lat = hdr_arr[0]; - hdr_lon = hdr_arr[1]; - hdr_elv = hdr_arr[2]; - - obs_lvl = obs_arr[2]; - obs_hgt = obs_arr[3]; - - // Apply observation processing logic - obs_v = pd[0][0][0].process_obs(obs_info, obs_arr[4]); - - // Check whether the observation value contains valid data - if(is_bad_data(obs_v)) { - mlog << Debug(4) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation with bad data value:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - rej_obs++; - return; - } + // Check observation variable + if(!is_keeper_var(pnt_obs_str.c_str(), var_name, 
nint(obs_arr[1]))) return; - // Convert the lat/lon value to x/y - gr.latlon_to_xy(hdr_lat, -1.0*hdr_lon, obs_x, obs_y); - x = nint(obs_x); - y = nint(obs_y); - - // Check if the observation's lat/lon is on the grid - if(((x < 0 || x >= gr.nx()) && !gr.wrap_lon()) || - y < 0 || y >= gr.ny()) { - - mlog << Debug(4) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation off the grid where (x, y) = (" - << x << ", " << y << ") and grid (nx, ny) = (" << gr.nx() - << ", " << gr.ny() << "):\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - rej_grd++; - return; - } + // Check observation quality + if(!is_keeper_qty(pnt_obs_str.c_str(), obs_qty)) return; - // Check for a large topography difference - if(sfc_info.topo_ptr && msg_typ_sfc.reg_exp_match(hdr_typ_str)) { - - // Interpolate model topography to observation location - double topo = compute_horz_interp( - *sfc_info.topo_ptr, obs_x, obs_y, hdr_elv, - InterpMthd::Bilin, 2, - GridTemplateFactory::GridTemplates::Square, - gr.wrap_lon(), 1.0); - - // Skip bad topography values - if(is_bad_data(hdr_elv) || is_bad_data(topo)) { - mlog << Debug(4) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation due to bad topography values " - << "where observation elevation = " << hdr_elv - << " and model topography = " << topo << ":\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - rej_topo++; - return; - } + // Check valid time + if(!is_keeper_vld(pnt_obs_str.c_str(), hdr_ut)) return; - // Check the topography difference threshold - if(!sfc_info.topo_use_obs_thresh.check(topo - hdr_elv)) { - mlog << Debug(4) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation due to topography difference " - << "where observation elevation (" << hdr_elv - << ") 
minus model topography (" << topo << ") = " - << topo - hdr_elv << " is not " - << sfc_info.topo_use_obs_thresh.get_str() << ":\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - rej_topo++; - return; - } - } + // Check observation value + double obs_v = obs_arr[4]; + if(!is_keeper_obs(pnt_obs_str.c_str(), obs_v)) return; - // For pressure levels, check if the observation pressure level - // falls in the requested range. - if(obs_info->level().type() == LevelType_Pres) { + // Check location + double hdr_lat = hdr_arr[0]; + double hdr_lon = hdr_arr[1]; + double obs_x, obs_y; + if(!is_keeper_grd(pnt_obs_str.c_str(), gr, hdr_lat, hdr_lon, obs_x, obs_y)) return; - if(obs_lvl < obs_info->level().lower() || - obs_lvl > obs_info->level().upper()) { - rej_lvl++; - return; - } - } - // For accumulations, check if the observation accumulation interval - // matches the requested interval. - else if(obs_info->level().type() == LevelType_Accum) { - - if(obs_lvl < obs_info->level().lower() || - obs_lvl > obs_info->level().upper()) { - rej_lvl++; - return; - } - } - // For all other level types (VertLevel, RecNumber, NoLevel), - // check for a surface message type or if the observation height - // falls within the requested range. - else { + // Check topo + double hdr_elv = hdr_arr[2]; + if(!is_keeper_topo(pnt_obs_str.c_str(), gr, obs_x, obs_y, + hdr_typ_str, hdr_elv)) return; - if(!msg_typ_sfc.reg_exp_match(hdr_typ_str) && - (obs_hgt < obs_info->level().lower() || - obs_hgt > obs_info->level().upper())) { - rej_lvl++; - return; - } - } + // Check level + double obs_lvl = obs_arr[2]; + double obs_hgt = obs_arr[3]; + if(!is_keeper_lvl(pnt_obs_str.c_str(), hdr_typ_str, obs_lvl, obs_hgt)) return; - // For a single forecast field - if(fcst_dpa.n_planes() == 1) { - f_lvl_blw = 0; - f_lvl_abv = 0; - } - // For multiple forecast fields, find the levels above and below - // the observation point. 
- else { - // Interpolate using the observation pressure level or height - to_lvl = (fcst_info->level().type() == LevelType_Pres ? - obs_lvl : obs_hgt); - find_vert_lvl(fcst_dpa, to_lvl, f_lvl_blw, f_lvl_abv); - } - - // For a single climatology mean field - if(climo_mn_dpa.n_planes() == 1) { - cmn_lvl_blw = 0; - cmn_lvl_abv = 0; - } - // For multiple climatology mean fields, find the levels above and - // below the observation point. - else { - // Interpolate using the observation pressure level or height - to_lvl = (fcst_info->level().type() == LevelType_Pres ? - obs_lvl : obs_hgt); - find_vert_lvl(climo_mn_dpa, to_lvl, cmn_lvl_blw, cmn_lvl_abv); - } + // Set flags + bool spfh_flag = fcst_info->is_specific_humidity() && + obs_info->is_specific_humidity(); + bool precip_flag = fcst_info->is_precipitation() && + obs_info->is_precipitation(); + int precip_interval = fcst_dpa[0].accum(); - // For a single climatology standard deviation field - if(climo_sd_dpa.n_planes() == 1) { - csd_lvl_blw = 0; - csd_lvl_abv = 0; - } - // For multiple climatology standard deviation fields, find the - // levels above and below the observation point. - else { - // Interpolate using the observation pressure level or height - to_lvl = (fcst_info->level().type() == LevelType_Pres ? 
- obs_lvl : obs_hgt); - find_vert_lvl(climo_sd_dpa, to_lvl, csd_lvl_blw, csd_lvl_abv); - } + bool has_seeps = false; + SeepsScore *seeps = nullptr; // When verifying a vertical level forecast against a surface message // type, set the observation level value to bad data so that it's not @@ -1202,376 +568,131 @@ void VxPairDataPoint::add_point_obs(float *hdr_arr, const char *hdr_typ_str, obs_lvl = bad_data_double; } - // Set flag for specific humidity - bool spfh_flag = fcst_info->is_specific_humidity() && - obs_info->is_specific_humidity(); + // Loop through the message types + for(int i_msg_typ=0; i_msg_typs_is_on(x, y)) { - inc_count(rej_mask, i, j); - continue; - } - } - // Otherwise, check for the obs Station ID's presence in the - // masking SID list - else if(pd[i][j][0].mask_sid_ptr != (StringArray *) 0) { - if(!pd[i][j][0].mask_sid_ptr->has(hdr_sid_str)) { - mlog << Debug(9) << "Checking for the obs station id in the masking SID list: rejected hdr_sid_str = " - << hdr_sid_str << "\n"; - inc_count(rej_mask, i, j); - continue; - } - } - // Otherwise, check observation lat/lon thresholds - else if(pd[i][j][0].mask_llpnt_ptr != (MaskLatLon *) 0) { - if(!pd[i][j][0].mask_llpnt_ptr->lat_thresh.check(hdr_lat) || - !pd[i][j][0].mask_llpnt_ptr->lon_thresh.check(hdr_lon)) { - inc_count(rej_mask, i, j); - continue; - } - } + // Check forecast values + double fcst_v; + if(!is_keeper_fcst(pnt_obs_str.c_str(), + i_msg_typ, i_mask, i_interp, + hdr_typ_str, gr, + obs_x, obs_y, hdr_elv, + obs_v, obs_lvl, obs_hgt, + cpi, fcst_v)) continue; - // Compute the interpolated values - for(k=0; klevel().type() == LevelType_Pres ? 
- obs_lvl : obs_hgt); - - // Compute the interpolated climatology mean - cmn_v = compute_interp(climo_mn_dpa, obs_x, obs_y, obs_v, - bad_data_double, bad_data_double, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->level().type(), - to_lvl, cmn_lvl_blw, cmn_lvl_abv); - - // Check for bad data - if(climo_mn_dpa.n_planes() > 0 && is_bad_data(cmn_v)) { - inc_count(rej_cmn, i, j, k); - continue; - } + // Check matched pair filtering options + ConcatString reason_cs; + if(!check_mpr_thresh(fcst_v, obs_v, cpi, + mpr_column, mpr_thresh, &reason_cs)) { - // Check for valid interpolation options - if(climo_sd_dpa.n_planes() > 0 && - (pd[0][0][k].interp_mthd == InterpMthd::Min || - pd[0][0][k].interp_mthd == InterpMthd::Max || - pd[0][0][k].interp_mthd == InterpMthd::Median || - pd[0][0][k].interp_mthd == InterpMthd::Best)) { - mlog << Warning << "\nVxPairDataPoint::add_point_obs() -> " - << "applying the " - << interpmthd_to_string(pd[0][0][k].interp_mthd) - << " interpolation method to climatological spread " - << "may cause unexpected results.\n\n"; - } + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation" + << "due to matched pair filter since " + << reason_cs << ":\n" + << pnt_obs_str << "\n"; + } - // Compute the interpolated climatology standard deviation - csd_v = compute_interp(climo_sd_dpa, obs_x, obs_y, obs_v, - bad_data_double, bad_data_double, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->level().type(), - to_lvl, csd_lvl_blw, csd_lvl_abv); - - // Check for bad data - if(climo_sd_dpa.n_planes() > 0 && is_bad_data(csd_v)) { - inc_count(rej_csd, i, j, k); + inc_count(rej_mpr, i_msg_typ, i_mask, i_interp); continue; } - // For surface verification, 
apply land/sea and topo masks - if((sfc_info.land_ptr || sfc_info.topo_ptr) && - (msg_typ_sfc.reg_exp_match(hdr_typ_str))) { - - bool is_land = msg_typ_lnd.has(hdr_typ_str); - - // Check for a single forecast DataPlane - if(fcst_dpa.n_planes() != 1) { - mlog << Error << "\nVxPairDataPoint::add_point_obs() -> " - << "unexpected number of forecast levels (" - << fcst_dpa.n_planes() - << ") for surface verification! Set \"land_mask.flag\" and " - << "\"topo_mask.flag\" to false to disable this check.\n\n"; - exit(1); + // Add the forecast, climatological, and observation data + // Weight is from the nearest grid point + int n = three_to_one(i_msg_typ, i_mask, i_interp); + if(!pd[n].add_point_pair(hdr_typ_str, hdr_sid_str, + hdr_lat, hdr_lon, obs_x, obs_y, hdr_ut, obs_lvl, + obs_hgt, fcst_v, obs_v, obs_qty, cpi, default_weight)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() + << ", skipping observation since it is a duplicate:\n" + << pnt_obs_str << "\n"; } - fcst_v = compute_sfc_interp(fcst_dpa[0], obs_x, obs_y, hdr_elv, obs_v, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, sfc_info, is_land); - } - // Otherwise, compute interpolated value - else { - fcst_v = compute_interp(fcst_dpa, obs_x, obs_y, obs_v, cmn_v, csd_v, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->level().type(), - to_lvl, f_lvl_blw, f_lvl_abv); - } - - if(is_bad_data(fcst_v)) { - mlog << Debug(4) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation due to bad data in the " - << interpmthd_to_string(pd[0][0][k].interp_mthd) << "(" - << pd[0][0][k].interp_wdth * pd[0][0][k].interp_wdth - << ") interpolated forecast value:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, 
hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - inc_count(rej_fcst, i, j, k); + inc_count(rej_dup, i_msg_typ, i_mask, i_interp); continue; } - // Check matched pair filtering options - if(!check_mpr_thresh(fcst_v, obs_v, cmn_v, csd_v, - mpr_column, mpr_thresh, &reason_cs)) { - mlog << Debug(4) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation due to matched pair filter since " - << reason_cs << ":\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - inc_count(rej_mpr, i, j, k); - continue; + // Compute seeps + if (precip_flag && precip_interval == 24*60*60) { // 24 hour precip only + seeps = pd[n].compute_seeps(hdr_sid_str, fcst_v, obs_v, hdr_ut); } + else { + seeps = nullptr; + } + pd[n].set_seeps_score(seeps); + if (seeps) { delete seeps; seeps = nullptr; } - // Compute weight for current point - wgt_v = (wgt_dp == (DataPlane *) 0 ? - default_grid_weight : wgt_dp->get(x, y)); - - // Add the forecast, climatological, and observation data - // Weight is from the nearest grid point - if(!pd[i][j][k].add_point_pair(hdr_sid_str, - hdr_lat, hdr_lon, obs_x, obs_y, hdr_ut, obs_lvl, - obs_hgt, fcst_v, obs_v, obs_qty, cmn_v, csd_v, - wgt_v)) { - mlog << Debug(4) + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation since it is a duplicate:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - inc_count(rej_dup, i, j, k); - } - seeps = 0; - if (precip_flag && precip_interval == 24*60*60) { // 24 hour precip only - seeps = pd[i][j][k].compute_seeps(hdr_sid_str, fcst_v, obs_v, hdr_ut); + << obs_info->magic_str() << ", for observation type " + << pd[n].msg_typ << ", over region " + << pd[n].mask_name << ", for interpolation method " + << 
interpmthd_to_string(pd[n].interp_mthd) << "(" + << pd[n].interp_wdth * pd[n].interp_wdth + << "), using observation:\n" + << pnt_obs_str << "\n"; } - pd[i][j][k].set_seeps_score(seeps); - if (seeps) delete seeps; - } // end for k - } // end for j - } // end for i - return; -} - -//////////////////////////////////////////////////////////////////////// - -int VxPairDataPoint::get_n_pair() const { - int n, i, j, k; - - if(n_msg_typ == 0 || n_mask == 0 || n_interp == 0) { - mlog << Warning << "\nVxPairDataPoint::get_n_pair() -> " - << "set_pd_size() has not been called yet!\n\n"; - } - - for(i=0, n=0; i " - << "set_pd_size() has not been called yet!\n\n"; - } - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_check_unique(duplicate_flag == DuplicateType::Unique); - } - } - } - -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_obs_summary(ObsSummary s) { - - if(n_msg_typ == 0 || n_mask == 0 || n_interp == 0) { - mlog << Warning << "\nVxPairDataPoint::set_obs_summary() -> " - << "set_pd_size() has not been called yet!\n\n"; - } - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_obs_summary(s); - } - } - } + } // end for i_interp + } // end for i_mask + } // end for i_msg_typ return; } //////////////////////////////////////////////////////////////////////// -void VxPairDataPoint::set_obs_perc_value(int percentile) { +void VxPairDataPoint::load_seeps_climo(const ConcatString &seeps_climo_name) { - if(n_msg_typ == 0 || n_mask == 0 || n_interp == 0) { - mlog << Warning << "\nVxPairDataPoint::set_obs_perc_value() -> " - << "set_pd_size() has not been called yet!\n\n"; + if(n_vx == 0) { + mlog << Warning << "\nVxPairDataPoint::load_seeps_climo() -> " + << "set_size() has not been called yet!\n\n"; } - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - 
for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_obs_perc_value(percentile); - } - } - } + for(auto &x : pd) x.load_seeps_climo(seeps_climo_name); return; } //////////////////////////////////////////////////////////////////////// -void VxPairDataPoint::load_seeps_climo() { - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].load_seeps_climo(); - } - } - } -} - -//////////////////////////////////////////////////////////////////////// - void VxPairDataPoint::set_seeps_thresh(const SingleThresh &p1_thresh) { - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_seeps_thresh(p1_thresh); - } - } - } -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::print_obs_summary() { - - if(n_msg_typ == 0 || n_mask == 0 || n_interp == 0) { - mlog << Warning << "\nVxPairDataPoint::print_obs_summary() -> " - << "set_pd_size() has not been called yet!\n\n"; - } - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].print_obs_summary(); - } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::calc_obs_summary() { - - if(n_msg_typ == 0 || n_mask == 0 || n_interp == 0) { - mlog << Warning << "\nVxPairDataPoint::calc_obs_summary() -> " - << "set_pd_size() has not been called yet!\n\n"; - } - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].calc_obs_summary(); - } - } - } - - return; -} -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::inc_count(int ***&rej, int i) { - int j, k; - - for(j=0; j " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// - -void 
VxPairDataPoint::inc_count(int ***&rej, int i, int j) { - int k; - - for(k=0; k 1) { // Loop through the columns - for(j=1; j " @@ -1719,7 +841,8 @@ double get_mpr_column_value(double f, double o, double cmn, double csd, //////////////////////////////////////////////////////////////////////// void apply_mpr_thresh_mask(DataPlane &fcst_dp, DataPlane &obs_dp, - DataPlane &cmn_dp, DataPlane &csd_dp, + DataPlane &fcmn_dp, DataPlane &fcsd_dp, + DataPlane &ocmn_dp, DataPlane &ocsd_dp, const StringArray &col_sa, const ThreshArray &col_ta) { // Check for no work to be done @@ -1737,33 +860,43 @@ void apply_mpr_thresh_mask(DataPlane &fcst_dp, DataPlane &obs_dp, int nxy = fcst_dp.nx() * fcst_dp.ny(); int n_skip = 0; - bool cmn_flag = !(cmn_dp.is_empty()); - bool csd_flag = !(csd_dp.is_empty()); + bool fcmn_flag = !(fcmn_dp.is_empty()); + bool fcsd_flag = !(fcsd_dp.is_empty()); + bool ocmn_flag = !(ocmn_dp.is_empty()); + bool ocsd_flag = !(ocsd_dp.is_empty()); // Loop over the pairs for(int i=0; i seeps_mpr; - SeepsAggScore seeps; + SeepsAggScore seeps_agg; ////////////////////////////////////////////////////////////////// @@ -56,21 +56,27 @@ class PairDataPoint : public PairBase { void extend(int); - bool add_point_pair(const char *, double, double, double, double, + bool add_point_pair(const char *, const char *, + double, double, double, double, unixtime, double, double, double, double, - const char *, double, double, double); - void load_seeps_climo(); + const char *, const ClimoPntInfo &, double); + + void load_seeps_climo(const ConcatString &seeps_climo_name); void set_seeps_thresh(const SingleThresh &p1_thresh); void set_seeps_score(SeepsScore *, int index=-1); - void set_point_pair(int, const char *, double, double, double, double, + void set_point_pair(int, const char *, const char *, + double, double, double, double, unixtime, double, double, double, double, - const char *, double, double, double, SeepsScore *); + const char *, const ClimoPntInfo &, + double, const 
SeepsScore *); - bool add_grid_pair(double, double, double, double, double); + bool add_grid_pair(double, double, + const ClimoPntInfo &, double); bool add_grid_pair(const NumArray &f_in, const NumArray &o_in, - const NumArray &cmn_in, const NumArray &csd_in, + const NumArray &fcmn_in, const NumArray &fcsd_in, + const NumArray &ocmn_in, const NumArray &ocsd_in, const NumArray &w_in); PairDataPoint subset_pairs_cnt_thresh(const SingleThresh &ft, @@ -82,12 +88,11 @@ class PairDataPoint : public PairBase { //////////////////////////////////////////////////////////////////////// // -// Class to store a variety of PairDataPoint objects for each -// verification task +// Class to store PairDataPoint objects for point verification // //////////////////////////////////////////////////////////////////////// -class VxPairDataPoint { +class VxPairDataPoint : public VxPairBase { private: @@ -107,163 +112,21 @@ class VxPairDataPoint { // ////////////////////////////////////////////////////////////////// - VarInfo *fcst_info; // Forecast field, allocated by VarInfoFactory - VarInfo *climo_info; // Climatology field, allocated by VarInfoFactory - VarInfoGrib *obs_info; // Observation field, allocated by VarInfoFactory - - ConcatString desc; // User description from config file - - double interp_thresh; // Threshold between 0 and 1 used when - // interpolating the forecasts to the - // observation location. - - ////////////////////////////////////////////////////////////////// - // - // Forecast and climatology fields falling between the requested - // levels. Store the fields in a data plane array. 
- // - ////////////////////////////////////////////////////////////////// - - DataPlaneArray fcst_dpa; // Forecast data plane array - DataPlaneArray climo_mn_dpa; // Climatology mean data plane array - DataPlaneArray climo_sd_dpa; // Climatology standard deviation data plane array - - ////////////////////////////////////////////////////////////////// - - unixtime fcst_ut; // Forecast valid time - unixtime beg_ut; // Beginning of valid time window - unixtime end_ut; // End of valid time window - - ////////////////////////////////////////////////////////////////// - - StringArray sid_inc_filt; // Station ID inclusion list - StringArray sid_exc_filt; // Station ID exclusion list - StringArray obs_qty_inc_filt; // Observation quality include markers - StringArray obs_qty_exc_filt; // Observation quality exclude markers - - ////////////////////////////////////////////////////////////////// - - StringArray mpr_column; // Names of MPR columns or diffs of columns - ThreshArray mpr_thresh; // Filtering thresholds for the MPR columns - - ////////////////////////////////////////////////////////////////// - - StringArray msg_typ_sfc; // List of surface message types - StringArray msg_typ_lnd; // List of surface land message types - StringArray msg_typ_wtr; // List of surface water message types - - SurfaceInfo sfc_info; // Land/sea mask and topography info - - ////////////////////////////////////////////////////////////////// - - int n_msg_typ; // Number of verifying message types - - int n_mask; // Total number of masking regions - // of masking DataPlane fields or SIDs - - int n_interp; // Number of interpolation techniques - - ////////////////////////////////////////////////////////////////// - - PairDataPoint ***pd; // 3-Dim Array of PairDataPoint objects - // as [n_msg_typ][n_mask][n_interp] - - // Counts for observation rejection reason codes - int n_try; // Number of observations processed - int rej_sid; // Reject based on SID inclusion and exclusion lists - int 
rej_var; // Reject based on observation variable name - int rej_vld; // Reject based on valid time - int rej_obs; // Reject observation bad data - int rej_grd; // Reject based on location - int rej_topo; // Reject based on topography - int rej_lvl; // Reject based on vertical level - int rej_qty; // Reject based on obs quality - - // 3-Dim Arrays for observation rejection reason codes - int ***rej_typ; // Reject based on message type - int ***rej_mask; // Reject based on masking region - int ***rej_fcst; // Reject forecast bad data - int ***rej_cmn; // Reject climo mean bad data - int ***rej_csd; // Reject climo stdev bad data - int ***rej_mpr; // Reject based on MPR filtering logic - int ***rej_dup; // Reject based on duplicates logic + // 3-Dim vector of PairDataPoint objects [n_msg_typ][n_mask][n_interp] + std::vector pd; ////////////////////////////////////////////////////////////////// void clear(); - void set_fcst_info(VarInfo *); - void set_climo_info(VarInfo *); - void set_obs_info(VarInfoGrib *); - - void set_desc(const char *); - - void set_interp_thresh(double); - - void set_fcst_dpa(const DataPlaneArray &); - void set_climo_mn_dpa(const DataPlaneArray &); - void set_climo_sd_dpa(const DataPlaneArray &); + void set_size(int, int, int); - void set_fcst_ut(const unixtime); - void set_beg_ut(const unixtime); - void set_end_ut(const unixtime); - - void set_sid_inc_filt(const StringArray &); - void set_sid_exc_filt(const StringArray &); - void set_obs_qty_inc_filt(const StringArray &); - void set_obs_qty_exc_filt(const StringArray &); - - // Call set_pd_size before set_msg_typ, set_mask_area, and set_interp - void set_pd_size(int, int, int); - - void set_msg_typ(int, const char *); - void set_msg_typ_vals(int, const StringArray &); - void set_mask_area(int, const char *, MaskPlane *); - void set_mask_sid(int, const char *, StringArray *); - void set_mask_llpnt(int, const char *, MaskLatLon *); - - void set_interp(int i_interp, const char *interp_mthd_str, int 
width, - GridTemplateFactory::GridTemplates shape); - void set_interp(int i_interp, InterpMthd mthd, - int width, GridTemplateFactory::GridTemplates shape); - - void set_mpr_thresh(const StringArray &, const ThreshArray &); - - void load_seeps_climo(); + void load_seeps_climo(const ConcatString &seeps_climo_name); void set_seeps_thresh(const SingleThresh &p1_thresh); - void set_climo_cdf_info_ptr(const ClimoCDFInfo *); - - void set_msg_typ_sfc(const StringArray &); - void set_msg_typ_lnd(const StringArray &); - void set_msg_typ_wtr(const StringArray &); - - void set_sfc_info(const SurfaceInfo &); - void add_point_obs(float *, const char *, const char *, unixtime, - const char *, float *, Grid &, const char * = 0, - const DataPlane * = 0); - - void add_prec_point_obs(float *, const char *, const char *, unixtime, - const char *, float *, Grid &, int month, int hour, - const char * = 0, const DataPlane * = 0); - - int get_n_pair() const; - - void set_duplicate_flag(DuplicateType duplicate_flag); - - void set_obs_summary(ObsSummary obs_summary); - - void set_obs_perc_value(int percentile); - - void print_obs_summary(); - - void calc_obs_summary(); - - // Member functions for incrementing the counts - void inc_count(int ***&, int); - void inc_count(int ***&, int, int); - void inc_count(int ***&, int, int, int); + const char *, float *, const Grid &, + const char *); }; @@ -273,18 +136,19 @@ class VxPairDataPoint { // //////////////////////////////////////////////////////////////////////// -extern bool check_fo_thresh(double, double, double, double, +extern bool check_fo_thresh(double, double, const ClimoPntInfo &, const SingleThresh &, const SingleThresh &, const SetLogic); -extern bool check_mpr_thresh(double, double, double, double, +extern bool check_mpr_thresh(double, double, const ClimoPntInfo &, const StringArray &, const ThreshArray &, ConcatString * = 0); -extern double get_mpr_column_value(double, double, double, double, +extern double 
get_mpr_column_value(double, double, const ClimoPntInfo &, const char *); extern void apply_mpr_thresh_mask(DataPlane &, DataPlane &, + DataPlane &, DataPlane &, DataPlane &, DataPlane &, const StringArray &, const ThreshArray &); @@ -308,13 +172,6 @@ extern void subset_wind_pairs(const PairDataPoint &, extern PairDataPoint subset_climo_cdf_bin(const PairDataPoint &, const ThreshArray &, int i_bin); -// Write the point observation in the MET point format for logging -extern ConcatString point_obs_to_string( - float *hdr_arr, const char *hdr_typ_str, - const char *hdr_sid_str, unixtime hdr_ut, - const char *obs_qty, float *obs_arr, - const char *var_name); - //////////////////////////////////////////////////////////////////////// #endif // __PAIR_DATA_POINT_H__ diff --git a/src/libcode/vx_statistics/read_climo.cc b/src/libcode/vx_statistics/read_climo.cc index 8e43749a8d..8f8ddd8e9b 100644 --- a/src/libcode/vx_statistics/read_climo.cc +++ b/src/libcode/vx_statistics/read_climo.cc @@ -29,7 +29,7 @@ using namespace std; static void read_climo_file( const char *, GrdFileType, Dictionary *, unixtime, int, int, const Grid &, const RegridInfo &, - DataPlaneArray &dpa); + DataPlaneArray &dpa, const char *); static DataPlaneArray climo_time_interp( const DataPlaneArray &, int, unixtime, InterpMthd); @@ -39,8 +39,12 @@ static DataPlane climo_hms_interp( //////////////////////////////////////////////////////////////////////// -DataPlane read_climo_data_plane(Dictionary *dict, int i_vx, - unixtime vld_ut, const Grid &vx_grid) { +DataPlane read_climo_data_plane(Dictionary *dict, + const char *entry_name, + int i_vx, + unixtime vld_ut, + const Grid &vx_grid, + const char *desc) { DataPlane dp; DataPlaneArray dpa; @@ -48,13 +52,14 @@ DataPlane read_climo_data_plane(Dictionary *dict, int i_vx, if(!dict) return dp; // Read array of climatology fields - dpa = read_climo_data_plane_array(dict, i_vx, vld_ut, vx_grid); + dpa = read_climo_data_plane_array(dict, entry_name, i_vx, + 
vld_ut, vx_grid, desc); // Check for multiple matches if(dpa.n_planes() > 1) { mlog << Warning << "\nread_climo_data_plane() -> " - << "Found " << dpa.n_planes() << " matching climatology " - << "fields. Using the first match found.\n\n"; + << "Found " << dpa.n_planes() << " matching " << desc + << " fields. Using the first match found.\n\n"; } // Store the first match found @@ -65,80 +70,120 @@ DataPlane read_climo_data_plane(Dictionary *dict, int i_vx, //////////////////////////////////////////////////////////////////////// -DataPlaneArray read_climo_data_plane_array(Dictionary *dict, int i_vx, +DataPlaneArray read_climo_data_plane_array(Dictionary *dict, + const char *climo_name, + int i_vx, unixtime vld_ut, - const Grid &vx_grid) { + const Grid &vx_grid, + const char *desc) { + + const char *method_name = "read_climo_data_plane_array() -> "; + + // + // Parse each of the climatology configuration entries separately + // using the "climo_name.entry_name" scope notation. Use the value + // from the specified dictionary (e.g. "fcst.climo_mean") if found, + // or use the value from the parent dictionary (e.g. top-level config + // "climo_mean") if not found. + // DataPlaneArray dpa; - StringArray climo_files; - RegridInfo regrid_info; - InterpMthd time_interp; - GrdFileType ctype; - double day_interval, hour_interval; - int i, day_ts, hour_ts; + ConcatString cs; // Check for null if(!dict) return dpa; - // Get the i-th array entry - Dictionary i_dict = parse_conf_i_vx_dict(dict, i_vx); - - // Climatology mean and standard deviation files - climo_files = i_dict.lookup_string_array(conf_key_file_name, false); + // Parse the "file_name" array entry + cs << cs_erase << climo_name << "." 
<< conf_key_file_name; + StringArray climo_files(dict->lookup_string_array(cs.c_str())); - // Check for at least one file + // Check for at least one input file if(climo_files.n() == 0) return dpa; - // Regrid info - regrid_info = parse_conf_regrid(&i_dict); + // Parse the "field" array entry + cs << cs_erase << climo_name << "." << conf_key_field; + Dictionary *field_dict = dict->lookup_array(cs.c_str(), false); + + // Determine which climo array entry to use + int i_climo_field = bad_data_int; + if(field_dict->n_entries() == 0) return dpa; + else if(field_dict->n_entries() == 1) i_climo_field = 0; + else i_climo_field = i_vx; - // Time interpolation - time_interp = int_to_interpmthd(i_dict.lookup_int(conf_key_time_interp_method)); + // Parse the climo dictionary + Dictionary i_dict = parse_conf_i_vx_dict(field_dict, i_climo_field); - // Day interval - day_interval = i_dict.lookup_double(conf_key_day_interval); + // Parse the "regrid" dictionary from the top-level + // config file context (e.g. "config.climo_mean.regrid") + // to serve as the default. + RegridInfo regrid_default = parse_conf_regrid( + dict->parent()->lookup_dictionary(climo_name, false)); - // Range check day_interval + // Parse the "time_interp_method" + cs << cs_erase << climo_name << "." << conf_key_time_interp_method; + InterpMthd time_interp = int_to_interpmthd(dict->lookup_int(cs.c_str())); + + // Parse the "day_interval" value + cs << cs_erase << climo_name << "." 
<< conf_key_day_interval; + double day_interval = dict->lookup_double(cs.c_str()); + + // Range check day_interval value if(!is_bad_data(day_interval) && day_interval < 1) { - mlog << Error << "\nread_climo_data_plane_array() -> " - << "The \"" << conf_key_day_interval << "\" entry (" + mlog << Error << "\n" << method_name + << "The " << conf_key_day_interval << " entry (" << day_interval << ") can be set to " << na_str << " or a value of at least 1.\n\n"; exit(1); } - // Hour interval - hour_interval = i_dict.lookup_double(conf_key_hour_interval); + // Parse the "hour_interval" value + cs << cs_erase << climo_name << "." << conf_key_hour_interval; + double hour_interval = dict->lookup_double(cs.c_str()); // Range check hour_interval if(!is_bad_data(hour_interval) && (hour_interval <= 0 || hour_interval > 24)) { - mlog << Error << "\nread_climo_data_plane_array() -> " - << "The \"" << conf_key_hour_interval << "\" entry (" + mlog << Error << "\n" << method_name + << "The " << conf_key_hour_interval << " entry (" << hour_interval << ") can be set to " << na_str << " or a value between 0 and 24.\n\n"; exit(1); } - // Check if file_type was specified - ctype = parse_conf_file_type(&i_dict); + // Log search criteria + if(mlog.verbosity_level() >= 5) { + mlog << Debug(5) + << "Searching " << climo_files.n() + << " file(s) for " << desc + << " data using climo_name = " << climo_name + << ", i_vx = " << i_vx + << ", valid time = " << unix_to_yyyymmdd_hhmmss(vld_ut) + << ", time_interp = " << interpmthd_to_string(time_interp) + << ", day_interval = " << day_interval + << ", hour_interval = " << hour_interval + << "\n"; + } // Store the time steps in seconds - day_ts = (is_bad_data(day_interval) ? bad_data_int : - nint(day_interval * 24.0 * sec_per_hour)); - hour_ts = (is_bad_data(hour_interval) ? bad_data_int : - nint(hour_interval * sec_per_hour)); - + int day_ts = (is_bad_data(day_interval) ? 
bad_data_int : + nint(day_interval * 24.0 * sec_per_hour)); + int hour_ts = (is_bad_data(hour_interval) ? bad_data_int : + nint(hour_interval * sec_per_hour)); + + // Check if file_type was specified + GrdFileType ctype = parse_conf_file_type(&i_dict); + // Search the files for the requested records - for(i=0; i " - << "Trouble reading climatology file \"" - << climo_file << "\"\n\n"; + << "Trouble reading climatology file " + << climo_file << "\n\n"; return; } // Parse the variable name and level info = info_factory.new_var_info(mtddf->file_type()); + info->set_default_regrid(regrid_default); info->set_dict(*dict); // Read data planes @@ -191,21 +237,21 @@ void read_climo_file(const char *climo_file, GrdFileType ctype, // Check the day time step if(!is_bad_data(day_ts) && abs(day_diff_sec) >= day_ts) { - mlog << Debug(3) << "Skipping " << clm_ut_cs << " \"" << info->magic_str() - << "\" climatology field with " << day_diff_sec / sec_per_day + mlog << Debug(3) << "Skipping " << clm_ut_cs << " " << info->magic_str() + << " climatology field with " << day_diff_sec / sec_per_day << " day offset (" << conf_key_day_interval << " = " - << day_ts / sec_per_day << ") from file \"" - << climo_file << "\".\n"; + << day_ts / sec_per_day << ") from file " + << climo_file << ".\n"; continue; } // Check the hour time step if(!is_bad_data(hour_ts) && abs(hms_diff_sec) >= hour_ts) { - mlog << Debug(3) << "Skipping " << clm_ut_cs << " \"" << info->magic_str() - << "\" climatology field with " << (double) hms_diff_sec / sec_per_hour + mlog << Debug(3) << "Skipping " << clm_ut_cs << " " << info->magic_str() + << " climatology field with " << (double) hms_diff_sec / sec_per_hour << " hour offset (" << conf_key_hour_interval << " = " - << hour_ts / sec_per_hour << ") from file \"" - << climo_file << "\".\n"; + << hour_ts / sec_per_hour << ") from file " + << climo_file << ".\n"; continue; } @@ -213,19 +259,20 @@ void read_climo_file(const char *climo_file, GrdFileType ctype, unixtime 
clm_vld_ut = vld_ut + day_diff_sec + hms_diff_sec; // Print log message for matching record - mlog << Debug(3) << "Storing " << clm_ut_cs << " \"" << info->magic_str() - << "\" climatology field with " << day_diff_sec / sec_per_day + mlog << Debug(3) << "Storing " << clm_ut_cs << " " << info->magic_str() + << " climatology field with " << day_diff_sec / sec_per_day << " day, " << (double) hms_diff_sec / sec_per_hour << " hour offset as time " - << unix_to_yyyymmdd_hhmmss(clm_vld_ut) << " from file \"" - << climo_file << "\".\n"; + << unix_to_yyyymmdd_hhmmss(clm_vld_ut) << " from file " + << climo_file << ".\n"; // Regrid, if needed if(!(mtddf->grid() == vx_grid)) { - mlog << Debug(2) << "Regridding " << clm_ut_cs << " \"" - << info->magic_str() - << "\" climatology field to the verification grid.\n"; + mlog << Debug(2) << "Regridding " << clm_ut_cs << " " + << desc << " field " << info->magic_str() + << " to the verification grid using " + << info->regrid().get_str() << ".\n"; dp = met_regrid(clm_dpa[i], mtddf->grid(), vx_grid, - regrid_info); + info->regrid()); } else { dp = clm_dpa[i]; @@ -343,8 +390,8 @@ DataPlaneArray climo_time_interp(const DataPlaneArray &dpa, int day_ts, // This should only occur when day_interval > 1. 
if(day_ts <= 3600*24) { mlog << Error << "\nclimo_time_interp() -> " - << "Expecting 1 or 2 climatology fields when \"" - << conf_key_day_interval << "\" <= 1 but found " + << "Expecting 1 or 2 climatology fields when " + << conf_key_day_interval << " <= 1 but found " << it->second.n() << "\n\n"; exit(1); } diff --git a/src/libcode/vx_statistics/read_climo.h b/src/libcode/vx_statistics/read_climo.h index a017df6388..64db97c04a 100644 --- a/src/libcode/vx_statistics/read_climo.h +++ b/src/libcode/vx_statistics/read_climo.h @@ -18,11 +18,15 @@ //////////////////////////////////////////////////////////////////////// -extern DataPlane read_climo_data_plane(Dictionary *, int, - unixtime, const Grid &); - -extern DataPlaneArray read_climo_data_plane_array(Dictionary *, int, - unixtime, const Grid &); +extern DataPlane read_climo_data_plane( + Dictionary *, const char *, + int, unixtime, const Grid &, + const char *); + +extern DataPlaneArray read_climo_data_plane_array( + Dictionary *, const char *, + int, unixtime, const Grid &, + const char *); //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_summary/Makefile.in b/src/libcode/vx_summary/Makefile.in index 6c057f2622..42cac04abd 100644 --- a/src/libcode/vx_summary/Makefile.in +++ b/src/libcode/vx_summary/Makefile.in @@ -256,6 +256,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_summary/summary_calc_percentile.cc b/src/libcode/vx_summary/summary_calc_percentile.cc index d55ace753d..5ac813944f 100644 --- a/src/libcode/vx_summary/summary_calc_percentile.cc +++ b/src/libcode/vx_summary/summary_calc_percentile.cc @@ -40,15 +40,15 @@ SummaryCalcPercentile::SummaryCalcPercentile(const string &type_string) : !isdigit(type_string[2])) { mlog << Error << 
"\nSummaryCalcPercentile::SummaryCalcPercentile() -> " - << "invalid percentile type \"" << type_string - << "\" specified in configuration file.\n\n"; + << "invalid percentile type \"" << type_string + << "\" specified in configuration file.\n\n"; exit(1); } - + // Pull the desired percentile from the string _percentile = atof(type_string.substr(1,2).c_str()) / 100.0; - + // Construct the type string ConcatString type_buffer; diff --git a/src/libcode/vx_tc_util/Makefile.in b/src/libcode/vx_tc_util/Makefile.in index 747df71532..faae6b3f1d 100644 --- a/src/libcode/vx_tc_util/Makefile.in +++ b/src/libcode/vx_tc_util/Makefile.in @@ -270,6 +270,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_tc_util/atcf_line_base.h b/src/libcode/vx_tc_util/atcf_line_base.h index 8b1141ffad..0547b7d5b0 100644 --- a/src/libcode/vx_tc_util/atcf_line_base.h +++ b/src/libcode/vx_tc_util/atcf_line_base.h @@ -131,13 +131,13 @@ class ATCFLineBase : public DataLine { //////////////////////////////////////////////////////////////////////// inline void ATCFLineBase::set_basin_map (const std::map *m) - { BasinMap = m; } -inline void ATCFLineBase::set_best_technique(const StringArray *s) { BestTechnique = s; } -inline void ATCFLineBase::set_oper_technique(const StringArray *s) { OperTechnique = s; } -inline void ATCFLineBase::set_tech_suffix (const ConcatString *s) { TechSuffix = s; } -inline void ATCFLineBase::set_technique (const ConcatString &s) { Technique = s; } -inline bool ATCFLineBase::is_best_track () const { return(IsBestTrack); } -inline bool ATCFLineBase::is_oper_track () const { return(IsOperTrack); } + { BasinMap = m; } +inline void ATCFLineBase::set_best_technique(const StringArray *s) { BestTechnique = s; } +inline void ATCFLineBase::set_oper_technique(const 
StringArray *s) { OperTechnique = s; } +inline void ATCFLineBase::set_tech_suffix (const ConcatString *s) { TechSuffix = s; } +inline void ATCFLineBase::set_technique (const ConcatString &s) { Technique = s; } +inline bool ATCFLineBase::is_best_track () const { return IsBestTrack; } +inline bool ATCFLineBase::is_oper_track () const { return IsOperTrack; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/atcf_prob_line.cc b/src/libcode/vx_tc_util/atcf_prob_line.cc index 8b320eb912..8a140997ae 100644 --- a/src/libcode/vx_tc_util/atcf_prob_line.cc +++ b/src/libcode/vx_tc_util/atcf_prob_line.cc @@ -148,7 +148,7 @@ int ATCFProbLine::read_line(LineDataFile * ldf) { status = 0; continue; } - } + } return 1; } diff --git a/src/libcode/vx_tc_util/diag_file.h b/src/libcode/vx_tc_util/diag_file.h index 9c302d5528..f387625758 100644 --- a/src/libcode/vx_tc_util/diag_file.h +++ b/src/libcode/vx_tc_util/diag_file.h @@ -135,17 +135,17 @@ class DiagFile : public LineDataFile { //////////////////////////////////////////////////////////////////////// -inline const ConcatString & DiagFile::storm_id() const { return(StormId); } -inline const ConcatString & DiagFile::basin() const { return(Basin); } -inline const ConcatString & DiagFile::cyclone() const { return(Cyclone); } -inline const StringArray & DiagFile::technique() const { return(Technique); } -inline unixtime DiagFile::init() const { return(InitTime); } -inline int DiagFile::n_time() const { return(NTime); } -inline DiagType DiagFile::diag_source() const { return(DiagSource); } -inline const ConcatString & DiagFile::track_source() const { return(TrackSource); } -inline const ConcatString & DiagFile::field_source() const { return(FieldSource); } -inline int DiagFile::n_diag() const { return(DiagName.n()); } -inline const StringArray & DiagFile::diag_name() const { return(DiagName); } +inline const ConcatString & DiagFile::storm_id() const { return StormId; } +inline 
const ConcatString & DiagFile::basin() const { return Basin; } +inline const ConcatString & DiagFile::cyclone() const { return Cyclone; } +inline const StringArray & DiagFile::technique() const { return Technique; } +inline unixtime DiagFile::init() const { return InitTime; } +inline int DiagFile::n_time() const { return NTime; } +inline DiagType DiagFile::diag_source() const { return DiagSource; } +inline const ConcatString & DiagFile::track_source() const { return TrackSource; } +inline const ConcatString & DiagFile::field_source() const { return FieldSource; } +inline int DiagFile::n_diag() const { return DiagName.n(); } +inline const StringArray & DiagFile::diag_name() const { return DiagName; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/gen_shape_info.h b/src/libcode/vx_tc_util/gen_shape_info.h index 58ea1d9a6a..f27af62aac 100644 --- a/src/libcode/vx_tc_util/gen_shape_info.h +++ b/src/libcode/vx_tc_util/gen_shape_info.h @@ -91,11 +91,11 @@ class GenShapeInfo { //////////////////////////////////////////////////////////////////////// -inline const ConcatString & GenShapeInfo::basin() const { return(Basin); } +inline const ConcatString & GenShapeInfo::basin() const { return Basin; } -inline unixtime GenShapeInfo::file_time() const { return(FileTime); } -inline unixtime GenShapeInfo::issue_time() const { return(IssueTime); } -inline int GenShapeInfo::issue_hour() const { return(unix_to_sec_of_day(IssueTime)); } +inline unixtime GenShapeInfo::file_time() const { return FileTime ; } +inline unixtime GenShapeInfo::issue_time() const { return IssueTime; } +inline int GenShapeInfo::issue_hour() const { return unix_to_sec_of_day(IssueTime); } //////////////////////////////////////////////////////////////////////// // @@ -140,7 +140,7 @@ class GenShapeInfoArray { //////////////////////////////////////////////////////////////////////// -inline int GenShapeInfoArray::n() const { return(GenShape.size()); } 
+inline int GenShapeInfoArray::n() const { return GenShape.size(); } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/genesis_info.h b/src/libcode/vx_tc_util/genesis_info.h index 11df94257e..5ab8da740c 100644 --- a/src/libcode/vx_tc_util/genesis_info.h +++ b/src/libcode/vx_tc_util/genesis_info.h @@ -112,11 +112,11 @@ class GenesisInfo : public TrackInfo { //////////////////////////////////////////////////////////////////////// -inline double GenesisInfo::lat() const { return(Lat); } -inline double GenesisInfo::lon() const { return(Lon); } -inline double GenesisInfo::dland() const { return(DLand); } -inline unixtime GenesisInfo::genesis_time() const { return(GenesisTime); } -inline int GenesisInfo::genesis_lead() const { return(GenesisLead); } +inline double GenesisInfo::lat() const { return Lat; } +inline double GenesisInfo::lon() const { return Lon; } +inline double GenesisInfo::dland() const { return DLand; } +inline unixtime GenesisInfo::genesis_time() const { return GenesisTime; } +inline int GenesisInfo::genesis_lead() const { return GenesisLead; } //////////////////////////////////////////////////////////////////////// // @@ -167,7 +167,7 @@ class GenesisInfoArray { //////////////////////////////////////////////////////////////////////// -inline int GenesisInfoArray::n() const { return(Genesis.size()); } +inline int GenesisInfoArray::n() const { return Genesis.size(); } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/pair_data_genesis.h b/src/libcode/vx_tc_util/pair_data_genesis.h index 89b0a5b2f6..25fbd653f5 100644 --- a/src/libcode/vx_tc_util/pair_data_genesis.h +++ b/src/libcode/vx_tc_util/pair_data_genesis.h @@ -142,13 +142,13 @@ inline void PairDataGenesis::set_desc (const ConcatString &s) { Desc = s; } inline void PairDataGenesis::set_mask (const ConcatString &s) { Mask = s; } inline void PairDataGenesis::set_model(const ConcatString 
&s) { Model = s; } -inline ConcatString PairDataGenesis::desc() const { return(Desc); } -inline ConcatString PairDataGenesis::mask() const { return(Mask); } -inline ConcatString PairDataGenesis::model() const { return(Model); } -inline int PairDataGenesis::n_pair() const { return(NPair); } -inline const std::string PairDataGenesis::best_storm_id(int i) const { return(BestStormId[i]); } -inline unixtime PairDataGenesis::init(int i) const { return(InitTime[i]); } -inline int PairDataGenesis::lead_time(int i) const { return(LeadTime[i]); } +inline ConcatString PairDataGenesis::desc() const { return Desc; } +inline ConcatString PairDataGenesis::mask() const { return Mask; } +inline ConcatString PairDataGenesis::model() const { return Model; } +inline int PairDataGenesis::n_pair() const { return NPair; } +inline const std::string PairDataGenesis::best_storm_id(int i) const { return BestStormId[i]; } +inline unixtime PairDataGenesis::init(int i) const { return InitTime[i]; } +inline int PairDataGenesis::lead_time(int i) const { return LeadTime[i]; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/prob_gen_info.h b/src/libcode/vx_tc_util/prob_gen_info.h index 514fa9c337..5d5a03c5bc 100644 --- a/src/libcode/vx_tc_util/prob_gen_info.h +++ b/src/libcode/vx_tc_util/prob_gen_info.h @@ -86,10 +86,10 @@ class ProbGenInfo : public ProbInfoBase { //////////////////////////////////////////////////////////////////////// -inline const ConcatString & ProbGenInfo::initials() const { return(Initials); } -inline const ConcatString & ProbGenInfo::gen_or_dis() const { return(GenOrDis); } -inline unixtime ProbGenInfo::genesis_time() const { return(GenesisTime); } -inline int ProbGenInfo::genesis_lead() const { return(GenesisLead); } +inline const ConcatString & ProbGenInfo::initials() const { return Initials; } +inline const ConcatString & ProbGenInfo::gen_or_dis() const { return GenOrDis; } +inline unixtime 
ProbGenInfo::genesis_time() const { return GenesisTime; } +inline int ProbGenInfo::genesis_lead() const { return GenesisLead; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/prob_info_array.cc b/src/libcode/vx_tc_util/prob_info_array.cc index 35a37708e2..0caaa7ed73 100644 --- a/src/libcode/vx_tc_util/prob_info_array.cc +++ b/src/libcode/vx_tc_util/prob_info_array.cc @@ -244,7 +244,7 @@ bool ProbInfoArray::add(const ATCFProbLine &l, double dland, bool check_dup) { // Store based on the input line type switch(l.type()) { - case(ATCFLineType::ProbRI): + case ATCFLineType::ProbRI: // Add line to an existing entry if(ProbRIRW.size() > 0 && @@ -260,7 +260,7 @@ bool ProbInfoArray::add(const ATCFProbLine &l, double dland, bool check_dup) { } break; - case(ATCFLineType::ProbGN): + case ATCFLineType::ProbGN: // Add line to an existing entry if(ProbGen.size() > 0 && diff --git a/src/libcode/vx_tc_util/prob_info_array.h b/src/libcode/vx_tc_util/prob_info_array.h index 53856a4a98..182fffd046 100644 --- a/src/libcode/vx_tc_util/prob_info_array.h +++ b/src/libcode/vx_tc_util/prob_info_array.h @@ -79,8 +79,8 @@ class ProbInfoArray { //////////////////////////////////////////////////////////////////////// inline int ProbInfoArray::n_probs() const { return(ProbRIRW.size() + ProbGen.size()); } -inline int ProbInfoArray::n_prob_rirw() const { return(ProbRIRW.size()); } -inline int ProbInfoArray::n_prob_gen() const { return(ProbGen.size()); } +inline int ProbInfoArray::n_prob_rirw() const { return ProbRIRW.size(); } +inline int ProbInfoArray::n_prob_gen() const { return ProbGen.size(); } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/prob_info_base.h b/src/libcode/vx_tc_util/prob_info_base.h index 09473eb1ab..1bae207793 100644 --- a/src/libcode/vx_tc_util/prob_info_base.h +++ b/src/libcode/vx_tc_util/prob_info_base.h @@ -114,21 +114,21 @@ class ProbInfoBase { 
//////////////////////////////////////////////////////////////////////// -inline ATCFLineType ProbInfoBase::type() const { return(Type); } -inline const ConcatString & ProbInfoBase::storm_id() const { return(StormId); } -inline const ConcatString & ProbInfoBase::basin() const { return(Basin); } -inline const ConcatString & ProbInfoBase::cyclone() const { return(Cyclone); } -inline const ConcatString & ProbInfoBase::technique() const { return(Technique); } -inline unixtime ProbInfoBase::init() const { return(InitTime); } -inline int ProbInfoBase::init_hour() const { return(unix_to_sec_of_day(InitTime)); } -inline unixtime ProbInfoBase::valid() const { return(ValidTime); } -inline int ProbInfoBase::valid_hour() const { return(unix_to_sec_of_day(ValidTime)); } -inline double ProbInfoBase::lat() const { return(Lat); } -inline double ProbInfoBase::lon() const { return(Lon); } -inline double ProbInfoBase::dland() const { return(DLand); } -inline int ProbInfoBase::n_prob() const { return(NProb); } -inline double ProbInfoBase::prob(int i) const { return(Prob[i]); } -inline double ProbInfoBase::prob_item(int i) const { return(ProbItem[i]); } +inline ATCFLineType ProbInfoBase::type() const { return Type; } +inline const ConcatString & ProbInfoBase::storm_id() const { return StormId; } +inline const ConcatString & ProbInfoBase::basin() const { return Basin; } +inline const ConcatString & ProbInfoBase::cyclone() const { return Cyclone; } +inline const ConcatString & ProbInfoBase::technique() const { return Technique; } +inline unixtime ProbInfoBase::init() const { return InitTime; } +inline int ProbInfoBase::init_hour() const { return unix_to_sec_of_day(InitTime); } +inline unixtime ProbInfoBase::valid() const { return ValidTime; } +inline int ProbInfoBase::valid_hour() const { return unix_to_sec_of_day(ValidTime); } +inline double ProbInfoBase::lat() const { return Lat; } +inline double ProbInfoBase::lon() const { return Lon; } +inline double ProbInfoBase::dland() const { 
return DLand; } +inline int ProbInfoBase::n_prob() const { return NProb; } +inline double ProbInfoBase::prob(int i) const { return Prob[i]; } +inline double ProbInfoBase::prob_item(int i) const { return ProbItem[i]; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/prob_rirw_info.cc b/src/libcode/vx_tc_util/prob_rirw_info.cc index ea662ef4c3..f12a2d66c5 100644 --- a/src/libcode/vx_tc_util/prob_rirw_info.cc +++ b/src/libcode/vx_tc_util/prob_rirw_info.cc @@ -143,8 +143,8 @@ void ProbRIRWInfo::assign(const ProbRIRWInfo &p) { //////////////////////////////////////////////////////////////////////// int ProbRIRWInfo::rirw_window() const { - return((is_bad_data(rirw_beg()) || is_bad_data(rirw_end()) ? - bad_data_int : rirw_end() - rirw_beg())); + return (is_bad_data(rirw_beg()) || is_bad_data(rirw_end()) ? + bad_data_int : rirw_end() - rirw_beg()); } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/prob_rirw_info.h b/src/libcode/vx_tc_util/prob_rirw_info.h index fd1ced2893..e2636ea284 100644 --- a/src/libcode/vx_tc_util/prob_rirw_info.h +++ b/src/libcode/vx_tc_util/prob_rirw_info.h @@ -78,10 +78,10 @@ class ProbRIRWInfo : public ProbInfoBase { //////////////////////////////////////////////////////////////////////// -inline double ProbRIRWInfo::value() const { return(Value); } -inline const ConcatString & ProbRIRWInfo::initials() const { return(Initials); } -inline int ProbRIRWInfo::rirw_beg() const { return(RIRWBeg); } -inline int ProbRIRWInfo::rirw_end() const { return(RIRWEnd); } +inline double ProbRIRWInfo::value() const { return Value; } +inline const ConcatString & ProbRIRWInfo::initials() const { return Initials; } +inline int ProbRIRWInfo::rirw_beg() const { return RIRWBeg; } +inline int ProbRIRWInfo::rirw_end() const { return RIRWEnd; } //////////////////////////////////////////////////////////////////////// diff --git 
a/src/libcode/vx_tc_util/prob_rirw_pair_info.h b/src/libcode/vx_tc_util/prob_rirw_pair_info.h index e809bc3198..f81dd22b37 100644 --- a/src/libcode/vx_tc_util/prob_rirw_pair_info.h +++ b/src/libcode/vx_tc_util/prob_rirw_pair_info.h @@ -111,24 +111,24 @@ class ProbRIRWPairInfo { //////////////////////////////////////////////////////////////////////// -inline const ProbRIRWInfo & ProbRIRWPairInfo::prob_rirw() const { return(ProbRIRW); } -inline const TrackInfo * ProbRIRWPairInfo::bdeck() const { return(BDeck); } -inline const ConcatString & ProbRIRWPairInfo::storm_name() const { return(StormName); } -inline const ConcatString & ProbRIRWPairInfo::bmodel() const { return(BModel); } -inline double ProbRIRWPairInfo::blat() const { return(BLat); } -inline double ProbRIRWPairInfo::blon() const { return(BLon); } -inline double ProbRIRWPairInfo::adland() const { return(ADLand); } -inline double ProbRIRWPairInfo::bdland() const { return(BDLand); } -inline double ProbRIRWPairInfo::track_err() const { return(TrackErr); } -inline double ProbRIRWPairInfo::x_err() const { return(XErr); } -inline double ProbRIRWPairInfo::y_err() const { return(YErr); } -inline double ProbRIRWPairInfo::bbegv() const { return(BBegV); } -inline double ProbRIRWPairInfo::bendv() const { return(BEndV); } -inline double ProbRIRWPairInfo::bminv() const { return(BMinV); } -inline double ProbRIRWPairInfo::bmaxv() const { return(BMaxV); } -inline CycloneLevel ProbRIRWPairInfo::bbeglev() const { return(BBegLev); } -inline CycloneLevel ProbRIRWPairInfo::bendlev() const { return(BEndLev); } -inline const TCStatLine & ProbRIRWPairInfo::line() const { return(Line); } +inline const ProbRIRWInfo & ProbRIRWPairInfo::prob_rirw() const { return ProbRIRW; } +inline const TrackInfo * ProbRIRWPairInfo::bdeck() const { return BDeck; } +inline const ConcatString & ProbRIRWPairInfo::storm_name() const { return StormName; } +inline const ConcatString & ProbRIRWPairInfo::bmodel() const { return BModel; } +inline double 
ProbRIRWPairInfo::blat() const { return BLat; } +inline double ProbRIRWPairInfo::blon() const { return BLon; } +inline double ProbRIRWPairInfo::adland() const { return ADLand; } +inline double ProbRIRWPairInfo::bdland() const { return BDLand; } +inline double ProbRIRWPairInfo::track_err() const { return TrackErr; } +inline double ProbRIRWPairInfo::x_err() const { return XErr; } +inline double ProbRIRWPairInfo::y_err() const { return YErr; } +inline double ProbRIRWPairInfo::bbegv() const { return BBegV; } +inline double ProbRIRWPairInfo::bendv() const { return BEndV; } +inline double ProbRIRWPairInfo::bminv() const { return BMinV; } +inline double ProbRIRWPairInfo::bmaxv() const { return BMaxV; } +inline CycloneLevel ProbRIRWPairInfo::bbeglev() const { return BBegLev; } +inline CycloneLevel ProbRIRWPairInfo::bendlev() const { return BEndLev; } +inline const TCStatLine & ProbRIRWPairInfo::line() const { return Line; } inline void ProbRIRWPairInfo::set_adland(double d) { ADLand = d; return; } inline void ProbRIRWPairInfo::set_bdland(double d) { BDLand = d; return; } @@ -183,7 +183,7 @@ class ProbRIRWPairInfoArray { //////////////////////////////////////////////////////////////////////// -inline int ProbRIRWPairInfoArray::n_pairs() const { return(Pairs.size()); } +inline int ProbRIRWPairInfoArray::n_pairs() const { return Pairs.size(); } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/tc_columns.cc b/src/libcode/vx_tc_util/tc_columns.cc index 6f7e7f2d6c..f91431718f 100644 --- a/src/libcode/vx_tc_util/tc_columns.cc +++ b/src/libcode/vx_tc_util/tc_columns.cc @@ -170,7 +170,8 @@ void write_prob_rirw_header_row(int hdr_flag, int n_thresh, AsciiTable &at, //////////////////////////////////////////////////////////////////////// void write_track_pair_info(TcHdrColumns &hdr, const TrackPairInfo &p, - AsciiTable &at, int &i_row) { + AsciiTable &at, int &i_row, + const StringArray &hdr_name, const StringArray 
&hdr_value) { // Loop through the TrackPairInfo points for(int i=0; iget_item("DESC", false)); } + // Apply -set_hdr options + if(hdr_name.n() > 0) hdr.apply_set_hdr_opts(hdr_name, hdr_value); + // Write the header columns write_tc_header_cols(hdr, at, i_row); @@ -221,6 +225,19 @@ void write_track_pair_info(TcHdrColumns &hdr, const TrackPairInfo &p, //////////////////////////////////////////////////////////////////////// +void write_track_pair_info(TcHdrColumns &hdr, const TrackPairInfo &p, + AsciiTable &at, int &i_row) { + + StringArray hdr_name; + StringArray hdr_value; + + write_track_pair_info(hdr, p, at, i_row, hdr_name, hdr_value); + + return; +} + +//////////////////////////////////////////////////////////////////////// + void write_prob_rirw_pair_info(TcHdrColumns &hdr, const ProbRIRWPairInfo &p, AsciiTable &at, int &i_row) { diff --git a/src/libcode/vx_tc_util/tc_columns.h b/src/libcode/vx_tc_util/tc_columns.h index 9a5aee0181..0d3a83e0a8 100644 --- a/src/libcode/vx_tc_util/tc_columns.h +++ b/src/libcode/vx_tc_util/tc_columns.h @@ -166,6 +166,8 @@ extern void write_prob_rirw_header_row(int, int, AsciiTable &, int, int); // Write out the data lines extern void write_track_pair_info (TcHdrColumns &, const TrackPairInfo &, AsciiTable &, int &); +extern void write_track_pair_info (TcHdrColumns &, const TrackPairInfo &, AsciiTable &, int &, + const StringArray &, const StringArray &); extern void write_prob_rirw_pair_info(TcHdrColumns &, const ProbRIRWPairInfo &, AsciiTable &, int &); // Write out the header entries diff --git a/src/libcode/vx_tc_util/tc_hdr_columns.cc b/src/libcode/vx_tc_util/tc_hdr_columns.cc index 3708417560..9cb4c1a77b 100644 --- a/src/libcode/vx_tc_util/tc_hdr_columns.cc +++ b/src/libcode/vx_tc_util/tc_hdr_columns.cc @@ -64,3 +64,89 @@ void TcHdrColumns::clear() { } //////////////////////////////////////////////////////////////////////// + +void TcHdrColumns::apply_set_hdr_opts( + const StringArray &hdr_cols, const StringArray &hdr_vals) { 
+ + // No updates needed + if(hdr_cols.n() == 0) return; + + int index; + + // Sanity check lengths + if(hdr_cols.n() != hdr_vals.n()) { + mlog << Error << "\nTcHdrColumns::apply_set_hdr_opts() -> " + << "the number of -set_hdr column names (" << hdr_cols.n() + << ") and values (" << hdr_vals.n() << ") must match!\n\n"; + exit(1); + } + + // AMODEL + if(hdr_cols.has("AMODEL", index)) { + set_adeck_model(hdr_vals[index]); + } + + // BMODEL + if(hdr_cols.has("BMODEL", index)) { + set_bdeck_model(hdr_vals[index]); + } + + // DESC + if(hdr_cols.has("DESC", index)) { + set_desc(hdr_vals[index]); + } + + // STORM_ID + if(hdr_cols.has("STORM_ID", index)) { + set_storm_id(hdr_vals[index]); + } + + // BASIN + if(hdr_cols.has("BASIN", index)) { + set_basin(hdr_vals[index]); + } + + // CYCLONE + if(hdr_cols.has("CYCLONE", index)) { + set_cyclone(hdr_vals[index]); + } + + // STORM_NAME + if(hdr_cols.has("STORM_NAME", index)) { + set_storm_name(hdr_vals[index]); + } + + // INIT + if(hdr_cols.has("INIT", index)) { + set_init(timestring_to_sec(hdr_vals[index].c_str())); + } + + // LEAD + if(hdr_cols.has("LEAD", index)) { + set_lead(timestring_to_sec(hdr_vals[index].c_str())); + } + + // VALID + if(hdr_cols.has("VALID", index)) { + set_valid(timestring_to_sec(hdr_vals[index].c_str())); + } + + // INIT_MASK + if(hdr_cols.has("INIT_MASK", index)) { + set_init_mask(hdr_vals[index]); + } + + // VALID_MASK + if(hdr_cols.has("VALID_MASK", index)) { + set_valid_mask(hdr_vals[index]); + } + + // LINE_TYPE + if(hdr_cols.has("LINE_TYPE", index)) { + set_line_type(hdr_vals[index]); + } + + return; +} + +//////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/tc_hdr_columns.h b/src/libcode/vx_tc_util/tc_hdr_columns.h index f632dfaf56..a2e31a6533 100644 --- a/src/libcode/vx_tc_util/tc_hdr_columns.h +++ b/src/libcode/vx_tc_util/tc_hdr_columns.h @@ -70,6 +70,9 @@ class TcHdrColumns { void set_valid_mask (const ConcatString &); void
set_line_type (const ConcatString &); + // Apply -set_hdr overrides + void apply_set_hdr_opts(const StringArray &, const StringArray &); + // Get functions ConcatString adeck_model () const; ConcatString bdeck_model () const; @@ -105,21 +108,21 @@ inline void TcHdrColumns::set_init_mask (const ConcatString &s) { InitMask = s inline void TcHdrColumns::set_valid_mask (const ConcatString &s) { ValidMask = s; } inline void TcHdrColumns::set_line_type (const ConcatString &s) { LineType = s; } -inline ConcatString TcHdrColumns::adeck_model () const { return(ADeckModel); } -inline ConcatString TcHdrColumns::bdeck_model () const { return(BDeckModel); } -inline ConcatString TcHdrColumns::desc () const { return(Desc); } -inline ConcatString TcHdrColumns::storm_id () const { return(StormId); } -inline ConcatString TcHdrColumns::basin () const { return(Basin); } -inline ConcatString TcHdrColumns::cyclone () const { return(Cyclone); } -inline ConcatString TcHdrColumns::storm_name () const { return(StormName); } -inline int TcHdrColumns::lead () const { return(LeadTime); } -inline unixtime TcHdrColumns::init () const { return(InitTime); } -inline int TcHdrColumns::init_hour () const { return(unix_to_sec_of_day(InitTime)); } -inline unixtime TcHdrColumns::valid () const { return(ValidTime); } -inline int TcHdrColumns::valid_hour () const { return(unix_to_sec_of_day(ValidTime)); } -inline ConcatString TcHdrColumns::init_mask () const { return(InitMask); } -inline ConcatString TcHdrColumns::valid_mask () const { return(ValidMask); } -inline ConcatString TcHdrColumns::line_type () const { return(LineType); } +inline ConcatString TcHdrColumns::adeck_model () const { return ADeckModel; } +inline ConcatString TcHdrColumns::bdeck_model () const { return BDeckModel; } +inline ConcatString TcHdrColumns::desc () const { return Desc; } +inline ConcatString TcHdrColumns::storm_id () const { return StormId; } +inline ConcatString TcHdrColumns::basin () const { return Basin; } +inline 
ConcatString TcHdrColumns::cyclone () const { return Cyclone; } +inline ConcatString TcHdrColumns::storm_name () const { return StormName; } +inline int TcHdrColumns::lead () const { return LeadTime; } +inline unixtime TcHdrColumns::init () const { return InitTime; } +inline int TcHdrColumns::init_hour () const { return unix_to_sec_of_day(InitTime); } +inline unixtime TcHdrColumns::valid () const { return ValidTime; } +inline int TcHdrColumns::valid_hour () const { return unix_to_sec_of_day(ValidTime); } +inline ConcatString TcHdrColumns::init_mask () const { return InitMask; } +inline ConcatString TcHdrColumns::valid_mask () const { return ValidMask; } +inline ConcatString TcHdrColumns::line_type () const { return LineType; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/tc_stat_line.cc b/src/libcode/vx_tc_util/tc_stat_line.cc index a8556a7dd2..d46dc41c77 100644 --- a/src/libcode/vx_tc_util/tc_stat_line.cc +++ b/src/libcode/vx_tc_util/tc_stat_line.cc @@ -164,6 +164,36 @@ bool TCStatLine::is_header() const { //////////////////////////////////////////////////////////////////////// +bool TCStatLine::has(const char *col_str) const { + return !is_bad_data(get_offset(col_str)); +} + +//////////////////////////////////////////////////////////////////////// + +int TCStatLine::get_offset(const char *col_str) const { + int offset = bad_data_int; + + // + // Search for matching header column + // + offset = HdrLine->col_offset(col_str); + + // + // If not found, check extra header columns + // + if(is_bad_data(offset)) { + if(!get_file()->header().has(col_str, offset)) offset = bad_data_int; + } + + // + // Return the offset value + // + + return offset; +} + +//////////////////////////////////////////////////////////////////////// + ConcatString TCStatLine::get(const char *col_str, bool check_na) const { ConcatString cs = (string)get_item(col_str, check_na); diff --git a/src/libcode/vx_tc_util/tc_stat_line.h 
b/src/libcode/vx_tc_util/tc_stat_line.h index 276fad1638..e39dcf7f70 100644 --- a/src/libcode/vx_tc_util/tc_stat_line.h +++ b/src/libcode/vx_tc_util/tc_stat_line.h @@ -75,6 +75,8 @@ class TCStatLine : public DataLine { // Retrieve values of the header columns // + bool has (const char *) const; + int get_offset (const char *) const; ConcatString get (const char *, bool check_na = true) const; const char * get_item (const char *, bool check_na = true) const; const char * get_item (int, bool check_na = true) const; diff --git a/src/libcode/vx_tc_util/track_info.h b/src/libcode/vx_tc_util/track_info.h index 5b4bfd298a..fdc127e633 100644 --- a/src/libcode/vx_tc_util/track_info.h +++ b/src/libcode/vx_tc_util/track_info.h @@ -182,9 +182,9 @@ class TrackInfo { //////////////////////////////////////////////////////////////////////// -inline bool TrackInfo::is_best_track() const { return(IsBestTrack); } -inline bool TrackInfo::is_oper_track() const { return(IsOperTrack); } -inline bool TrackInfo::is_anly_track() const { return(IsAnlyTrack); } +inline bool TrackInfo::is_best_track() const { return IsBestTrack; } +inline bool TrackInfo::is_oper_track() const { return IsOperTrack; } +inline bool TrackInfo::is_anly_track() const { return IsAnlyTrack; } inline void TrackInfo::set_storm_id(const char *s) { StormId = s; } inline void TrackInfo::set_basin(const char *s) { Basin = s; } inline void TrackInfo::set_cyclone(const char *s) { Cyclone = s; } @@ -200,28 +200,28 @@ inline void TrackInfo::set_track_source(const char *s) { TrackSource = s; inline void TrackInfo::set_field_source(const char *s) { FieldSource = s; } inline void TrackInfo::set_diag_name(const StringArray &s) { DiagName = s; } -inline const ConcatString & TrackInfo::storm_id() const { return(StormId); } -inline const ConcatString & TrackInfo::basin() const { return(Basin); } -inline const ConcatString & TrackInfo::cyclone() const { return(Cyclone); } -inline const ConcatString & TrackInfo::storm_name() const { 
return(StormName); } -inline int TrackInfo::technique_number() const { return(TechniqueNumber); } -inline const ConcatString & TrackInfo::technique() const { return(Technique); } -inline const ConcatString & TrackInfo::initials() const { return(Initials); } -inline unixtime TrackInfo::init() const { return(InitTime); } -inline int TrackInfo::init_hour() const { return(unix_to_sec_of_day(InitTime)); } -inline unixtime TrackInfo::valid_min() const { return(MinValidTime); } -inline unixtime TrackInfo::valid_max() const { return(MaxValidTime); } -inline unixtime TrackInfo::warm_core_min() const { return(MinWarmCore); } -inline unixtime TrackInfo::warm_core_max() const { return(MaxWarmCore); } -inline int TrackInfo::n_points() const { return(NPoints); } - -inline DiagType TrackInfo::diag_source() const { return(DiagSource); } -inline const ConcatString & TrackInfo::track_source() const { return(TrackSource); } -inline const ConcatString & TrackInfo::field_source() const { return(FieldSource); } -inline int TrackInfo::n_diag() const { return(DiagName.n()); } -inline const StringArray & TrackInfo::diag_name() const { return(DiagName); } - -inline StringArray TrackInfo::track_lines() const { return(TrackLines); } +inline const ConcatString & TrackInfo::storm_id() const { return StormId; } +inline const ConcatString & TrackInfo::basin() const { return Basin; } +inline const ConcatString & TrackInfo::cyclone() const { return Cyclone; } +inline const ConcatString & TrackInfo::storm_name() const { return StormName; } +inline int TrackInfo::technique_number() const { return TechniqueNumber; } +inline const ConcatString & TrackInfo::technique() const { return Technique; } +inline const ConcatString & TrackInfo::initials() const { return Initials; } +inline unixtime TrackInfo::init() const { return InitTime; } +inline int TrackInfo::init_hour() const { return unix_to_sec_of_day(InitTime); } +inline unixtime TrackInfo::valid_min() const { return MinValidTime; } +inline unixtime 
TrackInfo::valid_max() const { return MaxValidTime; } +inline unixtime TrackInfo::warm_core_min() const { return MinWarmCore; } +inline unixtime TrackInfo::warm_core_max() const { return MaxWarmCore; } +inline int TrackInfo::n_points() const { return NPoints; } + +inline DiagType TrackInfo::diag_source() const { return DiagSource; } +inline const ConcatString & TrackInfo::track_source() const { return TrackSource; } +inline const ConcatString & TrackInfo::field_source() const { return FieldSource; } +inline int TrackInfo::n_diag() const { return DiagName.n(); } +inline const StringArray & TrackInfo::diag_name() const { return DiagName; } + +inline StringArray TrackInfo::track_lines() const { return TrackLines; } //////////////////////////////////////////////////////////////////////// // @@ -279,7 +279,7 @@ class TrackInfoArray { //////////////////////////////////////////////////////////////////////// -inline int TrackInfoArray::n() const { return(Track.size()); } +inline int TrackInfoArray::n() const { return Track.size(); } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/track_pair_info.h b/src/libcode/vx_tc_util/track_pair_info.h index 3b3a884992..4797a8996e 100644 --- a/src/libcode/vx_tc_util/track_pair_info.h +++ b/src/libcode/vx_tc_util/track_pair_info.h @@ -151,24 +151,24 @@ class TrackPairInfo { //////////////////////////////////////////////////////////////////////// -inline int TrackPairInfo::n_points() const { return(NPoints); } -inline const TrackInfo & TrackPairInfo::adeck() const { return(ADeck); } -inline const TrackInfo & TrackPairInfo::bdeck() const { return(BDeck); } -inline double TrackPairInfo::adeck_dland(int i) const { return(ADeckDLand[i]); } -inline double TrackPairInfo::bdeck_dland(int i) const { return(BDeckDLand[i]); } -inline double TrackPairInfo::track_err(int i) const { return(TrackErr[i]); } -inline double TrackPairInfo::x_err(int i) const { return(XErr[i]); } -inline double 
TrackPairInfo::y_err(int i) const { return(YErr[i]); } -inline double TrackPairInfo::along_track_err(int i) const { return(AlongTrackErr[i]); } -inline double TrackPairInfo::cross_track_err(int i) const { return(CrossTrackErr[i]); } -inline int TrackPairInfo::adeck_rirw(int i) const { return(nint(ADeckRIRW[i])); } -inline int TrackPairInfo::bdeck_rirw(int i) const { return(nint(BDeckRIRW[i])); } -inline int TrackPairInfo::adeck_prv_int(int i) const { return(nint(ADeckPrvInt[i])); } -inline int TrackPairInfo::bdeck_prv_int(int i) const { return(nint(BDeckPrvInt[i])); } -inline int TrackPairInfo::n_lines() const { return(NLines); } -inline const TCStatLine * TrackPairInfo::tcmpr_line(int i) const { return(&TCMPRLine[i]); } -inline const TCStatLine * TrackPairInfo::tcdiag_line(int i) const { return(&TCDIAGLine[i]); } -inline bool TrackPairInfo::keep(int i) const { return(Keep[i] != 0); } +inline int TrackPairInfo::n_points() const { return NPoints; } +inline const TrackInfo & TrackPairInfo::adeck() const { return ADeck; } +inline const TrackInfo & TrackPairInfo::bdeck() const { return BDeck; } +inline double TrackPairInfo::adeck_dland(int i) const { return ADeckDLand[i]; } +inline double TrackPairInfo::bdeck_dland(int i) const { return BDeckDLand[i]; } +inline double TrackPairInfo::track_err(int i) const { return TrackErr[i]; } +inline double TrackPairInfo::x_err(int i) const { return XErr[i]; } +inline double TrackPairInfo::y_err(int i) const { return YErr[i]; } +inline double TrackPairInfo::along_track_err(int i) const { return AlongTrackErr[i]; } +inline double TrackPairInfo::cross_track_err(int i) const { return CrossTrackErr[i]; } +inline int TrackPairInfo::adeck_rirw(int i) const { return nint(ADeckRIRW[i]); } +inline int TrackPairInfo::bdeck_rirw(int i) const { return nint(BDeckRIRW[i]); } +inline int TrackPairInfo::adeck_prv_int(int i) const { return nint(ADeckPrvInt[i]); } +inline int TrackPairInfo::bdeck_prv_int(int i) const { return nint(BDeckPrvInt[i]); } 
+inline int TrackPairInfo::n_lines() const { return NLines; } +inline const TCStatLine * TrackPairInfo::tcmpr_line(int i) const { return &TCMPRLine[i]; } +inline const TCStatLine * TrackPairInfo::tcdiag_line(int i) const { return &TCDIAGLine[i]; } +inline bool TrackPairInfo::keep(int i) const { return (Keep[i] != 0); } //////////////////////////////////////////////////////////////////////// // diff --git a/src/libcode/vx_tc_util/track_point.h b/src/libcode/vx_tc_util/track_point.h index cffb79c488..0aede58974 100644 --- a/src/libcode/vx_tc_util/track_point.h +++ b/src/libcode/vx_tc_util/track_point.h @@ -103,12 +103,12 @@ inline void QuadInfo::set_se_val(double v) { SEVal = v; } inline void QuadInfo::set_sw_val(double v) { SWVal = v; } inline void QuadInfo::set_nw_val(double v) { NWVal = v; } -inline int QuadInfo::intensity() const { return(Intensity); } -inline double QuadInfo::al_val() const { return(ALVal); } -inline double QuadInfo::ne_val() const { return(NEVal); } -inline double QuadInfo::se_val() const { return(SEVal); } -inline double QuadInfo::sw_val() const { return(SWVal); } -inline double QuadInfo::nw_val() const { return(NWVal); } +inline int QuadInfo::intensity() const { return Intensity; } +inline double QuadInfo::al_val() const { return ALVal; } +inline double QuadInfo::ne_val() const { return NEVal; } +inline double QuadInfo::se_val() const { return SEVal; } +inline double QuadInfo::sw_val() const { return SWVal; } +inline double QuadInfo::nw_val() const { return NWVal; } //////////////////////////////////////////////////////////////////////// // @@ -296,31 +296,31 @@ inline void TrackPoint::set_track_stdev(const double v) { TrackStdev = v; } inline void TrackPoint::set_v_max_stdev(const double v) { VmaxStdev = v; } inline void TrackPoint::set_mslp_stdev(const double v) { MSLPStdev = v; } -inline unixtime TrackPoint::valid() const { return(ValidTime); } -inline int TrackPoint::valid_hour() const { return(unix_to_sec_of_day(ValidTime)); } -inline 
int TrackPoint::lead() const { return(LeadTime); } -inline double TrackPoint::lat() const { return(Lat); } -inline double TrackPoint::lon() const { return(Lon); } -inline double TrackPoint::v_max() const { return(Vmax); } -inline double TrackPoint::mslp() const { return(MSLP); } -inline CycloneLevel TrackPoint::level() const { return(Level); } -inline double TrackPoint::radp() const { return(RadP); } -inline double TrackPoint::rrp() const { return(RRP); } -inline double TrackPoint::mrd() const { return(MRD); } -inline double TrackPoint::gusts() const { return(Gusts); } -inline double TrackPoint::eye() const { return(Eye); } -inline double TrackPoint::direction() const { return(Direction); } -inline double TrackPoint::speed() const { return(Speed); } -inline SystemsDepth TrackPoint::depth() const { return(Depth); } -inline bool TrackPoint::warm_core() const { return(WarmCore); } -inline WatchWarnType TrackPoint::watch_warn() const { return(WatchWarn); } - -inline int TrackPoint::num_members() const { return(NumMembers); } -inline double TrackPoint::track_spread() const { return(TrackSpread); } -inline double TrackPoint::track_stdev() const { return(TrackStdev); } -inline double TrackPoint::v_max_stdev() const { return(VmaxStdev); } -inline double TrackPoint::mslp_stdev() const { return(MSLPStdev); } -inline int TrackPoint::n_diag() const { return(DiagVal.n()); } +inline unixtime TrackPoint::valid() const { return ValidTime; } +inline int TrackPoint::valid_hour() const { return unix_to_sec_of_day(ValidTime); } +inline int TrackPoint::lead() const { return LeadTime; } +inline double TrackPoint::lat() const { return Lat; } +inline double TrackPoint::lon() const { return Lon; } +inline double TrackPoint::v_max() const { return Vmax; } +inline double TrackPoint::mslp() const { return MSLP; } +inline CycloneLevel TrackPoint::level() const { return Level; } +inline double TrackPoint::radp() const { return RadP; } +inline double TrackPoint::rrp() const { return RRP; } 
+inline double TrackPoint::mrd() const { return MRD; } +inline double TrackPoint::gusts() const { return Gusts; } +inline double TrackPoint::eye() const { return Eye; } +inline double TrackPoint::direction() const { return Direction; } +inline double TrackPoint::speed() const { return Speed; } +inline SystemsDepth TrackPoint::depth() const { return Depth; } +inline bool TrackPoint::warm_core() const { return WarmCore; } +inline WatchWarnType TrackPoint::watch_warn() const { return WatchWarn; } + +inline int TrackPoint::num_members() const { return NumMembers; } +inline double TrackPoint::track_spread() const { return TrackSpread; } +inline double TrackPoint::track_stdev() const { return TrackStdev; } +inline double TrackPoint::v_max_stdev() const { return VmaxStdev; } +inline double TrackPoint::mslp_stdev() const { return MSLPStdev; } +inline int TrackPoint::n_diag() const { return DiagVal.n(); } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/vx_tc_nc_util.cc b/src/libcode/vx_tc_util/vx_tc_nc_util.cc index d601d48caa..2f25861d81 100644 --- a/src/libcode/vx_tc_util/vx_tc_nc_util.cc +++ b/src/libcode/vx_tc_util/vx_tc_nc_util.cc @@ -8,7 +8,6 @@ //////////////////////////////////////////////////////////////////////// - #include #include "vx_tc_nc_util.h" @@ -57,8 +56,8 @@ void write_tc_track_lat_lon(NcFile* nc_out, add_att(&track_lon_var, "units", "degrees_east"); add_att(&track_lon_var, "standard_name", "longitude_track"); - double* track_lat_data = new double[track.n_points()]; - double* track_lon_data = new double[track.n_points()]; + vector track_lat_data(track.n_points()); + vector track_lon_data(track.n_points()); for(int i = 0; i < track.n_points(); i++) { mlog << Debug(5) << track[i].serialize() << "\n"; @@ -72,11 +71,9 @@ void write_tc_track_lat_lon(NcFile* nc_out, vector counts; counts.push_back(track.n_points()); - track_lat_var.putVar(offsets, counts, track_lat_data); - 
track_lon_var.putVar(offsets, counts, track_lon_data); + track_lat_var.putVar(offsets, counts, track_lat_data.data()); + track_lon_var.putVar(offsets, counts, track_lon_data.data()); - delete[] track_lat_data; - delete[] track_lon_data; } //////////////////////////////////////////////////////////////////////// @@ -140,7 +137,7 @@ void write_tc_rmw(NcFile* nc_out, add_att(&track_mrd_var, "units", "nautical_miles"); add_att(&track_mrd_var, "standard_name", "radius_max_wind"); - double* track_mrd_data = new double[track.n_points()]; + vector track_mrd_data(track.n_points()); for(int i = 0; i < track.n_points(); i++) { track_mrd_data[i] = track[i].mrd(); @@ -152,13 +149,27 @@ void write_tc_rmw(NcFile* nc_out, vector counts; counts.push_back(track.n_points()); - track_mrd_var.putVar(offsets, counts, track_mrd_data); + track_mrd_var.putVar(offsets, counts, track_mrd_data.data()); - delete[] track_mrd_data; } //////////////////////////////////////////////////////////////////////// +bool has_pressure_level(vector levels) { + + bool status = false; + + for (int j = 0; j < levels.size(); j++) { + if (levels[j].substr(0, 1) == "P") { + status = true; + break; + } + } + + return status; +} + +//////////////////////////////////////////////////////////////////////// set get_pressure_level_strings( map > variable_levels) { @@ -267,7 +278,7 @@ void def_tc_pressure(NcFile* nc_out, NcVar pressure_var; - double* pressure_data = new double[pressure_levels.size()]; + vector pressure_data(pressure_levels.size()); // Define variable pressure_var = nc_out->addVar("pressure", ncDouble, pressure_dim); @@ -286,10 +297,7 @@ void def_tc_pressure(NcFile* nc_out, k--; } - put_nc_data(&pressure_var, &pressure_data[0]); - - // Cleanup - if(pressure_data) { delete [] pressure_data; pressure_data = (double *) nullptr; } + put_nc_data(&pressure_var, pressure_data.data()); return; } @@ -303,8 +311,8 @@ void def_tc_range_azimuth(NcFile* nc_out, NcVar range_var; NcVar azimuth_var; - double* range_data = 
new double[grid.range_n()]; - double* azimuth_data = new double[grid.azimuth_n()]; + vector range_data(grid.range_n()); + vector azimuth_data(grid.azimuth_n()); // Define variables range_var = nc_out->addVar("range", ncDouble, range_dim); @@ -324,7 +332,7 @@ void def_tc_range_azimuth(NcFile* nc_out, add_att(&range_var, "_FillValue", bad_data_double); add_att(&azimuth_var, "long_name", "azimuth"); - add_att(&azimuth_var, "units", "degrees_clockwise_from_north"); + add_att(&azimuth_var, "units", "degrees_clockwise_from_east"); add_att(&azimuth_var, "standard_name", "azimuth"); add_att(&azimuth_var, "_FillValue", bad_data_double); @@ -338,12 +346,8 @@ void def_tc_range_azimuth(NcFile* nc_out, } // Write coordinates - put_nc_data(&range_var, &range_data[0]); - put_nc_data(&azimuth_var, &azimuth_data[0]); - - // Cleanup - if(range_data) { delete [] range_data; range_data = (double *) nullptr; } - if(azimuth_data) { delete [] azimuth_data; azimuth_data = (double *) nullptr; } + put_nc_data(&range_var, range_data.data()); + put_nc_data(&azimuth_var, azimuth_data.data()); return; } @@ -539,7 +543,7 @@ void def_tc_variables(NcFile* nc_out, string long_name = variable_long_names[i->first]; string units = variable_units[i->first]; - if (levels.size() > 1) { + if (has_pressure_level(levels)) { data_var = nc_out->addVar( var_name, ncDouble, dims_3d); add_att(&data_var, "long_name", long_name); @@ -655,8 +659,7 @@ void write_tc_data_rev(NcFile* nc_out, const TcrmwGrid& grid, vector offsets; vector counts; - - double* data_rev; + vector data_rev(grid.range_n() * grid.azimuth_n()); offsets.clear(); offsets.push_back(i_point); @@ -668,9 +671,6 @@ void write_tc_data_rev(NcFile* nc_out, const TcrmwGrid& grid, counts.push_back(grid.range_n()); counts.push_back(grid.azimuth_n()); - data_rev = new double[ - grid.range_n() * grid.azimuth_n()]; - for(int ir = 0; ir < grid.range_n(); ir++) { for(int ia = 0; ia < grid.azimuth_n(); ia++) { int i = ir * grid.azimuth_n() + ia; @@ -679,9 +679,8 
@@ void write_tc_data_rev(NcFile* nc_out, const TcrmwGrid& grid, } } - var.putVar(offsets, counts, data_rev); + var.putVar(offsets, counts, data_rev.data()); - delete[] data_rev; } //////////////////////////////////////////////////////////////////////// @@ -692,8 +691,8 @@ void write_tc_azi_mean_data(NcFile* nc_out, const TcrmwGrid& grid, vector offsets; vector counts; - double* data_rev; - double* data_azi_mean; + vector data_rev(grid.range_n() * grid.azimuth_n()); + vector data_azi_mean(grid.range_n(), 0.0); offsets.clear(); offsets.push_back(i_point); @@ -703,10 +702,6 @@ void write_tc_azi_mean_data(NcFile* nc_out, const TcrmwGrid& grid, counts.push_back(1); counts.push_back(grid.range_n()); - data_rev = new double[ - grid.range_n() * grid.azimuth_n()]; - data_azi_mean = new double[grid.range_n()]; - for(int ir = 0; ir < grid.range_n(); ir++) { data_azi_mean[ir] = 0.; for(int ia = 0; ia < grid.azimuth_n(); ia++) { @@ -721,10 +716,8 @@ void write_tc_azi_mean_data(NcFile* nc_out, const TcrmwGrid& grid, data_azi_mean[ir] /= grid.azimuth_n(); } - var.putVar(offsets, counts, data_azi_mean); + var.putVar(offsets, counts, data_azi_mean.data()); - delete[] data_rev; - delete[] data_azi_mean; } //////////////////////////////////////////////////////////////////////// @@ -747,8 +740,7 @@ extern void write_tc_pressure_level_data( vector offsets_3d; vector counts_3d; - - double* data_rev; + vector data_rev(grid.range_n() * grid.azimuth_n()); offsets_3d.clear(); offsets_3d.push_back(i_point); @@ -762,9 +754,6 @@ extern void write_tc_pressure_level_data( counts_3d.push_back(grid.range_n()); counts_3d.push_back(grid.azimuth_n()); - data_rev = new double[ - grid.range_n() * grid.azimuth_n()]; - for(int ir = 0; ir < grid.range_n(); ir++) { for(int ia = 0; ia < grid.azimuth_n(); ia++) { int i = ir * grid.azimuth_n() + ia; @@ -773,9 +762,8 @@ extern void write_tc_pressure_level_data( } } - var.putVar(offsets_3d, counts_3d, data_rev); + var.putVar(offsets_3d, counts_3d, 
data_rev.data()); - delete[] data_rev; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_tc_util/vx_tc_nc_util.h b/src/libcode/vx_tc_util/vx_tc_nc_util.h index bd9911e4d0..174319888c 100644 --- a/src/libcode/vx_tc_util/vx_tc_nc_util.h +++ b/src/libcode/vx_tc_util/vx_tc_nc_util.h @@ -35,6 +35,8 @@ extern void write_tc_track_point(netCDF::NcFile*, extern void write_tc_rmw(netCDF::NcFile*, const netCDF::NcDim&, const TrackInfo&); +extern bool has_pressure_level(std::vector); + extern std::set get_pressure_level_strings( std::map >); diff --git a/src/libcode/vx_time_series/Makefile.in b/src/libcode/vx_time_series/Makefile.in index a5eacf5ac7..a7bed2cf24 100644 --- a/src/libcode/vx_time_series/Makefile.in +++ b/src/libcode/vx_time_series/Makefile.in @@ -238,6 +238,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/libcode/vx_time_series/time_series_util.cc b/src/libcode/vx_time_series/time_series_util.cc index d3e40c1201..e92d493f3f 100644 --- a/src/libcode/vx_time_series/time_series_util.cc +++ b/src/libcode/vx_time_series/time_series_util.cc @@ -40,9 +40,9 @@ const char * timeseriestype_to_string(const TimeSeriesType t) { const char *s = (const char *) nullptr; switch(t) { - case(TimeSeriesType::DyDt): s = timeseriestype_dydt_str; break; - case(TimeSeriesType::Swing): s = timeseriestype_swing_str; break; - default: s = na_str; break; + case TimeSeriesType::DyDt: s = timeseriestype_dydt_str; break; + case TimeSeriesType::Swing: s = timeseriestype_swing_str; break; + default: s = na_str; break; } return s; diff --git a/src/tools/Makefile.in b/src/tools/Makefile.in index c9b6f05b61..6862443abb 100644 --- a/src/tools/Makefile.in +++ b/src/tools/Makefile.in @@ -232,6 +232,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = 
@MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/core/Makefile.in b/src/tools/core/Makefile.in index 6e18774f39..cacc9eabec 100644 --- a/src/tools/core/Makefile.in +++ b/src/tools/core/Makefile.in @@ -240,6 +240,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/core/ensemble_stat/Makefile.in b/src/tools/core/ensemble_stat/Makefile.in index a960e21483..0c0f147c70 100644 --- a/src/tools/core/ensemble_stat/Makefile.in +++ b/src/tools/core/ensemble_stat/Makefile.in @@ -225,6 +225,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/core/ensemble_stat/ensemble_stat.cc b/src/tools/core/ensemble_stat/ensemble_stat.cc index b15f0fd728..68cfe50cc1 100644 --- a/src/tools/core/ensemble_stat/ensemble_stat.cc +++ b/src/tools/core/ensemble_stat/ensemble_stat.cc @@ -74,6 +74,11 @@ // 041 04/16/24 Halley Gotway MET #2786 Compute RPS from climo bin probs. // 042 04/29/24 Halley Gotway MET #2870 Ignore MISSING keyword. // 043 04/29/24 Halley Gotway MET #2795 Move level mismatch warning. +// 044 06/17/24 Halley Gotway MET #2856 Reinitialize climo_cdf pointer +// 045 07/05/24 Halley Gotway MET #2924 Support forecast climatology. +// 046 10/08/24 Halley Gotway MET #2887 Compute weighted contingency tables. +// 047 10/14/24 Halley Gotway MET #2279 Add point_weight_flag option. +// 048 10/15/24 Halley Gotway MET #2893 Write individual pair OBTYPE. 
// //////////////////////////////////////////////////////////////////////// @@ -129,6 +134,7 @@ static void process_grid_scores (int, const DataPlane *, const DataPlane *, const DataPlane &, const DataPlane &, const DataPlane &, const DataPlane &, + const DataPlane &, const DataPlane &, const DataPlane &, const MaskPlane &, ObsErrorEntry *, PairDataEnsemble &); @@ -485,7 +491,7 @@ void process_grid(const Grid &fcst_grid) { // Parse regridding logic RegridInfo ri; - ri = conf_info.vx_opt[0].vx_pd.fcst_info->get_var_info()->regrid(); + ri = conf_info.vx_opt[0].vx_pd.ens_info->get_var_info()->regrid(); // Read gridded observation data, if necessary if(ri.field == FieldType::Obs) { @@ -551,15 +557,15 @@ void process_n_vld() { // Loop through the verification fields to be processed for(i_var=0; i_varinputs_n(); + n_ens_inputs = conf_info.vx_opt[i_var].vx_pd.ens_info->inputs_n(); // Loop through the forecast inputs for(i_ens=n_vld=0; i_ensget_file(i_ens); - var_info = conf_info.vx_opt[i_var].vx_pd.fcst_info->get_var_info(i_ens); - j = conf_info.vx_opt[i_var].vx_pd.fcst_info->get_file_index(i_ens); + fcst_file = conf_info.vx_opt[i_var].vx_pd.ens_info->get_file(i_ens); + var_info = conf_info.vx_opt[i_var].vx_pd.ens_info->get_var_info(i_ens); + j = conf_info.vx_opt[i_var].vx_pd.ens_info->get_file_index(i_ens); // Check for valid file if(!ens_file_vld[j]) continue; @@ -591,7 +597,7 @@ void process_n_vld() { << n_vld << " of " << n_ens_inputs << " (" << (double) n_vld/n_ens_inputs << ")" << " forecast fields found for \"" - << conf_info.vx_opt[i_var].vx_pd.fcst_info->get_var_info()->magic_str() + << conf_info.vx_opt[i_var].vx_pd.fcst_info->magic_str() << "\" does not meet the threshold specified by \"" << conf_key_fcst_ens_thresh << "\" (" << conf_info.vld_ens_thresh << ") in the configuration file.\n\n"; @@ -630,7 +636,8 @@ bool get_data_plane(const char *infile, GrdFileType ftype, if(do_regrid && !(mtddf->grid() == grid)) { mlog << Debug(1) << "Regridding field \"" << 
info->magic_str() - << "\" to the verification grid.\n"; + << "\" to the verification grid using " + << info->regrid().get_str() << ".\n"; dp = met_regrid(dp, mtddf->grid(), grid, info->regrid()); } @@ -688,7 +695,8 @@ bool get_data_plane_array(const char *infile, GrdFileType ftype, mlog << Debug(1) << "Regridding " << dpa.n_planes() << " field(s) \"" << info->magic_str() - << "\" to the verification grid.\n"; + << "\" to the verification grid using " + << info->regrid().get_str() << ".\n"; // Loop through the forecast fields for(i=0; iget_var_info()->magic_str() << ".\n"; + << "For " << conf_info.vx_opt[i].vx_pd.fcst_info->magic_str() << ", found " + << fcmn_dpa.n_planes() << " forecast climatology mean and " + << fcsd_dpa.n_planes() << " standard deviation level(s), and " + << ocmn_dpa.n_planes() << " observation climatology mean and " + << ocsd_dpa.n_planes() << " standard deviation level(s).\n"; // Store climatology information - conf_info.vx_opt[i].vx_pd.set_climo_mn_dpa(cmn_dpa); - conf_info.vx_opt[i].vx_pd.set_climo_sd_dpa(csd_dpa); + conf_info.vx_opt[i].vx_pd.set_fcst_climo_mn_dpa(fcmn_dpa); + conf_info.vx_opt[i].vx_pd.set_fcst_climo_sd_dpa(fcsd_dpa); + conf_info.vx_opt[i].vx_pd.set_obs_climo_mn_dpa(ocmn_dpa); + conf_info.vx_opt[i].vx_pd.set_obs_climo_sd_dpa(ocsd_dpa); } // Process each point observation NetCDF file for(i=0; iget_var_info(); - VarInfo *obs_info = conf_info.vx_opt[i].vx_pd.obs_info; + EnsVarInfo *ens_info = conf_info.vx_opt[i].vx_pd.ens_info; + VarInfo *fcst_info = ens_info->get_var_info(); + VarInfo *obs_info = conf_info.vx_opt[i].vx_pd.obs_info; bool print_level_mismatch_warning = true; // Initialize emn_dpa.clear(); // Loop through the ensemble inputs - for(j=0, n_miss=0; jinputs_n(); j++) { + for(j=0, n_miss=0; jinputs_n(); j++) { - i_file = conf_info.vx_opt[i].vx_pd.fcst_info->get_file_index(j); + i_file = ens_info->get_file_index(j); // If the current forecast file is valid, process it if(!ens_file_vld[i_file]) { @@ -885,7 +915,7 @@ 
void process_point_vx() { mlog << Debug(2) << "Computing the ensemble mean from the members.\n"; - int n = conf_info.vx_opt[i].vx_pd.fcst_info->inputs_n() - n_miss; + int n = ens_info->inputs_n() - n_miss; if(n <= 0) { mlog << Error << "\nprocess_point_vx() -> " @@ -985,11 +1015,11 @@ void process_point_obs(int i_nc) { int hdr_count = met_point_obs->get_hdr_cnt(); int obs_count = met_point_obs->get_obs_cnt(); - mlog << Debug(2) << "Searching " << (obs_count) - << " observations from " << (hdr_count) + mlog << Debug(2) << "Searching " << obs_count + << " observations from " << hdr_count << " header messages.\n"; - const int buf_size = ((obs_count > DEF_NC_BUFFER_SIZE) ? DEF_NC_BUFFER_SIZE : (obs_count)); + const int buf_size = ((obs_count > DEF_NC_BUFFER_SIZE) ? DEF_NC_BUFFER_SIZE : obs_count); int obs_qty_idx_block[buf_size]; float obs_arr_block[buf_size][OBS_ARRAY_LEN]; @@ -1098,13 +1128,13 @@ bool process_point_ens(int i_vx, int i_ens, DataPlaneArray &fcst_dpa) { fcst_dpa.clear(); // Get file based on current vx and ensemble index - ConcatString ens_file = conf_info.vx_opt[i_vx].vx_pd.fcst_info->get_file(i_ens); + ConcatString ens_file = conf_info.vx_opt[i_vx].vx_pd.ens_info->get_file(i_ens); mlog << Debug(2) << "\n" << sep_str << "\n\n" << "Processing ensemble member file: " << ens_file << (i_ens == ctrl_file_index ? " (control)\n" : "\n"); - VarInfo *info = conf_info.vx_opt[i_vx].vx_pd.fcst_info->get_var_info(i_ens); + VarInfo *info = conf_info.vx_opt[i_vx].vx_pd.ens_info->get_var_info(i_ens); // Read the gridded data from the input forecast file bool status = get_data_plane_array(ens_file.c_str(), info->file_type(), info, @@ -1142,28 +1172,31 @@ void process_point_scores() { // requested, and write the output. 
for(i=0; iget_var_info(); + VarInfo *obs_info = conf_info.vx_opt[i].vx_pd.obs_info; + // Set the description shc.set_desc(conf_info.vx_opt[i].vx_pd.desc.c_str()); // Store the forecast variable name - shc.set_fcst_var(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->name_attr()); + shc.set_fcst_var(fcst_info->name_attr()); // Store the forecast variable units - shc.set_fcst_units(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->units_attr()); + shc.set_fcst_units(fcst_info->units_attr()); // Set the forecast level name - shc.set_fcst_lev(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->level_attr().c_str()); + shc.set_fcst_lev(fcst_info->level_attr().c_str()); // Store the observation variable name - shc.set_obs_var(conf_info.vx_opt[i].vx_pd.obs_info->name_attr()); + shc.set_obs_var(obs_info->name_attr()); // Store the observation variable units - cs = conf_info.vx_opt[i].vx_pd.obs_info->units_attr(); + cs = obs_info->units_attr(); if(cs.empty()) cs = na_string; shc.set_obs_units(cs); // Set the observation level name - shc.set_obs_lev(conf_info.vx_opt[i].vx_pd.obs_info->level_attr().c_str()); + shc.set_obs_lev(obs_info->level_attr().c_str()); // Set the observation lead time shc.set_obs_lead_sec(0); @@ -1194,13 +1227,13 @@ void process_point_scores() { shc.set_interp_wdth(conf_info.vx_opt[i].interp_info.width[l]); } - pd_ptr = &conf_info.vx_opt[i].vx_pd.pd[j][k][l]; + int n = conf_info.vx_opt[i].vx_pd.three_to_one(j, k, l); + + pd_ptr = &conf_info.vx_opt[i].vx_pd.pd[n]; mlog << Debug(2) << "Processing point verification " - << conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->magic_str() - << " versus " - << conf_info.vx_opt[i].vx_pd.obs_info->magic_str() + << fcst_info->magic_str() << " versus " << obs_info->magic_str() << ", for observation type " << pd_ptr->msg_typ << ", over region " << pd_ptr->mask_name << ", for interpolation method " @@ -1237,7 +1270,7 @@ void process_grid_vx() { DataPlane *fcst_dp = (DataPlane *) nullptr; DataPlane *fraw_dp 
= (DataPlane *) nullptr; DataPlane obs_dp, oraw_dp; - DataPlane emn_dp, cmn_dp, csd_dp; + DataPlane emn_dp, fcmn_dp, fcsd_dp, ocmn_dp, ocsd_dp; PairDataEnsemble pd_all, pd; ObsErrorEntry *oerr_ptr = (ObsErrorEntry *) nullptr; VarInfo * var_info; @@ -1250,13 +1283,16 @@ void process_grid_vx() { shc.set_obtype(conf_info.obtype.c_str()); // Allocate space to store the forecast fields - int num_dp = conf_info.vx_opt[0].vx_pd.fcst_info->inputs_n(); + int num_dp = conf_info.vx_opt[0].vx_pd.ens_info->inputs_n(); fcst_dp = new DataPlane [num_dp]; fraw_dp = new DataPlane [num_dp]; // Loop through each of the fields to be verified for(i=0; iget_var_info(); + VarInfo *obs_info = conf_info.vx_opt[i].vx_pd.obs_info; + // Initialize emn_dp.clear(); @@ -1271,13 +1307,13 @@ void process_grid_vx() { shc.set_desc(conf_info.vx_opt[i].vx_pd.desc.c_str()); // Set the forecast variable name - shc.set_fcst_var(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->name_attr()); + shc.set_fcst_var(fcst_info->name_attr()); // Store the forecast variable units - shc.set_fcst_units(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->units_attr()); + shc.set_fcst_units(fcst_info->units_attr()); // Set the forecast level name - shc.set_fcst_lev(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->level_attr().c_str()); + shc.set_fcst_lev(fcst_info->level_attr().c_str()); // Set the ObsErrorEntry pointer if(conf_info.vx_opt[i].obs_error.flag) { @@ -1294,12 +1330,12 @@ void process_grid_vx() { // Check for table entries for this variable and message type if(!obs_error_table.has( - conf_info.vx_opt[i].vx_pd.obs_info->name().c_str(), + obs_info->name().c_str(), conf_info.obtype.c_str())) { mlog << Warning << "\nprocess_grid_vx() -> " << "Disabling observation error logic since the " << "obs error table contains no entry for OBS_VAR(" - << conf_info.vx_opt[i].vx_pd.obs_info->name() + << obs_info->name() << ") and MESSAGE_TYPE(" << conf_info.obtype << ").\nSpecify a custom obs error table using the " 
<< "MET_OBS_ERROR_TABLE environment variable.\n\n"; @@ -1309,7 +1345,7 @@ void process_grid_vx() { // Do a lookup for this variable and message type oerr_ptr = obs_error_table.lookup( - conf_info.vx_opt[i].vx_pd.obs_info->name().c_str(), + obs_info->name().c_str(), conf_info.obtype.c_str()); // If match was found and includes a value range setting, @@ -1332,14 +1368,14 @@ void process_grid_vx() { } // Loop through each of the input ensemble files/variables - for(j=0, n_miss=0; j < conf_info.vx_opt[i].vx_pd.fcst_info->inputs_n(); j++) { + for(j=0, n_miss=0; j < conf_info.vx_opt[i].vx_pd.ens_info->inputs_n(); j++) { // Initialize fcst_dp[j].clear(); - i_file = conf_info.vx_opt[i].vx_pd.fcst_info->get_file_index(j); - var_info = conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info(j); - fcst_file = conf_info.vx_opt[i].vx_pd.fcst_info->get_file(j); + i_file = conf_info.vx_opt[i].vx_pd.ens_info->get_file_index(j); + var_info = conf_info.vx_opt[i].vx_pd.ens_info->get_var_info(j); + fcst_file = conf_info.vx_opt[i].vx_pd.ens_info->get_file(j); // If the current ensemble file is valid, read the field if(ens_file_vld[i_file]) { @@ -1368,7 +1404,7 @@ void process_grid_vx() { mlog << Debug(2) << "Processing ensemble mean file: " << ens_mean_file << "\n"; - VarInfo *info = conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info(); + VarInfo *info = conf_info.vx_opt[i].vx_pd.ens_info->get_var_info(); // Read the gridded data from the mean file found = get_data_plane(ens_mean_file.c_str(), FileType_None, @@ -1387,7 +1423,7 @@ void process_grid_vx() { mlog << Debug(2) << "Computing the ensemble mean from the members.\n"; - int n = conf_info.vx_opt[i].vx_pd.fcst_info->inputs_n() - n_miss; + int n = conf_info.vx_opt[i].vx_pd.ens_info->inputs_n() - n_miss; if(n <= 0) { mlog << Error << "\nprocess_grid_vx() -> " @@ -1399,19 +1435,37 @@ void process_grid_vx() { emn_dp /= (double) n; } - // Read climatology data - cmn_dp = read_climo_data_plane( - 
conf_info.conf.lookup_array(conf_key_climo_mean_field, false), - i, ens_valid_ut, grid); - csd_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_stdev_field, false), - i, ens_valid_ut, grid); + // Read forecast climatology data + fcmn_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_fcst), + conf_key_climo_mean, + i, ens_valid_ut, grid, + "forecast climatology mean"); + fcsd_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_fcst), + conf_key_climo_stdev, + i, ens_valid_ut, grid, + "forecast climatology standard deviation"); + + // Read observation climatology data + ocmn_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_obs), + conf_key_climo_mean, + i, ens_valid_ut, grid, + "observation climatology mean"); + ocsd_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_obs), + conf_key_climo_stdev, + i, ens_valid_ut, grid, + "observation climatology standard deviation"); mlog << Debug(3) - << "Found " << (cmn_dp.nx() == 0 ? 0 : 1) - << " climatology mean field(s) and " << (csd_dp.nx() == 0 ? 0 : 1) - << " climatology standard deviation field(s) for forecast " - << conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->magic_str() << ".\n"; + << "For " << conf_info.vx_opt[i].vx_pd.fcst_info->magic_str() << ", found " + << (fcmn_dp.nx() == 0 ? 0 : 1) << " forecast climatology mean and " + << (fcsd_dp.nx() == 0 ? 0 : 1) << " standard deviation field(s), and " + << (ocmn_dp.nx() == 0 ? 0 : 1) << " observation climatology mean and " + << (ocsd_dp.nx() == 0 ? 
0 : 1) << " standard deviation field(s).\n"; + // If requested in the config file, create a NetCDF file to store // the verification matched pairs @@ -1446,13 +1500,13 @@ void process_grid_vx() { } // Set the observation variable name - shc.set_obs_var(conf_info.vx_opt[i].vx_pd.obs_info->name_attr()); + shc.set_obs_var(obs_info->name_attr()); // Store the observation variable units - shc.set_obs_units(conf_info.vx_opt[i].vx_pd.obs_info->units_attr()); + shc.set_obs_units(obs_info->units_attr()); // Set the observation level name - shc.set_obs_lev(conf_info.vx_opt[i].vx_pd.obs_info->level_attr().c_str()); + shc.set_obs_lev(obs_info->level_attr().c_str()); // Set the observation lead time shc.set_obs_lead_sec(obs_dp.lead()); @@ -1512,12 +1566,12 @@ void process_grid_vx() { << "gridded observation data.\n"; obs_dp = add_obs_error_bc(conf_info.rng_ptr, FieldType::Obs, oerr_ptr, oraw_dp, oraw_dp, - conf_info.vx_opt[i].vx_pd.obs_info->name().c_str(), + obs_info->name().c_str(), conf_info.obtype.c_str()); } // Loop through the ensemble members - for(k=0; k < conf_info.vx_opt[i].vx_pd.fcst_info->inputs_n(); k++) { + for(k=0; k < conf_info.vx_opt[i].vx_pd.ens_info->inputs_n(); k++) { // Smooth the forecast field, if requested if(field == FieldType::Fcst || field == FieldType::Both) { @@ -1537,7 +1591,7 @@ void process_grid_vx() { << "ensemble member " << k+1 << ".\n"; fcst_dp[k] = add_obs_error_inc(conf_info.rng_ptr, FieldType::Fcst, oerr_ptr, fraw_dp[k], oraw_dp, - conf_info.vx_opt[i].vx_pd.obs_info->name().c_str(), + obs_info->name().c_str(), conf_info.obtype.c_str()); } } // end for k @@ -1555,22 +1609,22 @@ void process_grid_vx() { pd_all.clear(); pd_all.set_ens_size(n_vx_vld[i]); pd_all.set_climo_cdf_info_ptr(&conf_info.vx_opt[i].cdf_info); - pd_all.ctrl_index = conf_info.vx_opt[i].vx_pd.pd[0][0][0].ctrl_index; - pd_all.skip_const = conf_info.vx_opt[i].vx_pd.pd[0][0][0].skip_const; + pd_all.ctrl_index = conf_info.vx_opt[i].vx_pd.pd[0].ctrl_index; + pd_all.skip_const = 
conf_info.vx_opt[i].vx_pd.pd[0].skip_const; // Apply the current mask to the fields and compute the pairs process_grid_scores(i, fcst_dp, fraw_dp, obs_dp, oraw_dp, - emn_dp, cmn_dp, csd_dp, + emn_dp, + fcmn_dp, fcsd_dp, + ocmn_dp, ocsd_dp, mask_mp, oerr_ptr, pd_all); mlog << Debug(2) << "Processing gridded verification " - << conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->magic_str() - << " versus " - << conf_info.vx_opt[i].vx_pd.obs_info->magic_str() + << fcst_info->magic_str() << " versus " << obs_info->magic_str() << ", for observation type " << shc.get_obtype() << ", over region " << shc.get_mask() << ", for interpolation method " @@ -1614,23 +1668,28 @@ void process_grid_vx() { void process_grid_scores(int i_vx, const DataPlane *fcst_dp, const DataPlane *fraw_dp, const DataPlane &obs_dp, const DataPlane &oraw_dp, - const DataPlane &emn_dp, const DataPlane &cmn_dp, - const DataPlane &csd_dp, const MaskPlane &mask_mp, + const DataPlane &emn_dp, + const DataPlane &fcmn_dp, const DataPlane &fcsd_dp, + const DataPlane &ocmn_dp, const DataPlane &ocsd_dp, + const MaskPlane &mask_mp, ObsErrorEntry *oerr_ptr, PairDataEnsemble &pd) { int i, j, x, y, n_miss; - double cmn, csd; ObsErrorEntry *e = (ObsErrorEntry *) nullptr; // Allocate memory in one big chunk based on grid size pd.extend(nxy); // Climatology flags - bool emn_flag = (emn_dp.nx() == obs_dp.nx() && - emn_dp.ny() == obs_dp.ny()); - bool cmn_flag = (cmn_dp.nx() == obs_dp.nx() && - cmn_dp.ny() == obs_dp.ny()); - bool csd_flag = (csd_dp.nx() == obs_dp.nx() && - csd_dp.ny() == obs_dp.ny()); + bool emn_flag = (emn_dp.nx() == obs_dp.nx() && + emn_dp.ny() == obs_dp.ny()); + bool fcmn_flag = (fcmn_dp.nx() == obs_dp.nx() && + fcmn_dp.ny() == obs_dp.ny()); + bool fcsd_flag = (fcsd_dp.nx() == obs_dp.nx() && + fcsd_dp.ny() == obs_dp.ny()); + bool ocmn_flag = (ocmn_dp.nx() == obs_dp.nx() && + ocmn_dp.ny() == obs_dp.ny()); + bool ocsd_flag = (ocsd_dp.nx() == obs_dp.nx() && + ocsd_dp.ny() == obs_dp.ny()); // Loop 
through the observation field for(x=0; xinputs_n(); j++) { + for(j=0,n_miss=0; j < conf_info.vx_opt[i_vx].vx_pd.ens_info->inputs_n(); j++) { // Skip missing data if(fcst_dp[j].nx() == 0 || fcst_dp[j].ny() == 0) { @@ -1741,17 +1803,17 @@ void do_rps(const EnsembleStatVxOpt &vx_opt, rps_info.othresh = othresh; rps_info.set_prob_cat_thresh(vx_opt.fcat_ta); - // If prob_cat_thresh is empty and climo data is available, - // use climo_cdf thresholds instead + // If prob_cat_thresh is empty and observation climo + // data is available, use climo_cdf thresholds instead if(rps_info.fthresh.n() == 0 && - pd_ptr->cmn_na.n_valid() > 0 && - pd_ptr->csd_na.n_valid() > 0 && + pd_ptr->ocmn_na.n_valid() > 0 && + pd_ptr->ocsd_na.n_valid() > 0 && vx_opt.cdf_info.cdf_ta.n() > 0) { rps_info.set_cdp_thresh(vx_opt.cdf_info.cdf_ta); } // Compute ensemble RPS statistics from pre-computed binned probabilities - if(vx_opt.vx_pd.fcst_info->get_var_info()->is_prob()) { + if(vx_opt.vx_pd.ens_info->get_var_info()->is_prob()) { rps_info.set_climo_bin_prob(*pd_ptr, vx_opt.ocat_ta); } // Compute ensemble RPS statistics from ensemble member values @@ -1832,7 +1894,7 @@ void setup_txt_files() { // Compute the number of PHIST bins for(i=n_phist_bin=0; i n_phist_bin ? 
n : n_phist_bin); } @@ -1900,39 +1962,39 @@ void setup_txt_files() { // Get the maximum number of columns for this line type switch(i) { - case(i_rhist): + case i_rhist: max_col = get_n_rhist_columns(max_n_ens+1) + n_header_columns + 1; break; - case(i_phist): + case i_phist: max_col = get_n_phist_columns(n_phist_bin) + n_header_columns + 1; break; - case(i_relp): + case i_relp: max_col = get_n_relp_columns(max_n_ens) + n_header_columns + 1; break; - case(i_orank): + case i_orank: max_col = get_n_orank_columns(max_n_ens) + n_header_columns + 1; break; - case(i_pct): + case i_pct: max_col = get_n_pct_columns(n_prob) + n_header_columns + 1; break; - case(i_pstd): + case i_pstd: max_col = get_n_pstd_columns(n_prob) + n_header_columns + 1; break; - case(i_pjc): + case i_pjc: max_col = get_n_pjc_columns(n_prob) + n_header_columns + 1; break; - case(i_prc): + case i_prc: max_col = get_n_prc_columns(n_prob) + n_header_columns + 1; break; - case(i_eclv): + case i_eclv: max_col = get_n_eclv_columns(n_eclv) + n_header_columns + 1; break; @@ -1948,39 +2010,39 @@ void setup_txt_files() { // Write the text header row switch(i) { - case(i_rhist): + case i_rhist: write_rhist_header_row(1, max_n_ens+1, txt_at[i], 0, 0); break; - case(i_phist): + case i_phist: write_phist_header_row(1, n_phist_bin, txt_at[i], 0, 0); break; - case(i_relp): + case i_relp: write_relp_header_row(1, max_n_ens, txt_at[i], 0, 0); break; - case(i_orank): + case i_orank: write_orank_header_row(1, max_n_ens, txt_at[i], 0, 0); break; - case(i_pct): + case i_pct: write_pct_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_pstd): + case i_pstd: write_pstd_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_pjc): + case i_pjc: write_pjc_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_prc): + case i_prc: write_prc_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_eclv): + case i_eclv: write_eclv_header_row(1, n_eclv, txt_at[i], 0, 0); break; @@ -2058,7 +2120,7 @@ void write_txt_files(const 
EnsembleStatVxOpt &vx_opt, PairDataEnsemble pd; // Check for probabilistic input - bool is_prob = vx_opt.vx_pd.fcst_info->get_var_info()->is_prob(); + bool is_prob = vx_opt.vx_pd.ens_info->get_var_info()->is_prob(); // Process each observation filtering threshold for(i=0; i 0) { @@ -2133,7 +2195,7 @@ void write_txt_files(const EnsembleStatVxOpt &vx_opt, if(!is_prob && vx_opt.output_flag[i_ssvar] != STATOutputType::None) { - pd.ssvar_bin_size = vx_opt.vx_pd.pd[0][0][0].ssvar_bin_size; + pd.ssvar_bin_size = vx_opt.vx_pd.pd[0].ssvar_bin_size; pd.compute_ssvar(); // Make sure there are bins to process @@ -2177,10 +2239,17 @@ void write_txt_files(const EnsembleStatVxOpt &vx_opt, // Set the header column shc.set_obs_thresh(na_str); + // Store current obtype value + string cur_obtype = shc.get_obtype(); + write_orank_row(shc, &pd_all, vx_opt.output_flag[i_orank], stat_at, i_stat_row, - txt_at[i_orank], i_txt_row[i_orank]); + txt_at[i_orank], i_txt_row[i_orank], + conf_info.obtype_as_group_val_flag); + + // Reset the obtype column + shc.set_obtype(cur_obtype.c_str()); // Reset the observation valid time shc.set_obs_valid_beg(vx_opt.vx_pd.beg_ut); @@ -2195,17 +2264,15 @@ void write_txt_files(const EnsembleStatVxOpt &vx_opt, void do_pct(const EnsembleStatVxOpt &vx_opt, const PairDataEnsemble &pd_ens) { - // Flag to indicate the presence of valid climo data - bool have_climo = (pd_ens.cmn_na.n_valid() > 0 && - pd_ens.csd_na.n_valid() > 0); - // If forecast probability thresholds were specified, use them. if(vx_opt.fcat_ta.n() > 0) { do_pct_cat_thresh(vx_opt, pd_ens); } // Otherwise, if climo data is available and bins were requested, // use climo_cdf thresholds instead. 
- else if(have_climo && vx_opt.cdf_info.cdf_ta.n() > 0) { + else if(pd_ens.ocmn_na.n_valid() > 0 && + pd_ens.ocsd_na.n_valid() > 0 && + vx_opt.cdf_info.cdf_ta.n() > 0) { do_pct_cdp_thresh(vx_opt, pd_ens); } @@ -2231,7 +2298,8 @@ void do_pct_cat_thresh(const EnsembleStatVxOpt &vx_opt, pd_pnt.extend(pd_ens.n_obs); // Determine the number of climo CDF bins - n_bin = (pd_ens.cmn_na.n_valid() > 0 && pd_ens.csd_na.n_valid() > 0 ? + n_bin = (pd_ens.ocmn_na.n_valid() > 0 && + pd_ens.ocsd_na.n_valid() > 0 ? vx_opt.get_n_cdf_bin() : 1); if(n_bin > 1) { @@ -2257,6 +2325,7 @@ void do_pct_cat_thresh(const EnsembleStatVxOpt &vx_opt, // Re-initialize pd_pnt.erase(); + pd_pnt.set_climo_cdf_info_ptr(&vx_opt.cdf_info); // Process the observations for(i_obs=0; i_obs 0 || (double) (n_vld/pd_ens.n_ens) >= conf_info.vld_data_thresh) { pd_pnt.add_grid_pair((double) n_evt/n_vld, pd_ens.o_na[i_obs], - pd_ens.cmn_na[i_obs], pd_ens.csd_na[i_obs], - pd_ens.wgt_na[i_obs]); + cpi, pd_ens.wgt_na[i_obs]); } } // end for i_obs @@ -2330,18 +2400,19 @@ void do_pct_cdp_thresh(const EnsembleStatVxOpt &vx_opt, int n_vld, n_evt, n_bin; PCTInfo *pct_info = (PCTInfo *) nullptr; PairDataPoint pd_pnt, pd; - ThreshArray cdp_thresh; + ThreshArray ocdp_thresh; // Derive a PairDataPoint object from the PairDataEnsemble input pd_pnt.extend(pd_ens.n_obs); // Derive the climo distribution percentile thresholds - cdp_thresh = derive_cdp_thresh(vx_opt.cdf_info.cdf_ta); - n_bin = cdp_thresh.n(); + ocdp_thresh = derive_ocdp_thresh(vx_opt.cdf_info.cdf_ta); + n_bin = ocdp_thresh.n(); mlog << Debug(2) - << "Computing Probabilistic Statistics for " << cdp_thresh.n() - << " climatological distribution percentile thresholds.\n"; + << "Computing Probabilistic Statistics for " + << ocdp_thresh.n() << " observation climatological " + << "distribution percentile thresholds.\n"; // Allocate memory pct_info = new PCTInfo [n_bin]; @@ -2351,10 +2422,11 @@ void do_pct_cdp_thresh(const EnsembleStatVxOpt &vx_opt, // Set the header 
columns shc.set_fcst_thresh(vx_opt.fpct_ta); - shc.set_obs_thresh(cdp_thresh[i_bin]); + shc.set_obs_thresh(ocdp_thresh[i_bin]); // Re-initialize pd_pnt.erase(); + pd_pnt.set_climo_cdf_info_ptr(&vx_opt.cdf_info); // Process the observations for(i_obs=0; i_obs 0 || (double) (n_vld/pd_ens.n_ens) >= conf_info.vld_data_thresh) { pd_pnt.add_grid_pair((double) n_evt/n_vld, pd_ens.o_na[i_obs], - pd_ens.cmn_na[i_obs], pd_ens.csd_na[i_obs], - pd_ens.wgt_na[i_obs]); + cpi, pd_ens.wgt_na[i_obs]); } } // end for i_obs @@ -2386,7 +2459,7 @@ void do_pct_cdp_thresh(const EnsembleStatVxOpt &vx_opt, // Store thresholds pct_info[i_bin].fthresh = vx_opt.fpct_ta; - pct_info[i_bin].othresh = cdp_thresh[i_bin]; + pct_info[i_bin].othresh = ocdp_thresh[i_bin]; pct_info[i_bin].allocate_n_alpha(vx_opt.get_n_ci_alpha()); for(i=0; i obs_v (nxy, bad_data_float); + vector obs_rank (nxy, bad_data_int ); + vector obs_pit (nxy, bad_data_float); + vector ens_vld (nxy, bad_data_int ); + vector ens_mean (nxy, bad_data_float); // Loop over all the pairs for(i=0; iget_var_info(), &nc_var, false, dp, + add_var_att_local(conf_info.vx_opt[i_vx].vx_pd.ens_info->get_var_info(), + &nc_var, false, dp, name_str.c_str(), long_name_str); // Write the data @@ -2693,7 +2744,8 @@ void write_orank_var_int(int i_vx, int i_interp, int i_mask, nc_var = add_var(nc_out, (string)var_name, ncInt, lat_dim, lon_dim); // Add the variable attributes - add_var_att_local(conf_info.vx_opt[i_vx].vx_pd.fcst_info->get_var_info(), &nc_var, true, dp, + add_var_att_local(conf_info.vx_opt[i_vx].vx_pd.ens_info->get_var_info(), + &nc_var, true, dp, name_str.c_str(), long_name_str); // Write the data diff --git a/src/tools/core/ensemble_stat/ensemble_stat.h b/src/tools/core/ensemble_stat/ensemble_stat.h index 28d26a5585..3e14ebbaa1 100644 --- a/src/tools/core/ensemble_stat/ensemble_stat.h +++ b/src/tools/core/ensemble_stat/ensemble_stat.h @@ -49,14 +49,14 @@ static const char * default_config_filename = 
"MET_BASE/config/EnsembleStatConfig_default"; // Text file abbreviations -static const char *txt_file_abbr[n_txt] = { +static const char * const txt_file_abbr[n_txt] = { "ecnt", "rps", "rhist", "phist", "orank", "ssvar", "relp", "pct", "pstd", "pjc", "prc", "eclv" }; // Header columns -static const char **txt_columns[n_txt] = { +static const char * const * txt_columns[n_txt] = { ecnt_columns, rps_columns, rhist_columns, phist_columns, orank_columns, ssvar_columns, relp_columns, pct_columns, pstd_columns, diff --git a/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc b/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc index 7987c742de..c218975d6b 100644 --- a/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc +++ b/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc @@ -70,11 +70,12 @@ void EnsembleStatConfInfo::clear() { vld_ens_thresh = bad_data_double; vld_data_thresh = bad_data_double; msg_typ_group_map.clear(); + obtype_as_group_val_flag = false; msg_typ_sfc.clear(); mask_area_map.clear(); mask_sid_map.clear(); grid_weight_flag = GridWeightType::None; - tmp_dir.clear(); + point_weight_flag = PointWeightType::None; output_prefix.clear(); version.clear(); @@ -158,6 +159,9 @@ void EnsembleStatConfInfo::process_config(GrdFileType etype, // Conf: grid_weight_flag grid_weight_flag = parse_conf_grid_weight_flag(&conf); + // Conf: point_weight_flag + point_weight_flag = parse_conf_point_weight_flag(&conf); + // Conf: output_prefix output_prefix = conf.lookup_string(conf_key_output_prefix); @@ -173,6 +177,10 @@ void EnsembleStatConfInfo::process_config(GrdFileType etype, msg_typ_sfc.parse_css(default_msg_typ_group_surface); } + // Conf: obtype_as_group_val_flag + obtype_as_group_val_flag = + conf.lookup_bool(conf_key_obtype_as_group_val_flag); + // Conf: ens_member_ids ens_member_ids = parse_conf_ens_member_ids(&conf); @@ -261,8 +269,9 @@ void EnsembleStatConfInfo::process_config(GrdFileType etype, n_vx = n_fvx; vx_opt = new EnsembleStatVxOpt 
[n_vx]; - // Check climatology fields - check_climo_n_vx(&conf, n_vx); + // Check for consistent number of climatology fields + check_climo_n_vx(fdict, n_vx); + check_climo_n_vx(odict, n_vx); // Parse settings for each verification task for(i=0,max_hira_size=0; iadd_input(input_info); + vx_pd.ens_info->add_input(input_info); + + // Set the fcst_info, if needed + if(!vx_pd.fcst_info) vx_pd.set_fcst_info(next_var); // Add InputInfo to fcst info list for each ensemble file provided // set var_info to nullptr to note first VarInfo should be used @@ -672,7 +683,7 @@ void EnsembleStatVxOpt::process_config(GrdFileType ftype, Dictionary &fdict, input_info.var_info = nullptr; input_info.file_index = j; input_info.file_list = ens_files; - vx_pd.fcst_info->add_input(input_info); + vx_pd.ens_info->add_input(input_info); } // end for j } // end for i @@ -691,11 +702,11 @@ void EnsembleStatVxOpt::process_config(GrdFileType ftype, Dictionary &fdict, input_info.var_info = next_var; input_info.file_index = ens_files->n() - 1; input_info.file_list = ens_files; - vx_pd.fcst_info->add_input(input_info); + vx_pd.ens_info->add_input(input_info); } // Allocate new VarInfo object for obs - vx_pd.obs_info = info_factory.new_var_info(otype); + vx_pd.obs_info = info_factory.new_var_info(otype); // Set the VarInfo objects vx_pd.obs_info->set_dict(odict); @@ -704,14 +715,14 @@ void EnsembleStatVxOpt::process_config(GrdFileType ftype, Dictionary &fdict, if(mlog.verbosity_level() >= 5) { mlog << Debug(5) << "Parsed forecast field:\n"; - vx_pd.fcst_info->get_var_info()->dump(cout); + vx_pd.ens_info->get_var_info()->dump(cout); mlog << Debug(5) << "Parsed observation field:\n"; vx_pd.obs_info->dump(cout); } // No support for wind direction - if(vx_pd.fcst_info->get_var_info()->is_wind_direction() || + if(vx_pd.ens_info->get_var_info()->is_wind_direction() || vx_pd.obs_info->is_wind_direction()) { mlog << Error << "\nEnsembleStatVxOpt::process_config() -> " << "wind direction may not be verified 
using grid_stat.\n\n"; @@ -783,7 +794,7 @@ void EnsembleStatVxOpt::process_config(GrdFileType ftype, Dictionary &fdict, ocat_ta = odict.lookup_thresh_array(conf_key_prob_cat_thresh); // The number of thresholds must match for non-probability forecasts - if(!vx_pd.fcst_info->get_var_info()->is_prob() && + if(!vx_pd.ens_info->get_var_info()->is_prob() && fcat_ta.n() != ocat_ta.n()) { mlog << Error << "\nEnsembleStatVxOpt::process_config() -> " << "The number of forecast (" << write_css(fcat_ta) @@ -936,7 +947,7 @@ void EnsembleStatVxOpt::set_vx_pd(EnsembleStatConfInfo *conf_info, int ctrl_inde } // Define the dimensions - vx_pd.set_pd_size(n_msg_typ, n_mask, n_interp); + vx_pd.set_size(n_msg_typ, n_mask, n_interp); // Store the climo CDF info vx_pd.set_climo_cdf_info_ptr(&cdf_info); @@ -1014,21 +1025,23 @@ void EnsembleStatVxOpt::set_perc_thresh(const PairDataEnsemble *pd_ptr) { // // Sort the input arrays // - NumArray fsort; - for(int i=0; in_ens; i++) fsort.add(pd_ptr->e_na[i]); - NumArray osort = pd_ptr->o_na; - NumArray csort = pd_ptr->cmn_na; - fsort.sort_array(); - osort.sort_array(); - csort.sort_array(); + NumArray f_sort; + for(int i=0; in_ens; i++) f_sort.add(pd_ptr->e_na[i]); + NumArray o_sort = pd_ptr->o_na; + NumArray fcmn_sort = pd_ptr->fcmn_na; + NumArray ocmn_sort = pd_ptr->ocmn_na; + f_sort.sort_array(); + o_sort.sort_array(); + fcmn_sort.sort_array(); + ocmn_sort.sort_array(); // // Compute percentiles, passing the observation filtering // thresholds in for the fcst and obs slots. 
// - othr_ta.set_perc(&fsort, &osort, &csort, &othr_ta, &othr_ta); - fcat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); - ocat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); + othr_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &othr_ta, &othr_ta); + fcat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); + ocat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); return; } @@ -1051,8 +1064,8 @@ int EnsembleStatVxOpt::n_txt_row(int i_txt_row) const { // Switch on the index of the line type switch(i_txt_row) { - case(i_ecnt): - case(i_rps): + case i_ecnt: + case i_rps: // Maximum number of ECNT and RPS lines possible = // Point Vx: Message Types * Masks * Interpolations * Obs Thresholds * Alphas @@ -1061,9 +1074,9 @@ int EnsembleStatVxOpt::n_txt_row(int i_txt_row) const { get_n_obs_thresh() * get_n_ci_alpha(); break; - case(i_rhist): - case(i_phist): - case(i_relp): + case i_rhist: + case i_phist: + case i_relp: // Maximum number of RHIST, PHIST, and RELP lines possible = // Point Vx: Message Types * Masks * Interpolations * Obs Thresholds @@ -1072,7 +1085,7 @@ int EnsembleStatVxOpt::n_txt_row(int i_txt_row) const { get_n_obs_thresh(); break; - case(i_orank): + case i_orank: // Compute the maximum number of matched pairs to be written // out by summing the number for each VxPairDataEnsemble object @@ -1082,40 +1095,40 @@ int EnsembleStatVxOpt::n_txt_row(int i_txt_row) const { n = vx_pd.get_n_pair() * get_n_obs_thresh(); break; - case(i_ssvar): + case i_ssvar: // Just return zero since we'll resize the output AsciiTables // to accomodate the SSVAR output n = 0; break; - case(i_pct): - case(i_pjc): - case(i_prc): + case i_pct: + case i_pjc: + case i_prc: // Maximum number of PCT, PJC, and PRC lines possible = - // Point Vx: Message Types * Masks * Interpolations * Categorical Thresholds - // Grid Vx: Masks * Interpolations * Categorical Thresholds + // Point Vx: Message Types * Masks * 
Interpolations * Categorical Thresholds * Climo CDF Bins + // Grid Vx: Masks * Interpolations * Categorical Thresholds * Climo CDF Bins n = (get_n_msg_typ() + 1) * get_n_mask() * get_n_interp() * - get_n_prob_cat_thresh(); + max(fcat_ta.n(), 1) * cdf_info.cdf_ta.n(); break; - case(i_pstd): + case i_pstd: // Maximum number of PSTD lines possible = - // Point Vx: Message Types * Masks * Interpolations * Categorical Thresholds * Alphas - // Grid Vx: Masks * Interpolations * Categorical Thresholds * Alphas + // Point Vx: Message Types * Masks * Interpolations * Categorical Thresholds * Climo CDF Bins * Alphas + // Grid Vx: Masks * Interpolations * Categorical Thresholds * Climo CDF Bins * Alphas n = (get_n_msg_typ() + 1) * get_n_mask() * get_n_interp() * - get_n_prob_cat_thresh() * get_n_ci_alpha(); + max(fcat_ta.n(), 1) * cdf_info.cdf_ta.n() * get_n_ci_alpha(); break; - case(i_eclv): + case i_eclv: // Maximum number of ECLV lines possible = - // Point Vx: Message Types * Masks * Interpolations * Probability Thresholds - // Grid Vx: Masks * Interpolations * Probability Thresholds + // Point Vx: Message Types * Masks * Interpolations * Probability Thresholds * Climo CDF Bins + // Grid Vx: Masks * Interpolations * Probability Thresholds * Climo CDF Bins n = (get_n_msg_typ() + 1) * get_n_mask() * get_n_interp() * - get_n_prob_cat_thresh() * get_n_prob_cat_thresh(); + get_n_prob_cat_thresh() * get_n_prob_cat_thresh() * cdf_info.cdf_ta.n(); break; default: diff --git a/src/tools/core/ensemble_stat/ensemble_stat_conf_info.h b/src/tools/core/ensemble_stat/ensemble_stat_conf_info.h index 9d9177cdab..36d6cf5f95 100644 --- a/src/tools/core/ensemble_stat/ensemble_stat_conf_info.h +++ b/src/tools/core/ensemble_stat/ensemble_stat_conf_info.h @@ -221,17 +221,19 @@ class EnsembleStatConfInfo { // Message type groups that should be processed together std::map msg_typ_group_map; StringArray msg_typ_sfc; + bool obtype_as_group_val_flag; // Mapping of mask names to MaskPlanes std::map 
mask_area_map; // Mapping of mask names to Station ID lists - std::map mask_sid_map; + std::map mask_sid_map; gsl_rng *rng_ptr; // GSL random number generator (allocated) - GridWeightType grid_weight_flag; // Grid weighting flag - ConcatString tmp_dir; // Directory for temporary files + GridWeightType grid_weight_flag; // Grid weighting flag + PointWeightType point_weight_flag; // Point weighting flag + ConcatString output_prefix; // String to customize output file name ConcatString version; // Config file version diff --git a/src/tools/core/grid_stat/Makefile.in b/src/tools/core/grid_stat/Makefile.in index 30cc717d27..97b6321c73 100644 --- a/src/tools/core/grid_stat/Makefile.in +++ b/src/tools/core/grid_stat/Makefile.in @@ -224,6 +224,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/core/grid_stat/grid_stat.cc b/src/tools/core/grid_stat/grid_stat.cc index 6d8e13864d..45afc74615 100644 --- a/src/tools/core/grid_stat/grid_stat.cc +++ b/src/tools/core/grid_stat/grid_stat.cc @@ -109,9 +109,11 @@ // filtering options. // 052 05/28/21 Halley Gotway Add MCTS HSS_EC output. // 053 12/11/21 Halley Gotway MET #1991 Fix VCNT output. -// 054 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main -// 055 10/03/22 Prestopnik MET #2227 Remove using namespace netCDF from header files -// 056 01/29/24 Halley Gotway MET #2801 Configure time difference warnings +// 054 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main. +// 055 10/03/22 Prestopnik MET #2227 Remove using namespace netCDF from header files. +// 056 01/29/24 Halley Gotway MET #2801 Configure time difference warnings. +// 057 07/05/24 Halley Gotway MET #2924 Support forecast climatology. +// 058 10/03/24 Halley Gotway MET #2887 Compute weighted contingency tables. 
// //////////////////////////////////////////////////////////////////////// @@ -168,6 +170,7 @@ static void get_mask_points(const GridStatVxOpt &, const MaskPlane &, const DataPlane *, const DataPlane *, const DataPlane *, const DataPlane *, const DataPlane *, + const DataPlane *, const DataPlane *, PairDataPoint &); static void do_cts (CTSInfo *&, int, const PairDataPoint *); @@ -194,8 +197,8 @@ static void clean_up(); static void usage(); static void set_outdir(const StringArray &); static void set_compress(const StringArray &); -static bool read_data_plane(VarInfo* info, DataPlane& dp, Met2dDataFile* mtddf, - const ConcatString &filename); +static bool read_data_plane(VarInfo *info, DataPlane &dp, Met2dDataFile *mtddf, + const ConcatString &filename, const char *desc); #ifdef WITH_UGRID static void set_ugrid_config(const StringArray &); #endif @@ -492,27 +495,27 @@ void setup_txt_files(unixtime valid_ut, int lead_sec) { // Get the maximum number of columns for this line type switch(i) { - case(i_mctc): + case i_mctc: max_col = get_n_mctc_columns(n_cat) + n_header_columns + 1; break; - case(i_pct): + case i_pct: max_col = get_n_pct_columns(n_prob) + n_header_columns + 1; break; - case(i_pstd): + case i_pstd: max_col = get_n_pstd_columns(n_prob) + n_header_columns + 1; break; - case(i_pjc): + case i_pjc: max_col = get_n_pjc_columns(n_prob) + n_header_columns + 1; break; - case(i_prc): + case i_prc: max_col = get_n_prc_columns(n_prob) + n_header_columns + 1; break; - case(i_eclv): + case i_eclv: max_col = get_n_eclv_columns(n_eclv) + n_header_columns + 1; break; @@ -528,27 +531,27 @@ void setup_txt_files(unixtime valid_ut, int lead_sec) { // Write the text header row switch(i) { - case(i_mctc): + case i_mctc: write_mctc_header_row(1, n_cat, txt_at[i], 0, 0); break; - case(i_pct): + case i_pct: write_pct_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_pstd): + case i_pstd: write_pstd_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_pjc): + case i_pjc: 
write_pjc_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_prc): + case i_prc: write_prc_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_eclv): + case i_eclv: write_eclv_header_row(1, n_eclv, txt_at[i], 0, 0); break; @@ -672,8 +675,8 @@ void process_scores() { DataPlane fcst_dp_thresh, obs_dp_thresh; // Climatology mean and standard deviation - DataPlane cmn_dp, csd_dp; - DataPlane cmn_dp_smooth; + DataPlane fcmn_dp, fcsd_dp, ocmn_dp, ocsd_dp; + DataPlane fcmn_dp_smooth, ocmn_dp_smooth; // Paired forecast, observation, climatology, and weight values PairDataPoint pd; @@ -682,19 +685,20 @@ void process_scores() { PairDataPoint pd_thr; // Allocate memory in one big chunk based on grid size - pd.extend(grid.nx()*grid.ny()); + pd.extend(grid.nxy()); if(conf_info.output_flag[i_nbrctc] != STATOutputType::None || conf_info.output_flag[i_nbrcts] != STATOutputType::None || conf_info.output_flag[i_nbrcnt] != STATOutputType::None || conf_info.output_flag[i_dmap] != STATOutputType::None) { - pd_thr.extend(grid.nx()*grid.ny()); + pd_thr.extend(grid.nxy()); } // Objects to handle vector winds DataPlane fu_dp, ou_dp; DataPlane fu_dp_smooth, ou_dp_smooth; - DataPlane cmnu_dp, csdu_dp, cmnu_dp_smooth; + DataPlane fcmnu_dp, fcsdu_dp, fcmnu_dp_smooth; + DataPlane ocmnu_dp, ocsdu_dp, ocmnu_dp_smooth; PairDataPoint pd_u; DataPlane seeps_dp, seeps_dp_fcat, seeps_dp_ocat; @@ -722,7 +726,8 @@ void process_scores() { // Read the gridded data from the input forecast file if(!read_data_plane(conf_info.vx_opt[i].fcst_info, - fcst_dp, fcst_mtddf, fcst_file)) continue; + fcst_dp, fcst_mtddf, fcst_file, + "forecast")) continue; mlog << Debug(3) << "Reading forecast data for " @@ -737,7 +742,8 @@ void process_scores() { // Read the gridded data from the input observation file if(!read_data_plane(conf_info.vx_opt[i].obs_info, - obs_dp, obs_mtddf, obs_file)) continue; + obs_dp, obs_mtddf, obs_file, + "observation")) continue; mlog << Debug(3) << "Reading observation data for " @@ 
-784,23 +790,42 @@ void process_scores() { << ".\n\n"; } - // Read climatology data - cmn_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_mean_field, false), - i, fcst_dp.valid(), grid); - csd_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_stdev_field, false), - i, fcst_dp.valid(), grid); + // Read forecast climatology data + fcmn_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_fcst), + conf_key_climo_mean, + i, fcst_dp.valid(), grid, + "forecast climatology mean"); + fcsd_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_fcst), + conf_key_climo_stdev, + i, fcst_dp.valid(), grid, + "forecast climatology standard deviation"); + + // Read observation climatology data + ocmn_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_obs), + conf_key_climo_mean, + i, fcst_dp.valid(), grid, + "observation climatology mean"); + ocsd_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_obs), + conf_key_climo_stdev, + i, fcst_dp.valid(), grid, + "observation climatology standard deviation"); mlog << Debug(3) - << "Found " << (cmn_dp.nx() == 0 ? 0 : 1) - << " climatology mean and " << (csd_dp.nx() == 0 ? 0 : 1) - << " climatology standard deviation field(s) for forecast " - << conf_info.vx_opt[i].fcst_info->magic_str() << ".\n"; + << "For " << conf_info.vx_opt[i].fcst_info->magic_str() << ", found " + << (fcmn_dp.is_empty() ? 0 : 1) << " forecast climatology mean and " + << (fcsd_dp.is_empty() ? 0 : 1) << " standard deviation field(s), and " + << (ocmn_dp.is_empty() ? 0 : 1) << " observation climatology mean and " + << (ocsd_dp.is_empty() ? 
0 : 1) << " standard deviation field(s).\n"; // Apply MPR threshold filters if(conf_info.vx_opt[i].mpr_sa.n() > 0) { - apply_mpr_thresh_mask(fcst_dp, obs_dp, cmn_dp, csd_dp, + apply_mpr_thresh_mask(fcst_dp, obs_dp, + fcmn_dp, fcsd_dp, + ocmn_dp, ocsd_dp, conf_info.vx_opt[i].mpr_sa, conf_info.vx_opt[i].mpr_ta); } @@ -879,22 +904,20 @@ void process_scores() { // Store the current mask mask_mp = conf_info.mask_map[conf_info.vx_opt[i].mask_name[k]]; - // Turn off the mask for missing data values + // Turn off the mask for any grid points containing bad data mask_bad_data(mask_mp, fcst_dp_smooth); mask_bad_data(mask_mp, obs_dp_smooth); - if(cmn_dp.nx() == fcst_dp_smooth.nx() && - cmn_dp.ny() == fcst_dp_smooth.ny()) { - mask_bad_data(mask_mp, cmn_dp); - } - if(csd_dp.nx() == fcst_dp_smooth.nx() && - csd_dp.ny() == fcst_dp_smooth.ny()) { - mask_bad_data(mask_mp, csd_dp); - } + if(!fcmn_dp.is_empty()) mask_bad_data(mask_mp, fcmn_dp); + if(!fcsd_dp.is_empty()) mask_bad_data(mask_mp, fcsd_dp); + if(!ocmn_dp.is_empty()) mask_bad_data(mask_mp, ocmn_dp); + if(!ocsd_dp.is_empty()) mask_bad_data(mask_mp, ocsd_dp); // Apply the current mask to the current fields get_mask_points(conf_info.vx_opt[i], mask_mp, &fcst_dp_smooth, &obs_dp_smooth, - &cmn_dp, &csd_dp, &wgt_dp, pd); + &fcmn_dp, &fcsd_dp, + &ocmn_dp, &ocsd_dp, + &wgt_dp, pd); // Set the mask name shc.set_mask(conf_info.vx_opt[i].mask_name[k].c_str()); @@ -931,10 +954,10 @@ void process_scores() { // Loop through all of the thresholds for(m=0; m 0) { + if(cts_info[m].cts.n_pairs() == 0) continue; + // Write out FHO + if(conf_info.vx_opt[i].output_flag[i_fho] != STATOutputType::None) { write_fho_row(shc, cts_info[m], conf_info.vx_opt[i].output_flag[i_fho], stat_at, i_stat_row, @@ -942,9 +965,7 @@ void process_scores() { } // Write out CTC - if(conf_info.vx_opt[i].output_flag[i_ctc] != STATOutputType::None && - cts_info[m].cts.n() > 0) { - + if(conf_info.vx_opt[i].output_flag[i_ctc] != STATOutputType::None) { write_ctc_row(shc, 
cts_info[m], conf_info.vx_opt[i].output_flag[i_ctc], stat_at, i_stat_row, @@ -952,9 +973,7 @@ void process_scores() { } // Write out CTS - if(conf_info.vx_opt[i].output_flag[i_cts] != STATOutputType::None && - cts_info[m].cts.n() > 0) { - + if(conf_info.vx_opt[i].output_flag[i_cts] != STATOutputType::None) { write_cts_row(shc, cts_info[m], conf_info.vx_opt[i].output_flag[i_cts], stat_at, i_stat_row, @@ -962,9 +981,7 @@ void process_scores() { } // Write out ECLV - if(conf_info.vx_opt[i].output_flag[i_eclv] != STATOutputType::None && - cts_info[m].cts.n() > 0) { - + if(conf_info.vx_opt[i].output_flag[i_eclv] != STATOutputType::None) { write_eclv_row(shc, cts_info[m], conf_info.vx_opt[i].eclv_points, conf_info.vx_opt[i].output_flag[i_eclv], stat_at, i_stat_row, @@ -985,10 +1002,10 @@ void process_scores() { // Compute MCTS do_mcts(mcts_info, i, &pd); - // Write out MCTC - if(conf_info.vx_opt[i].output_flag[i_mctc] != STATOutputType::None && - mcts_info.cts.total() > 0) { + if(mcts_info.cts.n_pairs() == 0) continue; + // Write out MCTC + if(conf_info.vx_opt[i].output_flag[i_mctc] != STATOutputType::None) { write_mctc_row(shc, mcts_info, conf_info.vx_opt[i].output_flag[i_mctc], stat_at, i_stat_row, @@ -996,9 +1013,7 @@ void process_scores() { } // Write out MCTS - if(conf_info.vx_opt[i].output_flag[i_mcts] != STATOutputType::None && - mcts_info.cts.total() > 0) { - + if(conf_info.vx_opt[i].output_flag[i_mcts] != STATOutputType::None) { write_mcts_row(shc, mcts_info, conf_info.vx_opt[i].output_flag[i_mcts], stat_at, i_stat_row, @@ -1036,19 +1051,37 @@ void process_scores() { // Read forecast data for UGRD if(!read_data_plane(conf_info.vx_opt[ui].fcst_info, - fu_dp, fcst_mtddf, fcst_file)) continue; + fu_dp, fcst_mtddf, fcst_file, + "U-wind forecast")) continue; // Read observation data for UGRD if(!read_data_plane(conf_info.vx_opt[ui].obs_info, - ou_dp, obs_mtddf, obs_file)) continue; - - // Read climatology data for UGRD - cmnu_dp = read_climo_data_plane( - 
conf_info.conf.lookup_array(conf_key_climo_mean_field, false), - ui, fcst_dp.valid(), grid); - csdu_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_stdev_field, false), - ui, fcst_dp.valid(), grid); + ou_dp, obs_mtddf, obs_file, + "U-wind observation")) continue; + + // Read the forecast climatology data for UGRD + fcmnu_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_fcst), + conf_key_climo_mean, + ui, fcst_dp.valid(), grid, + "forecast U-wind climatology mean"); + fcsdu_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_fcst), + conf_key_climo_stdev, + ui, fcst_dp.valid(), grid, + "forecast U-wind climatology standard deviation"); + + // Read the observation climatology data for UGRD + ocmnu_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_obs), + conf_key_climo_mean, + ui, fcst_dp.valid(), grid, + "observation U-wind climatology mean"); + ocsdu_dp = read_climo_data_plane( + conf_info.conf.lookup_dictionary(conf_key_obs), + conf_key_climo_stdev, + ui, fcst_dp.valid(), grid, + "observation U-wind climatology standard deviation"); // If requested in the config file, smooth the forecast // and climatology U-wind fields @@ -1080,7 +1113,9 @@ void process_scores() { // Apply the current mask to the U-wind fields get_mask_points(conf_info.vx_opt[i], mask_mp, &fu_dp_smooth, &ou_dp_smooth, - &cmnu_dp, &csdu_dp, &wgt_dp, pd_u); + &fcmnu_dp, &fcsdu_dp, + &ocmnu_dp, &ocsdu_dp, + &wgt_dp, pd_u); // Compute VL1L2 do_vl1l2(vl1l2_info, i, &pd_u, &pd); @@ -1153,23 +1188,44 @@ void process_scores() { } if(conf_info.vx_opt[i].nc_info.do_diff) { write_nc((string)"DIFF", subtract(fcst_dp_smooth, obs_dp_smooth), - i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); } - if(conf_info.vx_opt[i].nc_info.do_climo && !cmn_dp.is_empty()) { - write_nc((string)"CLIMO_MEAN", cmn_dp, i, mthd, pnts, + if(conf_info.vx_opt[i].nc_info.do_climo && 
+ !fcmn_dp.is_empty()) { + write_nc((string)"FCST_CLIMO_MEAN", fcmn_dp, + i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); } - if(conf_info.vx_opt[i].nc_info.do_climo && !csd_dp.is_empty()) { - write_nc((string)"CLIMO_STDEV", csd_dp, i, mthd, pnts, + if(conf_info.vx_opt[i].nc_info.do_climo && + !fcsd_dp.is_empty()) { + write_nc((string)"FCST_CLIMO_STDEV", fcsd_dp, + i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); } - if(conf_info.vx_opt[i].nc_info.do_climo && !cmn_dp.is_empty() && !csd_dp.is_empty()) { - write_nc((string)"CLIMO_CDF", normal_cdf(obs_dp, cmn_dp, csd_dp), - i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); + if(conf_info.vx_opt[i].nc_info.do_climo && + !ocmn_dp.is_empty()) { + write_nc((string)"OBS_CLIMO_MEAN", ocmn_dp, + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); + } + if(conf_info.vx_opt[i].nc_info.do_climo && + !ocsd_dp.is_empty()) { + write_nc((string)"OBS_CLIMO_STDEV", ocsd_dp, + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); + } + if(conf_info.vx_opt[i].nc_info.do_climo && + !ocmn_dp.is_empty() && !ocsd_dp.is_empty()) { + write_nc((string)"OBS_CLIMO_CDF", normal_cdf(obs_dp, ocmn_dp, ocsd_dp), + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); } // Write out the fields of requested climo distribution percentile threshold values - if(conf_info.vx_opt[i].nc_info.do_climo_cdp && !cmn_dp.is_empty() && !csd_dp.is_empty()) { + if(conf_info.vx_opt[i].nc_info.do_climo_cdp && + ((!fcmn_dp.is_empty() && !fcsd_dp.is_empty()) || + (!ocmn_dp.is_empty() && !ocsd_dp.is_empty()))) { // Construct one list of all thresholds ThreshArray ta; @@ -1185,36 +1241,48 @@ void process_scores() { // Process all CDP thresholds except 0 and 100 for(vector::iterator it = simp.begin(); it != simp.end(); it++) { - if(it->ptype() == perc_thresh_climo_dist && + if(it->ptype() == perc_thresh_fcst_climo_dist && !is_eq(it->pvalue(), 0.0) && !is_eq(it->pvalue(), 100.0)) { - cs << cs_erase << "CLIMO_CDP" << nint(it->pvalue()); - 
write_nc(cs, normal_cdf_inv(it->pvalue()/100.0, cmn_dp, csd_dp), - i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); + cs << cs_erase << "FCST_CLIMO_CDP" << nint(it->pvalue()); + write_nc(cs, normal_cdf_inv(it->pvalue()/100.0, fcmn_dp, fcsd_dp), + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); + } + else if(it->ptype() == perc_thresh_obs_climo_dist && + !is_eq(it->pvalue(), 0.0) && + !is_eq(it->pvalue(), 100.0)) { + cs << cs_erase << "OBS_CLIMO_CDP" << nint(it->pvalue()); + write_nc(cs, normal_cdf_inv(it->pvalue()/100.0, ocmn_dp, ocsd_dp), + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); } } // end for it } // Write out the fields of requested SEEPS - if(conf_info.vx_opt[i].output_flag[i_seeps] != STATOutputType::None - && conf_info.vx_opt[i].fcst_info->is_precipitation() - && conf_info.vx_opt[i].obs_info->is_precipitation()) { - SeepsAggScore seeps; + if(conf_info.vx_opt[i].output_flag[i_seeps] != STATOutputType::None && + conf_info.vx_opt[i].fcst_info->is_precipitation() && + conf_info.vx_opt[i].obs_info->is_precipitation()) { + SeepsAggScore seeps_agg; int month, day, year, hour, minute, second; unix_to_mdyhms(fcst_dp.valid(), month, day, year, hour, minute, second); compute_aggregated_seeps_grid(fcst_dp_smooth, obs_dp_smooth, seeps_dp, seeps_dp_fcat, seeps_dp_ocat, - &seeps, month, hour, - conf_info.vx_opt[i].seeps_p1_thresh); + &seeps_agg, month, hour, + conf_info.seeps_p1_thresh, conf_info.seeps_climo_name); - write_nc("SEEPS_MPR_SCORE", seeps_dp, i, mthd, pnts, + write_nc("SEEPS_MPR_SCORE", seeps_dp, + i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); - write_nc("SEEPS_MPR_FCAT", seeps_dp_fcat, i, mthd, pnts, + write_nc("SEEPS_MPR_FCAT", seeps_dp_fcat, + i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); - write_nc("SEEPS_MPR_OCAT", seeps_dp_ocat, i, mthd, pnts, + write_nc("SEEPS_MPR_OCAT", seeps_dp_ocat, + i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); - write_seeps_row(shc, &seeps, conf_info.output_flag[i_seeps], + 
write_seeps_row(shc, &seeps_agg, conf_info.output_flag[i_seeps], stat_at, i_stat_row, txt_at[i_seeps], i_txt_row[i_seeps]); } @@ -1225,8 +1293,8 @@ void process_scores() { // Allocate memory in one big chunk based on grid size DataPlane fgx_dp, fgy_dp, ogx_dp, ogy_dp; PairDataPoint pd_gx, pd_gy; - pd_gx.extend(grid.nx()*grid.ny()); - pd_gy.extend(grid.nx()*grid.ny()); + pd_gx.extend(grid.nxy()); + pd_gy.extend(grid.nxy()); // Loop over gradient Dx/Dy for(k=0; kwidth[j], nbrhd->shape, grid.wrap_lon(), conf_info.vx_opt[i].fcat_ta[k], - &cmn_dp, &csd_dp, + &fcmn_dp, &fcsd_dp, + &ocmn_dp, &ocsd_dp, nbrhd->vld_thresh); // Compute the binary threshold field @@ -1538,7 +1614,8 @@ void process_scores() { nbrhd->width[j], nbrhd->shape, grid.wrap_lon(), conf_info.vx_opt[i].ocat_ta[k], - &cmn_dp, &csd_dp, + &fcmn_dp, &fcsd_dp, + &ocmn_dp, &ocsd_dp, nbrhd->vld_thresh); // Compute the binary threshold field @@ -1565,7 +1642,7 @@ void process_scores() { } } - // Turn off the mask for bad forecast or observation values + // Turn off the mask for any grid points containing bad data mask_bad_data(mask_mp, fcst_dp_smooth); mask_bad_data(mask_mp, obs_dp_smooth); @@ -1583,14 +1660,18 @@ void process_scores() { // and thresholded fields get_mask_points(conf_info.vx_opt[i], mask_mp, &fcst_dp_smooth, &obs_dp_smooth, - 0, 0, &wgt_dp, pd); + nullptr, nullptr, nullptr, nullptr, + &wgt_dp, pd); get_mask_points(conf_info.vx_opt[i], mask_mp, &fcst_dp_thresh, &obs_dp_thresh, - 0, 0, 0, pd_thr); + nullptr, nullptr, nullptr, nullptr, + nullptr, pd_thr); // Store climatology values as bad data - pd.cmn_na.add_const(bad_data_double, pd.f_na.n()); - pd.csd_na.add_const(bad_data_double, pd.f_na.n()); + pd.fcmn_na.add_const(bad_data_double, pd.f_na.n()); + pd.fcsd_na.add_const(bad_data_double, pd.f_na.n()); + pd.ocmn_na.add_const(bad_data_double, pd.f_na.n()); + pd.ocsd_na.add_const(bad_data_double, pd.f_na.n()); mlog << Debug(2) << "Processing " << conf_info.vx_opt[i].fcst_info->magic_str() @@ 
-1625,7 +1706,7 @@ void process_scores() { for(n=0; n 0 - if(nbrcts_info[n].cts_info.cts.n() > 0) { + if(nbrcts_info[n].cts_info.cts.n_pairs() > 0) { // Write out NBRCTC if(conf_info.vx_opt[i].output_flag[i_nbrctc] != STATOutputType::None) { @@ -1691,15 +1772,16 @@ void process_scores() { for(j=0; jcmn_na.n_valid() > 0 && - pd_ptr->csd_na.n_valid() > 0 ? + // Determine the number of observation climo CDF bins + n_bin = (pd_ptr->ocmn_na.n_valid() > 0 && + pd_ptr->ocsd_na.n_valid() > 0 ? vx_opt.get_n_cdf_bin() : 1); if(n_bin > 1) { @@ -2310,8 +2437,9 @@ void do_pct(const GridStatVxOpt &vx_opt, const PairDataPoint *pd_ptr) { mlog << Debug(2) << "Computing Probabilistic Statistics.\n"; - // Determine the number of climo CDF bins - n_bin = (pd_ptr->cmn_na.n_valid() > 0 && pd_ptr->csd_na.n_valid() > 0 ? + // Determine the number of observation climo CDF bins + n_bin = (pd_ptr->ocmn_na.n_valid() > 0 && + pd_ptr->ocsd_na.n_valid() > 0 ? vx_opt.get_n_cdf_bin() : 1); if(n_bin > 1) { @@ -2346,7 +2474,7 @@ void do_pct(const GridStatVxOpt &vx_opt, const PairDataPoint *pd_ptr) { } // Compute the probabilistic counts and statistics - compute_pctinfo(pd, ( STATOutputType::None!=vx_opt.output_flag[i_pstd]), pct_info[j]); + compute_pctinfo(pd, (STATOutputType::None!=vx_opt.output_flag[i_pstd]), pct_info[j]); // Check for no matched pairs to process if(pd.n_obs == 0) continue; @@ -2615,8 +2743,7 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, n_masks = (apply_mask ? 
conf_info.vx_opt[i_vx].get_n_mask() : 1); // Allocate memory - float *data = (float *) nullptr; - data = new float [grid.nx()*grid.ny()]; + vector data(grid.nxy()); // Set the NetCDF compression level int deflate_level = compress_level; @@ -2665,45 +2792,81 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, << conf_info.vx_opt[i_vx].fcst_info->units_attr() << " and " << conf_info.vx_opt[i_vx].obs_info->units_attr(); } - else if(field_name == "CLIMO_MEAN") { + else if(field_name == "FCST_CLIMO_MEAN") { var_name << cs_erase << field_name << "_" << obs_name << var_suffix << "_" << mask_str; + // Append interpolation string for Fourier decomposition if(interp_str.nonempty()) { - if(strncmp(interp_str.c_str(), "_WV", 3) == 0) var_name << interp_str; + if(interp_str.startswith("_WV")) var_name << interp_str; } long_att << cs_erase - << "Climatology mean for " + << "Forecast climatology mean for " + << fcst_long_name; + level_att = shc.get_fcst_lev(); + units_att = conf_info.vx_opt[i_vx].fcst_info->units_attr(); + } + else if(field_name == "FCST_CLIMO_STDEV") { + var_name << cs_erase << field_name << "_" + << obs_name << var_suffix << "_" << mask_str; + long_att << cs_erase + << "Forecast climatology standard deviation for " + << fcst_long_name; + level_att = shc.get_fcst_lev(); + units_att = conf_info.vx_opt[i_vx].fcst_info->units_attr(); + } + else if(field_name == "OBS_CLIMO_MEAN") { + var_name << cs_erase << field_name << "_" + << obs_name << var_suffix << "_" << mask_str; + + // Append interpolation string for Fourier decomposition + if(interp_str.nonempty()) { + if(interp_str.startswith("_WV")) var_name << interp_str; + } + long_att << cs_erase + << "Observation climatology mean for " << obs_long_name; level_att = shc.get_obs_lev(); units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); } - else if(field_name == "CLIMO_STDEV") { + else if(field_name == "OBS_CLIMO_STDEV") { var_name << cs_erase << field_name << "_" << obs_name << var_suffix << 
"_" << mask_str; long_att << cs_erase - << "Climatology standard deviation for " + << "Observation climatology standard deviation for " << obs_long_name; level_att = shc.get_obs_lev(); units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); } - else if(field_name == "CLIMO_CDF") { + else if(field_name == "OBS_CLIMO_CDF") { var_name << cs_erase << field_name << "_" << obs_name << var_suffix << "_" << mask_str; long_att << cs_erase - << "Climatology cumulative distribution function for " + << "Observation climatology cumulative distribution function for " << obs_long_name; level_att = shc.get_obs_lev(); units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); } - else if(strncmp(field_name.c_str(), "CLIMO_CDP", 9) == 0) { + else if(field_name.startswith("FCST_CLIMO_CDP")) { + var_name << cs_erase + << field_name << "_" + << conf_info.vx_opt[i_vx].fcst_info->name_attr() << "_" + << conf_info.vx_opt[i_vx].fcst_info->level_attr() + << var_suffix << "_" << mask_str; + long_att << cs_erase + << "Forecast climatology distribution percentile thresholds for " + << fcst_long_name; + level_att = shc.get_fcst_lev(); + units_att = conf_info.vx_opt[i_vx].fcst_info->units_attr(); + } + else if(field_name.startswith("OBS_CLIMO_CDP")) { var_name << cs_erase << field_name << "_" << conf_info.vx_opt[i_vx].obs_info->name_attr() << "_" << conf_info.vx_opt[i_vx].obs_info->level_attr() << var_suffix << "_" << mask_str; long_att << cs_erase - << "Climatology distribution percentile thresholds for " + << "Observation climatology distribution percentile thresholds for " << obs_long_name; level_att = shc.get_obs_lev(); units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); @@ -2762,7 +2925,7 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, level_att = shc.get_obs_lev(); units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); } - else if(strncmp(field_name.c_str(), "SEEPS_MPR", 9) == 0) { + else if(field_name.startswith("SEEPS_MPR")) { ConcatString seeps_desc; 
var_name << cs_erase << field_name << "_" << obs_name << var_suffix << "_" << mask_str; @@ -2770,11 +2933,11 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, field_type == FieldType::Both) { var_name << interp_str; } - if(strncmp(field_name.c_str(), "SEEPS_MPR_SCORE", 15) == 0) + if(field_name.startswith("SEEPS_MPR_SCORE")) seeps_desc = "score"; - else if(strncmp(field_name.c_str(), "SEEPS_MPR_FCAT", 14) == 0) + else if(field_name.startswith("SEEPS_MPR_FCAT")) seeps_desc = "forecast category"; - else if(strncmp(field_name.c_str(), "SEEPS_MPR_OCAT", 14) == 0) + else if(field_name.startswith("SEEPS_MPR_OCAT")) seeps_desc = "observation category"; long_att << cs_erase << "SEEPS MPR " << seeps_desc << " for " @@ -2837,7 +3000,7 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, } // end for x // Write out the data - if(!put_nc_data_with_dims(&nc_var, &data[0], grid.ny(), grid.nx())) { + if(!put_nc_data_with_dims(&nc_var, data.data(), grid.ny(), grid.nx())) { mlog << Error << "\nwrite_nc() -> " << "error writing NetCDF variable name " << var_name << "\n\n"; @@ -2846,9 +3009,6 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, } // end for i - // Deallocate and clean up - if(data) { delete [] data; data = (float *) nullptr; } - return; } @@ -2882,9 +3042,6 @@ void write_nbrhd_nc(const DataPlane &fcst_dp, const DataPlane &obs_dp, // Store the apply_mask option apply_mask = conf_info.vx_opt[i_vx].nc_info.do_apply_mask; - float *fcst_data = (float *) nullptr; - float *obs_data = (float *) nullptr; - NcVar fcst_var; NcVar obs_var; @@ -2917,8 +3074,8 @@ void write_nbrhd_nc(const DataPlane &fcst_dp, const DataPlane &obs_dp, if(!fcst_flag && !obs_flag) return; // Allocate memory for the forecast and observation fields - fcst_data = new float [grid.nx()*grid.ny()]; - obs_data = new float [grid.nx()*grid.ny()]; + vector fcst_data(grid.nxy()); + vector obs_data (grid.nxy()); // Add the forecast variable if(fcst_flag) { @@ 
-2997,7 +3154,7 @@ void write_nbrhd_nc(const DataPlane &fcst_dp, const DataPlane &obs_dp, // Write out the forecast field if(fcst_flag) { - if(!put_nc_data_with_dims(&fcst_var, &fcst_data[0], grid.ny(), grid.nx())) { + if(!put_nc_data_with_dims(&fcst_var, fcst_data.data(), grid.ny(), grid.nx())) { mlog << Error << "\nwrite_nbrhd_nc() -> " << "error with the fcst_var->put for forecast variable " << fcst_var_name << "\n\n"; @@ -3007,7 +3164,7 @@ void write_nbrhd_nc(const DataPlane &fcst_dp, const DataPlane &obs_dp, // Write out the observation field if(obs_flag) { - if(!put_nc_data_with_dims(&obs_var, &obs_data[0], grid.ny(), grid.nx())) { + if(!put_nc_data_with_dims(&obs_var, obs_data.data(), grid.ny(), grid.nx())) { mlog << Error << "\nwrite_nbrhd_nc() -> " << "error with the obs_var->put for observation variable " << obs_var_name << "\n\n"; @@ -3015,10 +3172,6 @@ void write_nbrhd_nc(const DataPlane &fcst_dp, const DataPlane &obs_dp, } } - // Deallocate and clean up - if(fcst_data) { delete [] fcst_data; fcst_data = (float *) nullptr; } - if(obs_data) { delete [] obs_data; obs_data = (float *) nullptr; } - return; } @@ -3161,15 +3314,15 @@ void set_compress(const StringArray & a) { //////////////////////////////////////////////////////////////////////// -bool read_data_plane(VarInfo* info, DataPlane& dp, Met2dDataFile* mtddf, - const ConcatString &filename) { +bool read_data_plane(VarInfo *info, DataPlane &dp, Met2dDataFile *mtddf, + const ConcatString &filename, const char *desc) { bool status = mtddf->data_plane(*info, dp); if(!status) { mlog << Warning << "\nread_data_plane() -> " << info->magic_str() - << " not found in file: " << filename + << " not found in " << desc << " file: " << filename << "\n\n"; return false; } @@ -3177,9 +3330,10 @@ bool read_data_plane(VarInfo* info, DataPlane& dp, Met2dDataFile* mtddf, // Regrid, if necessary if(!(mtddf->grid() == grid)) { mlog << Debug(1) - << "Regridding field " + << "Regridding " << desc << " field " << 
info->magic_str() - << " to the verification grid.\n"; + << " to the verification grid using " + << info->regrid().get_str() << ".\n"; dp = met_regrid(dp, mtddf->grid(), grid, info->regrid()); } diff --git a/src/tools/core/grid_stat/grid_stat.h b/src/tools/core/grid_stat/grid_stat.h index ccd85c82ac..9f170b5a9f 100644 --- a/src/tools/core/grid_stat/grid_stat.h +++ b/src/tools/core/grid_stat/grid_stat.h @@ -66,7 +66,7 @@ static const char * default_config_filename = static const char * default_out_dir = "."; // Header columns -static const char **txt_columns[n_txt] = { +static const char * const * txt_columns[n_txt] = { fho_columns, ctc_columns, cts_columns, mctc_columns, mcts_columns, cnt_columns, sl1l2_columns, sal1l2_columns, vl1l2_columns, @@ -90,7 +90,7 @@ static const int n_txt_columns[n_txt] = { }; // Text file abbreviations -static const char *txt_file_abbr[n_txt] = { +static const char * const txt_file_abbr[n_txt] = { "fho", "ctc", "cts", "mctc", "mcts", "cnt", "sl1l2", "sal1l2", "vl1l2", diff --git a/src/tools/core/grid_stat/grid_stat_conf_info.cc b/src/tools/core/grid_stat/grid_stat_conf_info.cc index db34b15925..d334804850 100644 --- a/src/tools/core/grid_stat/grid_stat_conf_info.cc +++ b/src/tools/core/grid_stat/grid_stat_conf_info.cc @@ -84,6 +84,9 @@ void GridStatConfInfo::clear() { output_ascii_flag = false; output_nc_flag = false; + seeps_climo_name.clear(); + seeps_p1_thresh.clear(); + // Deallocate memory if(vx_opt) { delete [] vx_opt; vx_opt = (GridStatVxOpt *) nullptr; } @@ -162,6 +165,12 @@ void GridStatConfInfo::process_config(GrdFileType ftype, // Conf: tmp_dir tmp_dir = parse_conf_tmp_dir(&conf); + // Conf: threshold for SEEPS p1 + seeps_p1_thresh = conf.lookup_thresh(conf_key_seeps_p1_thresh); + + // Conf: SEEPS climo filename + seeps_climo_name = conf.lookup_string(conf_key_seeps_grid_climo_name, false); + #ifdef WITH_UGRID // Conf: ugrid_dataset if (!ignore_ugrid_dataset) ugrid_dataset = parse_conf_ugrid_dataset(&conf); @@ -202,7 +211,8 
@@ void GridStatConfInfo::process_config(GrdFileType ftype, vx_opt = new GridStatVxOpt [n_vx]; // Check for consistent number of climatology fields - check_climo_n_vx(&conf, n_vx); + check_climo_n_vx(fdict, n_vx); + check_climo_n_vx(odict, n_vx); // Parse settings for each verification task for(i=0; i " + << "Disabling FHO output that is not compatible with grid weighting. " + << "Set \"grid_weight_flag = NONE\" to write FHO output.\n\n"; + + // Disable FHO output + for(i=0; iset_default_regrid(regrid_info); + obs_info->set_default_regrid(regrid_info); + // Set the VarInfo objects fcst_info->set_dict(fdict); obs_info->set_dict(odict); @@ -913,9 +939,6 @@ void GridStatVxOpt::process_config( // Conf: rank_corr_flag rank_corr_flag = odict.lookup_bool(conf_key_rank_corr_flag); - // Conf: threshold for SEEPS p1 - seeps_p1_thresh = odict.lookup_thresh(conf_key_seeps_p1_thresh); - // Conf: nc_pairs_flag parse_nc_info(odict); @@ -1027,20 +1050,22 @@ void GridStatVxOpt::set_perc_thresh(const PairDataPoint &pd) { // // Sort the input arrays // - NumArray fsort = pd.f_na; - NumArray osort = pd.o_na; - NumArray csort = pd.cmn_na; - fsort.sort_array(); - osort.sort_array(); - csort.sort_array(); + NumArray f_sort = pd.f_na; + NumArray o_sort = pd.o_na; + NumArray fcmn_sort = pd.fcmn_na; + NumArray ocmn_sort = pd.ocmn_na; + f_sort.sort_array(); + o_sort.sort_array(); + fcmn_sort.sort_array(); + ocmn_sort.sort_array(); // // Compute percentiles // - fcat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); - ocat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); - fcnt_ta.set_perc(&fsort, &osort, &csort, &fcnt_ta, &ocnt_ta); - ocnt_ta.set_perc(&fsort, &osort, &csort, &fcnt_ta, &ocnt_ta); + fcat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); + ocat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); + fcnt_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcnt_ta, &ocnt_ta); + ocnt_ta.set_perc(&f_sort, &o_sort, 
&fcmn_sort, &ocmn_sort, &fcnt_ta, &ocnt_ta); return; } @@ -1076,15 +1101,15 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { // Switch on the index of the line type switch(i_txt_row) { - case(i_fho): - case(i_ctc): + case i_fho: + case i_ctc: // Number of FHO or CTC lines = // Masks * Smoothing Methods * Thresholds n = (prob_flag ? 0 : get_n_mask() * get_n_interp() * get_n_cat_thresh()); break; - case(i_cts): + case i_cts: // Number of CTS lines = // Masks * Smoothing Methods * Thresholds * Alphas n = (prob_flag ? 0: @@ -1092,21 +1117,21 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { get_n_ci_alpha()); break; - case(i_mctc): + case i_mctc: // Number of MCTC lines = // Masks * Smoothing Methods n = (prob_flag ? 0 : get_n_mask() * get_n_interp()); break; - case(i_mcts): + case i_mcts: // Number of MCTS lines = // Masks * Smoothing Methods * Alphas n = (prob_flag ? 0: get_n_mask() * get_n_interp() * get_n_ci_alpha()); break; - case(i_cnt): + case i_cnt: // Number of CNT lines = // Masks * (Smoothing Methods + Fourier Waves) * // Thresholds * Climo Bins * Alphas @@ -1115,8 +1140,8 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { get_n_cnt_thresh() * n_bin * get_n_ci_alpha()); break; - case(i_sl1l2): - case(i_sal1l2): + case i_sl1l2: + case i_sal1l2: // Number of SL1L2 or SAL1L2 lines = // Masks * (Smoothing Methods + Fourier Waves) * // Thresholds * Climo Bins @@ -1125,8 +1150,8 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { get_n_cnt_thresh() * n_bin); break; - case(i_vl1l2): - case(i_val1l2): + case i_vl1l2: + case i_val1l2: // Number of VL1L2 or VAL1L2 lines = // Masks * (Smoothing Methods + Fourier Waves) * Thresholds n = (!vect_flag ? 
0 : @@ -1134,7 +1159,7 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { get_n_wind_thresh()); break; - case(i_vcnt): + case i_vcnt: // Number of VCNT lines = // Masks * (Smoothing Methods + Fourier Waves) * Thresholds * // Alphas @@ -1143,7 +1168,7 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { get_n_wind_thresh() * get_n_ci_alpha()); break; - case(i_nbrctc): + case i_nbrctc: // Number of NBRCTC lines = // Masks * Thresholds * Neighborhoods * Frac Thresholds n = (prob_flag ? 0 : @@ -1151,7 +1176,7 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { get_n_cov_thresh()); break; - case(i_nbrcts): + case i_nbrcts: // Number of NBRCTS lines = // Masks * Thresholds * Neighborhoods * Frac Thresholds * // Alphas @@ -1160,7 +1185,7 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { get_n_cov_thresh() * get_n_ci_alpha()); break; - case(i_nbrcnt): + case i_nbrcnt: // Number of NBRCNT lines = // Masks * Thresholds * Neighborhoods * Alphas n = (prob_flag ? 0 : @@ -1168,9 +1193,9 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { get_n_ci_alpha()); break; - case(i_pct): - case(i_pjc): - case(i_prc): + case i_pct: + case i_pjc: + case i_prc: // Number of PCT, PJC, or PRC lines = // Masks * Smoothing Methods * Thresholds * Climo Bins n = (!prob_flag ? 0 : @@ -1178,7 +1203,7 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { n_bin); break; - case(i_pstd): + case i_pstd: // Number of PSTD lines = // Masks * Smoothing Methods * Thresholds * Alphas * // Climo Bins @@ -1187,7 +1212,7 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { get_n_ci_alpha() * n_bin); break; - case(i_eclv): + case i_eclv: // Number of CTC -> ECLV lines = // Masks * Smoothing Methods * Thresholds n = (prob_flag ? 0 : @@ -1202,21 +1227,21 @@ int GridStatVxOpt::n_txt_row(int i_txt_row) const { get_n_fprob_thresh() * n_bin); break; - case(i_grad): + case i_grad: // Number of GRAD lines = // Masks * Smoothing Methods * Gradient Sizes n = (prob_flag ? 
0 : get_n_mask() * get_n_interp() * get_n_grad()); break; - case(i_dmap): + case i_dmap: // Number of DMAP lines = // Masks * Smoothing Methods * Thresholds n = (prob_flag ? 0 : get_n_mask() * get_n_interp() * get_n_cat_thresh()); break; - case(i_seeps): + case i_seeps: n = (prob_flag ? 0 : get_n_mask() * get_n_interp()); break; diff --git a/src/tools/core/grid_stat/grid_stat_conf_info.h b/src/tools/core/grid_stat/grid_stat_conf_info.h index ca529a27e1..c3e72ee3a4 100644 --- a/src/tools/core/grid_stat/grid_stat_conf_info.h +++ b/src/tools/core/grid_stat/grid_stat_conf_info.h @@ -162,8 +162,6 @@ class GridStatVxOpt { ThreshArray owind_ta; // obs wind speed thresholds SetLogic wind_logic; // wind speed field logic - SingleThresh seeps_p1_thresh; // SEESP p1 threshold - StringArray mask_grid; // Masking grid strings StringArray mask_poly; // Masking polyline strings @@ -236,15 +234,15 @@ class GridStatVxOpt { //////////////////////////////////////////////////////////////////////// -inline int GridStatVxOpt::get_n_mask() const { return(mask_name.n_elements()); } -inline int GridStatVxOpt::get_n_interp() const { return(interp_info.n_interp); } -inline int GridStatVxOpt::get_n_eclv_points() const { return(eclv_points.n_elements()); } -inline int GridStatVxOpt::get_n_cdf_bin() const { return(cdf_info.n_bin); } -inline int GridStatVxOpt::get_n_nbrhd_wdth() const { return(nbrhd_info.width.n_elements()); } -inline int GridStatVxOpt::get_n_cov_thresh() const { return(nbrhd_info.cov_ta.n_elements()); } -inline int GridStatVxOpt::get_n_wave_1d() const { return(wave_1d_beg.n_elements()); } -inline int GridStatVxOpt::get_n_grad() const { return(grad_dx.n_elements()); } -inline int GridStatVxOpt::get_n_ci_alpha() const { return(ci_alpha.n_elements()); } +inline int GridStatVxOpt::get_n_mask() const { return mask_name.n_elements(); } +inline int GridStatVxOpt::get_n_interp() const { return interp_info.n_interp; } +inline int GridStatVxOpt::get_n_eclv_points() const { return 
eclv_points.n_elements(); } +inline int GridStatVxOpt::get_n_cdf_bin() const { return cdf_info.n_bin; } +inline int GridStatVxOpt::get_n_nbrhd_wdth() const { return nbrhd_info.width.n_elements(); } +inline int GridStatVxOpt::get_n_cov_thresh() const { return nbrhd_info.cov_ta.n_elements(); } +inline int GridStatVxOpt::get_n_wave_1d() const { return wave_1d_beg.n_elements(); } +inline int GridStatVxOpt::get_n_grad() const { return grad_dx.n_elements(); } +inline int GridStatVxOpt::get_n_ci_alpha() const { return ci_alpha.n_elements(); } //////////////////////////////////////////////////////////////////////// @@ -283,6 +281,10 @@ class GridStatConfInfo { ConcatString tmp_dir; // Directory for temporary files ConcatString output_prefix; // String to customize output file name ConcatString version; // Config file version + + ConcatString seeps_climo_name; // SEEPS climo filename + SingleThresh seeps_p1_thresh; // SEEPS p1 threshold + #ifdef WITH_UGRID bool ignore_ugrid_dataset; ConcatString ugrid_nc; // NetCDF for coordinate variables of unstructured grid @@ -331,10 +333,10 @@ class GridStatConfInfo { //////////////////////////////////////////////////////////////////////// -inline int GridStatConfInfo::get_n_vx() const { return(n_vx); } -inline int GridStatConfInfo::get_compression_level() { return(conf.nc_compression()); } -inline bool GridStatConfInfo::get_output_ascii_flag() const { return(output_ascii_flag); } -inline bool GridStatConfInfo::get_output_nc_flag() const { return(output_nc_flag); } +inline int GridStatConfInfo::get_n_vx() const { return n_vx; } +inline int GridStatConfInfo::get_compression_level() { return conf.nc_compression(); } +inline bool GridStatConfInfo::get_output_ascii_flag() const { return output_ascii_flag; } +inline bool GridStatConfInfo::get_output_nc_flag() const { return output_nc_flag; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/core/mode/Makefile.in b/src/tools/core/mode/Makefile.in
index 10f09a3200..55999c4871 100644 --- a/src/tools/core/mode/Makefile.in +++ b/src/tools/core/mode/Makefile.in @@ -243,6 +243,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/core/mode/mode_exec.cc b/src/tools/core/mode/mode_exec.cc index 578c92acb7..db43b97ebe 100644 --- a/src/tools/core/mode/mode_exec.cc +++ b/src/tools/core/mode/mode_exec.cc @@ -292,7 +292,8 @@ void ModeExecutive::setup_traditional_fcst_obs_data() if ( !(fcst_mtddf->grid() == grid) ) { mlog << Debug(1) << "Regridding forecast " << engine.conf_info.Fcst->var_info->magic_str() - << " to the verification grid.\n"; + << " to the verification grid using " + << engine.conf_info.Fcst->var_info->regrid().get_str() << ".\n"; Fcst_sd.data = met_regrid(Fcst_sd.data, fcst_mtddf->grid(), grid, engine.conf_info.Fcst->var_info->regrid()); } @@ -302,7 +303,8 @@ void ModeExecutive::setup_traditional_fcst_obs_data() if ( !(obs_mtddf->grid() == grid) ) { mlog << Debug(1) << "Regridding observation " << engine.conf_info.Obs->var_info->magic_str() - << " to the verification grid.\n"; + << " to the verification grid using " + << engine.conf_info.Obs->var_info->regrid().get_str() << ".\n"; Obs_sd.data = met_regrid(Obs_sd.data, obs_mtddf->grid(), grid, engine.conf_info.Obs->var_info->regrid()); } @@ -454,7 +456,8 @@ void ModeExecutive::setup_multivar_fcst_data(const Grid &verification_grid, if ( !(input._grid == grid) ) { mlog << Debug(1) << "Regridding forecast " << engine.conf_info.Fcst->var_info->magic_str() - << " to the verification grid.\n"; + << " to the verification grid using " + << engine.conf_info.Fcst->var_info->regrid().get_str() << ".\n"; Fcst_sd.data = met_regrid(Fcst_sd.data, input._grid, grid, engine.conf_info.Fcst->var_info->regrid()); } @@ -520,7 +523,8 @@ void 
ModeExecutive::setup_multivar_obs_data(const Grid &verification_grid, if ( !(input._grid == grid) ) { mlog << Debug(1) << "Regridding observation " << engine.conf_info.Obs->var_info->magic_str() - << " to the verification grid.\n"; + << " to the verification grid using " + << engine.conf_info.Obs->var_info->regrid().get_str() << ".\n"; Obs_sd.data = met_regrid(Obs_sd.data, input._grid, grid, engine.conf_info.Obs->var_info->regrid()); } @@ -2374,7 +2378,8 @@ void ModeExecutive::write_poly_netcdf(NcFile *f_out, ObjPolyType poly_type) // // Store the points for each polyline // - for(i=0, n_pts=0; i #include #include +#include #include @@ -135,6 +137,7 @@ static int i_out_var = 0; static int n_out_var; static MetConfig config; static VarInfo * var_info = (VarInfo *) nullptr; +static double input_thresh = 1.0; static double vld_thresh = 1.0; static int compress_level = -1; @@ -148,6 +151,7 @@ static ConcatString pcp_reg_exp = (string)default_reg_exp; // Variables for the derive command static StringArray file_list; +static GrdFileType file_list_type = FileType_None; static StringArray field_list; static StringArray derive_list; @@ -171,10 +175,11 @@ static void sum_data_files(Grid &, DataPlane &); static int search_pcp_dir(const char *, const unixtime, ConcatString &); -static void get_field(const char * filename, const char * cur_field, +static bool get_field(const char * filename, const char * cur_field, const unixtime get_init_ut, const unixtime get_valid_ut, - Grid & grid, DataPlane & plane); + Grid & grid, DataPlane & plane, + bool error_out); static void open_nc(const Grid &); static void write_nc_data(unixtime, unixtime, int, const DataPlane &, @@ -194,13 +199,13 @@ static void set_pcpdir(const StringArray &); static void set_pcprx(const StringArray &); static void set_field(const StringArray & a); static void set_name(const StringArray & a); +static void set_input_thresh(const StringArray & a); static void set_vld_thresh(const StringArray & a); static void 
set_compress(const StringArray &); //////////////////////////////////////////////////////////////////////// int met_main(int argc, char *argv[]) { - int i, j; program_name = get_short_name(argv[0]); @@ -212,7 +217,7 @@ int met_main(int argc, char *argv[]) { // // Process each requested field // - for(i=0; i " << "the output accumulation time (" << sec_to_hhmmss(out_accum) - << ") cannot be greater than the lead time (" + << ") can't be greater than the lead time (" << sec_to_hhmmss(lead_time) << ").\n\n"; exit(1); } @@ -535,7 +544,7 @@ void do_sum_command() { } // - // Check that the lead time is divisible by the the input. + // Check that the lead time is divisible by the input // accumulation time except when init_time = 0 for observations. // if(lead_time%in_accum != 0 && init_time != (unixtime) 0) { @@ -563,22 +572,18 @@ void do_sum_command() { //////////////////////////////////////////////////////////////////////// void sum_data_files(Grid & grid, DataPlane & plane) { - int i, j, x, y; + int n_vld = 0; DataPlane part; double v_sum, v_part; Grid cur_grid; - unixtime * pcp_times = (unixtime *) nullptr; - int * pcp_recs = (int *) nullptr; - ConcatString * pcp_files = (ConcatString *) nullptr; + vector pcp_times; + vector pcp_recs; + vector pcp_files; // - // Compute the number of forecast precipitation files to be found, - // and allocate memory to store their names and times. + // Compute the number of forecast precipitation files to be found. // - n_files = out_accum/in_accum; - pcp_times = new unixtime [n_files]; - pcp_recs = new int [n_files]; - pcp_files = new ConcatString [n_files]; + n_files = out_accum/in_accum; mlog << Debug(2) << "Searching for " << n_files << " files " @@ -586,25 +591,22 @@ void sum_data_files(Grid & grid, DataPlane & plane) { << " to sum to a total accumulation time of " << sec_to_hhmmss(out_accum) << ".\n"; - // - // Compute the valid times for the precipitation files - // to be found. 
- // - for(i=0; i " - << "cannot find a file with a valid time of " + if(pcp_recs[i] != -1) { + n_vld++; + } + else { + mlog << Warning << "\nsum_data_files() -> " + << "can't find a file with a valid time of " << unix_to_yyyymmdd_hhmmss(pcp_times[i]) << " and accumulation time of " << sec_to_hhmmss(in_accum) << " matching the regular " << "expression \"" << pcp_reg_exp << "\"\n\n"; - exit(1); } } // end for i + // Check for enough valid input files. + if((double) n_vld/n_files < input_thresh) { + mlog << Error << "\nsum_data_files() -> " + << n_vld << " of " << n_files << " (" << (double) n_vld/n_files + << ") valid inputs does not meet the required input threshold (" + << input_thresh << ").\n\n"; + exit(1); + } + // // Open each of the files found and parse the data. // - for(i=0; i " - << "cannot open search directory: " << cur_dir << "\n\n"; + << "can't open search directory: " << cur_dir << "\n\n"; exit(1); } // // Initialize the record index to not found. // - i_rec = -1; + int i_rec = -1; // // Process each file contained in the directory. @@ -807,8 +818,11 @@ int search_pcp_dir(const char *cur_dir, const unixtime cur_ut, if(mtddf) { delete mtddf; mtddf = (Met2dDataFile *) nullptr; } if(cur_var) { delete cur_var; cur_var = (VarInfo *) nullptr; } - // check for a valid match - if( -1 != i_rec ) { met_closedir(dp); break; } + // Check for a valid match + if(i_rec != -1) { + met_closedir(dp); + break; + } } // end if @@ -824,11 +838,9 @@ int search_pcp_dir(const char *cur_dir, const unixtime cur_ut, void do_sub_command() { DataPlane plus, minus, diff; Grid grid1, grid2; - unixtime nc_init_time, nc_valid_time; - int i, nxy, nc_accum; // - // Check for exactly two input files + // Check for exactly two input files. // if(n_files != 2) { mlog << Error << "\ndo_sub_command() -> " @@ -838,18 +850,21 @@ void do_sub_command() { } // - // Read the two specified data files + // Read the two specified data files. 
+ // Error out for any problems reading the requested data. // mlog << Debug(1) << "Reading input file: " << file_list[0] << "\n"; - get_field(file_list[0].c_str(), field_list[0].c_str(), 0, 0, grid1, plus); + get_field(file_list[0].c_str(), field_list[0].c_str(), + 0, 0, grid1, plus, true); mlog << Debug(1) << "Reading input file: " << file_list[1] << "\n"; - get_field(file_list[1].c_str(), field_list[1].c_str(), 0, 0, grid2, minus); + get_field(file_list[1].c_str(), field_list[1].c_str(), + 0, 0, grid2, minus, true); // - // Check for the same grid dimensions + // Check for the same grid dimensions. // if(grid1 != grid2) { mlog << Error << "\ndo_sub_command() -> " @@ -867,10 +882,10 @@ void do_sub_command() { // // Output valid time // - nc_valid_time = plus.valid(); + unixtime nc_valid_time = plus.valid(); // - // Check that the initialization times match + // Check that the initialization times match. // if(plus.init() != minus.init()) { @@ -890,7 +905,7 @@ void do_sub_command() { mlog << Debug(3) << cs << "\n"; } } - nc_init_time = plus.init(); + unixtime nc_init_time = plus.init(); // // Output accumulation time @@ -902,7 +917,7 @@ void do_sub_command() { << "second (" << sec_to_hhmmss(plus.accum()) << " < " << sec_to_hhmmss(minus.accum()) << ") for subtraction.\n\n"; } - nc_accum = plus.accum() - minus.accum(); + int nc_accum = plus.accum() - minus.accum(); // // Initialize. @@ -912,7 +927,8 @@ void do_sub_command() { // // Update value for each grid point. // - for(i=0, nxy=grid1.nx()*grid1.ny(); i " @@ -1052,7 +1068,7 @@ void do_derive_command() { // // Update sums and counts. // - for(j=0; j " + << n_vld << " of " << n_files << " (" << (double) n_vld/n_files + << ") valid inputs does not meet the required input threshold (" + << input_thresh << ").\n\n"; + exit(1); } // - // Compute the valid data mask. + // Compute the valid data mask, relative the number of valid inputs. 
// mask.set_size(grid.nx(), grid.ny()); - for(j=0, n=0; j= vld_thresh; - if(!mask.data()[j]) n++; + int n_skip = 0; + for(int j=0; j= vld_thresh; + if(!mask.data()[j]) n_skip++; } mlog << Debug(2) - << "Skipping " << n << " of " << nxy << " grid points which " + << "Skipping " << n_skip << " of " << nxy << " grid points which " << "do not meet the valid data threshold (" << vld_thresh << ").\n"; @@ -1105,7 +1131,7 @@ void do_derive_command() { // // Loop through the derived fields. // - for(i=0; i " - << "can't open data file \"" << filename << "\"\n\n"; - exit(1); - } + if(ftype == FileType_None) ftype = file_list_type; - cur_var = var_fac.new_var_info(mtddf->file_type()); - if(!cur_var) { - mlog << Error << "\nget_field() -> " - << "unable to determine filetype of \"" << filename - << "\"\n\n"; - exit(1); + // + // Check for missing non-python input files. + // + if(!file_exists(filename) && + !is_python_grdfiletype(ftype)) { + log_missing_file(method_name, "input file", filename); + status = false; } // - // Initialize the VarInfo object with a config. + // Open the data file. // - cur_var->set_dict(config); + if(status) { + mtddf = factory.new_met_2d_data_file(filename, ftype); + if(!mtddf) { + mlog << Warning << "\n" << method_name + << "can't open data file \"" << filename << "\"\n\n"; + status = false; + } + } // - // Set the VarInfo timing object + // Build a VarInfo object. // - if(get_valid_ut != 0) cur_var->set_valid(get_valid_ut); - if(get_init_ut != 0) cur_var->set_init(get_init_ut); + if(status) { + cur_var = var_fac.new_var_info(mtddf->file_type()); + if(!cur_var) { + mlog << Warning << "\n" << method_name + << "unable to determine filetype of \"" << filename + << "\"\n\n"; + status = false; + } + } // - // Read the record of interest into a DataPlane object. + // Setup the VarInfo object and read the data. 
// - if(!mtddf->data_plane(*cur_var, plane)) { - mlog << Error << "\nget_field() -> " - << "can't get data plane from file \"" << filename - << "\"\n\n"; - exit(1); - } + if(status) { - grid = mtddf->grid(); + // + // Initialize the VarInfo object with a config. + // + cur_var->set_dict(config); + + // + // Set the VarInfo timing object. + // + if(get_valid_ut != 0) cur_var->set_valid(get_valid_ut); + if(get_init_ut != 0) cur_var->set_init(get_init_ut); + + // + // Read the record of interest into a DataPlane object. + // + if(!mtddf->data_plane(*cur_var, plane)) { + mlog << Warning << "\n" << method_name + << "can't get data plane from file \"" << filename + << "\"\n\n"; + status = false; + } + } // - // Set the global var_info, if needed. + // Store grid and global VarInfo, if needed. // - if(!var_info) { - var_info = var_fac.new_var_info(mtddf->file_type()); - *var_info = *cur_var; + if(status) { + + grid = mtddf->grid(); + + if(!var_info) { + var_info = var_fac.new_var_info(mtddf->file_type()); + *var_info = *cur_var; + } } // @@ -1265,9 +1327,17 @@ void get_field(const char *filename, const char *cur_field, if(mtddf) { delete mtddf; mtddf = (Met2dDataFile *) nullptr; } if(cur_var) { delete cur_var; cur_var = (VarInfo *) nullptr; } - // if ( var ) { delete var; var = nullptr; } + // + // Error out and exit, if requested. + // + if(!status && error_out) { + mlog << Error << "\n" << method_name + << "trouble reading data (" << config_str + << ") from required input file: " << filename << "\n\n"; + exit(1); + } - return; + return status; } @@ -1276,7 +1346,7 @@ void get_field(const char *filename, const char *cur_field, void open_nc(const Grid &grid) { ConcatString command_str; - // List the output file + // List the output file. 
mlog << Debug(1) << "Creating output file: " << out_filename << "\n"; @@ -1402,7 +1472,7 @@ void write_nc_data(unixtime nc_init, unixtime nc_valid, int nc_accum, int deflate_level = compress_level; if(deflate_level < 0) deflate_level = config.nc_compression(); - // Define Variable. + // Define variable. nc_var = add_var(nc_out, var_str.c_str(), ncFloat, lat_dim, lon_dim, deflate_level); @@ -1516,6 +1586,7 @@ void usage() { << "\tout_file\n" << "\t[-field string]\n" << "\t[-name list]\n" + << "\t[-input_thresh n]\n" << "\t[-vld_thresh n]\n" << "\t[-log file]\n" << "\t[-v level]\n" @@ -1546,8 +1617,12 @@ void usage() { << "variable names to be written to the \"out_file\" " << "(optional).\n" + << "\t\t\"-input_thresh\" overrides the default required ratio " + << "of valid input files (" << input_thresh << ") (optional).\n" + << "\t\t\"-vld_thresh\" overrides the default required ratio " - << "of valid data (" << vld_thresh << ") (optional).\n" + << "of valid data at each grid point (" << vld_thresh + << ") (optional).\n" << "\t\t\"-log file\" write log messages to the specified file " << "(optional).\n" @@ -1695,6 +1770,18 @@ void set_name(const StringArray & a) { //////////////////////////////////////////////////////////////////////// +void set_input_thresh(const StringArray & a) { + input_thresh = atof(a[0].c_str()); + if(input_thresh > 1 || input_thresh < 0) { + mlog << Error << "\nset_input_thresh() -> " + << "the \"-input_thresh\" command line option (" << input_thresh + << ") must be set between 0 and 1!\n\n"; + exit(1); + } +} + +//////////////////////////////////////////////////////////////////////// + void set_vld_thresh(const StringArray & a) { vld_thresh = atof(a[0].c_str()); if(vld_thresh > 1 || vld_thresh < 0) { diff --git a/src/tools/core/point_stat/Makefile.in b/src/tools/core/point_stat/Makefile.in index 81735f1efb..4efc7d8ba8 100644 --- a/src/tools/core/point_stat/Makefile.in +++ b/src/tools/core/point_stat/Makefile.in @@ -224,6 +224,7 @@ MET_BUFRLIB 
= @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/core/point_stat/point_stat.cc b/src/tools/core/point_stat/point_stat.cc index bbfb8774d5..735f85107e 100644 --- a/src/tools/core/point_stat/point_stat.cc +++ b/src/tools/core/point_stat/point_stat.cc @@ -100,10 +100,14 @@ // Added code for obs_qty_exc. // 049 12/11/21 Halley Gotway MET #1991 Fix VCNT output. // 050 02/11/22 Halley Gotway MET #2045 Fix HiRA output. -// 051 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main +// 051 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main. // 052 09/29/22 Halley Gotway MET #2286 Refine GRIB1 table lookup logic. // 053 10/03/22 Prestopnik MET #2227 Remove using namespace netCDF from header files. // 054 04/29/24 Halley Gotway MET #2795 Move level mismatch warning. +// 055 07/05/24 Halley Gotway MET #2924 Support forecast climatology. +// 056 10/08/24 Halley Gotway MET #2887 Compute weighted contingency tables. +// 057 10/14/24 Halley Gotway MET #2279 Add point_weight_flag option. +// 058 10/15/24 Halley Gotway MET #2893 Write individual pair OBTYPE. 
// //////////////////////////////////////////////////////////////////////// @@ -201,10 +205,11 @@ int met_main(int argc, char *argv[]) { process_obs_file(i); } - // Calculate and print observation summaries + // Process observation summaries and point weights for(i=0; iregrid(), - &(data_grid), &(data_grid)); + &data_grid, &data_grid); // Process the masks conf_info.process_masks(grid); @@ -464,31 +469,31 @@ void setup_txt_files() { // Get the maximum number of columns for this line type switch(i) { - case(i_mctc): + case i_mctc: max_col = get_n_mctc_columns(n_cat) + n_header_columns + 1; break; - case(i_pct): + case i_pct: max_col = get_n_pct_columns(n_prob) + n_header_columns + 1; break; - case(i_pstd): + case i_pstd: max_col = get_n_pstd_columns(n_prob) + n_header_columns + 1; break; - case(i_pjc): + case i_pjc: max_col = get_n_pjc_columns(n_prob) + n_header_columns + 1; break; - case(i_prc): + case i_prc: max_col = get_n_prc_columns(n_prob) + n_header_columns + 1; break; - case(i_eclv): + case i_eclv: max_col = get_n_eclv_columns(n_eclv) + n_header_columns + 1; break; - case(i_orank): + case i_orank: max_col = get_n_orank_columns(n_ens) + n_header_columns + 1; break; @@ -504,31 +509,31 @@ void setup_txt_files() { // Write the text header row switch(i) { - case(i_mctc): + case i_mctc: write_mctc_header_row(1, n_cat, txt_at[i], 0, 0); break; - case(i_pct): + case i_pct: write_pct_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_pstd): + case i_pstd: write_pstd_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_pjc): + case i_pjc: write_pjc_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_prc): + case i_prc: write_prc_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_eclv): + case i_eclv: write_eclv_header_row(1, n_eclv, txt_at[i], 0, 0); break; - case(i_orank): + case i_orank: write_orank_header_row(1, n_ens, txt_at[i], 0, 0); break; @@ -600,7 +605,9 @@ void build_outfile_name(unixtime valid_ut, int lead_sec, void 
process_fcst_climo_files() { int j; int n_fcst; - DataPlaneArray fcst_dpa, cmn_dpa, csd_dpa; + DataPlaneArray fcst_dpa; + DataPlaneArray fcmn_dpa, fcsd_dpa; + DataPlaneArray ocmn_dpa, ocsd_dpa; unixtime file_ut, beg_ut, end_ut; // Loop through each of the fields to be verified and extract @@ -646,7 +653,8 @@ void process_fcst_climo_files() { mlog << Debug(1) << "Regridding " << fcst_dpa.n_planes() << " forecast field(s) for " << fcst_info->magic_str() - << " to the verification grid.\n"; + << " to the verification grid using " + << fcst_info->regrid().get_str() << ".\n"; // Loop through the forecast fields for(j=0; jmagic_str() << " found " + << "For " << fcst_info->magic_str() << ", found " << n_fcst << " forecast levels, " - << cmn_dpa.n_planes() << " climatology mean levels, and " - << csd_dpa.n_planes() << " climatology standard deviation levels.\n"; + << fcmn_dpa.n_planes() << " forecast climatology mean and " + << fcsd_dpa.n_planes() << " standard deviation level(s), and " + << ocmn_dpa.n_planes() << " observation climatology mean and " + << ocsd_dpa.n_planes() << " standard deviation level(s).\n"; } // end for i @@ -810,7 +838,7 @@ void process_obs_file(int i_nc) { StringArray obs_qty_array = met_point_obs->get_qty_data(); if(use_var_id) var_names = met_point_obs->get_var_names(); - const int buf_size = ((obs_count > BUFFER_SIZE) ? BUFFER_SIZE : (obs_count)); + const int buf_size = (obs_count > BUFFER_SIZE) ? 
BUFFER_SIZE : obs_count; int obs_qty_idx_block[buf_size]; float obs_arr_block[buf_size][OBS_ARRAY_LEN]; @@ -944,7 +972,6 @@ void process_obs_file(int i_nc) { //////////////////////////////////////////////////////////////////////// void process_scores() { - int i, j, k, l, m; int n_cat, n_wind; ConcatString cs; @@ -969,75 +996,77 @@ void process_scores() { vl1l2_info = new VL1L2Info [n_wind]; // Compute scores for each PairData object and write output - for(i=0; iname_attr()); + shc.set_fcst_var(conf_info.vx_opt[i_vx].vx_pd.fcst_info->name_attr()); // Store the forecast variable units - shc.set_fcst_units(conf_info.vx_opt[i].vx_pd.fcst_info->units_attr()); + shc.set_fcst_units(conf_info.vx_opt[i_vx].vx_pd.fcst_info->units_attr()); // Set the forecast level name - shc.set_fcst_lev(conf_info.vx_opt[i].vx_pd.fcst_info->level_attr().c_str()); + shc.set_fcst_lev(conf_info.vx_opt[i_vx].vx_pd.fcst_info->level_attr().c_str()); // Store the observation variable name - shc.set_obs_var(conf_info.vx_opt[i].vx_pd.obs_info->name_attr()); + shc.set_obs_var(conf_info.vx_opt[i_vx].vx_pd.obs_info->name_attr()); // Store the observation variable units - cs = conf_info.vx_opt[i].vx_pd.obs_info->units_attr(); + cs = conf_info.vx_opt[i_vx].vx_pd.obs_info->units_attr(); if(cs.empty()) cs = na_string; shc.set_obs_units(cs); // Set the observation level name - shc.set_obs_lev(conf_info.vx_opt[i].vx_pd.obs_info->level_attr().c_str()); + shc.set_obs_lev(conf_info.vx_opt[i_vx].vx_pd.obs_info->level_attr().c_str()); // Set the forecast lead time - shc.set_fcst_lead_sec(conf_info.vx_opt[i].vx_pd.fcst_dpa[0].lead()); + shc.set_fcst_lead_sec(conf_info.vx_opt[i_vx].vx_pd.fcst_dpa[0].lead()); // Set the forecast valid time - shc.set_fcst_valid_beg(conf_info.vx_opt[i].vx_pd.fcst_dpa[0].valid()); - shc.set_fcst_valid_end(conf_info.vx_opt[i].vx_pd.fcst_dpa[0].valid()); + shc.set_fcst_valid_beg(conf_info.vx_opt[i_vx].vx_pd.fcst_dpa[0].valid()); + 
shc.set_fcst_valid_end(conf_info.vx_opt[i_vx].vx_pd.fcst_dpa[0].valid()); // Set the observation lead time shc.set_obs_lead_sec(0); // Set the observation valid time - shc.set_obs_valid_beg(conf_info.vx_opt[i].vx_pd.beg_ut); - shc.set_obs_valid_end(conf_info.vx_opt[i].vx_pd.end_ut); + shc.set_obs_valid_beg(conf_info.vx_opt[i_vx].vx_pd.beg_ut); + shc.set_obs_valid_end(conf_info.vx_opt[i_vx].vx_pd.end_ut); // Loop through the message types - for(j=0; jmagic_str() + << conf_info.vx_opt[i_vx].vx_pd.fcst_info->magic_str() << " versus " - << conf_info.vx_opt[i].vx_pd.obs_info->magic_str() + << conf_info.vx_opt[i_vx].vx_pd.obs_info->magic_str() << ", for observation type " << pd_ptr->msg_typ << ", over region " << pd_ptr->mask_name << ", for interpolation method " @@ -1048,22 +1077,22 @@ void process_scores() { // List counts for reasons why observations were rejected cs << cs_erase << "Number of matched pairs = " << pd_ptr->n_obs << "\n" - << "Observations processed = " << conf_info.vx_opt[i].vx_pd.n_try << "\n" - << "Rejected: station id = " << conf_info.vx_opt[i].vx_pd.rej_sid << "\n" - << "Rejected: obs var name = " << conf_info.vx_opt[i].vx_pd.rej_var << "\n" - << "Rejected: valid time = " << conf_info.vx_opt[i].vx_pd.rej_vld << "\n" - << "Rejected: bad obs value = " << conf_info.vx_opt[i].vx_pd.rej_obs << "\n" - << "Rejected: off the grid = " << conf_info.vx_opt[i].vx_pd.rej_grd << "\n" - << "Rejected: topography = " << conf_info.vx_opt[i].vx_pd.rej_topo << "\n" - << "Rejected: level mismatch = " << conf_info.vx_opt[i].vx_pd.rej_lvl << "\n" - << "Rejected: quality marker = " << conf_info.vx_opt[i].vx_pd.rej_qty << "\n" - << "Rejected: message type = " << conf_info.vx_opt[i].vx_pd.rej_typ[j][k][l] << "\n" - << "Rejected: masking region = " << conf_info.vx_opt[i].vx_pd.rej_mask[j][k][l] << "\n" - << "Rejected: bad fcst value = " << conf_info.vx_opt[i].vx_pd.rej_fcst[j][k][l] << "\n" - << "Rejected: bad climo mean = " << conf_info.vx_opt[i].vx_pd.rej_cmn[j][k][l] << 
"\n" - << "Rejected: bad climo stdev = " << conf_info.vx_opt[i].vx_pd.rej_csd[j][k][l] << "\n" - << "Rejected: mpr filter = " << conf_info.vx_opt[i].vx_pd.rej_mpr[j][k][l] << "\n" - << "Rejected: duplicates = " << conf_info.vx_opt[i].vx_pd.rej_dup[j][k][l] << "\n"; + << "Observations processed = " << conf_info.vx_opt[i_vx].vx_pd.n_try << "\n" + << "Rejected: station id = " << conf_info.vx_opt[i_vx].vx_pd.rej_sid << "\n" + << "Rejected: obs var name = " << conf_info.vx_opt[i_vx].vx_pd.rej_var << "\n" + << "Rejected: valid time = " << conf_info.vx_opt[i_vx].vx_pd.rej_vld << "\n" + << "Rejected: bad obs value = " << conf_info.vx_opt[i_vx].vx_pd.rej_obs << "\n" + << "Rejected: off the grid = " << conf_info.vx_opt[i_vx].vx_pd.rej_grd << "\n" + << "Rejected: topography = " << conf_info.vx_opt[i_vx].vx_pd.rej_topo << "\n" + << "Rejected: level mismatch = " << conf_info.vx_opt[i_vx].vx_pd.rej_lvl << "\n" + << "Rejected: quality marker = " << conf_info.vx_opt[i_vx].vx_pd.rej_qty << "\n" + << "Rejected: message type = " << conf_info.vx_opt[i_vx].vx_pd.rej_typ[n] << "\n" + << "Rejected: masking region = " << conf_info.vx_opt[i_vx].vx_pd.rej_mask[n] << "\n" + << "Rejected: bad fcst value = " << conf_info.vx_opt[i_vx].vx_pd.rej_fcst[n] << "\n" + << "Rejected: bad climo mean = " << conf_info.vx_opt[i_vx].vx_pd.rej_cmn[n] << "\n" + << "Rejected: bad climo stdev = " << conf_info.vx_opt[i_vx].vx_pd.rej_csd[n] << "\n" + << "Rejected: mpr filter = " << conf_info.vx_opt[i_vx].vx_pd.rej_mpr[n] << "\n" + << "Rejected: duplicates = " << conf_info.vx_opt[i_vx].vx_pd.rej_dup[n] << "\n"; // Print report based on the number of matched pairs if(pd_ptr->n_obs > 0) { @@ -1076,88 +1105,96 @@ void process_scores() { } // Process percentile thresholds - conf_info.vx_opt[i].set_perc_thresh(pd_ptr); + conf_info.vx_opt[i_vx].set_perc_thresh(pd_ptr); // Write out the MPR lines - if(conf_info.vx_opt[i].output_flag[i_mpr] != STATOutputType::None) { + if(conf_info.vx_opt[i_vx].output_flag[i_mpr] != 
STATOutputType::None) { write_mpr_row(shc, pd_ptr, - conf_info.vx_opt[i].output_flag[i_mpr], + conf_info.vx_opt[i_vx].output_flag[i_mpr], stat_at, i_stat_row, - txt_at[i_mpr], i_txt_row[i_mpr]); + txt_at[i_mpr], i_txt_row[i_mpr], + conf_info.obtype_as_group_val_flag); + + // Reset the obtype column + shc.set_obtype(conf_info.vx_opt[i_vx].msg_typ[i_msg_typ].c_str()); // Reset the observation valid time - shc.set_obs_valid_beg(conf_info.vx_opt[i].vx_pd.beg_ut); - shc.set_obs_valid_end(conf_info.vx_opt[i].vx_pd.end_ut); + shc.set_obs_valid_beg(conf_info.vx_opt[i_vx].vx_pd.beg_ut); + shc.set_obs_valid_end(conf_info.vx_opt[i_vx].vx_pd.end_ut); } // Write out the SEEPS MPR lines - if(conf_info.vx_opt[i].output_flag[i_seeps_mpr] != STATOutputType::None) { + if(conf_info.vx_opt[i_vx].output_flag[i_seeps_mpr] != STATOutputType::None) { write_seeps_mpr_row(shc, pd_ptr, - conf_info.vx_opt[i].output_flag[i_seeps_mpr], + conf_info.vx_opt[i_vx].output_flag[i_seeps_mpr], stat_at, i_stat_row, - txt_at[i_seeps_mpr], i_txt_row[i_seeps_mpr]); + txt_at[i_seeps_mpr], i_txt_row[i_seeps_mpr], + conf_info.obtype_as_group_val_flag); + + // Reset the obtype column + shc.set_obtype(conf_info.vx_opt[i_vx].msg_typ[i_msg_typ].c_str()); // Reset the observation valid time - shc.set_obs_valid_beg(conf_info.vx_opt[i].vx_pd.beg_ut); - shc.set_obs_valid_end(conf_info.vx_opt[i].vx_pd.end_ut); + shc.set_obs_valid_beg(conf_info.vx_opt[i_vx].vx_pd.beg_ut); + shc.set_obs_valid_end(conf_info.vx_opt[i_vx].vx_pd.end_ut); } // Write out the SEEPS lines - if(conf_info.vx_opt[i].output_flag[i_seeps] != STATOutputType::None) { - compute_aggregated_seeps(pd_ptr, &pd_ptr->seeps); - write_seeps_row(shc, &pd_ptr->seeps, - conf_info.vx_opt[i].output_flag[i_seeps], + if(conf_info.vx_opt[i_vx].output_flag[i_seeps] != STATOutputType::None) { + compute_aggregated_seeps(pd_ptr, &pd_ptr->seeps_agg); + write_seeps_row(shc, &pd_ptr->seeps_agg, + conf_info.vx_opt[i_vx].output_flag[i_seeps], stat_at, i_stat_row, 
txt_at[i_seeps], i_txt_row[i_seeps]); } // Compute CTS scores - if(!conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - conf_info.vx_opt[i].fcat_ta.n() > 0 && - (conf_info.vx_opt[i].output_flag[i_fho] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_ctc] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_cts] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_eclv] != STATOutputType::None)) { + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + conf_info.vx_opt[i_vx].fcat_ta.n() > 0 && + (conf_info.vx_opt[i_vx].output_flag[i_fho] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_ctc] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_cts] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_eclv] != STATOutputType::None)) { // Initialize - for(m=0; mis_prob() && - conf_info.vx_opt[i].fcat_ta.n() > 1 && - (conf_info.vx_opt[i].output_flag[i_mctc] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_mcts] != STATOutputType::None)) { + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + conf_info.vx_opt[i_vx].fcat_ta.n() > 1 && + (conf_info.vx_opt[i_vx].output_flag[i_mctc] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_mcts] != STATOutputType::None)) { // Initialize mcts_info.clear(); // Compute MCTS Info - do_mcts(mcts_info, i, pd_ptr); + do_mcts(mcts_info, i_vx, pd_ptr); - // Write out MCTC - if(conf_info.vx_opt[i].output_flag[i_mctc] != STATOutputType::None && - mcts_info.cts.total() > 0) { + if(mcts_info.cts.n_pairs() == 0) continue; + // Write out MCTC + if(conf_info.vx_opt[i_vx].output_flag[i_mctc] != STATOutputType::None) { write_mctc_row(shc, mcts_info, - conf_info.vx_opt[i].output_flag[i_mctc], + conf_info.vx_opt[i_vx].output_flag[i_mctc], stat_at, i_stat_row, txt_at[i_mctc], i_txt_row[i_mctc]); } // Write out MCTS - if(conf_info.vx_opt[i].output_flag[i_mcts] != STATOutputType::None && - mcts_info.cts.total() > 0) { - + 
if(conf_info.vx_opt[i_vx].output_flag[i_mcts] != STATOutputType::None) { write_mcts_row(shc, mcts_info, - conf_info.vx_opt[i].output_flag[i_mcts], + conf_info.vx_opt[i_vx].output_flag[i_mcts], stat_at, i_stat_row, txt_at[i_mcts], i_txt_row[i_mcts]); } } // end Compute MCTS scores // Compute CNT, SL1L2, and SAL1L2 scores - if(!conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - (conf_info.vx_opt[i].output_flag[i_cnt] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_sl1l2] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_sal1l2] != STATOutputType::None)) { - do_cnt_sl1l2(conf_info.vx_opt[i], pd_ptr); + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + (conf_info.vx_opt[i_vx].output_flag[i_cnt] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_sl1l2] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_sal1l2] != STATOutputType::None)) { + do_cnt_sl1l2(conf_info.vx_opt[i_vx], pd_ptr); } // Compute VL1L2 and VAL1L2 partial sums for UGRD and VGRD - if(!conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - conf_info.vx_opt[i].vx_pd.fcst_info->is_v_wind() && - conf_info.vx_opt[i].vx_pd.fcst_info->uv_index() >= 0 && - (conf_info.vx_opt[i].output_flag[i_vl1l2] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_val1l2] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_vcnt] != STATOutputType::None)) { + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_v_wind() && + conf_info.vx_opt[i_vx].vx_pd.fcst_info->uv_index() >= 0 && + (conf_info.vx_opt[i_vx].output_flag[i_vl1l2] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_val1l2] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_vcnt] != STATOutputType::None)) { // Store the forecast variable name shc.set_fcst_var(ugrd_vgrd_abbr_str); @@ -1220,19 +1255,19 @@ void process_scores() { shc.set_obs_var(ugrd_vgrd_abbr_str); // Initialize - for(m=0; muv_index(); + 
int u_vx = conf_info.vx_opt[i_vx].vx_pd.fcst_info->uv_index(); // Check to make sure message types, masking regions, // and interpolation methods match - if(conf_info.vx_opt[i].get_n_msg_typ() != - conf_info.vx_opt[ui].get_n_msg_typ() || - conf_info.vx_opt[i].get_n_mask() != - conf_info.vx_opt[ui].get_n_mask() || - conf_info.vx_opt[i].get_n_interp() != - conf_info.vx_opt[ui].get_n_interp()) { + if(conf_info.vx_opt[i_vx].get_n_msg_typ() != + conf_info.vx_opt[u_vx].get_n_msg_typ() || + conf_info.vx_opt[i_vx].get_n_mask() != + conf_info.vx_opt[u_vx].get_n_mask() || + conf_info.vx_opt[i_vx].get_n_interp() != + conf_info.vx_opt[u_vx].get_n_interp()) { mlog << Warning << "\nprocess_scores() -> " << "when computing VL1L2 and/or VAL1L2 vector " << "partial sums, the U and V components must " @@ -1243,106 +1278,110 @@ void process_scores() { } // Compute VL1L2 and VAL1L2 - do_vl1l2(vl1l2_info, i, - &conf_info.vx_opt[ui].vx_pd.pd[j][k][l], - &conf_info.vx_opt[i].vx_pd.pd[j][k][l]); + do_vl1l2(vl1l2_info, i_vx, + &conf_info.vx_opt[u_vx].vx_pd.pd[n], + &conf_info.vx_opt[i_vx].vx_pd.pd[n]); // Loop through all of the wind speed thresholds - for(m=0; m 0) { - write_vl1l2_row(shc, vl1l2_info[m], - conf_info.vx_opt[i].output_flag[i_vl1l2], + if(conf_info.vx_opt[i_vx].output_flag[i_vl1l2] != STATOutputType::None && + vl1l2_info[i_wind].vcount > 0) { + write_vl1l2_row(shc, vl1l2_info[i_wind], + conf_info.vx_opt[i_vx].output_flag[i_vl1l2], stat_at, i_stat_row, txt_at[i_vl1l2], i_txt_row[i_vl1l2]); } // Write out VAL1L2 - if(conf_info.vx_opt[i].output_flag[i_val1l2] != STATOutputType::None && - vl1l2_info[m].vacount > 0) { - write_val1l2_row(shc, vl1l2_info[m], - conf_info.vx_opt[i].output_flag[i_val1l2], + if(conf_info.vx_opt[i_vx].output_flag[i_val1l2] != STATOutputType::None && + vl1l2_info[i_wind].vacount > 0) { + write_val1l2_row(shc, vl1l2_info[i_wind], + conf_info.vx_opt[i_vx].output_flag[i_val1l2], stat_at, i_stat_row, txt_at[i_val1l2], i_txt_row[i_val1l2]); } // Write out 
VCNT - if(conf_info.vx_opt[i].output_flag[i_vcnt] != STATOutputType::None && - vl1l2_info[m].vcount > 0) { - write_vcnt_row(shc, vl1l2_info[m], - conf_info.vx_opt[i].output_flag[i_vcnt], + if(conf_info.vx_opt[i_vx].output_flag[i_vcnt] != STATOutputType::None && + vl1l2_info[i_wind].vcount > 0) { + write_vcnt_row(shc, vl1l2_info[i_wind], + conf_info.vx_opt[i_vx].output_flag[i_vcnt], stat_at, i_stat_row, txt_at[i_vcnt], i_txt_row[i_vcnt]); } - } // end for m + } // end for i // Reset the forecast variable name - shc.set_fcst_var(conf_info.vx_opt[i].vx_pd.fcst_info->name_attr()); + shc.set_fcst_var(conf_info.vx_opt[i_vx].vx_pd.fcst_info->name_attr()); // Reset the observation variable name - shc.set_obs_var(conf_info.vx_opt[i].vx_pd.obs_info->name_attr()); + shc.set_obs_var(conf_info.vx_opt[i_vx].vx_pd.obs_info->name_attr()); } // end Compute VL1L2 and VAL1L2 // Compute PCT counts and scores - if(conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - (conf_info.vx_opt[i].output_flag[i_pct] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_pstd] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_pjc] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_prc] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_eclv] != STATOutputType::None)) { - do_pct(conf_info.vx_opt[i], pd_ptr); + if(conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + (conf_info.vx_opt[i_vx].output_flag[i_pct] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_pstd] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_pjc] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_prc] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_eclv] != STATOutputType::None)) { + do_pct(conf_info.vx_opt[i_vx], pd_ptr); } // Reset the verification masking region - shc.set_mask(conf_info.vx_opt[i].mask_name[k].c_str()); + shc.set_mask(conf_info.vx_opt[i_vx].mask_name[i_mask].c_str()); - } // end for l + } // end for 
i_interp // Apply HiRA ensemble verification logic - if(!conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - conf_info.vx_opt[i].hira_info.flag && - (conf_info.vx_opt[i].output_flag[i_ecnt] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_rps] != STATOutputType::None)) { + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + conf_info.vx_opt[i_vx].hira_info.flag && + (conf_info.vx_opt[i_vx].output_flag[i_ecnt] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_rps] != STATOutputType::None)) { - pd_ptr = &conf_info.vx_opt[i].vx_pd.pd[j][k][0]; + int n = conf_info.vx_opt[i_vx].vx_pd.three_to_one(i_msg_typ, i_mask, 0); + + pd_ptr = &conf_info.vx_opt[i_vx].vx_pd.pd[n]; // Process percentile thresholds - conf_info.vx_opt[i].set_perc_thresh(pd_ptr); + conf_info.vx_opt[i_vx].set_perc_thresh(pd_ptr); // Appy HiRA verification and write ensemble output - do_hira_ens(i, pd_ptr); + do_hira_ens(i_vx, pd_ptr); } // end HiRA for ensembles // Apply HiRA probabilistic verification logic - if(!conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - conf_info.vx_opt[i].hira_info.flag && - (conf_info.vx_opt[i].output_flag[i_mpr] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_pct] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_pstd] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_pjc] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_prc] != STATOutputType::None)) { + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + conf_info.vx_opt[i_vx].hira_info.flag && + (conf_info.vx_opt[i_vx].output_flag[i_mpr] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_pct] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_pstd] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_pjc] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_prc] != STATOutputType::None)) { + + int n = conf_info.vx_opt[i_vx].vx_pd.three_to_one(i_msg_typ, i_mask, 0); - 
pd_ptr = &conf_info.vx_opt[i].vx_pd.pd[j][k][0]; + pd_ptr = &conf_info.vx_opt[i_vx].vx_pd.pd[n]; // Process percentile thresholds - conf_info.vx_opt[i].set_perc_thresh(pd_ptr); + conf_info.vx_opt[i_vx].set_perc_thresh(pd_ptr); // Apply HiRA verification and write probabilistic output - do_hira_prob(i, pd_ptr); + do_hira_prob(i_vx, pd_ptr); } // end HiRA for probabilities - } // end for k - } // end for j + } // end for i_mask + } // end for i_msg_typ - mlog << Debug(2) - << "\n" << sep_str << "\n\n"; - } // end for i + mlog << Debug(2) << "\n" << sep_str << "\n\n"; + + } // end for i_vx // Deallocate memory if(cts_info) { delete [] cts_info; cts_info = (CTSInfo *) nullptr; } @@ -1466,8 +1505,9 @@ void do_cnt_sl1l2(const PointStatVxOpt &vx_opt, const PairDataPoint *pd_ptr) { mlog << Debug(2) << "Computing Scalar Partial Sums and Continuous Statistics.\n"; - // Determine the number of climo CDF bins - n_bin = (pd_ptr->cmn_na.n_valid() > 0 && pd_ptr->csd_na.n_valid() > 0 ? + // Determine the number of observation climo CDF bins + n_bin = (pd_ptr->ocmn_na.n_valid() > 0 && + pd_ptr->ocsd_na.n_valid() > 0 ? vx_opt.get_n_cdf_bin() : 1); if(n_bin > 1) { @@ -1702,8 +1742,9 @@ void do_pct(const PointStatVxOpt &vx_opt, const PairDataPoint *pd_ptr) { mlog << Debug(2) << "Computing Probabilistic Statistics.\n"; - // Determine the number of climo CDF bins - n_bin = (pd_ptr->cmn_na.n_valid() > 0 && pd_ptr->csd_na.n_valid() > 0 ? + // Determine the number of observation climo CDF bins + n_bin = (pd_ptr->ocmn_na.n_valid() > 0 && + pd_ptr->ocsd_na.n_valid() > 0 ? 
vx_opt.get_n_cdf_bin() : 1); if(n_bin > 1) { @@ -1868,18 +1909,26 @@ void do_hira_ens(int i_vx, const PairDataPoint *pd_ptr) { // Check for values if(f_ens.n() == 0) continue; + // TODO: Add has_climo member function instead + // Skip points where climatology has been specified but is bad data - if(conf_info.vx_opt[i_vx].vx_pd.climo_mn_dpa.n_planes() > 0 && - is_bad_data(pd_ptr->cmn_na[j])) continue; + if((conf_info.vx_opt[i_vx].vx_pd.fcmn_dpa.n_planes() > 0 && + is_bad_data(pd_ptr->fcmn_na[j])) || + (conf_info.vx_opt[i_vx].vx_pd.ocmn_dpa.n_planes() > 0 && + is_bad_data(pd_ptr->ocmn_na[j]))) continue; + + // Store climo data + ClimoPntInfo cpi(pd_ptr->fcmn_na[j], pd_ptr->fcsd_na[j], + pd_ptr->ocmn_na[j], pd_ptr->ocsd_na[j]); // Store the observation value - hira_pd.add_point_obs(pd_ptr->sid_sa[j].c_str(), + hira_pd.add_point_obs( + pd_ptr->typ_sa[j].c_str(), pd_ptr->sid_sa[j].c_str(), pd_ptr->lat_na[j], pd_ptr->lon_na[j], pd_ptr->x_na[j], pd_ptr->y_na[j], pd_ptr->vld_ta[j], pd_ptr->lvl_na[j], pd_ptr->elv_na[j], pd_ptr->o_na[j], pd_ptr->o_qc_sa[j].c_str(), - pd_ptr->cmn_na[j], pd_ptr->csd_na[j], - pd_ptr->wgt_na[j]); + cpi, pd_ptr->wgt_na[j]); // Store the ensemble mean and member values hira_pd.mn_na.add(f_ens.mean()); @@ -1929,7 +1978,11 @@ void do_hira_ens(int i_vx, const PairDataPoint *pd_ptr) { write_orank_row(shc, &hira_pd, conf_info.vx_opt[i_vx].output_flag[i_orank], stat_at, i_stat_row, - txt_at[i_orank], i_txt_row[i_orank]); + txt_at[i_orank], i_txt_row[i_orank], + conf_info.obtype_as_group_val_flag); + + // Reset the obtype column + shc.set_obtype(pd_ptr->msg_typ.c_str()); // Reset the observation valid time shc.set_obs_valid_beg(conf_info.vx_opt[i_vx].vx_pd.beg_ut); @@ -1946,9 +1999,9 @@ void do_hira_ens(int i_vx, const PairDataPoint *pd_ptr) { // If prob_cat_thresh is empty, try to select other thresholds if(rps_info.fthresh.n() == 0) { - // Use climo data, if avaiable - if(hira_pd.cmn_na.n_valid() > 0 && - hira_pd.csd_na.n_valid() > 0 && + // Use 
observation climo data, if avaiable + if(hira_pd.ocmn_na.n_valid() > 0 && + hira_pd.ocsd_na.n_valid() > 0 && conf_info.vx_opt[i_vx].cdf_info.cdf_ta.n() > 0) { mlog << Debug(3) << "Resetting the empty HiRA \"" << conf_key_prob_cat_thresh << "\" thresholds to " @@ -1995,8 +2048,8 @@ void do_hira_ens(int i_vx, const PairDataPoint *pd_ptr) { void do_hira_prob(int i_vx, const PairDataPoint *pd_ptr) { PairDataPoint hira_pd; int i, j, k, lvl_blw, lvl_abv; - double f_cov, cmn_cov; - NumArray cmn_cov_na; + double f_cov, ocmn_cov; + NumArray ocmn_cov_na; SingleThresh cat_thresh; PCTInfo pct_info; @@ -2023,20 +2076,23 @@ void do_hira_prob(int i_vx, const PairDataPoint *pd_ptr) { // Initialize hira_pd.clear(); pct_info.clear(); - cmn_cov_na.erase(); + ocmn_cov_na.erase(); // Loop through matched pairs and replace the forecast value // with the HiRA fractional coverage. for(k=0; kn_obs; k++) { + // Store climo data + ClimoPntInfo cpi(pd_ptr->fcmn_na[k], pd_ptr->fcsd_na[k], + pd_ptr->ocmn_na[k], pd_ptr->ocsd_na[k]); + // Compute the fractional coverage forecast value using the // observation level value find_vert_lvl(conf_info.vx_opt[i_vx].vx_pd.fcst_dpa, pd_ptr->lvl_na[k], lvl_blw, lvl_abv); f_cov = compute_interp(conf_info.vx_opt[i_vx].vx_pd.fcst_dpa, - pd_ptr->x_na[k], pd_ptr->y_na[k], pd_ptr->o_na[k], - pd_ptr->cmn_na[k], pd_ptr->csd_na[k], + pd_ptr->x_na[k], pd_ptr->y_na[k], pd_ptr->o_na[k], &cpi, InterpMthd::Nbrhd, conf_info.vx_opt[i_vx].hira_info.width[j], conf_info.vx_opt[i_vx].hira_info.shape, grid.wrap_lon(), conf_info.vx_opt[i_vx].hira_info.vld_thresh, spfh_flag, @@ -2046,35 +2102,36 @@ void do_hira_prob(int i_vx, const PairDataPoint *pd_ptr) { // Check for bad data if(is_bad_data(f_cov)) continue; - // Compute the fractional coverage for the climatological mean - if(conf_info.vx_opt[i_vx].vx_pd.climo_mn_dpa.n_planes() > 0) { + // Compute the climatological event probability as the fractional + // coverage of the observation climatology mean field + 
if(conf_info.vx_opt[i_vx].vx_pd.ocmn_dpa.n_planes() > 0) { // Interpolate to the observation level - find_vert_lvl(conf_info.vx_opt[i_vx].vx_pd.climo_mn_dpa, + find_vert_lvl(conf_info.vx_opt[i_vx].vx_pd.ocmn_dpa, pd_ptr->lvl_na[k], lvl_blw, lvl_abv); - cmn_cov = compute_interp(conf_info.vx_opt[i_vx].vx_pd.climo_mn_dpa, - pd_ptr->x_na[k], pd_ptr->y_na[k], pd_ptr->o_na[k], - pd_ptr->cmn_na[k], pd_ptr->csd_na[k], - InterpMthd::Nbrhd, conf_info.vx_opt[i_vx].hira_info.width[j], - conf_info.vx_opt[i_vx].hira_info.shape, grid.wrap_lon(), - conf_info.vx_opt[i_vx].hira_info.vld_thresh, spfh_flag, - conf_info.vx_opt[i_vx].vx_pd.fcst_info->level().type(), - pd_ptr->lvl_na[k], lvl_blw, lvl_abv, &cat_thresh); + ocmn_cov = compute_interp(conf_info.vx_opt[i_vx].vx_pd.ocmn_dpa, + pd_ptr->x_na[k], pd_ptr->y_na[k], pd_ptr->o_na[k], &cpi, + InterpMthd::Nbrhd, conf_info.vx_opt[i_vx].hira_info.width[j], + conf_info.vx_opt[i_vx].hira_info.shape, grid.wrap_lon(), + conf_info.vx_opt[i_vx].hira_info.vld_thresh, spfh_flag, + conf_info.vx_opt[i_vx].vx_pd.fcst_info->level().type(), + pd_ptr->lvl_na[k], lvl_blw, lvl_abv, &cat_thresh); // Check for bad data - if(is_bad_data(cmn_cov)) continue; - else cmn_cov_na.add(cmn_cov); + if(is_bad_data(ocmn_cov)) continue; + else ocmn_cov_na.add(ocmn_cov); } // Store the fractional coverage pair - hira_pd.add_point_pair(pd_ptr->sid_sa[k].c_str(), + hira_pd.add_point_pair( + pd_ptr->typ_sa[k].c_str(), + pd_ptr->sid_sa[k].c_str(), pd_ptr->lat_na[k], pd_ptr->lon_na[k], pd_ptr->x_na[k], pd_ptr->y_na[k], pd_ptr->vld_ta[k], pd_ptr->lvl_na[k], pd_ptr->elv_na[k], f_cov, pd_ptr->o_na[k], pd_ptr->o_qc_sa[k].c_str(), - pd_ptr->cmn_na[k], pd_ptr->csd_na[k], pd_ptr->wgt_na[k]); - + cpi, pd_ptr->wgt_na[k]); } // end for k mlog << Debug(2) @@ -2103,8 +2160,8 @@ void do_hira_prob(int i_vx, const PairDataPoint *pd_ptr) { } // Compute the probabilistic counts and statistics - compute_pctinfo(hira_pd, (STATOutputType::None!=conf_info.vx_opt[i_vx].output_flag[i_pstd]), - 
pct_info, &cmn_cov_na); + bool pstd_flag = conf_info.vx_opt[i_vx].output_flag[i_pstd] != STATOutputType::None; + compute_pctinfo(hira_pd, pstd_flag, pct_info, &ocmn_cov_na); // Set the contents of the output threshold columns shc.set_fcst_thresh (conf_info.vx_opt[i_vx].fcat_ta[i]); @@ -2117,7 +2174,12 @@ void do_hira_prob(int i_vx, const PairDataPoint *pd_ptr) { write_mpr_row(shc, &hira_pd, conf_info.vx_opt[i_vx].output_flag[i_mpr], stat_at, i_stat_row, - txt_at[i_mpr], i_txt_row[i_mpr], false); + txt_at[i_mpr], i_txt_row[i_mpr], + conf_info.obtype_as_group_val_flag, + false); + + // Reset the obtype column + shc.set_obtype(pd_ptr->msg_typ.c_str()); // Reset the observation valid time shc.set_obs_valid_beg(conf_info.vx_opt[i_vx].vx_pd.beg_ut); diff --git a/src/tools/core/point_stat/point_stat.h b/src/tools/core/point_stat/point_stat.h index 0804650b0a..894a79559b 100644 --- a/src/tools/core/point_stat/point_stat.h +++ b/src/tools/core/point_stat/point_stat.h @@ -15,11 +15,8 @@ // Mod# Date Name Description // ---- ---- ---- ----------- // 000 11/11/08 Halley Gotway New -// 001 09/28/22 Prestopnik MET #2227 Remove namespace std and netCDF from header files +// 001 09/28/22 Prestopnik MET #2227 Remove namespace std and netCDF from header files. 
// -// -//////////////////////////////////////////////////////////////////////// - //////////////////////////////////////////////////////////////////////// #ifndef __POINT_STAT_H__ @@ -61,7 +58,7 @@ static const char * default_config_filename = "MET_BASE/config/PointStatConfig_default"; // Header columns -static const char **txt_columns[n_txt] = { +static const char * const * txt_columns[n_txt] = { fho_columns, ctc_columns, cts_columns, mctc_columns, mcts_columns, cnt_columns, sl1l2_columns, sal1l2_columns, vl1l2_columns, @@ -85,7 +82,7 @@ static const int n_txt_columns[n_txt] = { }; // Text file abbreviations -static const char *txt_file_abbr[n_txt] = { +static const char * const txt_file_abbr[n_txt] = { "fho", "ctc", "cts", "mctc", "mcts", "cnt", "sl1l2", "sal1l2", "vl1l2", diff --git a/src/tools/core/point_stat/point_stat_conf_info.cc b/src/tools/core/point_stat/point_stat_conf_info.cc index 1a416fbd5a..cfd5e10432 100644 --- a/src/tools/core/point_stat/point_stat_conf_info.cc +++ b/src/tools/core/point_stat/point_stat_conf_info.cc @@ -69,8 +69,10 @@ void PointStatConfInfo::clear() { topo_use_obs_thresh.clear(); topo_interp_fcst_thresh.clear(); msg_typ_group_map.clear(); + obtype_as_group_val_flag = false; mask_area_map.clear(); mask_sid_map.clear(); + point_weight_flag = PointWeightType::None; tmp_dir.clear(); output_prefix.clear(); version.clear(); @@ -80,6 +82,9 @@ void PointStatConfInfo::clear() { ugrid_map_config.clear(); ugrid_max_distance_km = bad_data_double; #endif + seeps_climo_name.clear(); + seeps_p1_thresh.clear(); + // Deallocate memory if(vx_opt) { delete [] vx_opt; vx_opt = (PointStatVxOpt *) nullptr; } @@ -148,6 +153,9 @@ void PointStatConfInfo::process_config(GrdFileType ftype) { // Conf: model model = parse_conf_string(&conf, conf_key_model); + // Conf: point_weight_flag + point_weight_flag = parse_conf_point_weight_flag(&conf); + // Conf: tmp_dir tmp_dir = parse_conf_tmp_dir(&conf); @@ -171,6 +179,10 @@ void 
PointStatConfInfo::process_config(GrdFileType ftype) { // Conf: message_type_group_map msg_typ_group_map = parse_conf_message_type_group_map(&conf); + // Conf: obtype_as_group_val_flag + obtype_as_group_val_flag = + conf.lookup_bool(conf_key_obtype_as_group_val_flag); + // Conf: fcst.field and obs.field fdict = conf.lookup_array(conf_key_fcst_field); odict = conf.lookup_array(conf_key_obs_field); @@ -194,7 +206,14 @@ void PointStatConfInfo::process_config(GrdFileType ftype) { vx_opt = new PointStatVxOpt [n_vx]; // Check for consistent number of climatology fields - check_climo_n_vx(&conf, n_vx); + check_climo_n_vx(fdict, n_vx); + check_climo_n_vx(odict, n_vx); + + // Conf: threshold for SEEPS p1 + seeps_p1_thresh = conf.lookup_thresh(conf_key_seeps_p1_thresh); + + // Conf: SEEPS climo filename + seeps_climo_name = conf.lookup_string(conf_key_seeps_point_climo_name, false); // Parse settings for each verification task for(i=0; iset_dict(fdict); @@ -999,9 +1015,6 @@ void PointStatVxOpt::process_config(GrdFileType ftype, // Conf: rank_corr_flag rank_corr_flag = odict.lookup_bool(conf_key_rank_corr_flag); - // Conf: threshold for SEEPS p1 - seeps_p1_thresh = odict.lookup_thresh(conf_key_seeps_p1_thresh); - // Conf: message_type msg_typ = parse_conf_message_type(&odict); @@ -1073,7 +1086,7 @@ void PointStatVxOpt::set_vx_pd(PointStatConfInfo *conf_info) { } // Define the dimensions - vx_pd.set_pd_size(n_msg_typ, n_mask, n_interp); + vx_pd.set_size(n_msg_typ, n_mask, n_interp); // Store the MPR filter threshold vx_pd.set_mpr_thresh(mpr_sa, mpr_ta); @@ -1161,8 +1174,8 @@ void PointStatVxOpt::set_vx_pd(PointStatConfInfo *conf_info) { vx_pd.set_obs_perc_value(obs_perc); if (output_flag[i_seeps_mpr] != STATOutputType::None || output_flag[i_seeps] != STATOutputType::None) { - vx_pd.load_seeps_climo(); - vx_pd.set_seeps_thresh(seeps_p1_thresh); + vx_pd.load_seeps_climo(conf_info->seeps_climo_name); + vx_pd.set_seeps_thresh(conf_info->seeps_p1_thresh); } return; } @@ -1181,20 
+1194,22 @@ void PointStatVxOpt::set_perc_thresh(const PairDataPoint *pd_ptr) { // // Sort the input arrays // - NumArray fsort = pd_ptr->f_na; - NumArray osort = pd_ptr->o_na; - NumArray csort = pd_ptr->cmn_na; - fsort.sort_array(); - osort.sort_array(); - csort.sort_array(); + NumArray f_sort = pd_ptr->f_na; + NumArray o_sort = pd_ptr->o_na; + NumArray fcmn_sort = pd_ptr->fcmn_na; + NumArray ocmn_sort = pd_ptr->ocmn_na; + f_sort.sort_array(); + o_sort.sort_array(); + fcmn_sort.sort_array(); + ocmn_sort.sort_array(); // // Compute percentiles // - fcat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); - ocat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); - fcnt_ta.set_perc(&fsort, &osort, &csort, &fcnt_ta, &ocnt_ta); - ocnt_ta.set_perc(&fsort, &osort, &csort, &fcnt_ta, &ocnt_ta); + fcat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); + ocat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); + fcnt_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcnt_ta, &ocnt_ta); + ocnt_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcnt_ta, &ocnt_ta); return; } @@ -1234,14 +1249,14 @@ int PointStatVxOpt::n_txt_row(int i_txt_row) const { // Switch on the index of the line type switch(i_txt_row) { - case(i_fho): - case(i_ctc): + case i_fho: + case i_ctc: // Number of FHO or CTC lines = // Message Types * Masks * Interpolations * Thresholds n = (prob_flag ? 0 : n_pd * get_n_cat_thresh()); break; - case(i_cts): + case i_cts: // Number of CTS lines = // Message Types * Masks * Interpolations * Thresholds * // Alphas @@ -1249,19 +1264,19 @@ int PointStatVxOpt::n_txt_row(int i_txt_row) const { get_n_ci_alpha()); break; - case(i_mctc): + case i_mctc: // Number of MCTC lines = // Message Types * Masks * Interpolations n = (prob_flag ? 0 : n_pd); break; - case(i_mcts): + case i_mcts: // Number of MCTS lines = // Message Types * Masks * Interpolations * Alphas n = (prob_flag ? 
0 : n_pd * get_n_ci_alpha()); break; - case(i_cnt): + case i_cnt: // Number of CNT lines = // Message Types * Masks * Interpolations * Thresholds * // Climo Bins * Alphas @@ -1269,23 +1284,23 @@ int PointStatVxOpt::n_txt_row(int i_txt_row) const { get_n_ci_alpha()); break; - case(i_sl1l2): - case(i_sal1l2): + case i_sl1l2: + case i_sal1l2: // Number of SL1L2 and SAL1L2 lines = // Message Types * Masks * Interpolations * Thresholds * // Climo Bins n = (prob_flag ? 0 : n_pd * get_n_cnt_thresh() * n_bin); break; - case(i_vl1l2): - case(i_val1l2): + case i_vl1l2: + case i_val1l2: // Number of VL1L2 or VAL1L2 lines = // Message Types * Masks * Interpolations * Thresholds n = (!vect_flag ? 0 : n_pd * get_n_wind_thresh()); break; - case(i_vcnt): + case i_vcnt: // Number of VCNT lines = // Message Types * Masks * Interpolations * Thresholds * // Alphas @@ -1293,9 +1308,9 @@ int PointStatVxOpt::n_txt_row(int i_txt_row) const { get_n_wind_thresh() * get_n_ci_alpha()); break; - case(i_pct): - case(i_pjc): - case(i_prc): + case i_pct: + case i_pjc: + case i_prc: // Number of PCT, PJC, or PRC lines possible = // Message Types * Masks * Interpolations * Thresholds * // Climo Bins @@ -1310,7 +1325,7 @@ int PointStatVxOpt::n_txt_row(int i_txt_row) const { break; - case(i_pstd): + case i_pstd: // Number of PSTD lines = // Message Types * Masks * Interpolations * Thresholds * // Alphas * Climo Bins @@ -1328,8 +1343,8 @@ int PointStatVxOpt::n_txt_row(int i_txt_row) const { break; - case(i_ecnt): - case(i_rps): + case i_ecnt: + case i_rps: // Number of HiRA ECNT and RPS lines = // Message Types * Masks * HiRA widths * // Alphas @@ -1342,7 +1357,7 @@ int PointStatVxOpt::n_txt_row(int i_txt_row) const { break; - case(i_orank): + case i_orank: // Number of HiRA ORANK lines possible = // Number of pairs * Categorical Thresholds * // HiRA widths @@ -1356,7 +1371,7 @@ int PointStatVxOpt::n_txt_row(int i_txt_row) const { break; - case(i_eclv): + case i_eclv: // Number of CTC -> ECLV lines = 
// Message Types * Masks * Interpolations * Thresholds * // Climo Bins @@ -1371,7 +1386,7 @@ int PointStatVxOpt::n_txt_row(int i_txt_row) const { break; - case(i_mpr): + case i_mpr: // Compute the number of matched pairs to be written n = vx_pd.get_n_pair(); @@ -1386,13 +1401,13 @@ int PointStatVxOpt::n_txt_row(int i_txt_row) const { break; - case(i_seeps_mpr): + case i_seeps_mpr: // Compute the number of matched pairs to be written n = vx_pd.get_n_pair(); break; - case(i_seeps): + case i_seeps: // Compute the number of matched pairs to be written n = vx_pd.get_n_pair(); diff --git a/src/tools/core/point_stat/point_stat_conf_info.h b/src/tools/core/point_stat/point_stat_conf_info.h index befde20349..9db3081dd7 100644 --- a/src/tools/core/point_stat/point_stat_conf_info.h +++ b/src/tools/core/point_stat/point_stat_conf_info.h @@ -131,7 +131,6 @@ class PointStatVxOpt { StringArray mpr_sa; // MPR column names ThreshArray mpr_ta; // MPR column thresholds - SingleThresh seeps_p1_thresh; // SEESP p1 threshold // Vector of MaskLatLon objects defining Lat/Lon Point masks std::vector mask_llpnt; @@ -193,13 +192,13 @@ class PointStatVxOpt { //////////////////////////////////////////////////////////////////////// -inline int PointStatVxOpt::get_n_msg_typ() const { return(msg_typ.n()); } -inline int PointStatVxOpt::get_n_mask() const { return(mask_name.n()); } -inline int PointStatVxOpt::get_n_interp() const { return(interp_info.n_interp); } +inline int PointStatVxOpt::get_n_msg_typ() const { return msg_typ.n(); } +inline int PointStatVxOpt::get_n_mask() const { return mask_name.n(); } +inline int PointStatVxOpt::get_n_interp() const { return interp_info.n_interp; } -inline int PointStatVxOpt::get_n_eclv_points() const { return(eclv_points.n()); } -inline int PointStatVxOpt::get_n_cdf_bin() const { return(cdf_info.n_bin); } -inline int PointStatVxOpt::get_n_ci_alpha() const { return(ci_alpha.n()); } +inline int PointStatVxOpt::get_n_eclv_points() const { return 
eclv_points.n(); } +inline int PointStatVxOpt::get_n_cdf_bin() const { return cdf_info.n_bin; } +inline int PointStatVxOpt::get_n_ci_alpha() const { return ci_alpha.n(); } //////////////////////////////////////////////////////////////////////// @@ -236,16 +235,23 @@ class PointStatConfInfo { // Message type groups that should be processed together std::map msg_typ_group_map; + bool obtype_as_group_val_flag; // Mapping of mask names to DataPlanes std::map mask_area_map; // Mapping of mask names to Station ID lists - std::map mask_sid_map; + std::map mask_sid_map; + + PointWeightType point_weight_flag; // Point weighting flag ConcatString tmp_dir; // Directory for temporary files ConcatString output_prefix; // String to customize output file name ConcatString version; // Config file version + + ConcatString seeps_climo_name; // SEESP climo filename + SingleThresh seeps_p1_thresh; // SEESP p1 threshold + #ifdef WITH_UGRID bool ignore_ugrid_dataset; ConcatString ugrid_nc; // NetCDF for coordinate variables of unstructured grid diff --git a/src/tools/core/series_analysis/Makefile.in b/src/tools/core/series_analysis/Makefile.in index 2e26c49701..fd4915a5a5 100644 --- a/src/tools/core/series_analysis/Makefile.in +++ b/src/tools/core/series_analysis/Makefile.in @@ -226,6 +226,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/core/series_analysis/series_analysis.cc b/src/tools/core/series_analysis/series_analysis.cc index ebbb43e27a..e5c3a62fe8 100644 --- a/src/tools/core/series_analysis/series_analysis.cc +++ b/src/tools/core/series_analysis/series_analysis.cc @@ -32,13 +32,14 @@ // 011 05/28/21 Halley Gotway Add MCTS HSS_EC output. // 012 01/20/22 Halley Gotway MET #2003 Add PSTD BRIERCL output. // 013 05/25/22 Halley Gotway MET #2147 Add CTS HSS_EC output. 
-// 014 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main -// 015 10/03/22 Presotpnik MET #2227 Remove namespace netCDF from header files -// 016 01/29/24 Halley Gotway MET #2801 Configure time difference warnings +// 014 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main. +// 015 10/03/22 Presotpnik MET #2227 Remove namespace netCDF from header files. +// 016 01/29/24 Halley Gotway MET #2801 Configure time difference warnings. +// 017 07/05/24 Halley Gotway MET #2924 Support forecast climatology. +// 018 07/26/24 Halley Gotway MET #1371 Aggregate previous output. // //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -64,7 +65,6 @@ using namespace std; using namespace netCDF; - //////////////////////////////////////////////////////////////////////// static void process_command_line(int, char **); @@ -83,31 +83,69 @@ static void get_series_entry(int, VarInfo *, const StringArray &, static bool read_single_entry(VarInfo *, const ConcatString &, const GrdFileType, DataPlane &, Grid &); +static void open_aggr_file(); +static DataPlane read_aggr_data_plane(const ConcatString &, + const char *suggestion=nullptr); + static void process_scores(); -static void do_cts (int, const PairDataPoint *); -static void do_mcts (int, const PairDataPoint *); -static void do_cnt (int, const PairDataPoint *); -static void do_sl1l2 (int, const PairDataPoint *); -static void do_pct (int, const PairDataPoint *); - -static void store_stat_fho (int, const ConcatString &, const CTSInfo &); -static void store_stat_ctc (int, const ConcatString &, const CTSInfo &); -static void store_stat_cts (int, const ConcatString &, const CTSInfo &); -static void store_stat_mctc (int, const ConcatString &, const MCTSInfo &); -static void store_stat_mcts (int, const ConcatString &, const MCTSInfo &); -static void store_stat_cnt (int, const ConcatString &, const CNTInfo &); -static void store_stat_sl1l2(int, const ConcatString &, 
const SL1L2Info &); -static void store_stat_pct (int, const ConcatString &, const PCTInfo &); -static void store_stat_pstd (int, const ConcatString &, const PCTInfo &); -static void store_stat_pjc (int, const ConcatString &, const PCTInfo &); -static void store_stat_prc (int, const ConcatString &, const PCTInfo &); +static void do_categorical (int, const PairDataPoint *); +static void do_multicategory (int, const PairDataPoint *); +static void do_continuous (int, const PairDataPoint *); +static void do_partialsums (int, const PairDataPoint *); +static void do_probabilistic (int, const PairDataPoint *); +static void do_climo_brier (int, double, int, PCTInfo &); + +static int read_aggr_total (int); +static void read_aggr_ctc (int, const CTSInfo &, CTSInfo &); +static void read_aggr_mctc (int, const MCTSInfo &, MCTSInfo &); +static void read_aggr_sl1l2 (int, const SL1L2Info &, SL1L2Info &); +static void read_aggr_sal1l2 (int, const SL1L2Info &, SL1L2Info &); +static void read_aggr_pct (int, const PCTInfo &, PCTInfo &); + +static void store_stat_categorical(int, + STATLineType, const ConcatString &, + const CTSInfo &); +static void store_stat_multicategory(int, + STATLineType, const ConcatString &, + const MCTSInfo &); +static void store_stat_partialsums(int, + STATLineType, const ConcatString &, + const SL1L2Info &); +static void store_stat_continuous(int, + STATLineType, const ConcatString &, + const CNTInfo &); +static void store_stat_probabilistic(int, + STATLineType, const ConcatString &, + const PCTInfo &); + +static void store_stat_all_ctc (int, const CTSInfo &); +static void store_stat_all_mctc (int, const MCTSInfo &); +static void store_stat_all_sl1l2 (int, const SL1L2Info &); +static void store_stat_all_sal1l2(int, const SL1L2Info &); +static void store_stat_all_pct (int, const PCTInfo &); + +static ConcatString build_nc_var_name_categorical( + STATLineType, const ConcatString &, + const CTSInfo &, double); +static ConcatString 
build_nc_var_name_multicategory( + STATLineType, const ConcatString &, + double); +static ConcatString build_nc_var_name_partialsums( + STATLineType, const ConcatString &, + const SL1L2Info &); +static ConcatString build_nc_var_name_continuous( + STATLineType, const ConcatString &, + const CNTInfo &, double); +static ConcatString build_nc_var_name_probabilistic( + STATLineType, const ConcatString &, + const PCTInfo &, double); static void setup_nc_file(const VarInfo *, const VarInfo *); -static void add_nc_var(const ConcatString &, const ConcatString &, - const ConcatString &, const ConcatString &, - const ConcatString &, double); -static void put_nc_val(int, const ConcatString &, float); +static void add_stat_data(const ConcatString &, const ConcatString &, + const ConcatString &, const ConcatString &, + const ConcatString &, double); +static void write_stat_data(); static void set_range(const unixtime &, unixtime &, unixtime &); static void set_range(const int &, int &, int &); @@ -118,6 +156,7 @@ static void usage(); static void set_fcst_files(const StringArray &); static void set_obs_files(const StringArray &); static void set_both_files(const StringArray &); +static void set_aggr(const StringArray &); static void set_paired(const StringArray &); static void set_out_file(const StringArray &); static void set_config_file(const StringArray &); @@ -164,12 +203,13 @@ void process_command_line(int argc, char **argv) { cline.set_usage(usage); // Add the options function calls - cline.add(set_fcst_files, "-fcst", -1); - cline.add(set_obs_files, "-obs", -1); - cline.add(set_both_files, "-both", -1); - cline.add(set_paired, "-paired", 0); - cline.add(set_config_file, "-config", 1); - cline.add(set_out_file, "-out", 1); + cline.add(set_fcst_files, "-fcst", -1); + cline.add(set_obs_files, "-obs", -1); + cline.add(set_both_files, "-both", -1); + cline.add(set_aggr, "-aggr", 1); + cline.add(set_paired, "-paired", 0); + cline.add(set_config_file, "-config", 1); + 
cline.add(set_out_file, "-out", 1); cline.add(set_compress, "-compress", 1); // Parse the command line @@ -178,36 +218,43 @@ void process_command_line(int argc, char **argv) { // Check for error. There should be zero arguments left. if(cline.n() != 0) usage(); - // Warn about log output + // Recommend logging verbosity level of 3 or less if(mlog.verbosity_level() >= 3) { - mlog << Warning << "\nRunning Series-Analysis at verbosity >= 3 " + mlog << Debug(3) << "Running Series-Analysis at verbosity >= 3 " << "produces excessive log output and can slow the runtime " - << "considerably.\n\n"; + << "considerably.\n"; } // Check that the required arguments have been set. if(fcst_files.n() == 0) { mlog << Error << "\nprocess_command_line() -> " << "the forecast file list must be set using the " - << "\"-fcst\" or \"-both\" option.\n\n"; + << R"("-fcst" or "-both" option.)" << "\n\n"; usage(); } if(obs_files.n() == 0) { mlog << Error << "\nprocess_command_line() -> " << "the observation file list must be set using the " - << "\"-obs\" or \"-both\" option.\n\n"; + << R"("-obs" or "-both" option.)" << "\n\n"; usage(); } if(config_file.length() == 0) { mlog << Error << "\nprocess_command_line() -> " << "the configuration file must be set using the " - << "\"-config\" option.\n\n"; + << R"("-config" option.)" << "\n\n"; usage(); } if(out_file.length() == 0) { mlog << Error << "\nprocess_command_line() -> " << "the output NetCDF file must be set using the " - << "\"-out\" option.\n\n"; + << R"("-out" option.)" << "\n\n"; + usage(); + } + if(aggr_file == out_file) { + mlog << Error << "\nprocess_command_line() -> " + << R"(the "-out" and "-aggr" options cannot be )" + << R"(set to the same file (")" << aggr_file + << R"(")!)" << "\n\n"; usage(); } @@ -247,9 +294,9 @@ void process_command_line(int argc, char **argv) { // List the lengths of the series options mlog << Debug(1) - << "Length of configuration \"fcst.field\" = " + << R"(Length of configuration "fcst.field" = )" << 
conf_info.get_n_fcst() << "\n" - << "Length of configuration \"obs.field\" = " + << R"(Length of configuration "obs.field" = )" << conf_info.get_n_obs() << "\n" << "Length of forecast file list = " << fcst_files.n() << "\n" @@ -264,38 +311,38 @@ void process_command_line(int argc, char **argv) { // - Observation file list if(conf_info.get_n_fcst() > 1) { series_type = SeriesType::Fcst_Conf; - n_series = conf_info.get_n_fcst(); + n_series_pair = conf_info.get_n_fcst(); mlog << Debug(1) - << "Series defined by the \"fcst.field\" configuration entry " - << "of length " << n_series << ".\n"; + << R"(Series defined by the "fcst.field" configuration entry )" + << "of length " << n_series_pair << ".\n"; } else if(conf_info.get_n_obs() > 1) { series_type = SeriesType::Obs_Conf; - n_series = conf_info.get_n_obs(); + n_series_pair = conf_info.get_n_obs(); mlog << Debug(1) - << "Series defined by the \"obs.field\" configuration entry " - << "of length " << n_series << ".\n"; + << R"(Series defined by the "obs.field" configuration entry )" + << "of length " << n_series_pair << ".\n"; } else if(fcst_files.n() > 1) { series_type = SeriesType::Fcst_Files; - n_series = fcst_files.n(); + n_series_pair = fcst_files.n(); mlog << Debug(1) << "Series defined by the forecast file list of length " - << n_series << ".\n"; + << n_series_pair << ".\n"; } else if(obs_files.n() > 1) { series_type = SeriesType::Obs_Files; - n_series = obs_files.n(); + n_series_pair = obs_files.n(); mlog << Debug(1) << "Series defined by the observation file list of length " - << n_series << ".\n"; + << n_series_pair << ".\n"; } else { series_type = SeriesType::Fcst_Conf; - n_series = 1; + n_series_pair = 1; mlog << Debug(1) - << "The \"fcst.field\" and \"obs.field\" configuration entries " - << "and the \"-fcst\" and \"-obs\" command line options " + << R"(The "fcst.field" and "obs.field" configuration entries )" + << R"(and the "-fcst" and "-obs" command line options )" << "all have length one.\n"; } @@ 
-305,7 +352,7 @@ void process_command_line(int argc, char **argv) { // The number of forecast and observation files must match. if(fcst_files.n() != obs_files.n()) { mlog << Error << "\nprocess_command_line() -> " - << "when using the \"-paired\" command line option, the " + << R"(when using the "-paired" command line option, the )" << "number of forecast (" << fcst_files.n() << ") and observation (" << obs_files.n() << ") files must match.\n\n"; @@ -313,24 +360,24 @@ void process_command_line(int argc, char **argv) { } // The number of files must match the series length. - if(fcst_files.n() != n_series) { + if(fcst_files.n() != n_series_pair) { mlog << Error << "\nprocess_command_line() -> " - << "when using the \"-paired\" command line option, the " - << "the file list length (" << fcst_files.n() - << ") and series length (" << n_series + << R"(when using the "-paired" command line option, the )" + << "file list length (" << fcst_files.n() + << ") and series length (" << n_series_pair << ") must match.\n\n"; usage(); } // Set the series file names to the input file lists - for(i=0; iregrid(), &fcst_grid, &obs_grid); - nxy = grid.nx() * grid.ny(); // Process masking regions conf_info.process_masks(grid); // Set the block size, if needed - if(is_bad_data(conf_info.block_size)) conf_info.block_size = nxy; + if(is_bad_data(conf_info.block_size)) { + conf_info.block_size = grid.nxy(); + } // Compute the number of reads required - n_reads = nint(ceil((double) nxy / conf_info.block_size)); + n_reads = nint(ceil((double) grid.nxy() / conf_info.block_size)); mlog << Debug(2) << "Computing statistics using a block size of " @@ -369,7 +417,7 @@ void process_grid(const Grid &fcst_grid, const Grid &obs_grid) { << "\nA block size of " << conf_info.block_size << " for a " << grid.nx() << " x " << grid.ny() << " grid requires " << n_reads << " passes through the data which will be slow.\n" - << "Consider increasing \"block_size\" in the configuration " + << R"(Consider 
increasing "block_size" in the configuration )" << "file based on available memory.\n\n"; } @@ -396,8 +444,8 @@ Met2dDataFile *get_mtddf(const StringArray &file_list, // Read first valid file if(!(mtddf = mtddf_factory.new_met_2d_data_file(file_list[i].c_str(), type))) { - mlog << Error << "\nTrouble reading data file \"" - << file_list[i] << "\"\n\n"; + mlog << Error << "\nTrouble reading data file: " + << file_list[i] << "\n\n"; exit(1); } @@ -419,7 +467,7 @@ void get_series_data(int i_series, mlog << Debug(2) << "Processing series entry " << i_series + 1 << " of " - << n_series << ": " << fcst_info->magic_str() + << n_series_pair << ": " << fcst_info->magic_str() << " versus " << obs_info->magic_str() << "\n"; // Switch on the series type @@ -497,7 +545,7 @@ void get_series_data(int i_series, } // Setup the verification grid - if(nxy == 0) process_grid(fcst_grid, obs_grid); + if(!grid.is_set()) process_grid(fcst_grid, obs_grid); // Regrid the forecast, if necessary if(!(fcst_grid == grid)) { @@ -508,13 +556,14 @@ void get_series_data(int i_series, << "disabled:\n" << fcst_grid.serialize() << " !=\n" << grid.serialize() << "\nSpecify regridding logic in the config file " - << "\"regrid\" section.\n\n"; + << R"("regrid" section.)" << "\n\n"; exit(1); } - mlog << Debug(1) - << "Regridding field " << fcst_info->magic_str() - << " to the verification grid.\n"; + mlog << Debug(2) + << "Regridding forecast " << fcst_info->magic_str() + << " to the verification grid using " + << fcst_info->regrid().get_str() << ".\n"; fcst_dp = met_regrid(fcst_dp, fcst_grid, grid, fcst_info->regrid()); } @@ -528,13 +577,14 @@ void get_series_data(int i_series, << "disabled:\n" << obs_grid.serialize() << " !=\n" << grid.serialize() << "\nSpecify regridding logic in the config file " - << "\"regrid\" section.\n\n"; + << R"("regrid" section.)" << "\n\n"; exit(1); } - mlog << Debug(1) - << "Regridding field " << obs_info->magic_str() - << " to the verification grid.\n"; + mlog << Debug(2) + 
<< "Regridding observation " << obs_info->magic_str() + << " to the verification grid using " + << obs_info->regrid().get_str() << ".\n"; obs_dp = met_regrid(obs_dp, obs_grid, grid, obs_info->regrid()); } @@ -562,7 +612,8 @@ void get_series_data(int i_series, << cs << "\n\n"; } else { - mlog << Debug(3) << cs << "\n"; + mlog << Debug(3) + << cs << "\n"; } } @@ -576,7 +627,6 @@ void get_series_entry(int i_series, VarInfo *info, const GrdFileType type, StringArray &found_files, DataPlane &dp, Grid &cur_grid) { - int i, j; bool found = false; // Initialize @@ -586,10 +636,10 @@ void get_series_entry(int i_series, VarInfo *info, if(found_files[i_series].length() == 0) { // Loop through the file list - for(i=0; i " << "Could not find data for " << info->magic_str() << " in file list:\n"; - for(i=0; i " + << "unable to open the aggregate NetCDF file: " + << aggr_file << "\n\n"; + exit(1); + } + + // Update timing info based on aggregate file global attributes + ConcatString cs; + + if(get_att_value_string(aggr_nc.MetNc->Nc, "fcst_init_beg", cs)) { + set_range(timestring_to_unix(cs.c_str()), fcst_init_beg, fcst_init_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "fcst_init_end", cs)) { + set_range(timestring_to_unix(cs.c_str()), fcst_init_beg, fcst_init_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "fcst_valid_beg", cs)) { + set_range(timestring_to_unix(cs.c_str()), fcst_valid_beg, fcst_valid_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "fcst_valid_end", cs)) { + set_range(timestring_to_unix(cs.c_str()), fcst_valid_beg, fcst_valid_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "fcst_lead_beg", cs)) { + set_range(timestring_to_sec(cs.c_str()), fcst_lead_beg, fcst_lead_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "fcst_lead_end", cs)) { + set_range(timestring_to_sec(cs.c_str()), fcst_lead_beg, fcst_lead_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "obs_init_beg", cs)) { + set_range(timestring_to_unix(cs.c_str()), 
obs_init_beg, obs_init_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "obs_init_end", cs)) { + set_range(timestring_to_unix(cs.c_str()), obs_init_beg, obs_init_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "obs_valid_beg", cs)) { + set_range(timestring_to_unix(cs.c_str()), obs_valid_beg, obs_valid_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "obs_valid_end", cs)) { + set_range(timestring_to_unix(cs.c_str()), obs_valid_beg, obs_valid_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "obs_lead_beg", cs)) { + set_range(timestring_to_sec(cs.c_str()), obs_lead_beg, obs_lead_end); + } + if(get_att_value_string(aggr_nc.MetNc->Nc, "obs_lead_end", cs)) { + set_range(timestring_to_sec(cs.c_str()), obs_lead_beg, obs_lead_end); + } + + // Store the aggregate series length + n_series_aggr = get_int_var(aggr_nc.MetNc->Nc, n_series_var_name, 0); + + mlog << Debug(3) + << "Aggregation series has length " << n_series_aggr << ".\n"; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +DataPlane read_aggr_data_plane(const ConcatString &var_name, + const char *suggestion) { + DataPlane aggr_dp; + + // Setup the data request + VarInfoNcMet aggr_info; + aggr_info.set_magic(var_name, "(*,*)"); + + mlog << Debug(2) + << R"(Reading aggregation ")" + << aggr_info.magic_str() + << R"(" field.)" << "\n"; + + // Attempt to read the gridded data from the current file + if(!aggr_nc.data_plane(aggr_info, aggr_dp)) { + mlog << Error << "\nread_aggr_data_plane() -> " + << R"(Required variable ")" << aggr_info.magic_str() + << R"(" not found in the aggregate file!)" << "\n\n"; + if(suggestion) { + mlog << Error + << R"(Recommend recreating ")" << aggr_file + << R"(" to request that )" << suggestion + << " column(s) be written.\n\n"; + } + exit(1); + } + + // Check that the grid has not changed + if(aggr_nc.grid().nx() != grid.nx() || + aggr_nc.grid().ny() != grid.ny()) { + mlog << Error << "\nread_aggr_data_plane() -> " + << 
"the input grid dimensions (" << grid.nx() << ", " << grid.ny() + << ") and aggregate grid dimensions (" << aggr_nc.grid().nx() + << ", " << aggr_nc.grid().ny() << ") do not match!\n\n"; + exit(1); + } + + return aggr_dp; +} + +//////////////////////////////////////////////////////////////////////// + void process_scores() { - int i, x, y, i_read, i_series, i_point, i_fcst; + int x; + int y; + int i_point = 0; VarInfo *fcst_info = (VarInfo *) nullptr; VarInfo *obs_info = (VarInfo *) nullptr; - PairDataPoint *pd_ptr = (PairDataPoint *) nullptr; - DataPlane fcst_dp, obs_dp; + DataPlane fcst_dp; + DataPlane obs_dp; + vector pd_block; const char *method_name = "process_scores() "; // Climatology mean and standard deviation - DataPlane cmn_dp, csd_dp; - bool cmn_flag, csd_flag; + DataPlane fcmn_dp, fcsd_dp; + DataPlane ocmn_dp, ocsd_dp; + + // Open the aggregate file, if needed + if(aggr_file.nonempty()) open_aggr_file(); // Number of points skipped due to valid data threshold - int n_skip_zero = 0; - int n_skip_pos = 0; + int n_skip_zero_vld = 0; + int n_skip_some_vld = 0; // Loop over the data reads - for(i_read=0; i_read 1 ? i_series : 0); + // Get the index for the VarInfo objects + int i_fcst = (conf_info.get_n_fcst() > 1 ? i_series : 0); + int i_obs = (conf_info.get_n_obs() > 1 ? i_series : 0); // Store the current VarInfo objects - fcst_info = conf_info.fcst_info[i_fcst]; + fcst_info = (conf_info.get_n_fcst() > 1 ? + conf_info.fcst_info[i_series] : + conf_info.fcst_info[0]); obs_info = (conf_info.get_n_obs() > 1 ? conf_info.obs_info[i_series] : conf_info.obs_info[0]); @@ -713,18 +876,20 @@ void process_scores() { // Retrieve the data planes for the current series entry get_series_data(i_series, fcst_info, obs_info, fcst_dp, obs_dp); - // Allocate PairDataPoint objects, if needed - if(!pd_ptr) { - pd_ptr = new PairDataPoint [conf_info.block_size]; - for(i=0; imagic_str() << ".\n"; + << "For " << fcst_info->magic_str() << ", found " + << (fcmn_flag ? 
1 : 0) << " forecast climatology mean and " + << (fcsd_flag ? 1 : 0) << " standard deviation field(s), and " + << (ocmn_flag ? 1 : 0) << " observation climatology mean and " + << (ocsd_flag ? 1 : 0) << " standard deviation field(s).\n"; // Setup the output NetCDF file on the first pass - if(nc_out == (NcFile *) 0) setup_nc_file(fcst_info, obs_info); + if(!nc_out) setup_nc_file(fcst_info, obs_info); // Update timing info set_range(fcst_dp.init(), fcst_init_beg, fcst_init_end); @@ -765,49 +949,53 @@ void process_scores() { set_range(obs_dp.lead(), obs_lead_beg, obs_lead_end); // Store matched pairs for each grid point - for(i=0; i 0) { - do_cts(i_point+i, &pd_ptr[i]); + do_categorical(i_point+i, &pd_block[i]); } // Compute multi-category contingency table counts and statistics if(!conf_info.fcst_info[0]->is_prob() && (conf_info.output_stats[STATLineType::mctc].n() + conf_info.output_stats[STATLineType::mcts].n()) > 0) { - do_mcts(i_point+i, &pd_ptr[i]); + do_multicategory(i_point+i, &pd_block[i]); } // Compute continuous statistics if(!conf_info.fcst_info[0]->is_prob() && conf_info.output_stats[STATLineType::cnt].n() > 0) { - do_cnt(i_point+i, &pd_ptr[i]); + do_continuous(i_point+i, &pd_block[i]); } // Compute partial sums if(!conf_info.fcst_info[0]->is_prob() && - (conf_info.output_stats[STATLineType::sl1l2].n() > 0 || - conf_info.output_stats[STATLineType::sal1l2].n() > 0)) { - do_sl1l2(i_point+i, &pd_ptr[i]); + (conf_info.output_stats[STATLineType::sl1l2].n() + + conf_info.output_stats[STATLineType::sal1l2].n()) > 0) { + do_partialsums(i_point+i, &pd_block[i]); } // Compute probabilistics counts and statistics @@ -852,20 +1040,16 @@ void process_scores() { conf_info.output_stats[STATLineType::pstd].n() + conf_info.output_stats[STATLineType::pjc].n() + conf_info.output_stats[STATLineType::prc].n()) > 0) { - do_pct(i_point+i, &pd_ptr[i]); + do_probabilistic(i_point+i, &pd_block[i]); } - } // end for i - // Erase the data - for(i=0; i 0 && conf_info.vld_data_thresh 
== 1.0) { + if(n_skip_some_vld > 0 && conf_info.vld_data_thresh == 1.0) { mlog << Debug(2) << "Some points skipped due to missing data:\n" - << "Consider decreasing \"vld_thresh\" in the config file " + << R"(Consider decreasing "vld_thresh" in the config file )" << "to include more points.\n" - << "Consider requesting \"TOTAL\" from \"output_stats\" " + << R"(Consider requesting "TOTAL" from "output_stats" )" << "in the config file to see the valid data counts.\n"; } @@ -908,30 +1089,56 @@ void process_scores() { //////////////////////////////////////////////////////////////////////// -void do_cts(int n, const PairDataPoint *pd_ptr) { - int i, j; +void do_categorical(int n, const PairDataPoint *pd_ptr) { - mlog << Debug(4) << "Computing Categorical Statistics.\n"; + mlog << Debug(4) + << "Computing Categorical Statistics.\n"; // Allocate objects to store categorical statistics int n_cts = conf_info.fcat_ta.n(); CTSInfo *cts_info = new CTSInfo [n_cts]; // Setup CTSInfo objects - for(i=0; in_obs-1); + + // Loop over the thresholds + for(int i=0; in_obs-1); + + // Compute the current MCTSInfo + compute_mctsinfo(*pd_ptr, i_na, false, false, mcts_info); + + // Read the MCTC data to be aggregated + MCTSInfo aggr_mcts; + read_aggr_mctc(n, mcts_info, aggr_mcts); + + // Aggregate MCTC counts + mcts_info.cts += aggr_mcts.cts; + + // Compute statistics and confidence intervals + mcts_info.compute_stats(); + mcts_info.compute_ci(); + + } // Compute the counts, stats, normal confidence intervals, and // bootstrap confidence intervals - if(conf_info.boot_interval == BootIntervalType::BCA) { + else if(conf_info.boot_interval == BootIntervalType::BCA) { compute_mcts_stats_ci_bca(rng_ptr, *pd_ptr, conf_info.n_boot_rep, mcts_info, true, @@ -1009,15 +1241,17 @@ void do_mcts(int n, const PairDataPoint *pd_ptr) { } // Add statistic value for each possible MCTC column - for(i=0; isubset_pairs_cnt_thresh(cnt_info.fthresh, cnt_info.othresh, - cnt_info.logic); - - // Check for no matched 
pairs to process - if(pd.n_obs == 0) continue; - - // Compute the stats, normal confidence intervals, and - // bootstrap confidence intervals - int precip_flag = (conf_info.fcst_info[0]->is_precipitation() && - conf_info.obs_info[0]->is_precipitation()); + // Aggregate input pair data with existing partial sums + if(aggr_file.nonempty()) { + + // Compute partial sums from the pair data + SL1L2Info s_info; + s_info.fthresh = cnt_info.fthresh; + s_info.othresh = cnt_info.othresh; + s_info.logic = cnt_info.logic; + s_info.set(*pd_ptr); + + // Aggregate scalar partial sums + SL1L2Info aggr_psum; + read_aggr_sl1l2(n, s_info, aggr_psum); + s_info += aggr_psum; + + // Aggregate scalar anomaly partial sums + if(conf_info.output_stats[STATLineType::cnt].has("ANOM_CORR")) { + read_aggr_sal1l2(n, s_info, aggr_psum); + s_info += aggr_psum; + } - if(conf_info.boot_interval == BootIntervalType::BCA) { - compute_cnt_stats_ci_bca(rng_ptr, pd, - precip_flag, conf_info.rank_corr_flag, - conf_info.n_boot_rep, - cnt_info, conf_info.tmp_dir.c_str()); + // Compute continuous statistics from partial sums + compute_cntinfo(s_info, cnt_info); } + // Compute continuous statistics from the pair data else { - compute_cnt_stats_ci_perc(rng_ptr, pd, - precip_flag, conf_info.rank_corr_flag, - conf_info.n_boot_rep, conf_info.boot_rep_prop, - cnt_info, conf_info.tmp_dir.c_str()); + + // Apply continuous filtering thresholds to subset pairs + pd = pd_ptr->subset_pairs_cnt_thresh(cnt_info.fthresh, cnt_info.othresh, + cnt_info.logic); + + // Check for no matched pairs to process + if(pd.n_obs == 0) continue; + + // Compute the stats, normal confidence intervals, and + // bootstrap confidence intervals + int precip_flag = (conf_info.fcst_info[0]->is_precipitation() && + conf_info.obs_info[0]->is_precipitation()); + + if(conf_info.boot_interval == BootIntervalType::BCA) { + compute_cnt_stats_ci_bca(rng_ptr, pd, + precip_flag, conf_info.rank_corr_flag, + conf_info.n_boot_rep, + cnt_info, 
conf_info.tmp_dir.c_str()); + } + else { + compute_cnt_stats_ci_perc(rng_ptr, pd, + precip_flag, conf_info.rank_corr_flag, + conf_info.n_boot_rep, conf_info.boot_rep_prop, + cnt_info, conf_info.tmp_dir.c_str()); + } } // Add statistic value for each possible CNT column - for(j=0; j 0) { + read_aggr_sl1l2(n, s_info, aggr_psum); + s_info += aggr_psum; + } + + // Aggregate SAL1L2 partial sums + if(conf_info.output_stats[STATLineType::sal1l2].n() > 0) { + read_aggr_sal1l2(n, s_info, aggr_psum); + s_info += aggr_psum; + } + } + // Add statistic value for each possible SL1L2 column - for(j=0; jNc, &aggr_var_names); - // Setup the PCTInfo object - pct_info.fthresh = conf_info.fcat_ta; - pct_info.allocate_n_alpha(conf_info.ci_alpha.n()); + // Search for one containing TOTAL + for(int i=0; i " + << R"(No variable containing ")" << total_name + << R"(" "found in the aggregate file!)" << "\n\n"; + exit(1); + } } - // Compute PCTInfo for each observation threshold - for(i=0; i " + << "the number of MCTC categories do not match (" + << nint(v) << " != " << aggr_mcts.cts.nrows() << ")!\n\n"; + exit(1); + } + // Check the expected correct + else if(c == "EC_VALUE" && !is_bad_data(v) && + !is_eq(v, aggr_mcts.cts.ec_value(), loose_tol)) { + mlog << Error << "\nread_aggr_mctc() -> " + << "the MCTC expected correct values do not match (" + << v << " != " << aggr_mcts.cts.ec_value() << ")!\n\n"; + exit(1); + } + // Populate the MCTC table + else if(check_reg_exp("F[0-9]*_O[0-9]*", c.c_str())) { + StringArray sa(c.split("_")); + int i_row = atoi(sa[0].c_str()+1) - 1; + int i_col = atoi(sa[1].c_str()+1) - 1; + aggr_mcts.cts.set_entry(i_row, i_col, nint(v)); + } + } return; } //////////////////////////////////////////////////////////////////////// -void store_stat_fho(int n, const ConcatString &col, - const CTSInfo &cts_info) { - double v; - ConcatString lty_stat, var_name; +void read_aggr_sl1l2(int n, const SL1L2Info &s_info, + SL1L2Info &aggr_psum) { - // Set the column name to all 
upper case - ConcatString c = to_upper(col); + // Initialize + aggr_psum.zero_out(); - // Get the column value - if(c == "TOTAL") { v = (double) cts_info.cts.n(); } - else if(c == "F_RATE") { v = cts_info.cts.f_rate(); } - else if(c == "H_RATE") { v = cts_info.cts.h_rate(); } - else if(c == "O_RATE") { v = cts_info.cts.o_rate(); } - else { - mlog << Error << "\nstore_stat_fho() -> " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); - } + // Loop over the SL1L2 columns + for(auto &col : sl1l2_columns) { - // Construct the NetCDF variable name - var_name << cs_erase << "series_fho_" << c; + ConcatString c(to_upper(col)); + ConcatString var_name(build_nc_var_name_partialsums( + STATLineType::sl1l2, c, + s_info)); - // Append threshold information - if(cts_info.fthresh == cts_info.othresh) { - var_name << "_" << cts_info.fthresh.get_abbr_str(); - } - else { - var_name << "_fcst" << cts_info.fthresh.get_abbr_str() - << "_obs" << cts_info.othresh.get_abbr_str(); + // Read aggregate data, if needed + if(aggr_data.count(var_name) == 0) { + aggr_data[var_name] = read_aggr_data_plane( + var_name, R"("ALL" SL1L2)"); + } + + // Populate the partial sums + aggr_psum.set_stat_sl1l2(col, aggr_data[var_name].buf()[n]); } - // Add map for this variable name - if(stat_data.count(var_name) == 0) { + return; +} - // Build key - lty_stat << "FHO_" << c; +//////////////////////////////////////////////////////////////////////// - // Add new map entry - add_nc_var(var_name, c, stat_long_name[lty_stat], - cts_info.fthresh.get_str(), - cts_info.othresh.get_str(), - bad_data_double); - } +void read_aggr_sal1l2(int n, const SL1L2Info &s_info, + SL1L2Info &aggr_psum) { - // Store the statistic value - put_nc_val(n, var_name, (float) v); + // Initialize + aggr_psum.zero_out(); + + // Loop over the SAL1L2 columns + for(auto &col : sal1l2_columns) { + + ConcatString c(to_upper(col)); + ConcatString var_name(build_nc_var_name_partialsums( + STATLineType::sal1l2, c, + 
s_info)); + + // Read aggregate data, if needed + if(aggr_data.count(var_name) == 0) { + aggr_data[var_name] = read_aggr_data_plane( + var_name, R"("ALL" SAL1L2)"); + } + + // Populate the partial sums + aggr_psum.set_stat_sal1l2(col, aggr_data[var_name].buf()[n]); + } return; } //////////////////////////////////////////////////////////////////////// -void store_stat_ctc(int n, const ConcatString &col, - const CTSInfo &cts_info) { - int v; - ConcatString lty_stat, var_name; +void read_aggr_pct(int n, const PCTInfo &pct_info, + PCTInfo &aggr_pct) { - // Set the column name to all upper case - ConcatString c = to_upper(col); + // Initialize + aggr_pct.pct = pct_info.pct; + aggr_pct.pct.zero_out(); - // Get the column value - if(c == "TOTAL") { v = cts_info.cts.n(); } - else if(c == "FY_OY") { v = cts_info.cts.fy_oy(); } - else if(c == "FY_ON") { v = cts_info.cts.fy_on(); } - else if(c == "FN_OY") { v = cts_info.cts.fn_oy(); } - else if(c == "FN_ON") { v = cts_info.cts.fn_on(); } - else if(c == "EC_VALUE") { v = cts_info.cts.ec_value(); } - else { - mlog << Error << "\nstore_stat_ctc() -> " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); - } + // Get PCT column names + StringArray pct_cols(get_pct_columns(aggr_pct.pct.nrows()+1)); - // Construct the NetCDF variable name - var_name << cs_erase << "series_ctc_" << c; + // Loop over the PCT columns + for(int i=0; i " + << "the number of PCT thresholds do not match (" + << nint(v) << " != " << aggr_pct.pct.nrows()+1 + << ")!\n\n"; + exit(1); + } + // Set the event counts + else if(check_reg_exp("OY_[0-9]", c.c_str())) { - // Store the statistic value - put_nc_val(n, var_name, (float) v); + // Parse the index value from the column name + int i_row = atoi(strrchr(c.c_str(), '_') + 1) - 1; + aggr_pct.pct.set_event(i_row, nint(v)); + } + // Set the non-event counts + else if(check_reg_exp("ON_[0-9]", c.c_str())) { + + // Parse the index value from the column name + int i_row = 
atoi(strrchr(c.c_str(), '_') + 1) - 1; + aggr_pct.pct.set_nonevent(i_row, nint(v)); + } + } return; } //////////////////////////////////////////////////////////////////////// -void store_stat_cts(int n, const ConcatString &col, - const CTSInfo &cts_info) { - int i; - double v; - ConcatString lty_stat, var_name; - int n_ci = 1; +void do_probabilistic(int n, const PairDataPoint *pd_ptr) { - // Set the column name to all upper case - ConcatString c = to_upper(col); + mlog << Debug(4) + << "Computing Probabilistic Statistics.\n"; - // Check for columns with normal or bootstrap confidence limits - if(strstr(c.c_str(), "_NC") || strstr(c.c_str(), "_BC")) n_ci = cts_info.n_alpha; - - // Loop over the alpha values, if necessary - for(i=0; i " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); - } + // Object to store probabilistic statistics + PCTInfo pct_info; - // Construct the NetCDF variable name - var_name << cs_erase << "series_cts_" << c; + // Setup the PCTInfo object + pct_info.fthresh = conf_info.fcat_ta; + pct_info.allocate_n_alpha(conf_info.ci_alpha.n()); - // Append threshold information - if(cts_info.fthresh == cts_info.othresh) { - var_name << "_" << cts_info.fthresh.get_abbr_str(); + for(int i=0; i 1) var_name << "_a" << cts_info.alpha[i]; - - // Add map for this variable name - if(stat_data.count(var_name) == 0) { - - // Build key - lty_stat << "CTS_" << c; + // Add statistic value for each possible PCT column + for(int j=0; j 1 ? 
cts_info.alpha[i] : bad_data_double)); + // Add statistic value for each possible PSTD column + for(int j=0; j= mcts_info.cts.nrows() || - j < 0 || j >= mcts_info.cts.ncols()) { - mlog << Error << "\nstore_stat_mctc() -> " - << "range check error for column name requested \"" << c - << "\"\n\n"; - exit(1); - } + // Aggregate the climatology brier score as a weighted + // average and recompute the brier skill score - // Retrieve the value - v = (double) mcts_info.cts.entry(i, j); - } - else { - mlog << Error << "\nstore_stat_mctc() -> " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); - } + if(is_bad_data(briercl_pair) || total_pair == 0) return; // Construct the NetCDF variable name - var_name << cs_erase << "series_mctc_" << c; + ConcatString var_name(build_nc_var_name_probabilistic( + STATLineType::pstd, "BRIERCL", + pct_info, bad_data_double)); + + // Read aggregate data, if needed + if(aggr_data.count(var_name) == 0) { + aggr_data[var_name] = read_aggr_data_plane( + var_name, R"(the "BRIERCL" PSTD)"); + } - // Add map for this variable name - if(stat_data.count(var_name) == 0) { + // Get the n-th BRIERCL value + double briercl_aggr = aggr_data[var_name].buf()[n]; + int total_aggr = read_aggr_total(n); - // Build key - lty_stat << "MCTC_" << d; + // Aggregate BRIERCL as a weighted average + if(!is_bad_data(briercl_pair) && + !is_bad_data(briercl_aggr) && + (total_pair + total_aggr) > 0) { - // Add new map entry - add_nc_var(var_name, c, stat_long_name[lty_stat], - mcts_info.fthresh.get_str(","), - mcts_info.othresh.get_str(","), - bad_data_double); - } + pct_info.briercl.v = (total_pair * briercl_pair + + total_aggr * briercl_aggr) / + (total_pair + total_aggr); - // Store the statistic value - put_nc_val(n, var_name, (float) v); + // Compute the brier skill score + if(!is_bad_data(pct_info.brier.v) && + !is_bad_data(pct_info.briercl.v)) { + pct_info.bss = 1.0 - (pct_info.brier.v / pct_info.briercl.v); + } + } return; } 
//////////////////////////////////////////////////////////////////////// -void store_stat_mcts(int n, const ConcatString &col, - const MCTSInfo &mcts_info) { - int i; - double v; - ConcatString lty_stat, var_name; - int n_ci = 1; +void store_stat_categorical(int n, STATLineType lt, + const ConcatString &col, + const CTSInfo &cts_info) { // Set the column name to all upper case ConcatString c = to_upper(col); + // Handle ALL CTC columns + if(lt == STATLineType::ctc && c == all_columns) { + return store_stat_all_ctc(n, cts_info); + } + // Check for columns with normal or bootstrap confidence limits - if(strstr(c.c_str(), "_NC") || strstr(c.c_str(), "_BC")) n_ci = mcts_info.n_alpha; - - // Loop over the alpha values, if necessary - for(i=0; i " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); - } + int n_alpha = 1; + if(lt == STATLineType::cts && is_ci_stat_name(c)) { + n_alpha = cts_info.n_alpha; + } - // Construct the NetCDF variable name - var_name << cs_erase << "series_mcts_" << c; + // Loop over the alpha values + for(int i_alpha=0; i_alpha 1 ? cts_info.alpha[i_alpha] : bad_data_double); - // Append confidence interval alpha value - if(n_ci > 1) var_name << "_a" << mcts_info.alpha[i]; + // Construct the NetCDF variable name + ConcatString var_name(build_nc_var_name_categorical( + lt, c, cts_info, alpha)); // Add map for this variable name if(stat_data.count(var_name) == 0) { // Build key - lty_stat << "MCTS_" << c; + ConcatString lty_stat(statlinetype_to_string(lt)); + lty_stat << "_" << c; // Add new map entry - add_nc_var(var_name, c, stat_long_name[lty_stat], - mcts_info.fthresh.get_str(","), - mcts_info.othresh.get_str(","), - (n_ci > 1 ? 
mcts_info.alpha[i] : bad_data_double)); + add_stat_data(var_name, c, stat_long_name[lty_stat], + cts_info.fthresh.get_str(), + cts_info.othresh.get_str(), + alpha); } // Store the statistic value - put_nc_val(n, var_name, (float) v); + stat_data[var_name].dp.buf()[n] = cts_info.get_stat(lt, c, i_alpha); - } // end for i + } // end for i_alpha return; } //////////////////////////////////////////////////////////////////////// -void store_stat_cnt(int n, const ConcatString &col, - const CNTInfo &cnt_info) { - int i; - double v; - ConcatString lty_stat, var_name; - int n_ci = 1; +void store_stat_multicategory(int n, STATLineType lt, + const ConcatString &col, + const MCTSInfo &mcts_info) { // Set the column name to all upper case ConcatString c = to_upper(col); + // Handle ALL MCTC columns + if(lt == STATLineType::mctc && c == all_columns) { + return store_stat_all_mctc(n, mcts_info); + } + // Check for columns with normal or bootstrap confidence limits - if(strstr(c.c_str(), "_NC") || strstr(c.c_str(), "_BC")) n_ci = cnt_info.n_alpha; - - // Loop over the alpha values, if necessary - for(i=0; i " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); - } + int n_alpha = 1; + if(lt == STATLineType::mcts && is_ci_stat_name(c)) { + n_alpha = mcts_info.n_alpha; + } + + // Loop over the alpha values + for(int i_alpha=0; i_alpha 1 ? 
mcts_info.alpha[i_alpha] : bad_data_double); // Construct the NetCDF variable name - var_name << cs_erase << "series_cnt_" << c; - - // Append threshold information, if supplied - if(cnt_info.fthresh.get_type() != thresh_na || - cnt_info.othresh.get_type() != thresh_na) { - var_name << "_fcst" << cnt_info.fthresh.get_abbr_str() - << "_" << setlogic_to_abbr(conf_info.cnt_logic) - << "_obs" << cnt_info.othresh.get_abbr_str(); - } + ConcatString var_name(build_nc_var_name_multicategory( + lt, c, alpha)); - // Append confidence interval alpha value - if(n_ci > 1) var_name << "_a" << cnt_info.alpha[i]; + // Store the data value + ConcatString col_name; + auto v = (float) mcts_info.get_stat(lt, c, col_name, i_alpha); // Add map for this variable name if(stat_data.count(var_name) == 0) { // Build key - lty_stat << "CNT_" << c; + ConcatString lty_stat; + lty_stat << statlinetype_to_string(lt) << "_" << col_name; // Add new map entry - add_nc_var(var_name, c, stat_long_name[lty_stat], - cnt_info.fthresh.get_str(), - cnt_info.othresh.get_str(), - (n_ci > 1 ? 
cnt_info.alpha[i] : bad_data_double)); + add_stat_data(var_name, c, stat_long_name[lty_stat], + mcts_info.fthresh.get_str(","), + mcts_info.othresh.get_str(","), + alpha); } // Store the statistic value - put_nc_val(n, var_name, (float) v); + stat_data[var_name].dp.buf()[n] = v; - } // end for i + } // end for i_alpha return; } //////////////////////////////////////////////////////////////////////// -void store_stat_sl1l2(int n, const ConcatString &col, - const SL1L2Info &s_info) { - double v; - ConcatString lty_stat, var_name; +void store_stat_continuous(int n, STATLineType lt, + const ConcatString &col, + const CNTInfo &cnt_info) { // Set the column name to all upper case ConcatString c = to_upper(col); - // Get the column value - if(c == "TOTAL") { v = (double) s_info.scount; } - else if(c == "FBAR") { v = s_info.fbar; } - else if(c == "OBAR") { v = s_info.obar; } - else if(c == "FOBAR") { v = s_info.fobar; } - else if(c == "FFBAR") { v = s_info.ffbar; } - else if(c == "OOBAR") { v = s_info.oobar; } - else if(c == "MAE") { v = s_info.mae; } - else if(c == "FABAR") { v = s_info.fabar; } - else if(c == "OABAR") { v = s_info.oabar; } - else if(c == "FOABAR") { v = s_info.foabar; } - else if(c == "FFABAR") { v = s_info.ffabar; } - else if(c == "OOABAR") { v = s_info.ooabar; } - else if(c == "MAE") { v = s_info.mae; } - else { - mlog << Error << "\nstore_stat_sl1l2() -> " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); - } + // Check for columns with normal or bootstrap confidence limits + int n_alpha = 1; + if(is_ci_stat_name(c)) n_alpha = cnt_info.n_alpha; - // Construct the NetCDF variable name - var_name << cs_erase << "series_sl1l2_" << c; + // Loop over the alpha values + for(int i_alpha=0; i_alpha 1 ? 
cnt_info.alpha[i_alpha] : bad_data_double); - // Add map for this variable name - if(stat_data.count(var_name) == 0) { + // Construct the NetCDF variable name + ConcatString var_name(build_nc_var_name_continuous( + lt, c, cnt_info, alpha)); - // Build key - lty_stat << "SL1L2_" << c; + // Add map for this variable name + if(stat_data.count(var_name) == 0) { - // Add new map entry - add_nc_var(var_name, c, stat_long_name[lty_stat], - s_info.fthresh.get_str(), - s_info.othresh.get_str(), - bad_data_double); - } + // Build key + ConcatString lty_stat(statlinetype_to_string(lt)); + lty_stat << "_" << c; - // Store the statistic value - put_nc_val(n, var_name, (float) v); + // Add new map entry + add_stat_data(var_name, c, stat_long_name[lty_stat], + cnt_info.fthresh.get_str(), + cnt_info.othresh.get_str(), + alpha); + } + + // Store the statistic value + stat_data[var_name].dp.buf()[n] = cnt_info.get_stat(c, i_alpha); + + } // end for i_alpha return; } //////////////////////////////////////////////////////////////////////// -void store_stat_pct(int n, const ConcatString &col, - const PCTInfo &pct_info) { - int i = 0; - double v; - ConcatString lty_stat, var_name; +void store_stat_partialsums(int n, STATLineType lt, + const ConcatString &col, + const SL1L2Info &s_info) { // Set the column name to all upper case ConcatString c = to_upper(col); - ConcatString d = c; - // Get index value for variable column numbers - if(check_reg_exp("_[0-9]", c.c_str())) { - - // Parse the index value from the column name - i = atoi(strrchr(c.c_str(), '_') + 1) - 1; - - // Range check - if(i < 0 || i >= pct_info.pct.nrows()) { - mlog << Error << "\nstore_stat_pct() -> " - << "range check error for column name requested \"" << c - << "\"\n\n"; - exit(1); - } - } // end if - - // Get the column value - if(c == "TOTAL") { v = (double) pct_info.pct.n(); } - else if(c == "N_THRESH") { v = (double) pct_info.pct.nrows() + 1; } - else if(check_reg_exp("THRESH_[0-9]", c.c_str())) { v = 
pct_info.pct.threshold(i); } - else if(check_reg_exp("OY_[0-9]", c.c_str())) { v = (double) pct_info.pct.event_count_by_row(i); - d = "OY_I"; } - else if(check_reg_exp("ON_[0-9]", c.c_str())) { v = (double) pct_info.pct.nonevent_count_by_row(i); - d = "ON_I"; } - else { - mlog << Error << "\nstore_stat_pct() -> " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); + // Handle ALL columns + if(c == all_columns) { + if(lt == STATLineType::sl1l2) return store_stat_all_sl1l2(n, s_info); + else if(lt == STATLineType::sal1l2) return store_stat_all_sal1l2(n, s_info); } // Construct the NetCDF variable name - var_name << cs_erase << "series_pct_" << c - << "_obs" << pct_info.othresh.get_abbr_str(); + ConcatString var_name(build_nc_var_name_partialsums( + lt, c, s_info)); // Add map for this variable name if(stat_data.count(var_name) == 0) { // Build key - lty_stat << "PCT_" << d; + ConcatString lty_stat(statlinetype_to_string(lt)); + lty_stat << "_" << c; // Add new map entry - add_nc_var(var_name, c, stat_long_name[lty_stat], - pct_info.fthresh.get_str(","), - pct_info.othresh.get_str(), - bad_data_double); + add_stat_data(var_name, c, stat_long_name[lty_stat], + s_info.fthresh.get_str(), + s_info.othresh.get_str(), + bad_data_double); } // Store the statistic value - put_nc_val(n, var_name, (float) v); + stat_data[var_name].dp.buf()[n] = s_info.get_stat(lt, c); return; } //////////////////////////////////////////////////////////////////////// -void store_stat_pstd(int n, const ConcatString &col, - const PCTInfo &pct_info) { - int i; - double v; - ConcatString lty_stat, var_name; - int n_ci = 1; +void store_stat_probabilistic(int n, STATLineType lt, + const ConcatString &col, + const PCTInfo &pct_info) { // Set the column name to all upper case ConcatString c = to_upper(col); + // Handle ALL PCT columns + if(lt == STATLineType::pct && c == all_columns) { + return store_stat_all_pct(n, pct_info); + } + // Check for columns with normal or bootstrap 
confidence limits - if(strstr(c.c_str(), "_NC") || strstr(c.c_str(), "_BC")) n_ci = pct_info.n_alpha; - - // Loop over the alpha values, if necessary - for(i=0; i " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); - } + int n_alpha = 1; + if(is_ci_stat_name(c)) n_alpha = pct_info.n_alpha; + + // Loop over the alpha values + for(int i_alpha=0; i_alpha 1 ? pct_info.alpha[i_alpha] : bad_data_double); // Construct the NetCDF variable name - var_name << cs_erase << "series_pstd_" << c; + ConcatString var_name(build_nc_var_name_probabilistic( + lt, c, pct_info, alpha)); - // Append confidence interval alpha value - if(n_ci > 1) var_name << "_a" << pct_info.alpha[i]; + // Store the data value + ConcatString col_name; + auto v = (float) pct_info.get_stat(lt, c, col_name, i_alpha); // Add map for this variable name if(stat_data.count(var_name) == 0) { // Build key - lty_stat << "PSTD_" << c; + ConcatString lty_stat(statlinetype_to_string(lt)); + lty_stat << "_" << col_name; // Add new map entry - add_nc_var(var_name, c, stat_long_name[lty_stat], - pct_info.fthresh.get_str(","), - pct_info.othresh.get_str(), - (n_ci > 1 ? 
pct_info.alpha[i] : bad_data_double)); + add_stat_data(var_name, c, stat_long_name[lty_stat], + pct_info.fthresh.get_str(","), + pct_info.othresh.get_str(), + alpha); } // Store the statistic value - put_nc_val(n, var_name, (float) v); + stat_data[var_name].dp.buf()[n] = v; - } // end for i + } // end for i_alpha return; } //////////////////////////////////////////////////////////////////////// -void store_stat_pjc(int n, const ConcatString &col, - const PCTInfo &pct_info) { - int i = 0; - int tot; - double v; - ConcatString lty_stat, var_name; +void store_stat_all_ctc(int n, const CTSInfo &cts_info) { + for(auto &col : ctc_columns) { + store_stat_categorical(n, STATLineType::ctc, col, cts_info); + } +} - // Set the column name to all upper case - ConcatString c = to_upper(col); - ConcatString d = c; +//////////////////////////////////////////////////////////////////////// - // Get index value for variable column numbers - if(check_reg_exp("_[0-9]", c.c_str())) { +void store_stat_all_mctc(int n, const MCTSInfo &mcts_info) { + StringArray mctc_cols(get_mctc_columns(mcts_info.cts.nrows())); + for(int i=0; i= pct_info.pct.nrows()) { - mlog << Error << "\nstore_stat_pjc() -> " - << "range check error for column name requested \"" << c - << "\"\n\n"; - exit(1); - } - } // end if - - // Store the total count - tot = pct_info.pct.n(); - - // Get the column value - if(c == "TOTAL") { v = (double) tot; } - else if(c == "N_THRESH") { v = (double) pct_info.pct.nrows() + 1; } - else if(check_reg_exp("THRESH_[0-9]", c.c_str())) { v = pct_info.pct.threshold(i); - d = "THRESH_I"; } - else if(check_reg_exp("OY_TP_[0-9]", c.c_str())) { v = pct_info.pct.event_count_by_row(i)/(double) tot; - d = "OY_TP_I"; } - else if(check_reg_exp("ON_TP_[0-9]", c.c_str())) { v = pct_info.pct.nonevent_count_by_row(i)/(double) tot; - d = "ON_TP_I"; } - else if(check_reg_exp("CALIBRATION_[0-9]", c.c_str())) { v = pct_info.pct.row_calibration(i); - d = "CALIBRATION_I"; } - else 
if(check_reg_exp("REFINEMENT_[0-9]", c.c_str())) { v = pct_info.pct.row_refinement(i); - d = "REFINEMENT_I"; } - else if(check_reg_exp("LIKELIHOOD_[0-9]", c.c_str())) { v = pct_info.pct.row_event_likelihood(i); - d = "LIKELIHOOD_I"; } - else if(check_reg_exp("BASER_[0-9]", c.c_str())) { v = pct_info.pct.row_obar(i); - d = "BASER_I"; } - else { - mlog << Error << "\nstore_stat_pjc() -> " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); +void store_stat_all_sl1l2(int n, const SL1L2Info &s_info) { + for(auto &col : sl1l2_columns) { + store_stat_partialsums(n, STATLineType::sl1l2, col, s_info); } +} - // Construct the NetCDF variable name - var_name << cs_erase << "series_pjc_" << c - << "_obs" << pct_info.othresh.get_abbr_str(); +//////////////////////////////////////////////////////////////////////// - // Add map for this variable name - if(stat_data.count(var_name) == 0) { +void store_stat_all_sal1l2(int n, const SL1L2Info &s_info) { + for(auto &col : sal1l2_columns) { + store_stat_partialsums(n, STATLineType::sal1l2, col, s_info); + } +} - // Build key - lty_stat << "PJC_" << d; +//////////////////////////////////////////////////////////////////////// - // Add new map entry - add_nc_var(var_name, c, stat_long_name[lty_stat], - pct_info.fthresh.get_str(","), - pct_info.othresh.get_str(), - bad_data_double); +void store_stat_all_pct(int n, const PCTInfo &pct_info) { + StringArray pct_cols(get_pct_columns(pct_info.pct.nrows() + 1)); + for(int i=0; i= pct_info.pct.nrows()) { - mlog << Error << "\nstore_stat_prc() -> " - << "range check error for column name requested \"" << c - << "\"\n\n"; - exit(1); - } +//////////////////////////////////////////////////////////////////////// - // Get the 2x2 contingency table for this row - ct = pct_info.pct.ctc_by_row(i); +ConcatString build_nc_var_name_partialsums( + STATLineType lt, const ConcatString &col, + const SL1L2Info &s_info) { - } // end if + // Append the column name + ConcatString 
var_name("series_"); + var_name << to_lower(statlinetype_to_string(lt)) << "_" << col; - // Get the column value - if(c == "TOTAL") { v = (double) pct_info.pct.n(); } - else if(c == "N_THRESH") { v = (double) pct_info.pct.nrows() + 1; } - else if(check_reg_exp("THRESH_[0-9]", c.c_str())) { v = pct_info.pct.threshold(i); - d = "THRESH_I"; } - else if(check_reg_exp("PODY_[0-9]", c.c_str())) { v = ct.pod_yes(); - d = "PODY_I"; } - else if(check_reg_exp("POFD_[0-9]", c.c_str())) { v = ct.pofd(); - d = "POFD_I"; } - else { - mlog << Error << "\nstore_stat_prc() -> " - << "unsupported column name requested \"" << c - << "\"\n\n"; - exit(1); + // Append threshold information, if supplied + if(s_info.fthresh.get_type() != thresh_na || + s_info.othresh.get_type() != thresh_na) { + var_name << "_fcst" << s_info.fthresh.get_abbr_str() + << "_" << setlogic_to_abbr(s_info.logic) + << "_obs" << s_info.othresh.get_abbr_str(); } - // Add map for this variable name - if(stat_data.count(var_name) == 0) { + return var_name; +} - // Build key - lty_stat << "PRC_" << d; +//////////////////////////////////////////////////////////////////////// - // Add new map entry - add_nc_var(var_name, c, stat_long_name[lty_stat], - pct_info.fthresh.get_str(","), - pct_info.othresh.get_str(), - bad_data_double); +ConcatString build_nc_var_name_continuous( + STATLineType lt, const ConcatString &col, + const CNTInfo &cnt_info, double alpha) { + + // Append the column name + ConcatString var_name("series_"); + var_name << to_lower(statlinetype_to_string(lt)) << "_" << col; + + // Append threshold information, if supplied + if(cnt_info.fthresh.get_type() != thresh_na || + cnt_info.othresh.get_type() != thresh_na) { + var_name << "_fcst" << cnt_info.fthresh.get_abbr_str() + << "_" << setlogic_to_abbr(cnt_info.logic) + << "_obs" << cnt_info.othresh.get_abbr_str(); } - // Store the statistic value - put_nc_val(n, var_name, (float) v); + // Append confidence interval alpha value + if(!is_bad_data(alpha)) 
var_name << "_a" << alpha; - return; + return var_name; +} + +//////////////////////////////////////////////////////////////////////// + +ConcatString build_nc_var_name_probabilistic( + STATLineType lt, const ConcatString &col, + const PCTInfo &pct_info, double alpha) { + + // Append the column name + ConcatString var_name("series_"); + var_name << to_lower(statlinetype_to_string(lt)) << "_" << col; + + // Append the observation threshold + var_name << "_obs" << pct_info.othresh.get_abbr_str(); + + // Append confidence interval alpha value + if(!is_bad_data(alpha)) var_name << "_a" << alpha; + + return var_name; } //////////////////////////////////////////////////////////////////////// @@ -2117,9 +2221,10 @@ void setup_nc_file(const VarInfo *fcst_info, const VarInfo *obs_info) { if (deflate_level < 0) deflate_level = conf_info.get_compression_level(); // Add the series length variable - NcVar var = add_var(nc_out, "n_series", ncInt, deflate_level); + NcVar var = add_var(nc_out, n_series_var_name, ncInt, deflate_level); add_att(&var, "long_name", "length of series"); + int n_series = n_series_pair + n_series_aggr; if(!put_nc_data(&var, &n_series)) { mlog << Error << "\nsetup_nc_file() -> " << "error writing the series length variable.\n\n"; @@ -2134,67 +2239,73 @@ void setup_nc_file(const VarInfo *fcst_info, const VarInfo *obs_info) { //////////////////////////////////////////////////////////////////////// -void add_nc_var(const ConcatString &var_name, - const ConcatString &name, - const ConcatString &long_name, - const ConcatString &fcst_thresh, - const ConcatString &obs_thresh, - double alpha) { - NcVarData d; - - int deflate_level = compress_level; - if (deflate_level < 0) deflate_level = conf_info.get_compression_level(); - - // Add a new variable to the NetCDF file - NcVar var = add_var(nc_out, (string)var_name, ncFloat, lat_dim, lon_dim, deflate_level); - d.var = new NcVar(var); - - // Add variable attributes - add_att(d.var, "_FillValue", bad_data_float); - 
if(name.length() > 0) add_att(d.var, "name", (string)name); - if(long_name.length() > 0) add_att(d.var, "long_name", (string)long_name); - if(fcst_thresh.length() > 0) add_att(d.var, "fcst_thresh", (string)fcst_thresh); - if(obs_thresh.length() > 0) add_att(d.var, "obs_thresh", (string)obs_thresh); - if(!is_bad_data(alpha)) add_att(d.var, "alpha", alpha); - - // Store the new NcVarData object in the map - stat_data[var_name] = d; +void add_stat_data(const ConcatString &var_name, + const ConcatString &name, + const ConcatString &long_name, + const ConcatString &fcst_thresh, + const ConcatString &obs_thresh, + double alpha) { + + NcVarData data; + data.dp.set_size(grid.nx(), grid.ny(), bad_data_double); + data.name = name; + data.long_name = long_name; + data.fcst_thresh = fcst_thresh; + data.obs_thresh = obs_thresh; + data.alpha = alpha; + + // Store the new NcVarData object + stat_data[var_name] = data; + stat_data_keys.push_back(var_name); return; } //////////////////////////////////////////////////////////////////////// -void put_nc_val(int n, const ConcatString &var_name, float v) { - int x, y; - - // Determine x,y location - DefaultTO.one_to_two(grid.nx(), grid.ny(), n, x, y); +void write_stat_data() { - // Check for key in the map - if(stat_data.count(var_name) == 0) { - mlog << Error << "\nput_nc_val() -> " - << "variable name \"" << var_name - << "\" does not exist in the map.\n\n"; - exit(1); - } - - // Get the NetCDF variable to be written - NcVar *var = stat_data[var_name].var; - - long offsets[2]; - long lengths[2]; - offsets[0] = y; - offsets[1] = x; - lengths[0] = 1; - lengths[1] = 1; + mlog << Debug(2) + << "Writing " << stat_data_keys.size() + << " output variables.\n"; - // Store the current value - if(!put_nc_data(var, &v, lengths, offsets)) { - mlog << Error << "\nput_nc_val() -> " - << "error writing to variable " << var_name - << " for point (" << x << ", " << y << ").\n\n"; - exit(1); + int deflate_level = compress_level; + if(deflate_level < 
0) deflate_level = conf_info.get_compression_level(); + + // Allocate memory to store data values for each grid point + vector data(grid.nx()*grid.ny()); + + // Write output for each stat_data map entry + for(auto &key : stat_data_keys) { + + NcVarData *ptr = &stat_data[key]; + + // Add a new variable to the NetCDF file + NcVar nc_var = add_var(nc_out, key, ncFloat, lat_dim, lon_dim, deflate_level); + + // Add variable attributes + add_att(&nc_var, "_FillValue", bad_data_float); + add_att(&nc_var, "name", ptr->name); + add_att(&nc_var, "long_name", ptr->long_name); + if(ptr->fcst_thresh.length() > 0) add_att(&nc_var, "fcst_thresh", ptr->fcst_thresh); + if(ptr->obs_thresh.length() > 0) add_att(&nc_var, "obs_thresh", ptr->obs_thresh); + if(!is_bad_data(ptr->alpha)) add_att(&nc_var, "alpha", ptr->alpha); + + // Store the data + for(int x=0; xdp(x, y); + } // end for y + } // end for x + + // Write out the data + if(!put_nc_data_with_dims(&nc_var, data.data(), grid.ny(), grid.nx())) { + mlog << Error << "\nwrite_stat_data() -> " + << R"(error writing ")" << key + << R"(" data to the output file.)" << "\n\n"; + exit(1); + } } return; @@ -2228,25 +2339,23 @@ void set_range(const int &t, int &beg, int &end) { void clean_up() { - // Deallocate NetCDF variable for each map entry - map::const_iterator it; - for(it=stat_data.begin(); it!=stat_data.end(); it++) { - if(it->second.var) { delete it->second.var; } - } - // Close the output NetCDF file if(nc_out) { // List the NetCDF file after it is finished - mlog << Debug(1) << "Output file: " << out_file << "\n"; + mlog << Debug(1) + << "Output file: " << out_file << "\n"; delete nc_out; nc_out = (NcFile *) nullptr; } + // Close the aggregate NetCDF file + if(aggr_nc.MetNc) aggr_nc.close(); + // Deallocate memory for data files - if(fcst_mtddf) { delete fcst_mtddf; fcst_mtddf = (Met2dDataFile *) nullptr; } - if(obs_mtddf) { delete obs_mtddf; obs_mtddf = (Met2dDataFile *) nullptr; } + if(fcst_mtddf) { delete fcst_mtddf; 
fcst_mtddf = nullptr; } + if(obs_mtddf) { delete obs_mtddf; obs_mtddf = nullptr; } // Deallocate memory for the random number generator rng_free(rng_ptr); @@ -2265,6 +2374,7 @@ void usage() { << "\t-fcst file_1 ... file_n | fcst_file_list\n" << "\t-obs file_1 ... file_n | obs_file_list\n" << "\t[-both file_1 ... file_n | both_file_list]\n" + << "\t[-aggr file]\n" << "\t[-paired]\n" << "\t-out file\n" << "\t-config file\n" @@ -2272,37 +2382,53 @@ void usage() { << "\t[-v level]\n" << "\t[-compress level]\n\n" - << "\twhere\t\"-fcst file_1 ... file_n\" are the gridded " + << "\twhere\t" + << R"("-fcst file_1 ... file_n" are the gridded )" << "forecast files to be used (required).\n" - << "\t\t\"-fcst fcst_file_list\" is an ASCII file containing " + << "\t\t" + << R"("-fcst fcst_file_list" is an ASCII file containing )" << "a list of gridded forecast files to be used (required).\n" - << "\t\t\"-obs file_1 ... file_n\" are the gridded " + << "\t\t" + << R"("-obs file_1 ... file_n" are the gridded )" << "observation files to be used (required).\n" - << "\t\t\"-obs obs_file_list\" is an ASCII file containing " + << "\t\t" + << R"("-obs obs_file_list" is an ASCII file containing )" << "a list of gridded observation files to be used (required).\n" - << "\t\t\"-both\" sets the \"-fcst\" and \"-obs\" options to " + << "\t\t" + << R"("-both" sets the "-fcst" and "-obs" options to )" << "the same set of files (optional).\n" - << "\t\t\"-paired\" to indicate that the input -fcst and -obs " + << "\t\t" + << R"("-aggr file" specifies a series_analysis output )" + << "file with partial sums and/or contingency table counts to be " + << "updated prior to deriving statistics (optional).\n" + + << "\t\t" + << R"("-paired" to indicate that the input -fcst and -obs )" << "file lists are already paired (optional).\n" - << "\t\t\"-out file\" is the NetCDF output file containing " + << "\t\t" + << R"("-out file" is the NetCDF output file containing )" << "computed statistics (required).\n" 
- << "\t\t\"-config file\" is a SeriesAnalysisConfig file " + << "\t\t" + << R"("-config file" is a SeriesAnalysisConfig file )" << "containing the desired configuration settings (required).\n" - << "\t\t\"-log file\" outputs log messages to the specified " + << "\t\t" + << R"("-log file" outputs log messages to the specified )" << "file (optional).\n" - << "\t\t\"-v level\" overrides the default level of logging (" + << "\t\t" + << R"("-v level" overrides the default level of logging ()" << mlog.verbosity_level() << ") (optional).\n" - << "\t\t\"-compress level\" overrides the compression level of NetCDF variable (" + << "\t\t" + << R"("-compress level" overrides the compression level of NetCDF variable ()" << conf_info.get_compression_level() << ") (optional).\n\n" << flush; exit(1); @@ -2329,6 +2455,12 @@ void set_both_files(const StringArray & a) { //////////////////////////////////////////////////////////////////////// +void set_aggr(const StringArray & a) { + aggr_file = a[0]; +} + +//////////////////////////////////////////////////////////////////////// + void set_paired(const StringArray & a) { paired = true; } @@ -2366,8 +2498,8 @@ void parse_long_names() { f_in.open(file_name.c_str()); if(!f_in) { mlog << Error << "\nparse_long_names() -> " - << "can't open the ASCII file \"" << file_name - << "\" for reading\n\n"; + << R"(can't open the ASCII file ") << file_name + << R"(" for reading!)" << "\n\n"; exit(1); } diff --git a/src/tools/core/series_analysis/series_analysis.h b/src/tools/core/series_analysis/series_analysis.h index 2540540015..73b2f3d6f6 100644 --- a/src/tools/core/series_analysis/series_analysis.h +++ b/src/tools/core/series_analysis/series_analysis.h @@ -17,7 +17,6 @@ // 000 12/10/12 Halley Gotway New // 001 09/28/22 Prestopnik MET #2227 Remove namespace std and netCDF from header files // -// //////////////////////////////////////////////////////////////////////// #ifndef __SERIES_ANALYSIS_H__ @@ -43,6 +42,7 @@ #include 
"series_analysis_conf_info.h" #include "vx_data2d_factory.h" +#include "vx_data2d_nc_met.h" #include "vx_grid.h" #include "vx_util.h" #include "vx_stat_out.h" @@ -60,6 +60,11 @@ static const char * program_name = "series_analysis"; static const char * default_config_filename = "MET_BASE/config/SeriesAnalysisConfig_default"; +static const char * all_columns = "ALL"; +static const char * n_series_var_name = "n_series"; + +static const char * total_name = "TOTAL"; + //////////////////////////////////////////////////////////////////////// // // Variables for Command Line Arguments @@ -68,10 +73,11 @@ static const char * default_config_filename = // Input files static StringArray fcst_files, found_fcst_files; -static StringArray obs_files, found_obs_files; -static GrdFileType ftype = FileType_None; -static GrdFileType otype = FileType_None; -static bool paired = false; +static StringArray obs_files, found_obs_files; +static GrdFileType ftype = FileType_None; +static GrdFileType otype = FileType_None; +static ConcatString aggr_file; +static bool paired = false; static int compress_level = -1; // Output file @@ -88,17 +94,26 @@ static SeriesAnalysisConfInfo conf_info; //////////////////////////////////////////////////////////////////////// // Output NetCDF file -static netCDF::NcFile *nc_out = (netCDF::NcFile *) nullptr; -static netCDF::NcDim lat_dim; -static netCDF::NcDim lon_dim ; +static netCDF::NcFile *nc_out = nullptr; +static netCDF::NcDim lat_dim; +static netCDF::NcDim lon_dim ; -// Structure to store computed statistics and corresponding metadata +// Structure to store computed statistics struct NcVarData { - netCDF::NcVar * var; // Pointer to NetCDF variable + DataPlane dp; + std::string name; + std::string long_name; + std::string fcst_thresh; + std::string obs_thresh; + double alpha; }; // Mapping of NetCDF variable name to computed statistic -std::map stat_data; +std::map stat_data; +std::vector stat_data_keys; + +// Mapping of aggregate NetCDF variable name 
to DataPlane +std::map aggr_data; //////////////////////////////////////////////////////////////////////// // @@ -108,16 +123,16 @@ std::map stat_data; // Grid variables static Grid grid; -static int nxy = 0; static int n_reads = 1; // Initialize to at least one pass // Data file factory and input files static Met2dDataFileFactory mtddf_factory; -static Met2dDataFile *fcst_mtddf = (Met2dDataFile *) nullptr; -static Met2dDataFile *obs_mtddf = (Met2dDataFile *) nullptr; +static Met2dDataFile *fcst_mtddf = nullptr; +static Met2dDataFile *obs_mtddf = nullptr; +static MetNcMetDataFile aggr_nc; // Pointer to the random number generator to be used -static gsl_rng *rng_ptr = (gsl_rng *) nullptr; +static gsl_rng *rng_ptr = nullptr; // Enumeration of ways that a series can be defined enum class SeriesType { @@ -130,7 +145,8 @@ enum class SeriesType { static SeriesType series_type = SeriesType::None; // Series length -static int n_series = 0; +static int n_series_pair = 0; // Input pair data series +static int n_series_aggr = 0; // Input aggr series // Range of timing values encountered in the data static unixtime fcst_init_beg = (unixtime) 0; diff --git a/src/tools/core/series_analysis/series_analysis_conf_info.cc b/src/tools/core/series_analysis/series_analysis_conf_info.cc index 2e032256a0..fd19bf61bc 100644 --- a/src/tools/core/series_analysis/series_analysis_conf_info.cc +++ b/src/tools/core/series_analysis/series_analysis_conf_info.cc @@ -212,8 +212,9 @@ void SeriesAnalysisConfInfo::process_config(GrdFileType ftype, exit(1); } - // Check climatology fields - check_climo_n_vx(&conf, n_fcst); + // Check for consistent number of climatology fields + check_climo_n_vx(fdict, n_fcst); + check_climo_n_vx(odict, n_obs); // Allocate space based on the number of verification tasks fcst_info = new VarInfo * [n_fcst]; diff --git a/src/tools/core/series_analysis/series_analysis_conf_info.h b/src/tools/core/series_analysis/series_analysis_conf_info.h index 76f7041472..01aff16098 
100644 --- a/src/tools/core/series_analysis/series_analysis_conf_info.h +++ b/src/tools/core/series_analysis/series_analysis_conf_info.h @@ -100,8 +100,8 @@ class SeriesAnalysisConfInfo { //////////////////////////////////////////////////////////////////////// -inline int SeriesAnalysisConfInfo::get_n_fcst() const { return(n_fcst); } -inline int SeriesAnalysisConfInfo::get_n_obs() const { return(n_obs); } +inline int SeriesAnalysisConfInfo::get_n_fcst() const { return n_fcst; } +inline int SeriesAnalysisConfInfo::get_n_obs() const { return n_obs; } inline int SeriesAnalysisConfInfo::get_compression_level() { return conf.nc_compression(); } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/core/stat_analysis/Makefile.in b/src/tools/core/stat_analysis/Makefile.in index 168c3da589..e1b0bda93d 100644 --- a/src/tools/core/stat_analysis/Makefile.in +++ b/src/tools/core/stat_analysis/Makefile.in @@ -231,6 +231,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/core/stat_analysis/aggr_stat_line.cc b/src/tools/core/stat_analysis/aggr_stat_line.cc index 7f3c16af10..98df91caad 100644 --- a/src/tools/core/stat_analysis/aggr_stat_line.cc +++ b/src/tools/core/stat_analysis/aggr_stat_line.cc @@ -41,10 +41,11 @@ // to VL1L2, VAL1L2, and VCNT. // 019 02/21/24 Halley Gotway MET #2583 Add observation error // ECNT statistics. +// 020 06/14/24 Halley Gotway MET #2911 Call apply_set_hdr_opts(). +// 021 07/05/24 Halley Gotway MET #2924 Support forecast climatology. 
// //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -62,11 +63,11 @@ using namespace std; - //////////////////////////////////////////////////////////////////////// -static bool is_precip_var_name(const ConcatString &s); -static const std::string case_str = "CASE"; +static bool is_precip_var_name(const ConcatString &s); +static const string case_str = "CASE"; +static bool is_vector_dir_stat(const STATLineType &t, const ConcatString &s); //////////////////////////////////////////////////////////////////////// // @@ -349,25 +350,17 @@ StatHdrColumns StatHdrInfo::get_shc(const ConcatString &cur_case, const StringArray &hdr_cols, const StringArray &hdr_vals, const STATLineType lt) { - ConcatString css; - StringArray case_vals; ThreshArray ta; + ConcatString css; double out_alpha; - int index, wdth; + int wdth; StatHdrColumns shc; - // Split up the current case into values - case_vals = cur_case.split(":"); - // MODEL - shc.set_model( - get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "MODEL", model, false).c_str()); + shc.set_model(get_col_css(cur_case, "MODEL", model, false).c_str()); // DESC - shc.set_desc( - get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "DESC", desc, false).c_str()); + shc.set_desc(get_col_css(cur_case, "DESC", desc, false).c_str()); // FCST_LEAD css = write_css_hhmmss(fcst_lead); @@ -377,26 +370,11 @@ StatHdrColumns StatHdrInfo::get_shc(const ConcatString &cur_case, << fcst_lead.n() << " unique FCST_LEAD values: " << css << "\n"; } - if(hdr_cols.has("FCST_LEAD", index)) { - shc.set_fcst_lead_sec(timestring_to_sec(hdr_vals[index].c_str())); - } - else { - shc.set_fcst_lead_sec(fcst_lead.max()); - } + shc.set_fcst_lead_sec(fcst_lead.max()); // FCST_VALID_BEG, FCST_VALID_END - if(hdr_cols.has("FCST_VALID_BEG", index)) { - shc.set_fcst_valid_beg(timestring_to_unix(hdr_vals[index].c_str())); - } - else { - shc.set_fcst_valid_beg(fcst_valid_beg); - } - 
if(hdr_cols.has("FCST_VALID_END", index)) { - shc.set_fcst_valid_end(timestring_to_unix(hdr_vals[index].c_str())); - } - else { - shc.set_fcst_valid_end(fcst_valid_end); - } + shc.set_fcst_valid_beg(fcst_valid_beg); + shc.set_fcst_valid_end(fcst_valid_end); // OBS_LEAD css = write_css_hhmmss(obs_lead); @@ -406,71 +384,38 @@ StatHdrColumns StatHdrInfo::get_shc(const ConcatString &cur_case, << obs_lead.n() << " unique OBS_LEAD values: " << css << "\n"; } - if(hdr_cols.has("OBS_LEAD", index)) { - shc.set_obs_lead_sec(timestring_to_sec(hdr_vals[index].c_str())); - } - else { - shc.set_obs_lead_sec(obs_lead.max()); - } + shc.set_obs_lead_sec(obs_lead.max()); // OBS_VALID_BEG, OBS_VALID_END - if(hdr_cols.has("OBS_VALID_BEG", index)) { - shc.set_obs_valid_beg(timestring_to_unix(hdr_vals[index].c_str())); - } - else { - shc.set_obs_valid_beg(obs_valid_beg); - } - if(hdr_cols.has("OBS_VALID_END", index)) { - shc.set_obs_valid_end(timestring_to_unix(hdr_vals[index].c_str())); - } - else { - shc.set_obs_valid_end(obs_valid_end); - } + shc.set_obs_valid_beg(obs_valid_beg); + shc.set_obs_valid_end(obs_valid_end); // FCST_VAR - shc.set_fcst_var( - get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "FCST_VAR", fcst_var, false)); + shc.set_fcst_var(get_col_css(cur_case, "FCST_VAR", fcst_var, false)); // FCST_UNITS - shc.set_fcst_units( - get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "FCST_UNITS", fcst_units, false)); + shc.set_fcst_units(get_col_css(cur_case, "FCST_UNITS", fcst_units, false)); // FCST_LEV - shc.set_fcst_lev( - get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "FCST_LEV", fcst_lev, false).c_str()); + shc.set_fcst_lev(get_col_css(cur_case, "FCST_LEV", fcst_lev, false).c_str()); // OBS_VAR - shc.set_obs_var( - get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "OBS_VAR", obs_var, false)); + shc.set_obs_var(get_col_css(cur_case, "OBS_VAR", obs_var, false)); // OBS_UNITS - shc.set_obs_units( - 
get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "OBS_UNITS", obs_units, false)); + shc.set_obs_units(get_col_css(cur_case, "OBS_UNITS", obs_units, false)); // OBS_LEV - shc.set_obs_lev( - get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "OBS_LEV", obs_lev, false).c_str()); + shc.set_obs_lev(get_col_css(cur_case, "OBS_LEV", obs_lev, false).c_str()); // OBTYPE - shc.set_obtype( - get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "OBTYPE", obtype, false).c_str()); + shc.set_obtype(get_col_css(cur_case, "OBTYPE", obtype, false).c_str()); // VX_MASK - shc.set_mask( - get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "VX_MASK", vx_mask, false).c_str()); + shc.set_mask(get_col_css(cur_case, "VX_MASK", vx_mask, false).c_str()); // INTERP_MTHD - shc.set_interp_mthd( - get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "INTERP_MTHD", interp_mthd, true)); + shc.set_interp_mthd(get_col_css(cur_case, "INTERP_MTHD", interp_mthd, true)); // INTERP_PNTS css = write_css(interp_pnts); @@ -484,28 +429,21 @@ StatHdrColumns StatHdrInfo::get_shc(const ConcatString &cur_case, else { wdth = nint(sqrt(interp_pnts[0])); } - - if(hdr_cols.has("INTERP_PNTS", index)) { - wdth = nint(sqrt(atof(hdr_vals[index].c_str()))); - } shc.set_interp_wdth(wdth); // FCST_THRESH ta.clear(); - ta.add_css(get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "FCST_THRESH", fcst_thresh, true).c_str()); + ta.add_css(get_col_css(cur_case, "FCST_THRESH", fcst_thresh, true).c_str()); shc.set_fcst_thresh(ta); // OBS_THRESH ta.clear(); - ta.add_css(get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "OBS_THRESH", obs_thresh, true).c_str()); + ta.add_css(get_col_css(cur_case, "OBS_THRESH", obs_thresh, true).c_str()); shc.set_obs_thresh(ta); // COV_THRESH ta.clear(); - ta.add_css(get_shc_str(cur_case, case_cols, case_vals, hdr_cols, hdr_vals, - "COV_THRESH", cov_thresh, true).c_str()); + 
ta.add_css(get_col_css(cur_case, "COV_THRESH", cov_thresh, true).c_str()); shc.set_cov_thresh(ta); // ALPHA @@ -520,34 +458,27 @@ StatHdrColumns StatHdrInfo::get_shc(const ConcatString &cur_case, else { out_alpha = alpha[0]; } - - if(hdr_cols.has("ALPHA", index)) { - out_alpha = atof(hdr_vals[index].c_str()); - } shc.set_alpha(out_alpha); // LINE_TYPE shc.set_line_type(statlinetype_to_string(lt)); + // Apply the -set_hdr options + StringArray case_vals = cur_case.split(":"); + shc.apply_set_hdr_opts(hdr_cols, hdr_vals, case_cols, case_vals); + return shc; } //////////////////////////////////////////////////////////////////////// -ConcatString StatHdrInfo::get_shc_str(const ConcatString &cur_case, - const StringArray &case_cols, - const StringArray &case_vals, - const StringArray &hdr_cols, - const StringArray &hdr_vals, +ConcatString StatHdrInfo::get_col_css(const ConcatString &cur_case, const char *col_name, const StringArray &col_vals, - bool warning) { - ConcatString css, shc_str; - int hdr_index, case_index; - + bool warning) const { // Build comma-separated list of column values - css = write_css(col_vals); + ConcatString css(write_css(col_vals)); // Check for multiple entries if(col_vals.n() > 1) { @@ -559,28 +490,7 @@ ConcatString StatHdrInfo::get_shc_str(const ConcatString &cur_case, else mlog << Debug(2) << msg; } - // Check the header options. - if(hdr_cols.has(col_name, hdr_index)) { - - // Check for the full CASE string. - if(case_str.compare(hdr_vals[hdr_index]) == 0) { - shc_str = cur_case; - } - // Check for one of the case columns. - else if(case_cols.has(hdr_vals[hdr_index], case_index)) { - shc_str = case_vals[case_index]; - } - // Otherwise, use the constant header string. - else { - shc_str = hdr_vals[hdr_index]; - } - } - // Otherwise, use the comma-separated list of values. 
- else { - shc_str = css; - } - - return shc_str; + return css; } //////////////////////////////////////////////////////////////////////// @@ -679,12 +589,15 @@ void aggr_summary_lines(LineDataFile &f, STATAnalysisJob &job, int &n_in, int &n_out) { STATLine line; AggrSummaryInfo aggr; - ConcatString key, cs; - StringArray sa, req_stat, req_lty, req_col; + ConcatString cs; + StringArray sa; + StringArray req_stat; + StringArray req_lty; + StringArray req_col; STATLineType lty; NumArray empty_na; - int i, n_add; - double v, w; + double v; + double w; // // Objects for derived statistics @@ -697,7 +610,7 @@ void aggr_summary_lines(LineDataFile &f, STATAnalysisJob &job, // // Build list of requested line types and column names // - for(i=0; i::iterator it; // @@ -940,13 +856,13 @@ void aggr_ctc_lines(LineDataFile &f, STATAnalysisJob &job, << "line type value of " << statlinetype_to_string(line.type()) << " not currently supported for the aggregation job.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // end switch // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -974,14 +890,7 @@ void aggr_ctc_lines(LineDataFile &f, STATAnalysisJob &job, // Increment counts in the existing map entry // else { - m[key].cts_info.cts.set_fy_oy(m[key].cts_info.cts.fy_oy() + - cur.cts.fy_oy()); - m[key].cts_info.cts.set_fy_on(m[key].cts_info.cts.fy_on() + - cur.cts.fy_on()); - m[key].cts_info.cts.set_fn_oy(m[key].cts_info.cts.fn_oy() + - cur.cts.fn_oy()); - m[key].cts_info.cts.set_fn_on(m[key].cts_info.cts.fn_on() + - cur.cts.fn_on()); + m[key].cts_info.cts += cur.cts; } // @@ -1057,12 +966,13 @@ void aggr_ctc_lines(LineDataFile &f, STATAnalysisJob &job, // // Sort the valid times // - n = it->second.valid_ts.rank_array(n_ties); + int n_ties; + int n = it->second.valid_ts.rank_array(n_ties); if(n_ties > 0 || n != it->second.valid_ts.n()) 
{ mlog << Error << "\naggr_ctc_lines() -> " << "should be no ties in the valid time array.\n\n"; - throw(1); + throw 1; } // @@ -1105,9 +1015,7 @@ void aggr_mctc_lines(LineDataFile &f, STATAnalysisJob &job, STATLine line; AggrMCTCInfo aggr; MCTSInfo cur; - ConcatString key; unixtime ut; - int i, k, n, n_ties; map::iterator it; // @@ -1131,7 +1039,7 @@ void aggr_mctc_lines(LineDataFile &f, STATAnalysisJob &job, << "should only encounter multi-category contingency table count " << "(MCTC) line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -1142,7 +1050,7 @@ void aggr_mctc_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -1173,14 +1081,14 @@ void aggr_mctc_lines(LineDataFile &f, STATAnalysisJob &job, << "lines. Try setting \"-column_eq N_CAT n\", " << m[key].mcts_info.cts.nrows() << " != " << cur.cts.nrows() << "\n\n"; - throw(1); + throw 1; } // // Increment the counts // - for(i=0; isecond.valid_ts.rank_array(n_ties); + int n_ties; + int n = it->second.valid_ts.rank_array(n_ties); if(n_ties > 0 || n != it->second.valid_ts.n()) { mlog << Error << "\naggr_mctc_lines() -> " << "should be no ties in the valid time array.\n\n"; - throw(1); + throw 1; } // @@ -1285,9 +1194,7 @@ void aggr_pct_lines(LineDataFile &f, STATAnalysisJob &job, STATLine line; AggrPCTInfo aggr; PCTInfo cur; - ConcatString key; unixtime ut; - int i, n, oy, on, n_ties; map::iterator it; // @@ -1311,7 +1218,7 @@ void aggr_pct_lines(LineDataFile &f, STATAnalysisJob &job, << "should only encounter probability contingency table (PCT) " << "line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -1322,7 +1229,7 @@ void aggr_pct_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = 
job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -1342,45 +1249,8 @@ void aggr_pct_lines(LineDataFile &f, STATAnalysisJob &job, // Increment counts in the existing map entry // else { - - // - // The size of the contingency table must remain the same - // - if(m[key].pct_info.pct.nrows() != cur.pct.nrows()) { - mlog << Error << "\naggr_pct_lines() -> " - << "when aggregating PCT lines the number of " - << "thresholds must remain the same for all lines, " - << m[key].pct_info.pct.nrows() << " != " - << cur.pct.nrows() << "\n\n"; - throw(1); - } - - // - // Increment the counts - // - for(i=0; i " - << "when aggregating PCT lines the threshold " - << "values must remain the same for all lines, " - << m[key].pct_info.pct.threshold(i) << " != " - << cur.pct.threshold(i) << "\n\n"; - throw(1); - } - - oy = m[key].pct_info.pct.event_count_by_row(i); - on = m[key].pct_info.pct.nonevent_count_by_row(i); - - m[key].pct_info.pct.set_entry(i, nx2_event_column, - oy + cur.pct.event_count_by_row(i)); - m[key].pct_info.pct.set_entry(i, nx2_nonevent_column, - on + cur.pct.nonevent_count_by_row(i)); - } // end for i - } // end else + m[key].pct_info.pct += cur.pct; + } // // Keep track of scores for each time step for VIF @@ -1448,12 +1318,13 @@ void aggr_pct_lines(LineDataFile &f, STATAnalysisJob &job, // // Sort the valid times // - n = it->second.valid_ts.rank_array(n_ties); + int n_ties; + int n = it->second.valid_ts.rank_array(n_ties); if(n_ties > 0 || n != it->second.valid_ts.n()) { mlog << Error << "\naggr_pct_lines() -> " << "should be no ties in the valid time array.\n\n"; - throw(1); + throw 1; } // @@ -1485,9 +1356,7 @@ void aggr_psum_lines(LineDataFile &f, STATAnalysisJob &job, VL1L2Info cur_vl1l2; NBRCNTInfo cur_nbrcnt; CNTInfo cur_cnt; - ConcatString key; unixtime ut; - int n, n_ties; map::iterator it; // @@ -1547,13 +1416,13 @@ void aggr_psum_lines(LineDataFile &f, STATAnalysisJob &job, mlog << Error << 
"\naggr_psum_lines() -> " << "should only encounter partial sum line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // end switch // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -1605,7 +1474,7 @@ void aggr_psum_lines(LineDataFile &f, STATAnalysisJob &job, // // Compute the stats for the current time // - compute_cntinfo(cur_sl1l2, 0, cur_cnt); + compute_cntinfo(cur_sl1l2, cur_cnt); // // Append the stats @@ -1649,12 +1518,13 @@ void aggr_psum_lines(LineDataFile &f, STATAnalysisJob &job, // // Sort the valid times // - n = it->second.valid_ts.rank_array(n_ties); + int n_ties; + int n = it->second.valid_ts.rank_array(n_ties); if(n_ties > 0 || n != it->second.valid_ts.n()) { mlog << Error << "\naggr_psum_lines() -> " << "should be no ties in the valid time array.\n\n"; - throw(1); + throw 1; } // @@ -1685,7 +1555,6 @@ void aggr_grad_lines(LineDataFile &f, STATAnalysisJob &job, STATLine line; AggrGRADInfo aggr; GRADInfo cur; - ConcatString key; map::iterator it; // @@ -1705,7 +1574,7 @@ void aggr_grad_lines(LineDataFile &f, STATAnalysisJob &job, mlog << Error << "\naggr_grad_lines() -> " << "should only encounter gradient (GRAD) line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -1716,7 +1585,7 @@ void aggr_grad_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -1745,7 +1614,7 @@ void aggr_grad_lines(LineDataFile &f, STATAnalysisJob &job, << " != " << cur.dx << " and " << cur.dy << "). 
Try setting \"-column_eq DX n -column_eq DY n\"" << " or \"-by DX,DY\".\n\n"; - throw(1); + throw 1; } // @@ -1775,8 +1644,10 @@ void aggr_wind_lines(LineDataFile &f, STATAnalysisJob &job, STATLine line; AggrWindInfo aggr; VL1L2Info cur; - ConcatString key; - double uf, vf, uo, vo; + double uf; + double vf; + double uo; + double vo; // // Process the STAT lines @@ -1825,13 +1696,13 @@ void aggr_wind_lines(LineDataFile &f, STATAnalysisJob &job, mlog << Error << "\naggr_wind_lines() -> " << "should only encounter vector partial sum line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // end switch // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -1856,10 +1727,9 @@ void aggr_wind_lines(LineDataFile &f, STATAnalysisJob &job, // // Append the unit vectors with no climatological values // - m[key].pd_u.add_grid_pair(uf, uo, bad_data_double, - bad_data_double, default_grid_weight); - m[key].pd_v.add_grid_pair(vf, vo, bad_data_double, - bad_data_double, default_grid_weight); + ClimoPntInfo cpi; + m[key].pd_u.add_grid_pair(uf, uo, cpi, default_weight); + m[key].pd_v.add_grid_pair(vf, vo, cpi, default_weight); // // Keep track of the unique header column entries @@ -1882,12 +1752,7 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, AggrWindInfo aggr; VL1L2Info v_info; MPRData cur; - ConcatString hdr, key; - double uf, uo, ucmn, ucsd; - double vf, vo, vcmn, vcsd; - double fcst_wind, obs_wind, cmn_wind, csd_wind; - bool is_ugrd; - int i; + ConcatString hdr; map::iterator it; // @@ -1904,15 +1769,19 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, job.dump_stat_line(line); parse_mpr_line(line, cur); - is_ugrd = (cur.fcst_var == ugrd_abbr_str); - uf = (is_ugrd ? cur.fcst : bad_data_double); - uo = (is_ugrd ? cur.obs : bad_data_double); - ucmn = (is_ugrd ? 
cur.climo_mean : bad_data_double); - ucsd = (is_ugrd ? cur.climo_stdev : bad_data_double); - vf = (is_ugrd ? bad_data_double : cur.fcst); - vo = (is_ugrd ? bad_data_double : cur.obs); - vcmn = (is_ugrd ? bad_data_double : cur.climo_mean); - vcsd = (is_ugrd ? bad_data_double : cur.climo_stdev); + bool is_ugrd = (cur.fcst_var == ugrd_abbr_str); + double uf = (is_ugrd ? cur.fcst : bad_data_double); + double uo = (is_ugrd ? cur.obs : bad_data_double); + double ufcmn = (is_ugrd ? cur.fcst_climo_mean : bad_data_double); + double ufcsd = (is_ugrd ? cur.fcst_climo_stdev : bad_data_double); + double uocmn = (is_ugrd ? cur.obs_climo_mean : bad_data_double); + double uocsd = (is_ugrd ? cur.obs_climo_stdev : bad_data_double); + double vf = (is_ugrd ? bad_data_double : cur.fcst); + double vo = (is_ugrd ? bad_data_double : cur.obs); + double vfcmn = (is_ugrd ? bad_data_double : cur.fcst_climo_mean); + double vfcsd = (is_ugrd ? bad_data_double : cur.fcst_climo_stdev); + double vocmn = (is_ugrd ? bad_data_double : cur.obs_climo_mean); + double vocsd = (is_ugrd ? 
bad_data_double : cur.obs_climo_stdev); // // Build header string for matching UGRD and VGRD lines @@ -1941,7 +1810,7 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -1966,10 +1835,10 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, // Initialize values // aggr.hdr_sa.add(hdr); - aggr.pd_u.add_grid_pair(uf, uo, ucmn, ucsd, - default_grid_weight); - aggr.pd_v.add_grid_pair(vf, vo, vcmn, vcsd, - default_grid_weight); + ClimoPntInfo u_cpi(ufcmn, ufcsd, uocmn, uocsd); + ClimoPntInfo v_cpi(vfcmn, vfcsd, vocmn, vocsd); + aggr.pd_u.add_grid_pair(uf, uo, u_cpi, default_weight); + aggr.pd_v.add_grid_pair(vf, vo, v_cpi, default_weight); // // Add the new map entry @@ -1988,6 +1857,7 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, // // Add data for existing header entry // + int i; if(m[key].hdr_sa.has(hdr, i)) { // @@ -2013,24 +1883,28 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, // // Update the existing values // - if(!is_bad_data(uf)) m[key].pd_u.f_na.set(i, uf); - if(!is_bad_data(uo)) m[key].pd_u.o_na.set(i, uo); - if(!is_bad_data(ucmn)) m[key].pd_u.cmn_na.set(i, ucmn); - if(!is_bad_data(ucsd)) m[key].pd_u.csd_na.set(i, ucsd); - if(!is_bad_data(vf)) m[key].pd_v.f_na.set(i, vf); - if(!is_bad_data(vo)) m[key].pd_v.o_na.set(i, vo); - if(!is_bad_data(vcmn)) m[key].pd_v.cmn_na.set(i, vcmn); - if(!is_bad_data(vcsd)) m[key].pd_v.csd_na.set(i, vcsd); + if(!is_bad_data(uf)) m[key].pd_u.f_na.set(i, uf); + if(!is_bad_data(uo)) m[key].pd_u.o_na.set(i, uo); + if(!is_bad_data(ufcmn)) m[key].pd_u.fcmn_na.set(i, ufcmn); + if(!is_bad_data(ufcsd)) m[key].pd_u.fcsd_na.set(i, ufcsd); + if(!is_bad_data(uocmn)) m[key].pd_u.ocmn_na.set(i, uocmn); + if(!is_bad_data(uocsd)) m[key].pd_u.ocsd_na.set(i, uocsd); + if(!is_bad_data(vf)) 
m[key].pd_v.f_na.set(i, vf); + if(!is_bad_data(vo)) m[key].pd_v.o_na.set(i, vo); + if(!is_bad_data(vfcmn)) m[key].pd_v.fcmn_na.set(i, vfcmn); + if(!is_bad_data(vfcsd)) m[key].pd_v.fcsd_na.set(i, vfcsd); + if(!is_bad_data(vocmn)) m[key].pd_v.ocmn_na.set(i, vocmn); + if(!is_bad_data(vocsd)) m[key].pd_v.ocsd_na.set(i, vocsd); } // // Add data for a new header entry // else { m[key].hdr_sa.add(hdr); - m[key].pd_u.add_grid_pair(uf, uo, ucmn, ucsd, - default_grid_weight); - m[key].pd_v.add_grid_pair(vf, vo, vcmn, vcsd, - default_grid_weight); + ClimoPntInfo u_cpi(ufcmn, ufcsd, uocmn, uocsd); + ClimoPntInfo v_cpi(vfcmn, vfcsd, vocmn, vocsd); + m[key].pd_u.add_grid_pair(uf, uo, u_cpi, default_weight); + m[key].pd_v.add_grid_pair(vf, vo, v_cpi, default_weight); } } @@ -2066,7 +1940,7 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, // // Loop over the pairs for the current map entry // - for(i=0; isecond.hdr_sa.n(); i++) { + for(int i=0; isecond.hdr_sa.n(); i++) { // // Check for missing UGRD data @@ -2097,18 +1971,25 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, job.out_obs_wind_thresh.get_type() != thresh_na) { // Compute wind speeds - fcst_wind = convert_u_v_to_wind(it->second.pd_u.f_na[i], - it->second.pd_v.f_na[i]); - obs_wind = convert_u_v_to_wind(it->second.pd_u.o_na[i], - it->second.pd_v.o_na[i]); - cmn_wind = convert_u_v_to_wind(it->second.pd_u.cmn_na[i], - it->second.pd_v.cmn_na[i]); - csd_wind = convert_u_v_to_wind(it->second.pd_u.csd_na[i], - it->second.pd_v.csd_na[i]); + double fcst_wind = convert_u_v_to_wind(it->second.pd_u.f_na[i], + it->second.pd_v.f_na[i]); + double obs_wind = convert_u_v_to_wind(it->second.pd_u.o_na[i], + it->second.pd_v.o_na[i]); + double fcmn_wind = convert_u_v_to_wind(it->second.pd_u.fcmn_na[i], + it->second.pd_v.fcmn_na[i]); + double fcsd_wind = convert_u_v_to_wind(it->second.pd_u.fcsd_na[i], + it->second.pd_v.fcsd_na[i]); + double ocmn_wind = convert_u_v_to_wind(it->second.pd_u.ocmn_na[i], + 
it->second.pd_v.ocmn_na[i]); + double ocsd_wind = convert_u_v_to_wind(it->second.pd_u.ocsd_na[i], + it->second.pd_v.ocsd_na[i]); + + // Store climo data + ClimoPntInfo cpi(fcmn_wind, fcsd_wind, ocmn_wind, ocsd_wind); // No climo mean and standard deviation in the input VL1L2 lines, // so just fill with bad data. - if(!check_fo_thresh(fcst_wind, obs_wind, cmn_wind, csd_wind, + if(!check_fo_thresh(fcst_wind, obs_wind, cpi, job.out_fcst_wind_thresh, job.out_obs_wind_thresh, job.out_wind_logic)) { mlog << Debug(4) << "aggr_mpr_wind_lines() -> " @@ -2150,47 +2031,30 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, it->second.pd_u.f_na[i], it->second.pd_v.f_na[i], it->second.pd_u.o_na[i], it->second.pd_v.o_na[i]); - if(is_bad_data(d_diff)) { - v_info.n_dir_undef = 1; - } - else { - v_info.n_dir_undef = 0; - v_info.dir_bar = d_diff; - v_info.absdir_bar = abs(d_diff); - v_info.dir2_bar = d_diff*d_diff; + if(!is_bad_data(d_diff)) { + v_info.dcount = 1; + v_info.dir_bar = d_diff; + v_info.absdir_bar = abs(d_diff); + v_info.dir2_bar = d_diff*d_diff; } aggr.vl1l2_info += v_info; - // - // Check for vectors of length zero - // - if((is_eq(it->second.pd_u.f_na[i], 0.0) && - is_eq(it->second.pd_v.f_na[i], 0.0)) || - (is_eq(it->second.pd_u.o_na[i], 0.0) && - is_eq(it->second.pd_v.o_na[i], 0.0))) { - mlog << Debug(4) << "aggr_mpr_wind_lines() -> " - << "angle not defined for zero forecast (" - << it->second.pd_u.f_na[i] << ", " << it->second.pd_v.f_na[i] - << ") or observation (" - << it->second.pd_u.o_na[i] << ", " << it->second.pd_v.o_na[i] - << ") vector for header:\n" - << it->second.hdr_sa[i] << "\n"; - continue; - } - // // Convert to and append unit vectors // + ClimoPntInfo cpi; aggr.hdr_sa.add(it->second.hdr_sa[i]); + double uf; + double vf; convert_u_v_to_unit(it->second.pd_u.f_na[i], it->second.pd_v.f_na[i], uf, vf); + double uo; + double vo; convert_u_v_to_unit(it->second.pd_u.o_na[i], it->second.pd_v.o_na[i], uo, vo); - aggr.pd_u.add_grid_pair(uf, uo, 
bad_data_double, - bad_data_double, default_grid_weight); - aggr.pd_v.add_grid_pair(vf, vo, bad_data_double, - bad_data_double, default_grid_weight); + aggr.pd_u.add_grid_pair(uf, uo, cpi, default_weight); + aggr.pd_v.add_grid_pair(vf, vo, cpi, default_weight); } // @@ -2210,7 +2074,6 @@ void aggr_mpr_lines(LineDataFile &f, STATAnalysisJob &job, STATLine line; AggrMPRInfo aggr; MPRData cur; - ConcatString key; // // Process the STAT lines @@ -2232,7 +2095,7 @@ void aggr_mpr_lines(LineDataFile &f, STATAnalysisJob &job, mlog << Error << "\naggr_mpr_lines() -> " << "should only encounter matched pair (MPR) line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -2248,7 +2111,7 @@ void aggr_mpr_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -2257,18 +2120,22 @@ void aggr_mpr_lines(LineDataFile &f, STATAnalysisJob &job, aggr.pd.f_na.clear(); aggr.pd.o_na.clear(); - aggr.pd.cmn_na.clear(); - aggr.pd.csd_na.clear(); - aggr.pd.cdf_na.clear(); + aggr.pd.fcmn_na.clear(); + aggr.pd.fcsd_na.clear(); + aggr.pd.ocmn_na.clear(); + aggr.pd.ocsd_na.clear(); + aggr.pd.ocdf_na.clear(); aggr.pd.wgt_na.clear(); aggr.pd.n_obs = 1; aggr.pd.f_na.add(cur.fcst); aggr.pd.o_na.add(cur.obs); - aggr.pd.cmn_na.add(cur.climo_mean); - aggr.pd.csd_na.add(cur.climo_stdev); - aggr.pd.cdf_na.add(cur.climo_cdf); - aggr.pd.wgt_na.add(default_grid_weight); + aggr.pd.fcmn_na.add(cur.fcst_climo_mean); + aggr.pd.fcsd_na.add(cur.fcst_climo_stdev); + aggr.pd.ocmn_na.add(cur.obs_climo_mean); + aggr.pd.ocsd_na.add(cur.obs_climo_stdev); + aggr.pd.ocdf_na.add(cur.obs_climo_cdf); + aggr.pd.wgt_na.add(default_weight); aggr.fcst_var = cur.fcst_var; aggr.obs_var = cur.obs_var; @@ -2294,10 +2161,12 @@ void aggr_mpr_lines(LineDataFile &f, STATAnalysisJob &job, m[key].pd.n_obs++; m[key].pd.f_na.add(cur.fcst); 
m[key].pd.o_na.add(cur.obs); - m[key].pd.cmn_na.add(cur.climo_mean); - m[key].pd.csd_na.add(cur.climo_stdev); - m[key].pd.cdf_na.add(cur.climo_cdf); - m[key].pd.wgt_na.add(default_grid_weight); + m[key].pd.fcmn_na.add(cur.fcst_climo_mean); + m[key].pd.fcsd_na.add(cur.fcst_climo_stdev); + m[key].pd.ocmn_na.add(cur.obs_climo_mean); + m[key].pd.ocsd_na.add(cur.obs_climo_stdev); + m[key].pd.ocdf_na.add(cur.obs_climo_cdf); + m[key].pd.wgt_na.add(default_weight); // // Only aggregate consistent variable names @@ -2309,7 +2178,7 @@ void aggr_mpr_lines(LineDataFile &f, STATAnalysisJob &job, << "remain constant. Try setting \"-fcst_var\" and/or " << "\"-obs_var\".\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } } @@ -2333,9 +2202,8 @@ void aggr_isc_lines(LineDataFile &ldf, STATAnalysisJob &job, STATLine line; AggrISCInfo aggr; ISCInfo cur; - ConcatString key; - int i, k, iscale; - double total, w, den, baser_fbias_sum; + int iscale; + double den; map::iterator it; // @@ -2356,7 +2224,7 @@ void aggr_isc_lines(LineDataFile &ldf, STATAnalysisJob &job, << "should only encounter intensity-scale " << "(ISC) line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -2374,7 +2242,7 @@ void aggr_isc_lines(LineDataFile &ldf, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -2431,7 +2299,7 @@ void aggr_isc_lines(LineDataFile &ldf, STATAnalysisJob &job, << "filter out only those lines you'd like " << "to aggregate.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -2483,20 +2351,20 @@ void aggr_isc_lines(LineDataFile &ldf, STATAnalysisJob &job, // Get the sum of the totals, compute the weight, and sum the // weighted scores // - for(i=0; isecond.isc_info.n_scale+2; i++) { + for(int i=0; isecond.isc_info.n_scale+2; i++) { // Total 
number of points for this scale - total = it->second.total_na[i].sum(); + double total = it->second.total_na[i].sum(); // Initialize - baser_fbias_sum = 0.0; + double baser_fbias_sum = 0.0; // Loop through all scores for this scale - for(k=0; ksecond.total_na[i].n(); k++) { + for(int k=0; ksecond.total_na[i].n(); k++) { // Compute the weight for each score to be aggregated // based on the number of points it represents - w = it->second.total_na[i][k]/total; + double w = it->second.total_na[i][k]/total; // Sum scores for the binary fields if(i == 0) { @@ -2592,8 +2460,7 @@ void aggr_ecnt_lines(LineDataFile &f, STATAnalysisJob &job, STATLine line; AggrENSInfo aggr; ECNTData cur; - ConcatString key; - double crps_emp, crps_emp_fair, spread_md, crpscl_emp, crps_gaus, crpscl_gaus, v; + double v; map::iterator it; // @@ -2614,7 +2481,7 @@ void aggr_ecnt_lines(LineDataFile &f, STATAnalysisJob &job, << "should only encounter ensemble continuous statistics " << "(ECNT) line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -2625,7 +2492,7 @@ void aggr_ecnt_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -2675,14 +2542,14 @@ void aggr_ecnt_lines(LineDataFile &f, STATAnalysisJob &job, // m[key].me_na.add(cur.me); m[key].mae_na.add(cur.mae); - m[key].mse_na.add((is_bad_data(cur.rmse) ? - bad_data_double : - cur.rmse * cur.rmse)); + m[key].mse_na.add(is_bad_data(cur.rmse) ? + bad_data_double : + cur.rmse * cur.rmse); m[key].me_oerr_na.add(cur.me_oerr); m[key].mae_oerr_na.add(cur.mae_oerr); - m[key].mse_oerr_na.add((is_bad_data(cur.rmse_oerr) ? - bad_data_double : - cur.rmse_oerr * cur.rmse_oerr)); + m[key].mse_oerr_na.add(is_bad_data(cur.rmse_oerr) ? 
+ bad_data_double : + cur.rmse_oerr * cur.rmse_oerr); // // Keep track of the unique header column entries @@ -2711,12 +2578,10 @@ void aggr_ecnt_lines(LineDataFile &f, STATAnalysisJob &job, v = it->second.mse_oerr_na.wmean(it->second.ens_pd.wgt_na); it->second.ens_pd.rmse_oerr = (is_bad_data(v) ? bad_data_double : sqrt(v)); - crps_emp = it->second.ens_pd.crps_emp_na.wmean(it->second.ens_pd.wgt_na); - crps_emp_fair = it->second.ens_pd.crps_emp_fair_na.wmean(it->second.ens_pd.wgt_na); - spread_md = it->second.ens_pd.spread_md_na.wmean(it->second.ens_pd.wgt_na); - crpscl_emp = it->second.ens_pd.crpscl_emp_na.wmean(it->second.ens_pd.wgt_na); - crps_gaus = it->second.ens_pd.crps_gaus_na.wmean(it->second.ens_pd.wgt_na); - crpscl_gaus = it->second.ens_pd.crpscl_gaus_na.wmean(it->second.ens_pd.wgt_na); + double crps_emp = it->second.ens_pd.crps_emp_na.wmean(it->second.ens_pd.wgt_na); + double crpscl_emp = it->second.ens_pd.crpscl_emp_na.wmean(it->second.ens_pd.wgt_na); + double crps_gaus = it->second.ens_pd.crps_gaus_na.wmean(it->second.ens_pd.wgt_na); + double crpscl_gaus = it->second.ens_pd.crpscl_gaus_na.wmean(it->second.ens_pd.wgt_na); // Compute aggregated empirical CRPSS it->second.ens_pd.crpss_emp = @@ -2741,7 +2606,6 @@ void aggr_rps_lines(LineDataFile &f, STATAnalysisJob &job, STATLine line; AggrRPSInfo aggr; RPSInfo cur; - ConcatString key; map::iterator it; // @@ -2765,7 +2629,7 @@ void aggr_rps_lines(LineDataFile &f, STATAnalysisJob &job, << "should only encounter ranked probability score " << "(RPS) line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -2776,7 +2640,7 @@ void aggr_rps_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -2805,7 +2669,7 @@ void aggr_rps_lines(LineDataFile &f, STATAnalysisJob &job, << "the \"N_PROB\" column must remain 
constant (" << m[key].rps_info.n_prob << " != " << cur.n_prob << "). Try setting \"-column_eq N_PROB n\".\n\n"; - throw(1); + throw 1; } // @@ -2835,8 +2699,6 @@ void aggr_rhist_lines(LineDataFile &f, STATAnalysisJob &job, STATLine line; AggrENSInfo aggr; RHISTData cur; - ConcatString key; - int i; map::iterator it; // @@ -2857,7 +2719,7 @@ void aggr_rhist_lines(LineDataFile &f, STATAnalysisJob &job, << "should only encounter ranked histogram " << "(RHIST) line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -2868,14 +2730,14 @@ void aggr_rhist_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary // if(m.count(key) == 0) { aggr.clear(); - for(i=0; i::iterator it; // @@ -2947,7 +2807,7 @@ void aggr_phist_lines(LineDataFile &f, STATAnalysisJob &job, << "should only encounter probability integral " << "transform histogram (PHIST) line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -2958,7 +2818,7 @@ void aggr_phist_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -2985,13 +2845,13 @@ void aggr_phist_lines(LineDataFile &f, STATAnalysisJob &job, << "the \"BIN_SIZE\" column must remain constant (" << m[key].ens_pd.phist_bin_size << " != " << cur.bin_size << "). 
Try setting \"-column_eq BIN_SIZE n\".\n\n"; - throw(1); + throw 1; } // // Aggregate the probability integral transform histogram counts // - for(i=0; i::iterator it; // @@ -3038,7 +2896,7 @@ void aggr_relp_lines(LineDataFile &f, STATAnalysisJob &job, << "should only encounter relative position (RELP) " << "line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -3049,7 +2907,7 @@ void aggr_relp_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -3075,13 +2933,13 @@ void aggr_relp_lines(LineDataFile &f, STATAnalysisJob &job, << "the \"N_ENS\" column must remain constant (" << m[key].ens_pd.relp_na.n() << " != " << cur.n_ens << "). Try setting \"-column_eq N_ENS n\".\n\n"; - throw(1); + throw 1; } // // Aggregate the RELP histogram counts // - for(i=0; i::iterator it; // @@ -3130,7 +2985,7 @@ void aggr_orank_lines(LineDataFile &f, STATAnalysisJob &job, << "should only encounter observation rank " << "(ORANK) line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -3141,7 +2996,7 @@ void aggr_orank_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Skip missing data @@ -3158,10 +3013,10 @@ void aggr_orank_lines(LineDataFile &f, STATAnalysisJob &job, aggr.ens_pd.obs_error_flag = !is_bad_data(cur.ens_mean_oerr); aggr.ens_pd.set_ens_size(cur.n_ens); aggr.ens_pd.extend(cur.total); - for(i=0; i " << "the \"N_ENS\" column must remain constant. 
" << "Try setting \"-column_eq N_ENS n\".\n\n"; - throw(1); + throw 1; } // @@ -3192,8 +3047,9 @@ void aggr_orank_lines(LineDataFile &f, STATAnalysisJob &job, // ensemble spread, ensemble member values, and // valid ensemble count // - m[key].ens_pd.add_grid_obs(cur.obs, cur.climo_mean, - cur.climo_stdev, default_grid_weight); + ClimoPntInfo cpi(cur.fcst_climo_mean, cur.fcst_climo_stdev, + cur.obs_climo_mean, cur.obs_climo_stdev); + m[key].ens_pd.add_grid_obs(cur.obs, cpi, default_weight); m[key].ens_pd.skip_ba.add(false); m[key].ens_pd.n_pair++; m[key].ens_pd.r_na.add(cur.rank); @@ -3203,8 +3059,10 @@ void aggr_orank_lines(LineDataFile &f, STATAnalysisJob &job, m[key].ens_pd.mn_na.add(cur.ens_mean); m[key].ens_pd.mn_oerr_na.add(cur.ens_mean_oerr); - for(i=0, n_valid=0, esum=0.0, esumsq=0.0; - i " << "should only encounter SEEPS line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -3439,7 +3302,7 @@ void aggr_seeps_lines(LineDataFile &f, STATAnalysisJob &job, << "remain constant. 
Try setting \"-fcst_var\" and/or " << "\"-obs_var\".\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -3450,7 +3313,7 @@ void aggr_seeps_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -3488,7 +3351,8 @@ void aggr_seeps_mpr_lines(LineDataFile &f, STATAnalysisJob &job, STATLine line; AggrSEEPSMPRInfo aggr; SEEPSMPRData cur; - ConcatString key, fcst_var, obs_var; + ConcatString fcst_var; + ConcatString obs_var; // // Process the STAT lines @@ -3507,7 +3371,7 @@ void aggr_seeps_mpr_lines(LineDataFile &f, STATAnalysisJob &job, mlog << Error << "\naggr_seeps_mpr_lines() -> " << "should only encounter SEEPS_MPR line types.\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -3523,7 +3387,7 @@ void aggr_seeps_mpr_lines(LineDataFile &f, STATAnalysisJob &job, << "remain constant. 
Try setting \"-fcst_var\" and/or " << "\"-obs_var\".\n" << "ERROR occurred on STAT line:\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -3534,7 +3398,7 @@ void aggr_seeps_mpr_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -3581,7 +3445,6 @@ void aggr_time_series_lines(LineDataFile &f, STATAnalysisJob &job, int &n_in, int &n_out) { STATLine line; AggrTimeSeriesInfo cur; - ConcatString key; int lead_sec; unixtime init_ut, valid_ut; @@ -3601,7 +3464,7 @@ void aggr_time_series_lines(LineDataFile &f, STATAnalysisJob &job, // // Build the map key for the current line // - key = job.get_case_info(line); + ConcatString key(job.get_case_info(line)); // // Add a new map entry, if necessary @@ -3626,7 +3489,7 @@ void aggr_time_series_lines(LineDataFile &f, STATAnalysisJob &job, << "remain constant for case \"" << key << "\". Try setting \"-fcst_var\" and/or \"-obs_var\".\n" << line << "\n\n"; - throw(1); + throw 1; } // @@ -3686,8 +3549,6 @@ void aggr_ss_index(LineDataFile &f, STATAnalysisJob &job, int &n_in, int &n_out) { STATLine line; AggrSSIndexInfo cur; - ConcatString key; - int i, n_term; // // Store the index name and valid data threshold @@ -3704,7 +3565,7 @@ void aggr_ss_index(LineDataFile &f, STATAnalysisJob &job, << "this job may only be called when the \"-model\" option " << "has been used exactly twice to specify the forecast " << "model followed by the reference model.\n\n"; - throw(1); + throw 1; } else { cur.job_info.fcst_model = job.model[0]; @@ -3719,6 +3580,7 @@ void aggr_ss_index(LineDataFile &f, STATAnalysisJob &job, // // Compute the number of terms as the maximum array length // + int n_term; n_term = max( 0, job.fcst_var.n()); n_term = max(n_term, job.fcst_lev.n()); n_term = max(n_term, job.fcst_lead.n()); @@ -3734,7 +3596,7 @@ void aggr_ss_index(LineDataFile &f, STATAnalysisJob 
&job, << "you must define the skill score index to be computed " << "using the \"-fcst_var\", \"-fcst_lev\", \"-fcst_lead\", " << "\"-line_type\", \"-column\", and \"-weight\" options.\n\n"; - throw(1); + throw 1; } // @@ -3750,13 +3612,13 @@ void aggr_ss_index(LineDataFile &f, STATAnalysisJob &job, << n_term << ") or have length 1!\n" << "Check the \"-fcst_var\", \"-fcst_lev\", \"-fcst_lead\", " << "\"-line_type\", \"-column\", and \"-weight\" options.\n\n"; - throw(1); + throw 1; } // // Create a job for each term // - for(i=0; i thresh(n); - for(i=0; i " diff --git a/src/tools/core/stat_analysis/stat_analysis.cc b/src/tools/core/stat_analysis/stat_analysis.cc index d957137d28..a4a803a9b0 100644 --- a/src/tools/core/stat_analysis/stat_analysis.cc +++ b/src/tools/core/stat_analysis/stat_analysis.cc @@ -208,7 +208,7 @@ int met_main(int argc, char * argv []) { if(jobs_sa.n() == 0) { mlog << Error << "\nmain() -> " << "no jobs defined in \"" << config_file << "\"!\n\n"; - throw(1); + throw 1; } for(i=0; i " << "at least one job must be specified on the command line " << "with \"-job\" or in a configuration file with \"-config\"!\n\n"; - throw(1); + throw 1; } } catch(int j) { // Catch errors @@ -464,7 +464,7 @@ void process_search_dirs() { mlog << Error << "\nprocess_search_dirs() -> " << "no STAT files found in the directories specified!\n\n"; - throw(1); + throw 1; } // @@ -556,7 +556,7 @@ void process_stat_file(const char *filename, const STATAnalysisJob &job, int &n_ << "unable to open input stat file \"" << filename << "\"\n\n"; - throw(1); + throw 1; } @@ -606,7 +606,7 @@ if ( ! 
pldf->open(user_script_path.c_str(), user_script_args) ) { << "unable to open user script file \"" << user_script_path << "\"\n\n"; - throw(1); + throw 1; } @@ -887,7 +887,7 @@ void open_temps() << "can't open the temporary file \"" << tmp_path << "\" for writing!\n\n"; - throw(1); + throw 1; } diff --git a/src/tools/core/stat_analysis/stat_analysis_job.cc b/src/tools/core/stat_analysis/stat_analysis_job.cc index 5c22c39d5a..b3a9eb12cb 100644 --- a/src/tools/core/stat_analysis/stat_analysis_job.cc +++ b/src/tools/core/stat_analysis/stat_analysis_job.cc @@ -132,7 +132,7 @@ void set_job_from_config(MetConfig &c, STATAnalysisJob &job) { mlog << Error << "\nset_job_from_config() -> " << "\"" << conf_key_ss_index_vld_thresh << "\" (" << job.ss_index_vld_thresh << ") must be set between 0 and 1.\n\n"; - throw(1); + throw 1; } job.hss_ec_value = c.lookup_double(conf_key_hss_ec_value); @@ -167,7 +167,7 @@ void do_job(const ConcatString &jobstring, STATAnalysisJob &job, mlog << Error << "\ndo_job() -> " << "can't open the temporary file \"" << tmp_path << "\" for reading!\n\n"; - throw(1); + throw 1; } // @@ -255,7 +255,7 @@ void do_job(const ConcatString &jobstring, STATAnalysisJob &job, default: mlog << Error << "\ndo_job() -> " << "Invalid -job type requested!\n\n"; - throw(1); + throw 1; } mlog << Debug(2) << "Job " << n_job << " used " << n_out << " out of " @@ -305,7 +305,7 @@ void do_job_filter(const ConcatString &jobstring, LineDataFile &f, << "this function may only be called when using the " << "-dump_row option in the job command line: " << jobstring << "\n\n"; - throw(1); + throw 1; } mlog << Debug(3) << "Filter Test jobstring:\n" << jobstring << "\n"; @@ -362,7 +362,7 @@ void do_job_summary(const ConcatString &jobstring, LineDataFile &f, mlog << Error << "\ndo_job_summary() -> " << "the \"-column\" option must be used at least once: " << jobstring << "\n\n"; - throw(1); + throw 1; } // @@ -433,7 +433,7 @@ void do_job_aggr(const ConcatString &jobstring, 
LineDataFile &f, << "this function may only be called when the \"-line_type\" " << "option has been used exactly once to specify the line " << "type for aggregation: " << jobstring << "\n\n"; - throw(1); + throw 1; } // @@ -460,7 +460,7 @@ void do_job_aggr(const ConcatString &jobstring, LineDataFile &f, << "\tSL1L2, SAL1L2, VL1L2, VAL1L2,\n" << "\tPCT, NBRCTC, NBRCNT, GRAD, ISC,\n" << "\tECNT, RPS, RHIST, PHIST, RELP, SSVAR, SEEPS\n\n"; - throw(1); + throw 1; } // @@ -640,7 +640,7 @@ void do_job_aggr_stat(const ConcatString &jobstring, LineDataFile &f, << "the \"-line_type\" option must be used exactly once and " << "the \"-out_line_type\" option must be used at least once: " << jobstring << "\n\n"; - throw(1); + throw 1; } // @@ -813,7 +813,7 @@ void do_job_aggr_stat(const ConcatString &jobstring, LineDataFile &f, << "when \"-out_line_type\" is set to RPS, the " << "\"-out_fcst_thresh\" option must be used to specify " << "monotonically increasing thresholds of interet.\n\n"; - throw(1); + throw 1; } } @@ -872,7 +872,7 @@ void do_job_aggr_stat(const ConcatString &jobstring, LineDataFile &f, << "or SAL1L2, the \"-out_fcst_thresh\" and " << "\"-out_obs_thresh\" options must specify the " << "same number of thresholds.\n\n"; - throw(1); + throw 1; } // Store a single NA threshold @@ -899,7 +899,7 @@ void do_job_aggr_stat(const ConcatString &jobstring, LineDataFile &f, << "\"-out_fcst_thresh\" and \"-out_obs_thresh\" " << "options must specify the same number of thresholds " << "and at least one.\n\n"; - throw(1); + throw 1; } } @@ -916,7 +916,7 @@ void do_job_aggr_stat(const ConcatString &jobstring, LineDataFile &f, << "the \"-out_thresh\" option or \"-out_fcst_thresh\" and " << "\"-out_obs_thresh\" options must specify " << "the same number of thresholds and at least two.\n\n"; - throw(1); + throw 1; } for(i=0; i " << "unsupported output line type \"" << statlinetype_to_string(lt) << "\" requested.\n\n"; - throw(1); + throw 1; } } // end for it @@ -1554,7 +1554,7 @@ 
void write_job_aggr_mctc(STATAnalysisJob &job, STATLineType lt, mlog << Error << "\nwrite_job_aggr_mctc() -> " << "unsupported output line type \"" << statlinetype_to_string(lt) << "\" requested.\n\n"; - throw(1); + throw 1; } } // end for it @@ -1732,7 +1732,7 @@ void write_job_aggr_pct(STATAnalysisJob &job, STATLineType lt, mlog << Error << "\nwrite_job_aggr_pct() -> " << "unsupported output line type \"" << statlinetype_to_string(lt) << "\" requested.\n\n"; - throw(1); + throw 1; } } // end for it @@ -1876,10 +1876,7 @@ void write_job_aggr_psum(STATAnalysisJob &job, STATLineType lt, // // Compute CNTInfo statistics from the aggregated partial sums // - if(it->second.sl1l2_info.scount > 0) - compute_cntinfo(it->second.sl1l2_info, 0, it->second.cnt_info); - else - compute_cntinfo(it->second.sl1l2_info, 1, it->second.cnt_info); + compute_cntinfo(it->second.sl1l2_info, it->second.cnt_info); if(job.stat_out) { write_cnt_cols(it->second.cnt_info, 0, job.stat_at, @@ -1932,7 +1929,7 @@ void write_job_aggr_psum(STATAnalysisJob &job, STATLineType lt, mlog << Error << "\nwrite_job_aggr_psum() -> " << "unsupported output line type \"" << statlinetype_to_string(lt) << "\" requested.\n\n"; - throw(1); + throw 1; } } // end for it @@ -2045,7 +2042,7 @@ void write_job_aggr_wind(STATAnalysisJob &job, STATLineType lt, mlog << Error << "\nwrite_job_aggr_wind() -> " << "the number of U and V forecast and observation points " << "must be the same.\n\n"; - throw(1); + throw 1; } // @@ -2610,7 +2607,7 @@ void write_job_aggr_ssvar(STATAnalysisJob &job, STATLineType lt, // // Compute CNTInfo statistics from the aggregated partial sums // - compute_cntinfo(bin_it->second.sl1l2_info, 0, cnt_info); + compute_cntinfo(bin_it->second.sl1l2_info, cnt_info); // // Write the output STAT line @@ -3062,7 +3059,7 @@ void write_job_aggr_orank(STATAnalysisJob &job, STATLineType lt, mlog << Error << "\nwrite_job_aggr_orank() -> " << "unsupported output line type \"" << statlinetype_to_string(lt) << 
"\" requested.\n\n"; - throw(1); + throw 1; } } // end for it @@ -3227,7 +3224,8 @@ void write_job_aggr_mpr(STATAnalysisJob &job, STATLineType lt, // // Process percentile thresholds // - job.set_perc_thresh(it->second.pd.f_na, it->second.pd.o_na, it->second.pd.cmn_na); + job.set_perc_thresh(it->second.pd.f_na, it->second.pd.o_na, + it->second.pd.fcmn_na, it->second.pd.ocmn_na); // // Prepare the output STAT header columns @@ -3515,7 +3513,7 @@ void write_job_aggr_mpr(STATAnalysisJob &job, STATLineType lt, mlog << Error << "\nwrite_job_aggr_mpr() -> " << "unsupported output line type \"" << statlinetype_to_string(lt) << "\" requested.\n\n"; - throw(1); + throw 1; } } // end for it @@ -3615,7 +3613,7 @@ void write_job_aggr_mpr_wind(STATAnalysisJob &job, STATLineType lt, mlog << Error << "\nwrite_job_aggr_mpr_wind() -> " << "unsupported output line type \"" << statlinetype_to_string(lt) << "\" requested.\n\n"; - throw(1); + throw 1; } } // end for it @@ -4292,7 +4290,7 @@ void do_job_ramp(const ConcatString &jobstring, LineDataFile &f, mlog << Error << "\ndo_job_ramp() -> " << "the \"-line_type\" option may be used at most once to " << "specify the input line type: " << jobstring << "\n\n"; - throw(1); + throw 1; } // @@ -4307,7 +4305,7 @@ void do_job_ramp(const ConcatString &jobstring, LineDataFile &f, mlog << Error << "\ndo_job_ramp() -> " << "the \"-out_line_type\" option must be set to CTC, CTS, and/or MPR: " << jobstring << "\n\n"; - throw(1); + throw 1; } // @@ -4323,7 +4321,7 @@ void do_job_ramp(const ConcatString &jobstring, LineDataFile &f, << "specify the forecast and observation values for the " << "ramp job: " << jobstring << "\n\n"; - throw(1); + throw 1; } // @@ -4334,7 +4332,7 @@ void do_job_ramp(const ConcatString &jobstring, LineDataFile &f, mlog << Error << "\ndo_job_ramp() -> " << "unsupported \"-ramp_type\" option: " << jobstring << "\n\n"; - throw(1); + throw 1; } // @@ -4345,7 +4343,7 @@ void do_job_ramp(const ConcatString &jobstring, 
LineDataFile &f, mlog << Error << "\ndo_job_ramp() -> " << "the \"-swing_width\" option is required for \"-ramp_type SWING\": " << jobstring << "\n\n"; - throw(1); + throw 1; } // @@ -4357,7 +4355,7 @@ void do_job_ramp(const ConcatString &jobstring, LineDataFile &f, << "the \"-ramp_thresh\" or \"-ramp_thresh_fcst\" and " << "\"-ramp_thresh_obs\" options must be used to define the " << "ramp events: " << jobstring << "\n\n"; - throw(1); + throw 1; } // @@ -4423,8 +4421,8 @@ void write_table(AsciiTable &at, ofstream *sa_out) { // if(at.nrows() == 0 && at.ncols() == 0) return; - if(sa_out) *(sa_out) << at << "\n" << flush; - else cout << at << "\n" << flush; + if(sa_out) *sa_out << at << "\n" << flush; + else cout << at << "\n" << flush; return; } @@ -4445,8 +4443,8 @@ void write_jobstring(const ConcatString &jobstring, ofstream *sa_out) { void write_line(const ConcatString &str, ofstream *sa_out) { - if(sa_out) *(sa_out) << str << "\n" << flush; - else cout << str << "\n" << flush; + if(sa_out) *sa_out << str << "\n" << flush; + else cout << str << "\n" << flush; return; } diff --git a/src/tools/core/wavelet_stat/Makefile.in b/src/tools/core/wavelet_stat/Makefile.in index 96490833a2..5826b4102e 100644 --- a/src/tools/core/wavelet_stat/Makefile.in +++ b/src/tools/core/wavelet_stat/Makefile.in @@ -225,6 +225,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/core/wavelet_stat/wavelet_stat.cc b/src/tools/core/wavelet_stat/wavelet_stat.cc index 27d868d78b..89d05ce6ae 100644 --- a/src/tools/core/wavelet_stat/wavelet_stat.cc +++ b/src/tools/core/wavelet_stat/wavelet_stat.cc @@ -300,7 +300,8 @@ void process_scores() { if(!(fcst_mtddf->grid() == grid)) { mlog << Debug(1) << "Regridding forecast " << conf_info.fcst_info[i]->magic_str() - << " to the verification 
grid.\n"; + << " to the verification grid using " + << conf_info.fcst_info[i]->regrid().get_str() << ".\n"; fcst_dp = met_regrid(fcst_dp, fcst_mtddf->grid(), grid, conf_info.fcst_info[i]->regrid()); } @@ -326,7 +327,8 @@ void process_scores() { if(!(obs_mtddf->grid() == grid)) { mlog << Debug(1) << "Regridding observation " << conf_info.obs_info[i]->magic_str() - << " to the verification grid.\n"; + << " to the verification grid using " + << conf_info.obs_info[i]->regrid().get_str() << ".\n"; obs_dp = met_regrid(obs_dp, obs_mtddf->grid(), grid, conf_info.obs_info[i]->regrid()); } @@ -951,13 +953,6 @@ int get_tile_tot_count() { void do_intensity_scale(const NumArray &f_na, const NumArray &o_na, ISCInfo *&isc_info, int i_vx, int i_tile) { - double *f_dat = (double *) nullptr; // Raw and thresholded binary fields - double *o_dat = (double *) nullptr; // Raw and thresholded binary fields - double *f_dwt = (double *) nullptr; // Discrete wavelet transformations - double *o_dwt = (double *) nullptr; // Discrete wavelet transformations - double *f_scl = (double *) nullptr; // Binary field decomposed by scale - double *o_scl = (double *) nullptr; // Binary field decomposed by scale - double *diff = (double *) nullptr; // Difference field double mse, fen, oen, mad; int n, ns, n_isc; int bnd, row, col; @@ -997,13 +992,13 @@ void do_intensity_scale(const NumArray &f_na, const NumArray &o_na, } // Allocate space - f_dat = new double [n]; - o_dat = new double [n]; - f_dwt = new double [n]; - o_dwt = new double [n]; - f_scl = new double [n]; - o_scl = new double [n]; - diff = new double [n]; + vector f_dat(n); // Raw and thresholded binary fields + vector o_dat(n); // Raw and thresholded binary fields + vector f_dwt(n); // Discrete wavelet transformations + vector o_dwt(n); // Discrete wavelet transformations + vector f_scl(n); // Binary field decomposed by scale + vector o_scl(n); // Binary field decomposed by scale + vector diff (n); // Difference field // Initialize f_dat and 
o_dat for(i=0; i " << "Unsupported wavelet type value of " << enum_class_as_int(wvlt_type) << ".\n\n"; @@ -350,8 +350,8 @@ void WaveletStatConfInfo::process_config(GrdFileType ftype, // Check for valid member number switch(wvlt_type) { - case(WaveletType::Haar): - case(WaveletType::Haar_Cntr): + case WaveletType::Haar: + case WaveletType::Haar_Cntr: if(wvlt_member != 2) { mlog << Error << "\nWaveletStatConfInfo::process_config() -> " << "For Haar wavelets, \"" << conf_key_wavelet_member @@ -360,8 +360,8 @@ void WaveletStatConfInfo::process_config(GrdFileType ftype, } break; - case(WaveletType::Daub): - case(WaveletType::Daub_Cntr): + case WaveletType::Daub: + case WaveletType::Daub_Cntr: if(wvlt_member < 4 || wvlt_member > 20 || wvlt_member%2 == 1) { mlog << Error << "\nWaveletStatConfInfo::process_config() -> " << "For Daubechies wavelets, \"" << conf_key_wavelet_member @@ -370,8 +370,8 @@ void WaveletStatConfInfo::process_config(GrdFileType ftype, } break; - case(WaveletType::BSpline): - case(WaveletType::BSpline_Cntr): + case WaveletType::BSpline: + case WaveletType::BSpline_Cntr: if(wvlt_member != 103 && wvlt_member != 105 && wvlt_member != 202 && wvlt_member != 204 && wvlt_member != 206 && wvlt_member != 208 && wvlt_member != 301 && wvlt_member != 303 && wvlt_member != 305 && @@ -384,7 +384,7 @@ void WaveletStatConfInfo::process_config(GrdFileType ftype, } break; - case(WaveletType::None): + case WaveletType::None: default: mlog << Error << "\nWaveletStatConfInfo::process_config() -> " << "Unsupported wavelet type value of " << enum_class_as_int(wvlt_type) << ".\n\n"; @@ -449,8 +449,8 @@ void WaveletStatConfInfo::set_perc_thresh(const DataPlane &f_dp, // // Compute percentiles // - fcat_ta->set_perc(&fsort, &osort, (NumArray *) 0, fcat_ta, ocat_ta); - ocat_ta->set_perc(&fsort, &osort, (NumArray *) 0, fcat_ta, ocat_ta); + fcat_ta->set_perc(&fsort, &osort, nullptr, nullptr, fcat_ta, ocat_ta); + ocat_ta->set_perc(&fsort, &osort, nullptr, nullptr, fcat_ta, 
ocat_ta); return; } @@ -511,7 +511,7 @@ void WaveletStatConfInfo::process_tiles(const Grid &grid) { // Tile the input data using tiles of dimension n by n where n // is the largest integer power of 2 less than the smallest // dimension of the input data and allowing no overlap. - case(GridDecompType::Auto): + case GridDecompType::Auto: center_tiles(grid.nx(), grid.ny()); @@ -528,7 +528,7 @@ void WaveletStatConfInfo::process_tiles(const Grid &grid) { break; // Apply the tiles specified in the configuration file - case(GridDecompType::Tile): + case GridDecompType::Tile: // Number of tiles based on the user-specified locations n_tile = tile_xll.n(); @@ -547,7 +547,7 @@ void WaveletStatConfInfo::process_tiles(const Grid &grid) { // Setup tiles for padding the input fields out to the nearest // integer power of two - case(GridDecompType::Pad): + case GridDecompType::Pad: pad_tiles(grid.nx(), grid.ny()); @@ -562,7 +562,7 @@ void WaveletStatConfInfo::process_tiles(const Grid &grid) { break; - case(GridDecompType::None): + case GridDecompType::None: default: mlog << Error << "\nWaveletStatConfInfo::process_tiles() -> " << "Unsupported grid decomposition type of " diff --git a/src/tools/core/wavelet_stat/wavelet_stat_conf_info.h b/src/tools/core/wavelet_stat/wavelet_stat_conf_info.h index 1380ecc6b3..11bdf93f90 100644 --- a/src/tools/core/wavelet_stat/wavelet_stat_conf_info.h +++ b/src/tools/core/wavelet_stat/wavelet_stat_conf_info.h @@ -146,11 +146,11 @@ class WaveletStatConfInfo { //////////////////////////////////////////////////////////////////////// -inline int WaveletStatConfInfo::get_n_vx() const { return(n_vx); } -inline int WaveletStatConfInfo::get_max_n_thresh() const { return(max_n_thresh); } -inline int WaveletStatConfInfo::get_n_tile() const { return(n_tile); } -inline int WaveletStatConfInfo::get_tile_dim() const { return(tile_dim); } -inline int WaveletStatConfInfo::get_n_scale() const { return(n_scale); } +inline int WaveletStatConfInfo::get_n_vx() const { 
return n_vx; } +inline int WaveletStatConfInfo::get_max_n_thresh() const { return max_n_thresh; } +inline int WaveletStatConfInfo::get_n_tile() const { return n_tile; } +inline int WaveletStatConfInfo::get_tile_dim() const { return tile_dim; } +inline int WaveletStatConfInfo::get_n_scale() const { return n_scale; } inline int WaveletStatConfInfo::get_compression_level() { return conf.nc_compression(); } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/dev_utils/Makefile.in b/src/tools/dev_utils/Makefile.in index e9b745a97f..46d98a6aeb 100644 --- a/src/tools/dev_utils/Makefile.in +++ b/src/tools/dev_utils/Makefile.in @@ -368,6 +368,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/dev_utils/gen_climo_bin.cc b/src/tools/dev_utils/gen_climo_bin.cc index 4551342192..62541a8a67 100644 --- a/src/tools/dev_utils/gen_climo_bin.cc +++ b/src/tools/dev_utils/gen_climo_bin.cc @@ -196,7 +196,7 @@ void process_binary() { // Loop over climo bins, skipping the first and last points for(j=0; j<(n_bin-1); j++) { - int byte_offset = (j+1)*((n_read_bin - 1)/(n_bin))*4; + int byte_offset = (j+1)*((n_read_bin - 1)/n_bin)*4; unsigned char * b = buf + byte_offset; my_memcpy(&v, b, 4); shuffle_4(&v); @@ -282,7 +282,7 @@ void write_nc_bin(const DataPlane &dp, int i_cdf, double cdf_y) { // Allocate memory n = grid.nx() * grid.ny(); - float * data = new float [n]; + vector data(n); dp.data_range(dmin, dmax); mlog << Debug(2) @@ -319,7 +319,7 @@ void write_nc_bin(const DataPlane &dp, int i_cdf, double cdf_y) { } // Write out the gridded field of CDF X-values - if(!put_nc_data(&cdf_x_var, &data[0], lengths, offsets)) { + if(!put_nc_data(&cdf_x_var, data.data(), lengths, offsets)) { mlog << Error << "\nwrite_nc_bin() -> " << "error writing 
NetCDF variable name \"" << var_name << "\" for the " << i_cdf @@ -327,9 +327,6 @@ void write_nc_bin(const DataPlane &dp, int i_cdf, double cdf_y) { exit(1); } - // Deallocate and clean up - if(data) { delete [] data; data = (float *) nullptr; } - return; } diff --git a/src/tools/dev_utils/met_nc_file.cc b/src/tools/dev_utils/met_nc_file.cc index 50e1151c51..5b293ec211 100644 --- a/src/tools/dev_utils/met_nc_file.cc +++ b/src/tools/dev_utils/met_nc_file.cc @@ -127,7 +127,7 @@ bool MetNcFile::readFile(const int desired_grib_code, IS_INVALID_NC(obsArrVar)) { mlog << Error << "\nmain() -> " - << "trouble reading netCDF file " << _filePath << "\n\n"; + << "trouble reading netCDF file " << _filePath << "\n\n"; return false; } @@ -156,8 +156,8 @@ bool MetNcFile::readFile(const int desired_grib_code, float *obs_arr = new float[obs_arr_len]; float *hdr_arr = new float[hdr_arr_len]; - mlog << Debug(2) << "Processing " << (obs_count) << " observations at " - << (hdr_count) << " locations.\n"; + mlog << Debug(2) << "Processing " << obs_count << " observations at " + << hdr_count << " locations.\n"; // Loop through the observations, saving the ones that we are // interested in diff --git a/src/tools/dev_utils/shapefiles/Makefile.in b/src/tools/dev_utils/shapefiles/Makefile.in index f5454f70fb..62ec442a56 100644 --- a/src/tools/dev_utils/shapefiles/Makefile.in +++ b/src/tools/dev_utils/shapefiles/Makefile.in @@ -219,6 +219,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/Makefile.in b/src/tools/other/Makefile.in index 7574bf3ce1..010a792155 100644 --- a/src/tools/other/Makefile.in +++ b/src/tools/other/Makefile.in @@ -252,6 +252,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD 
= @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/ascii2nc/Makefile.in b/src/tools/other/ascii2nc/Makefile.in index ccc217163a..d7d75fb04a 100644 --- a/src/tools/other/ascii2nc/Makefile.in +++ b/src/tools/other/ascii2nc/Makefile.in @@ -274,6 +274,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/ascii2nc/aeronet_handler.cc b/src/tools/other/ascii2nc/aeronet_handler.cc index bf6ed08d12..440622938c 100644 --- a/src/tools/other/ascii2nc/aeronet_handler.cc +++ b/src/tools/other/ascii2nc/aeronet_handler.cc @@ -794,7 +794,7 @@ string AeronetHandler::make_var_name_from_header(string hdr_field) { else if ((int) string::npos != hdr_field.find(WAVELENGTHS_PW_NAME)) { var_name = WAVELENGTHS_PW_NAME; } - else if ((hdr_field == OPTICAL_AIR_MASS_NAME)) { + else if (hdr_field == OPTICAL_AIR_MASS_NAME) { var_name = hdr_field; } else { diff --git a/src/tools/other/ascii2nc/airnow_locations.cc b/src/tools/other/ascii2nc/airnow_locations.cc index a7908c7a7b..e8d09647b8 100644 --- a/src/tools/other/ascii2nc/airnow_locations.cc +++ b/src/tools/other/ascii2nc/airnow_locations.cc @@ -55,8 +55,8 @@ bool AirnowLocations::initialize(const string &fileName) LineDataFile locFile; if (!locFile.open(monitoringSiteFileName.c_str())) { mlog << Warning << "\n" << method_name << " -> " - << "can't open input ASCII file \"" << monitoringSiteFileName - << "\" for reading\n\n"; + << "can't open input ASCII file \"" << monitoringSiteFileName + << "\" for reading\n\n"; return false; } DataLine data_line; @@ -82,7 +82,7 @@ bool AirnowLocations::initialize(const string &fileName) if (!status) { return false; } - + // this is inefficient, but will work int bad_line_count = 0; while (locFile >> data_line) { @@ 
-126,12 +126,12 @@ bool AirnowLocations::initialize(const string &fileName) #ifdef DEBUGGING for (size_t i=0; i valid_end_ut) keep = false; } - return(keep); + return keep; } diff --git a/src/tools/other/ascii2nc/iabp_handler.cc b/src/tools/other/ascii2nc/iabp_handler.cc index 71ce27f10c..fd384cbf2e 100644 --- a/src/tools/other/ascii2nc/iabp_handler.cc +++ b/src/tools/other/ascii2nc/iabp_handler.cc @@ -82,7 +82,7 @@ bool IabpHandler::isFileType(LineDataFile &ascii_file) const { if (tokens[6] != "Lat") is_file_type = false; if (tokens[7] != "Lon") is_file_type = false; - return(is_file_type); + return is_file_type; } //////////////////////////////////////////////////////////////////////// @@ -92,7 +92,7 @@ bool IabpHandler::isFileType(LineDataFile &ascii_file) const { bool IabpHandler::_readObservations(LineDataFile &ascii_file) { // Read and save the header information - if(!_readHeaderInfo(ascii_file)) return(false); + if(!_readHeaderInfo(ascii_file)) return false; string header_type = "IABP_STANDARD"; @@ -107,7 +107,7 @@ bool IabpHandler::_readObservations(LineDataFile &ascii_file) << " != " << _numColumns << ") on line number " << dl.line_number() << " of IABP file \"" << ascii_file.filename() << "\"!\n\n"; - return(false); + return false; } // Extract the valid time from the data line, using POS_DOY (scientist is most @@ -119,7 +119,7 @@ bool IabpHandler::_readObservations(LineDataFile &ascii_file) << "No valid time computed in file, line number " << dl.line_number() << " of IABP file \"" << ascii_file.filename() << "\". Ignore this line\n\n"; - return(false); + return false; } double lat = stod(dl[_latPtr]); @@ -139,7 +139,7 @@ bool IabpHandler::_readObservations(LineDataFile &ascii_file) << "Latitude/longitude has missing value " << IABP_MISSING_VALUE << ", line number " << dl.line_number() << " of IABP file \"" << ascii_file.filename() << "\". 
Ignore this line\n\n"; - return(false); + return false; } if (_bpPtr >= 0) { @@ -184,7 +184,7 @@ bool IabpHandler::_readObservations(LineDataFile &ascii_file) } // end while - return(true); + return true; } // //////////////////////////////////////////////////////////////////////// @@ -207,7 +207,7 @@ bool IabpHandler::_readHeaderInfo(LineDataFile &ascii_file) { << dl.n_items() << " < " << MIN_NUM_HDR_COLS << ") in IABP file \"" << ascii_file.filename() << "\"!\n\n"; - return(false); + return false; } // Map the header information to column numbers diff --git a/src/tools/other/ascii2nc/little_r_handler.cc b/src/tools/other/ascii2nc/little_r_handler.cc index 3de94d81df..49a95cc226 100644 --- a/src/tools/other/ascii2nc/little_r_handler.cc +++ b/src/tools/other/ascii2nc/little_r_handler.cc @@ -53,15 +53,15 @@ static const int lr_grib_codes[] = { 1, 7, 11, 17, 32, 31, 33, 34, 52, bad_data_int }; static const string lr_grib_names[] = { - "PRES", // 001 PRES Pressure Pa - "HGT", // 007 HGT Geopotential height gpm - "TMP", // 011 TMP Temperature K - "DPT", // 017 DPT Dewpoint temperature K - "WIND", // 032 WIND Wind speed m s-1 - "WDIR", // 031 WDIR Wind direction deg - "UGRD", // 033 U GRD u-component of wind m s-1 - "VGRD", // 034 V GRD v-component of wind m s-1 - "RH", // 052 R H Relative humidity % + "PRES", // 001 PRES Pressure (Pa) + "HGT", // 007 HGT Geopotential height (gpm) + "TMP", // 011 TMP Temperature (K) + "DPT", // 017 DPT Dewpoint temperature (K) + "WIND", // 032 WIND Wind speed (m s-1) + "WDIR", // 031 WDIR Wind direction (deg) + "UGRD", // 033 UGRD u-component of wind (m s-1) + "VGRD", // 034 VGRD v-component of wind (m s-1) + "RH", // 052 RH Relative humidity (%) "UNKNOWN" // bad_data_int }; @@ -237,7 +237,7 @@ bool LittleRHandler::_readObservations(LineDataFile &ascii_file) na_string : (string)data_line[19]); obs_qty.ws_strip(); - // 002 PRMSL Pressure reduced to MSL Pa + // 002 PRMSL Pressure reduced to MSL (Pa) 
_addObservations(Observation(hdr_typ.text(), hdr_sid.text(), hdr_vld, diff --git a/src/tools/other/ascii2nc/met_handler.cc b/src/tools/other/ascii2nc/met_handler.cc index dd868d8029..2066a31537 100644 --- a/src/tools/other/ascii2nc/met_handler.cc +++ b/src/tools/other/ascii2nc/met_handler.cc @@ -101,13 +101,13 @@ bool MetHandler::_readObservations(LineDataFile &ascii_file) // Check for the first line of the file or the header changing - if (data_line.line_number() == 1 || - hdr_typ != data_line[0] || - hdr_sid != data_line[1] || - hdr_vld_str != data_line[2] || - !is_eq(hdr_lat, parse_num(data_line[3])) || - !is_eq(hdr_lon, parse_num(data_line[4])) || - !is_eq(hdr_elv, parse_num(data_line[5]))) + if (data_line.line_number() == 1 || + hdr_typ != data_line[0] || + hdr_sid != data_line[1] || + hdr_vld_str != data_line[2] || + !is_eq(hdr_lat, parse_num(data_line[3])) || + !is_eq(hdr_lon, parse_num(data_line[4])) || + !is_eq(hdr_elv, parse_num(data_line[5]))) { // Store the column format @@ -115,9 +115,9 @@ bool MetHandler::_readObservations(LineDataFile &ascii_file) if (data_line.line_number() == 1 && _nFileColumns == n_met_col) { - mlog << Warning << "\nFound deprecated 10 column input file format, " - << "consider adding quality flag values to file: " - << ascii_file.filename() << "\n\n"; + mlog << Warning << "\nFound deprecated 10 column input file format, " + << "consider adding quality flag values to file: " + << ascii_file.filename() << "\n\n"; } // Store the header info @@ -135,8 +135,10 @@ bool MetHandler::_readObservations(LineDataFile &ascii_file) // Pressure level (hPa) or precip accumulation interval (sec) - double obs_prs = ((is_precip_grib_name(data_line[6]) || is_precip_grib_code(atoi(data_line[6]))) ? - timestring_to_sec(data_line[7]) : parse_num(data_line[7])); + double obs_prs = ((is_precip_grib_name(data_line[6]) || + is_precip_grib_code(atoi(data_line[6]))) ? 
+ timestring_to_sec(data_line[7]) : + parse_num(data_line[7])); // Observation height (meters above sea level) @@ -186,7 +188,7 @@ bool MetHandler::_readObservations(LineDataFile &ascii_file) double parse_num(const char *s) { if(!s) return bad_data_double; - return( (strcasecmp(s, na_str) == 0 ? bad_data_double : atof(s)) ); + return( strcasecmp(s, na_str) == 0 ? bad_data_double : atof(s) ); } diff --git a/src/tools/other/ascii2nc/ndbc_locations.cc b/src/tools/other/ascii2nc/ndbc_locations.cc index d766e7e234..bc111e7d05 100644 --- a/src/tools/other/ascii2nc/ndbc_locations.cc +++ b/src/tools/other/ascii2nc/ndbc_locations.cc @@ -68,8 +68,8 @@ bool NdbcLocations::initialize(const string &fName) LineDataFile locFile; if (!locFile.open(fileName.c_str())) { mlog << Error << method_name << "->" - << "can't open input ASCII file \"" << fileName - << "\" for reading\n\n"; + << "can't open input ASCII file \"" << fileName + << "\" for reading\n\n"; return false; } DataLine data_line; @@ -77,7 +77,7 @@ bool NdbcLocations::initialize(const string &fName) string latKey = "lat="; string lonKey = "lon="; string elevKey = "elev="; - + while (locFile >> data_line) { string sline = data_line.get_line(); string stationId; @@ -88,14 +88,14 @@ bool NdbcLocations::initialize(const string &fName) } if (!_parseLineForDouble(sline, latKey, lat)) { mlog << Warning << method_name << "-> " - << "parsing out lat from line '" << sline << "'\n" - << "in file \"" << fileName << "\n\n"; + << "parsing out lat from line '" << sline << "'\n" + << "in file \"" << fileName << "\n\n"; continue; } if (!_parseLineForDouble(sline, lonKey, lon)) { mlog << Warning << method_name << "-> " - << "parsing out lon from line '" << sline << "'\n" - << "in file \"" << fileName << "\n\n"; + << "parsing out lon from line '" << sline << "'\n" + << "in file \"" << fileName << "\n\n"; continue; } if (!_parseLineForDouble(sline, elevKey, elev)) { diff --git a/src/tools/other/gen_ens_prod/Makefile.in 
b/src/tools/other/gen_ens_prod/Makefile.in index 8586403cb6..4f070ce15d 100644 --- a/src/tools/other/gen_ens_prod/Makefile.in +++ b/src/tools/other/gen_ens_prod/Makefile.in @@ -225,6 +225,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/gen_ens_prod/gen_ens_prod.cc b/src/tools/other/gen_ens_prod/gen_ens_prod.cc index 46ad8b66df..d552e90405 100644 --- a/src/tools/other/gen_ens_prod/gen_ens_prod.cc +++ b/src/tools/other/gen_ens_prod/gen_ens_prod.cc @@ -468,16 +468,20 @@ void get_climo_mean_stdev(GenEnsProdVarInfo *ens_info, int i_var, << ens_info->get_var_info(i_ens)->magic_str() << "\".\n"; cmn_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_mean_field, false), - i_var, ens_valid_ut, grid); + conf_info.conf.lookup_dictionary(conf_key_ens), + conf_key_climo_mean, + i_var, ens_valid_ut, grid, + "ensemble climatology mean"); mlog << Debug(4) << "Reading climatology standard deviation data for ensemble field \"" << ens_info->get_var_info(i_ens)->magic_str() << "\".\n"; csd_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_stdev_field, false), - i_var, ens_valid_ut, grid); + conf_info.conf.lookup_dictionary(conf_key_ens), + conf_key_climo_stdev, + i_var, ens_valid_ut, grid, + "ensemble climatology standard deviation"); // Unset the MET_ENS_MEMBER_ID environment variable if(set_ens_mem_id) { @@ -645,7 +649,8 @@ bool get_data_plane(const char *infile, GrdFileType ftype, if(!(mtddf->grid() == grid)) { mlog << Debug(1) << "Regridding field \"" << info->magic_str() - << "\" to the verification grid.\n"; + << "\" to the verification grid using " + << info->regrid().get_str() << ".\n"; dp = met_regrid(dp, mtddf->grid(), grid, info->regrid()); } @@ -713,6 +718,9 @@ void track_counts(GenEnsProdVarInfo *ens_info, 
const DataPlane &ens_dp, bool is_ cmn = (cmn_dp.is_empty() ? bad_data_double : cmn_dp.data()[i]); csd = (csd_dp.is_empty() ? bad_data_double : csd_dp.data()[i]); + // MET #2924 Use the same data for the forecast and observation climatologies + ClimoPntInfo cpi(cmn, csd, cmn, csd); + // Skip bad data values if(is_bad_data(ens)) continue; @@ -738,7 +746,7 @@ void track_counts(GenEnsProdVarInfo *ens_info, const DataPlane &ens_dp, bool is_ // Event frequency for(j=0; j ens_mean (nxy); + vector ens_stdev (nxy); + vector ens_minus (nxy); + vector ens_plus (nxy); + vector ens_min (nxy); + vector ens_max (nxy); + vector ens_range (nxy); + vector ens_vld (nxy); // Store the threshold for the ratio of valid data points t = conf_info.vld_data_thresh; @@ -857,56 +865,56 @@ void write_ens_nc(GenEnsProdVarInfo *ens_info, int n_ens_vld, // Add the ensemble mean, if requested if(ens_info->nc_info.do_mean) { - write_ens_var_float(ens_info, ens_mean, ens_dp, + write_ens_var_float(ens_info, ens_mean.data(), ens_dp, "ENS_MEAN", "Ensemble Mean"); } // Add the ensemble standard deviation, if requested if(ens_info->nc_info.do_stdev) { - write_ens_var_float(ens_info, ens_stdev, ens_dp, + write_ens_var_float(ens_info, ens_stdev.data(), ens_dp, "ENS_STDEV", "Ensemble Standard Deviation"); } // Add the ensemble mean minus one standard deviation, if requested if(ens_info->nc_info.do_minus) { - write_ens_var_float(ens_info, ens_minus, ens_dp, + write_ens_var_float(ens_info, ens_minus.data(), ens_dp, "ENS_MINUS", "Ensemble Mean Minus 1 Standard Deviation"); } // Add the ensemble mean plus one standard deviation, if requested if(ens_info->nc_info.do_plus) { - write_ens_var_float(ens_info, ens_plus, ens_dp, + write_ens_var_float(ens_info, ens_plus.data(), ens_dp, "ENS_PLUS", "Ensemble Mean Plus 1 Standard Deviation"); } // Add the ensemble minimum value, if requested if(ens_info->nc_info.do_min) { - write_ens_var_float(ens_info, ens_min, ens_dp, + write_ens_var_float(ens_info, ens_min.data(), 
ens_dp, "ENS_MIN", "Ensemble Minimum"); } // Add the ensemble maximum value, if requested if(ens_info->nc_info.do_max) { - write_ens_var_float(ens_info, ens_max, ens_dp, + write_ens_var_float(ens_info, ens_max.data(), ens_dp, "ENS_MAX", "Ensemble Maximum"); } // Add the ensemble range, if requested if(ens_info->nc_info.do_range) { - write_ens_var_float(ens_info, ens_range, ens_dp, + write_ens_var_float(ens_info, ens_range.data(), ens_dp, "ENS_RANGE", "Ensemble Range"); } // Add the ensemble valid data count, if requested if(ens_info->nc_info.do_vld) { - write_ens_var_int(ens_info, ens_vld, ens_dp, + write_ens_var_int(ens_info, ens_vld.data(), ens_dp, "ENS_VLD", "Ensemble Valid Data Count"); } @@ -1032,29 +1040,27 @@ void write_ens_nc(GenEnsProdVarInfo *ens_info, int n_ens_vld, // Process all CDP thresholds except 0 and 100 for(vector::iterator it = simp.begin(); it != simp.end(); it++) { - if(it->ptype() == perc_thresh_climo_dist && + if(it->ptype() == perc_thresh_fcst_climo_dist && !is_eq(it->pvalue(), 0.0) && !is_eq(it->pvalue(), 100.0)) { - snprintf(type_str, sizeof(type_str), "CLIMO_CDP%i", + snprintf(type_str, sizeof(type_str), "CLIMO_FCDP%i", nint(it->pvalue())); cdp_dp = normal_cdf_inv(it->pvalue()/100.0, cmn_dp, csd_dp); - write_ens_data_plane(ens_info, cdp_dp, ens_dp, - type_str, - "Climatology distribution percentile"); + write_ens_data_plane(ens_info, cdp_dp, ens_dp, type_str, + "Forecast climatology distribution percentile"); + } + else if(it->ptype() == perc_thresh_obs_climo_dist && + !is_eq(it->pvalue(), 0.0) && + !is_eq(it->pvalue(), 100.0)) { + snprintf(type_str, sizeof(type_str), "CLIMO_OCDP%i", + nint(it->pvalue())); + cdp_dp = normal_cdf_inv(it->pvalue()/100.0, cmn_dp, csd_dp); + write_ens_data_plane(ens_info, cdp_dp, ens_dp, type_str, + "Observation climatology distribution percentile"); } } // end for it } - // Deallocate and clean up - if(ens_mean) { delete [] ens_mean; ens_mean = (float *) nullptr; } - if(ens_stdev) { delete [] ens_stdev; 
ens_stdev = (float *) nullptr; } - if(ens_minus) { delete [] ens_minus; ens_minus = (float *) nullptr; } - if(ens_plus) { delete [] ens_plus; ens_plus = (float *) nullptr; } - if(ens_min) { delete [] ens_min; ens_min = (float *) nullptr; } - if(ens_max) { delete [] ens_max; ens_max = (float *) nullptr; } - if(ens_range) { delete [] ens_range; ens_range = (float *) nullptr; } - if(ens_vld) { delete [] ens_vld; ens_vld = (int *) nullptr; } - return; } diff --git a/src/tools/other/gen_ens_prod/gen_ens_prod_conf_info.h b/src/tools/other/gen_ens_prod/gen_ens_prod_conf_info.h index ba23574c30..bb76ce0c63 100644 --- a/src/tools/other/gen_ens_prod/gen_ens_prod_conf_info.h +++ b/src/tools/other/gen_ens_prod/gen_ens_prod_conf_info.h @@ -112,10 +112,10 @@ class GenEnsProdConfInfo { //////////////////////////////////////////////////////////////////////// -inline int GenEnsProdConfInfo::get_n_var() const { return(n_var); } -inline int GenEnsProdConfInfo::get_max_n_cat() const { return(max_n_cat); } -inline int GenEnsProdConfInfo::get_n_nbrhd() const { return(nbrhd_prob.width.n()); } -inline int GenEnsProdConfInfo::get_compression_level() { return(conf.nc_compression()); } +inline int GenEnsProdConfInfo::get_n_var() const { return n_var; } +inline int GenEnsProdConfInfo::get_max_n_cat() const { return max_n_cat; } +inline int GenEnsProdConfInfo::get_n_nbrhd() const { return nbrhd_prob.width.n(); } +inline int GenEnsProdConfInfo::get_compression_level() { return conf.nc_compression(); } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/gen_vx_mask/Makefile.in b/src/tools/other/gen_vx_mask/Makefile.in index 0454fcc726..4203003677 100644 --- a/src/tools/other/gen_vx_mask/Makefile.in +++ b/src/tools/other/gen_vx_mask/Makefile.in @@ -222,6 +222,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ 
MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/gen_vx_mask/gen_vx_mask.cc b/src/tools/other/gen_vx_mask/gen_vx_mask.cc index e4c14b557b..83fe7cc568 100644 --- a/src/tools/other/gen_vx_mask/gen_vx_mask.cc +++ b/src/tools/other/gen_vx_mask/gen_vx_mask.cc @@ -177,24 +177,8 @@ void process_command_line(int argc, char **argv) { void process_input_grid(DataPlane &dp) { - // Parse the input grid as a white-space separated string - StringArray sa; - sa.parse_wsss(input_gridname); - - // Search for a named grid - if(sa.n() == 1 && find_grid_by_name(sa[0].c_str(), grid)) { - mlog << Debug(3) - << "Use input grid named \"" << input_gridname << "\".\n"; - } - // Parse grid definition - else if(sa.n() > 1 && parse_grid_def(sa, grid)) { - mlog << Debug(3) - << "Use input grid defined by string \"" << input_gridname - << "\".\n"; - } - // Extract the grid from a gridded data file - else { - + if (!build_grid_by_grid_string(input_gridname, grid, "process_input_grid", false)) { + // Extract the grid from a gridded data file mlog << Debug(3) << "Use input grid defined by file \"" << input_gridname << "\".\n"; @@ -284,22 +268,7 @@ void process_mask_file(DataPlane &dp) { // For the grid mask type, support named grids and grid // specification strings if(mask_type == MaskType::Grid) { - - // Parse the mask file as a white-space separated string - StringArray sa; - sa.parse_wsss(mask_filename); - - // Search for a named grid - if(sa.n() == 1 && find_grid_by_name(sa[0].c_str(), grid_mask)) { - mlog << Debug(3) - << "Use mask grid named \"" << mask_filename << "\".\n"; - } - // Parse grid definition - else if(sa.n() > 1 && parse_grid_def(sa, grid_mask)) { - mlog << Debug(3) - << "Use mask grid defined by string \"" << mask_filename - << "\".\n"; - } + build_grid_by_grid_string(mask_filename, grid_mask, "process_mask_file", false); } // Parse as a gridded data file if not already set @@ -432,9 +401,8 @@ void get_data_plane(const ConcatString 
&file_name, } // Attempt to open the data file - Met2dDataFileFactory mtddf_factory; - Met2dDataFile *mtddf_ptr = (Met2dDataFile *) nullptr; - mtddf_ptr = mtddf_factory.new_met_2d_data_file(file_name.c_str(), ftype); + Met2dDataFile *mtddf_ptr = Met2dDataFileFactory::new_met_2d_data_file( + file_name.c_str(), ftype); if(!mtddf_ptr) { mlog << Error << "\nget_data_plane() -> " << "can't open input file \"" << file_name << "\"\n\n"; @@ -454,9 +422,7 @@ void get_data_plane(const ConcatString &file_name, if(local_cs.length() > 0) { // Allocate new VarInfo object - VarInfoFactory vi_factory; - VarInfo *vi_ptr = (VarInfo *) nullptr; - vi_ptr = vi_factory.new_var_info(mtddf_ptr->file_type()); + VarInfo *vi_ptr = VarInfoFactory::new_var_info(mtddf_ptr->file_type()); if(!vi_ptr) { mlog << Error << "\nget_data_plane() -> " << "can't allocate new VarInfo pointer.\n\n"; @@ -475,7 +441,8 @@ void get_data_plane(const ConcatString &file_name, } // Dump the range of data values read - double dmin, dmax; + double dmin; + double dmax; dp.data_range(dmin, dmax); mlog << Debug(3) << "Read field \"" << vi_ptr->magic_str() << "\" from \"" @@ -502,7 +469,6 @@ bool get_gen_vx_mask_config_str(MetNcMetDataFile *mnmdf_ptr, ConcatString &config_str) { bool status = false; ConcatString tool; - int i; // Check for null pointer if(!mnmdf_ptr) return status; @@ -514,7 +480,7 @@ bool get_gen_vx_mask_config_str(MetNcMetDataFile *mnmdf_ptr, if(tool != program_name) return status; // Loop through the NetCDF variables - for(i=0; iMetNc->Nvars; i++) { + for(int i=0; iMetNc->Nvars; i++) { // Skip the lat/lon variables if(mnmdf_ptr->MetNc->Var[i].name == "lat" || @@ -546,7 +512,7 @@ void get_shapefile_strings() { << dbf_filename << "\n"; // Open the database file - if(!(f.open(dbf_filename.c_str()))) { + if(!f.open(dbf_filename.c_str())) { mlog << Error << "\nget_shapefile_strings() -> " << "unable to open database file \"" << dbf_filename << "\"\n\n"; @@ -613,7 +579,7 @@ void get_shapefile_records() { } 
// Open shapefile - if(!(f.open(shape_filename))) { + if(!f.open(shape_filename)) { mlog << Error << "\nget_shapefile_records() -> " << "unable to open shape file \"" << shape_filename << "\"\n\n"; @@ -698,13 +664,14 @@ bool is_shape_str_match(const int i_shape, const StringArray &names, const Strin //////////////////////////////////////////////////////////////////////// void apply_poly_mask(DataPlane & dp) { - int x, y, n_in; + int n_in = 0; bool inside; - double lat, lon; + double lat; + double lon; // Check the Lat/Lon of each grid point being inside the polyline - for(x=0,n_in=0; x::const_iterator poly_it; for(poly_it = poly_list.begin(); @@ -1315,7 +1295,7 @@ void apply_shape_mask(DataPlane & dp) { DataPlane combine(const DataPlane &dp_data, const DataPlane &dp_mask, SetLogic logic) { - int x, y, n_in; + int n_in = 0; bool v_data, v_mask; double v; DataPlane dp; @@ -1339,8 +1319,8 @@ DataPlane combine(const DataPlane &dp_data, const DataPlane &dp_mask, dp.set_size(grid.nx(), grid.ny()); // Process each point - for(x=0,n_in=0; x mask_data(grid.nx()*grid.ny()); // Loop through each grid point - for(x=0; x " << "error with mask_var->put\n\n"; - // Delete allocated memory - if(mask_data) { delete[] mask_data; mask_data = (float *) nullptr; } exit(1); } - // Delete allocated memory - if(mask_data) { delete[] mask_data; mask_data = (float *) nullptr; } - delete f_out; f_out = (NcFile *) nullptr; diff --git a/src/tools/other/gis_utils/Makefile.in b/src/tools/other/gis_utils/Makefile.in index aa3780a9bc..0afd97e00c 100644 --- a/src/tools/other/gis_utils/Makefile.in +++ b/src/tools/other/gis_utils/Makefile.in @@ -234,6 +234,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/grid_diag/Makefile.in b/src/tools/other/grid_diag/Makefile.in index 
c93be79561..65e3b5196f 100644 --- a/src/tools/other/grid_diag/Makefile.in +++ b/src/tools/other/grid_diag/Makefile.in @@ -224,6 +224,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/grid_diag/grid_diag.cc b/src/tools/other/grid_diag/grid_diag.cc index 87f263b68b..cd5ddc843b 100644 --- a/src/tools/other/grid_diag/grid_diag.cc +++ b/src/tools/other/grid_diag/grid_diag.cc @@ -24,7 +24,6 @@ // //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -51,7 +50,6 @@ using namespace std; using namespace netCDF; - //////////////////////////////////////////////////////////////////////// static void process_command_line(int, char **); @@ -293,7 +291,8 @@ void process_series(void) { if(!(cur_grid == grid)) { mlog << Debug(2) << "Regridding field " << data_info->magic_str_attr() - << " to the verification grid.\n"; + << " to the verification grid using " + << data_info->regrid().get_str() << ".\n"; data_dp[i_var] = met_regrid(data_dp[i_var], cur_grid, grid, data_info->regrid()); @@ -431,9 +430,9 @@ void setup_histograms(void) { bin_max.clear(); bin_mid.clear(); for(int k=0; kmagic_str_attr() - << " histogram with " << n_bins << " bins from " - << min << " to " << max << ".\n"; - + << "Initializing " << data_info->magic_str_attr() + << " histogram with " << n_bins << " bins from " + << min << " to " << max << ".\n"; + histograms[i_var_str] = vector(); init_pdf(n_bins, histograms[i_var_str]); // Keep track of unique output variable names if(nc_var_sa.has( data_info->magic_str_attr() )) unique_variable_names = false; nc_var_sa.add(data_info->magic_str_attr()); - + } // for i_var } @@ -462,7 +461,7 @@ void setup_histograms(void) { void setup_joint_histograms(void) { ConcatString i_var_str, j_var_str, ij_var_str; - 
for(int i_var=0; i_varn_bins(); @@ -487,7 +486,7 @@ void setup_joint_histograms(void) { init_joint_pdf(n_bins, n_joint_bins, joint_histograms[ij_var_str]); } // end for j_var - } // end for i_var + } // end for i_var } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/gsi_tools/Makefile.in b/src/tools/other/gsi_tools/Makefile.in index c7f068cfd9..455195d757 100644 --- a/src/tools/other/gsi_tools/Makefile.in +++ b/src/tools/other/gsi_tools/Makefile.in @@ -261,6 +261,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/gsi_tools/conv_record.h b/src/tools/other/gsi_tools/conv_record.h index f482c6fd0b..658db1cec8 100644 --- a/src/tools/other/gsi_tools/conv_record.h +++ b/src/tools/other/gsi_tools/conv_record.h @@ -89,15 +89,15 @@ class ConvFile { //////////////////////////////////////////////////////////////////////// -inline bool ConvFile::get_swap_endian() const { return ( SwapEndian ); } +inline bool ConvFile::get_swap_endian() const { return SwapEndian; } -inline int ConvFile::get_rec_pad_size() const { return ( RecPadSize ); } +inline int ConvFile::get_rec_pad_size() const { return RecPadSize; } -inline unixtime ConvFile::date() const { return ( Date ); } +inline unixtime ConvFile::date() const { return Date; } -inline int ConvFile::n_rec() const { return ( Nrec ); } +inline int ConvFile::n_rec() const { return Nrec; } -inline int ConvFile::n_pair() const { return ( Npair ); } +inline int ConvFile::n_pair() const { return Npair; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/gsi_tools/gsi_record.h b/src/tools/other/gsi_tools/gsi_record.h index 29392c49eb..2c6c3c3206 100644 --- a/src/tools/other/gsi_tools/gsi_record.h +++ 
b/src/tools/other/gsi_tools/gsi_record.h @@ -75,8 +75,8 @@ class GsiRecord { //////////////////////////////////////////////////////////////////////// -inline bool GsiRecord::shuffle () const { return ( Shuffle ); } -inline int GsiRecord::rec_pad_length () const { return ( RecPadLength ); } +inline bool GsiRecord::shuffle () const { return Shuffle; } +inline int GsiRecord::rec_pad_length () const { return RecPadLength; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/gsi_tools/gsi_util.cc b/src/tools/other/gsi_tools/gsi_util.cc index 5381ebe2d1..82b4000ae9 100644 --- a/src/tools/other/gsi_tools/gsi_util.cc +++ b/src/tools/other/gsi_tools/gsi_util.cc @@ -301,152 +301,35 @@ bool is_retr(const char *s) { void setup_header(StatHdrColumns &shc, const StringArray &name, const StringArray &value, const char *default_line_type) { - int index; - SingleThresh st; - - // MODEL - if(name.has("MODEL", index)) { - shc.set_model(value[index].c_str()); - } - else { - shc.set_model(default_model); - } - - // DESC - if(name.has("DESC", index)) { - shc.set_desc(value[index].c_str()); - } - else { - shc.set_desc(default_desc); - } - - // FCST_LEAD - if(name.has("FCST_LEAD", index)) { - shc.set_fcst_lead_sec(timestring_to_sec(value[index].c_str())); - } - else { - shc.set_fcst_lead_sec(default_lead); - } - - // FCST_VALID_BEG, FCST_VALID_END - if(name.has("FCST_VALID_BEG", index)) { - shc.set_fcst_valid_beg(timestring_to_unix(value[index].c_str())); - not_has_FCST_VALID_BEG = false; - } - if(name.has("FCST_VALID_END", index)) { - shc.set_fcst_valid_end(timestring_to_unix(value[index].c_str())); - not_has_FCST_VALID_END = false; - } - - // OBS_LEAD - if(name.has("OBS_LEAD", index)) { - shc.set_obs_lead_sec(timestring_to_sec(value[index].c_str())); - } - else { - shc.set_obs_lead_sec(default_lead); - } - - // OBS_VALID_BEG, OBS_VALID_END - if(name.has("OBS_VALID_BEG", index)) { - 
shc.set_obs_valid_beg(timestring_to_unix(value[index].c_str())); - not_has_OBS_VALID_BEG = false; - } - if(name.has("OBS_VALID_END", index)) { - shc.set_obs_valid_end(timestring_to_unix(value[index].c_str())); - not_has_OBS_VALID_END = false; - } - - // FCST_VAR - if(name.has("FCST_VAR", index)) { - shc.set_fcst_var(value[index]); - not_has_FCST_VAR = false; - } - - // FCST_LEV - if(name.has("FCST_LEV", index)) { - shc.set_fcst_lev(value[index].c_str()); - } - else { - shc.set_fcst_lev(default_lev); - } - - // OBS_VAR - if(name.has("OBS_VAR", index)) { - shc.set_obs_var(value[index]); - not_has_OBS_VAR = false; - } - - // OBS_LEV - if(name.has("OBS_LEV", index)) { - shc.set_obs_lev(value[index].c_str()); - } - else { - shc.set_obs_lev(default_lev); - } - - // OBTYPE - if(name.has("OBTYPE", index)) { - shc.set_obtype(value[index].c_str()); - not_has_OBTYPE = false; - } - else { - shc.set_obtype(default_obtype); - } - - // VX_MASK - if(name.has("VX_MASK", index)) { - shc.set_mask(value[index].c_str()); - } - else { - shc.set_mask(default_vx_mask); - } - - // INTERP_MTHD - if(name.has("INTERP_MTHD", index)) { - shc.set_interp_mthd(value[index]); - } - else { - shc.set_interp_mthd((string)default_interp_mthd); - } - - // INTERP_PNTS - if(name.has("INTERP_PNTS", index)) { - shc.set_interp_wdth(nint(sqrt(atof(value[index].c_str())))); - } - else { - shc.set_interp_wdth(default_interp_wdth); - } - - // FCST_THRESH - if(name.has("FCST_THRESH", index)) st.set(value[index].c_str()); - else st.set(default_thresh); + SingleThresh st(na_str); + + // Initialize the header + shc.set_model("GSI"); + shc.set_desc(na_str); + shc.set_fcst_lead_sec(0); + shc.set_fcst_valid_beg(0); + shc.set_fcst_valid_end(0); + shc.set_obs_lead_sec(0); + shc.set_obs_valid_beg(0); + shc.set_obs_valid_end(0); + shc.set_fcst_var(na_str); + shc.set_fcst_units(na_str); + shc.set_fcst_lev(na_str); + shc.set_obs_var(na_str); + shc.set_obs_units(na_str); + shc.set_obs_lev(na_str); + shc.set_obtype(na_str); + 
shc.set_mask(na_str); + shc.set_interp_mthd(na_str); + shc.set_interp_wdth(0); shc.set_fcst_thresh(st); - - // OBS_THRESH - if(name.has("OBS_THRESH", index)) st.set(value[index].c_str()); - else st.set(default_thresh); shc.set_obs_thresh(st); - - // COV_THRESH - if(name.has("COV_THRESH", index)) st.set(value[index].c_str()); - else st.set(default_thresh); shc.set_cov_thresh(st); + shc.set_alpha(bad_data_double); + shc.set_line_type(default_line_type); - // ALPHA - if(name.has("ALPHA", index)) { - shc.set_alpha(atof(value[index].c_str())); - } - else { - shc.set_alpha(default_alpha); - } - - // LINE_TYPE - if(name.has("LINE_TYPE", index)) { - shc.set_line_type(value[index].c_str()); - } - else { - shc.set_line_type(default_line_type); - } + // Apply the -set_hdr options + shc.apply_set_hdr_opts(name, value); return; } diff --git a/src/tools/other/gsi_tools/gsi_util.h b/src/tools/other/gsi_tools/gsi_util.h index b635884ed2..dfd4390247 100644 --- a/src/tools/other/gsi_tools/gsi_util.h +++ b/src/tools/other/gsi_tools/gsi_util.h @@ -18,21 +18,9 @@ //////////////////////////////////////////////////////////////////////// // Constants -static const char default_model[] = "GSI"; -static const char default_desc[] = "NA"; -static const int default_lead = 0; -static const char default_lev[] = "NA"; -static const char default_obtype[] = "NA"; -static const char default_vx_mask[] = "NA"; -static const char default_interp_mthd[] = "NA"; -static const int default_interp_wdth = 0; -static const char default_thresh[] = "NA"; -static const double default_alpha = bad_data_double; - -static const int bad_setup_qc = -999; -static const char key_sep[] = ":"; - -static const char conv_id_str[] = "conv"; +static const int bad_setup_qc = -999; +static const char key_sep[] = ":"; +static const char conv_id_str[] = "conv"; static const char *micro_id_str [] = { "amsua", "amsub", "mhs", @@ -83,16 +71,6 @@ struct RadData { //////////////////////////////////////////////////////////////////////// 
-static bool not_has_FCST_VALID_BEG = true; -static bool not_has_FCST_VALID_END = true; -static bool not_has_OBS_VALID_BEG = true; -static bool not_has_OBS_VALID_END = true; -static bool not_has_FCST_VAR = true; -static bool not_has_OBS_VAR = true; -static bool not_has_OBTYPE = true; - -//////////////////////////////////////////////////////////////////////// - ConvData parse_conv_data(const ConvRecord &r, const int i); RadData parse_rad_data (const RadRecord &r, const int i, const int chval, const int use); diff --git a/src/tools/other/gsi_tools/gsid2mpr.cc b/src/tools/other/gsi_tools/gsid2mpr.cc index 8915b73ce6..d0468d3b08 100644 --- a/src/tools/other/gsi_tools/gsid2mpr.cc +++ b/src/tools/other/gsi_tools/gsid2mpr.cc @@ -16,12 +16,12 @@ // ---- ---- ---- ----------- // 000 06/09/15 Bullock New // 001 01/26/16 Halley Gotway Add -no_check_dup option. -// 002 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main -// 003 10/03/22 Prestopnik MET #2227 Remove using namespace std from header files +// 002 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main. +// 003 10/03/22 Prestopnik MET #2227 Remove using namespace std from header files. +// 004 07/17/24 Halley Gotway MET #2924 Support forecast climatology. 
// //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -47,7 +47,6 @@ using namespace std; - //////////////////////////////////////////////////////////////////////// static void process_conv(const char *conv_filename, const char *output_filename); @@ -365,13 +364,13 @@ void write_mpr_row_conv(AsciiTable &at, int row, const ConvData &d) { int col; // Update header for current data - if(not_has_FCST_VALID_BEG) shc.set_fcst_valid_beg(d.fcst_ut); - if(not_has_FCST_VALID_END) shc.set_fcst_valid_end(d.fcst_ut); - if(not_has_OBS_VALID_BEG) shc.set_obs_valid_beg(d.obs_ut); - if(not_has_OBS_VALID_END) shc.set_obs_valid_end(d.obs_ut); - if(not_has_FCST_VAR) shc.set_fcst_var(d.var); - if(not_has_OBS_VAR) shc.set_obs_var(d.var); - if(not_has_OBTYPE) shc.set_obtype(d.obtype.c_str()); + if(!hdr_name.has("FCST_VALID_BEG")) shc.set_fcst_valid_beg(d.fcst_ut); + if(!hdr_name.has("FCST_VALID_END")) shc.set_fcst_valid_end(d.fcst_ut); + if(!hdr_name.has("OBS_VALID_BEG")) shc.set_obs_valid_beg(d.obs_ut); + if(!hdr_name.has("OBS_VALID_END")) shc.set_obs_valid_end(d.obs_ut); + if(!hdr_name.has("FCST_VAR")) shc.set_fcst_var(d.var); + if(!hdr_name.has("OBS_VAR")) shc.set_obs_var(d.var); + if(!hdr_name.has("OBTYPE")) shc.set_obtype(d.obtype.c_str()); // Write header columns write_header_cols(shc, at, row); @@ -390,9 +389,11 @@ void write_mpr_row_conv(AsciiTable &at, int row, const ConvData &d) { at.set_entry(row, col++, d.guess); // FCST at.set_entry(row, col++, d.obs); // OBS at.set_entry(row, col++, d.obs_qc[0]); // OBS_QC - at.set_entry(row, col++, na_str); // CLIMO_MEAN - at.set_entry(row, col++, na_str); // CLIMO_STDEV - at.set_entry(row, col++, na_str); // CLIMO_CDF + at.set_entry(row, col++, na_str); // OBS_CLIMO_MEAN + at.set_entry(row, col++, na_str); // OBS_CLIMO_STDEV + at.set_entry(row, col++, na_str); // OBS_CLIMO_CDF + at.set_entry(row, col++, na_str); // FCST_CLIMO_MEAN + at.set_entry(row, col++, na_str); // FCST_CLIMO_STDEV 
// Write extra columns at.set_entry(row, col++, d.prs); // OBS_PRS @@ -420,12 +421,12 @@ void write_mpr_row_rad(AsciiTable &at, int row, const RadData & d) { int col; // Update header for current data - if(not_has_FCST_VALID_BEG) shc.set_fcst_valid_beg(d.fcst_ut); - if(not_has_FCST_VALID_END) shc.set_fcst_valid_end(d.fcst_ut); - if(not_has_OBS_VALID_BEG) shc.set_obs_valid_beg(d.obs_ut); - if(not_has_OBS_VALID_END) shc.set_obs_valid_end(d.obs_ut); - if(not_has_FCST_VAR) shc.set_fcst_var(d.var); - if(not_has_OBS_VAR) shc.set_obs_var(d.var); + if(!hdr_name.has("FCST_VALID_BEG")) shc.set_fcst_valid_beg(d.fcst_ut); + if(!hdr_name.has("FCST_VALID_END")) shc.set_fcst_valid_end(d.fcst_ut); + if(!hdr_name.has("OBS_VALID_BEG")) shc.set_obs_valid_beg(d.obs_ut); + if(!hdr_name.has("OBS_VALID_END")) shc.set_obs_valid_end(d.obs_ut); + if(!hdr_name.has("FCST_VAR")) shc.set_fcst_var(d.var); + if(!hdr_name.has("OBS_VAR")) shc.set_obs_var(d.var); // Write header columns write_header_cols(shc, at, row); @@ -444,9 +445,11 @@ void write_mpr_row_rad(AsciiTable &at, int row, const RadData & d) { at.set_entry(row, col++, d.guess); // FCST at.set_entry(row, col++, d.obs); // OBS at.set_entry(row, col++, d.obs_qc[0]); // OBS_QC - at.set_entry(row, col++, na_str); // CLIMO_MEAN - at.set_entry(row, col++, na_str); // CLIMO_STDEV - at.set_entry(row, col++, na_str); // CLIMO_CDF + at.set_entry(row, col++, na_str); // OBS_CLIMO_MEAN + at.set_entry(row, col++, na_str); // OBS_CLIMO_STDEV + at.set_entry(row, col++, na_str); // OBS_CLIMO_CDF + at.set_entry(row, col++, na_str); // FCST_CLIMO_MEAN + at.set_entry(row, col++, na_str); // FCST_CLIMO_STDEV // Write extra columns at.set_entry(row, col++, d.use); // CHAN_USE diff --git a/src/tools/other/gsi_tools/gsidens2orank.cc b/src/tools/other/gsi_tools/gsidens2orank.cc index 0c75717fa8..d7d4b5bee4 100644 --- a/src/tools/other/gsi_tools/gsidens2orank.cc +++ b/src/tools/other/gsi_tools/gsidens2orank.cc @@ -15,8 +15,9 @@ // Mod# Date Name Description // 
---- ---- ---- ----------- // 000 07/09/15 Halley Gotway New -// 001 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main -// 002 10/03/22 Prestopnik MET #2227 Remove namespace std from header files +// 001 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main. +// 002 10/03/22 Prestopnik MET #2227 Remove namespace std from header files. +// 003 07/17/24 Halley Gotway MET #2924 Support forecast climatology. // //////////////////////////////////////////////////////////////////////// @@ -266,10 +267,11 @@ void process_conv_data(ConvData &d, int i_mem) { conv_data.push_back(d); // Store the current observation info - ens_pd.add_point_obs(d.sid.c_str(), d.lat, d.lon, + ClimoPntInfo cpi(bad_data_double, bad_data_double, + bad_data_double, bad_data_double); + ens_pd.add_point_obs(d.obtype.c_str(), d.sid.c_str(), d.lat, d.lon, bad_data_double, bad_data_double, d.obs_ut, d.prs, - d.elv, d.obs, na_str, bad_data_double, bad_data_double, - default_grid_weight); + d.elv, d.obs, na_str, cpi, default_weight); // Initialize ensemble members and mean to bad data for(i=0; in_use); // N_USE @@ -696,12 +702,15 @@ void write_orank_row_rad(AsciiTable &at, int row, int i_obs) { } at.set_entry(row, col++, cs); // OBS_QC at.set_entry(row, col++, ens_pd.mn_na[i_obs]); // ENS_MEAN - at.set_entry(row, col++, bad_data_double); // CLIMO - at.set_entry(row, col++, square_root(ens_pd.var_na[i_obs])); // ENS_SPREAD + at.set_entry(row, col++, bad_data_double); // OBS_CLIMO_MEAN + at.set_entry(row, col++, square_root(ens_pd.var_na[i_obs])); // SPREAD at.set_entry(row, col++, bad_data_double); // ENS_MEAN_OERR at.set_entry(row, col++, bad_data_double); // SPREAD_OERR at.set_entry(row, col++, bad_data_double); // SPREAD_PLUS_OERR + at.set_entry(row, col++, bad_data_double); // OBS_CLIMO_STDEV + at.set_entry(row, col++, bad_data_double); // FCST_CLIMO_MEAN + at.set_entry(row, col++, bad_data_double); // FCST_CLIMO_STDEV // Write extra columns at.set_entry(row, col++, d->n_use); 
// N_USE diff --git a/src/tools/other/gsi_tools/rad_record.h b/src/tools/other/gsi_tools/rad_record.h index 9f7f11f1ea..9159946d08 100644 --- a/src/tools/other/gsi_tools/rad_record.h +++ b/src/tools/other/gsi_tools/rad_record.h @@ -180,25 +180,25 @@ class RadFile { //////////////////////////////////////////////////////////////////////// -inline bool RadFile::get_swap_endian() const { return ( SwapEndian ); } +inline bool RadFile::get_swap_endian() const { return SwapEndian; } -inline int RadFile::get_rec_pad_size() const { return ( RecPadSize ); } +inline int RadFile::get_rec_pad_size() const { return RecPadSize; } -inline unixtime RadFile::date() const { return ( Date ); } +inline unixtime RadFile::date() const { return Date; } -inline int RadFile::n_channels() const { return ( Nchannels ); } +inline int RadFile::n_channels() const { return Nchannels; } -inline int RadFile::n_diag() const { return ( Ndiag ); } -inline int RadFile::n_rec() const { return ( Nrec ); } -inline int RadFile::n_pair() const { return ( Npair ); } +inline int RadFile::n_diag() const { return Ndiag; } +inline int RadFile::n_rec() const { return Nrec ; } +inline int RadFile::n_pair() const { return Npair; } -inline int RadFile::n1() const { return ( N1 ); } -inline int RadFile::n2() const { return ( N2 ); } +inline int RadFile::n1() const { return N1; } +inline int RadFile::n2() const { return N2; } -inline int RadFile::n12() const { return ( N1*N2 ); } +inline int RadFile::n12() const { return N1*N2; } -inline int RadFile::iextra() const { return ( R_params.iextra ); } -inline int RadFile::jextra() const { return ( R_params.jextra ); } +inline int RadFile::iextra() const { return R_params.iextra; } +inline int RadFile::jextra() const { return R_params.jextra; } inline bool RadFile::has_extra() const { return ( (R_params.iextra != 0) && (R_params.jextra != 0) ); } @@ -271,12 +271,12 @@ class RadRecord : public GsiRecord { 
//////////////////////////////////////////////////////////////////////// -inline int RadRecord::n1() const { return ( N1 ); } -inline int RadRecord::n2() const { return ( N2 ); } +inline int RadRecord::n1() const { return N1; } +inline int RadRecord::n2() const { return N2; } inline bool RadRecord::has_extra() const { return ( extra != 0 ); } -inline unixtime RadRecord::date() const { return ( Date ); } +inline unixtime RadRecord::date() const { return Date; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/ioda2nc/Makefile.in b/src/tools/other/ioda2nc/Makefile.in index 37f335deae..cff7fbea89 100644 --- a/src/tools/other/ioda2nc/Makefile.in +++ b/src/tools/other/ioda2nc/Makefile.in @@ -237,6 +237,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/ioda2nc/ioda2nc.cc b/src/tools/other/ioda2nc/ioda2nc.cc index da466c79d2..6e2d57c25b 100644 --- a/src/tools/other/ioda2nc/ioda2nc.cc +++ b/src/tools/other/ioda2nc/ioda2nc.cc @@ -572,12 +572,12 @@ void process_ioda_file(int i_pb) { ? 
(npbmsg * nmsg_percent / 100) : nmsg; } - float *hdr_lat_arr = new float[nlocs]; - float *hdr_lon_arr = new float[nlocs]; - float *hdr_elv_arr = new float[nlocs]; - float *obs_pres_arr = new float[nlocs]; - float *obs_hght_arr = new float[nlocs]; - float *hdr_time_arr = new float[nlocs]; + vector hdr_lat_arr (nlocs); + vector hdr_lon_arr (nlocs); + vector hdr_elv_arr (nlocs); + vector obs_pres_arr (nlocs); + vector obs_hght_arr (nlocs); + vector hdr_time_arr (nlocs); char *hdr_vld_block = new char[nlocs*ndatetime]; char *hdr_msg_types = nullptr; char *hdr_station_ids = nullptr; @@ -592,9 +592,9 @@ void process_ioda_file(int i_pb) { for (int i=0; i ibuf(n_data, 0); mlog << Debug(2) << "Processing Lidar points\t= " << n_data << "\n"; @@ -379,17 +379,13 @@ mlog << Debug(2) << "Processing Lidar points\t= " << n_data << "\n"; // populate the hdr_typ variable // -memset(ibuf, 0, n_data*sizeof(int)); - -obs_vars->hdr_typ_var.putVar(ibuf); +obs_vars->hdr_typ_var.putVar(ibuf.data()); // // populate the hdr_sid variable // -memset(ibuf, 0, n_data*sizeof(int)); - -obs_vars->hdr_sid_var.putVar(ibuf); +obs_vars->hdr_sid_var.putVar(ibuf.data()); nc_point_obs.add_header_strings(hdr_typ_string, na_str); // @@ -401,13 +397,9 @@ nc_point_obs.add_header_strings(hdr_typ_string, na_str); float ff[2]; -float *fhdr_lat_buf = new float[n_data]; -float *fhdr_lon_buf = new float[n_data]; -float *fhdr_elv_buf = new float[n_data]; - -memset(fhdr_lat_buf, 0, n_data * sizeof(float)); -memset(fhdr_lon_buf, 0, n_data * sizeof(float)); -memset(fhdr_elv_buf, 0, n_data * sizeof(float)); +vector fhdr_lat_buf(n_data, 0.0); +vector fhdr_lon_buf(n_data, 0.0); +vector fhdr_elv_buf(n_data, 0.0); for (j=0; jhdr_lat_var.putVar(fhdr_lat_buf); -obs_vars->hdr_lon_var.putVar(fhdr_lon_buf); -obs_vars->hdr_elv_var.putVar(fhdr_elv_buf); +obs_vars->hdr_lat_var.putVar(fhdr_lat_buf.data()); +obs_vars->hdr_lon_var.putVar(fhdr_lon_buf.data()); +obs_vars->hdr_elv_var.putVar(fhdr_elv_buf.data()); -delete [] fhdr_lat_buf; 
-delete [] fhdr_lon_buf; -delete [] fhdr_elv_buf; - // // populate the hdr_vld variable // @@ -441,8 +429,6 @@ unixtime t; get_hdf_var_info(hdf_sd_id, hdf_time_name, info); -memset(ibuf, 0, n_data*sizeof(int)); - for (j=0; jhdr_vld_var.putVar(ibuf); +obs_vars->hdr_vld_var.putVar(ibuf.data()); -delete[] ibuf; - // // populate the obs_arr variable // @@ -585,7 +569,7 @@ void write_nc_record(const float * f, int qc_value) snprintf(junk, sizeof(junk), "%d", qc_value); nc_point_obs.write_observation(f, junk); } - + return; } diff --git a/src/tools/other/madis2nc/Makefile.in b/src/tools/other/madis2nc/Makefile.in index c6eb822fc3..e70b8e0d65 100644 --- a/src/tools/other/madis2nc/Makefile.in +++ b/src/tools/other/madis2nc/Makefile.in @@ -224,6 +224,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/madis2nc/madis2nc.cc b/src/tools/other/madis2nc/madis2nc.cc index 2c2c4f3853..8831dde2aa 100644 --- a/src/tools/other/madis2nc/madis2nc.cc +++ b/src/tools/other/madis2nc/madis2nc.cc @@ -305,38 +305,38 @@ void process_madis_file(const char *madis_file) { // Switch on the MADIS type and process accordingly. 
switch(my_mtype) { - case(MadisType::metar): + case MadisType::metar: process_madis_metar(f_in); break; - case(MadisType::raob): + case MadisType::raob: process_madis_raob(f_in); break; - case (MadisType::profiler): + case MadisType::profiler: process_madis_profiler(f_in); break; - case(MadisType::maritime): + case MadisType::maritime: process_madis_maritime(f_in); break; - case(MadisType::mesonet): + case MadisType::mesonet: process_madis_mesonet(f_in); break; - case(MadisType::acarsProfiles): + case MadisType::acarsProfiles: process_madis_acarsProfiles(f_in); break; - case(MadisType::coop): - case(MadisType::HDW): - case(MadisType::HDW1h): - case(MadisType::hydro): - case(MadisType::POES): - case(MadisType::acars): - case(MadisType::radiometer): - case(MadisType::sao): - case(MadisType::satrad): - case(MadisType::snow): - case(MadisType::none): + case MadisType::coop: + case MadisType::HDW: + case MadisType::HDW1h: + case MadisType::hydro: + case MadisType::POES: + case MadisType::acars: + case MadisType::radiometer: + case MadisType::sao: + case MadisType::satrad: + case MadisType::snow: + case MadisType::none: default: mlog << Error << "\nprocess_madis_file() -> " << "MADIS type (" << enum_class_as_int(my_mtype) @@ -3737,18 +3737,18 @@ void set_mask_poly(const StringArray & a) { //////////////////////////////////////////////////////////////////////// void set_mask_sid(const StringArray & a) { - ConcatString mask_name; // List the station ID mask mlog << Debug(1) << "Station ID Mask: " << a[0] << "\n"; - parse_sid_mask(a[0], mask_sid, mask_name); + MaskSID ms = parse_sid_mask(a[0]); + for(const auto &pair : ms.sid_map()) mask_sid.add(pair.first); // List the length of the station ID mask mlog << Debug(2) - << "Parsed Station ID Mask: " << mask_name - << " containing " << mask_sid.n_elements() << " points\n"; + << "Parsed Station ID Mask (" << ms.name() + << ") containing " << mask_sid.n() << " stations\n"; } 
//////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/madis2nc/madis2nc_conf_info.cc b/src/tools/other/madis2nc/madis2nc_conf_info.cc index c4b864ab13..953aa296f1 100644 --- a/src/tools/other/madis2nc/madis2nc_conf_info.cc +++ b/src/tools/other/madis2nc/madis2nc_conf_info.cc @@ -59,7 +59,7 @@ void Madis2NcConfInfo::clear() //////////////////////////////////////////////////////////////////////// void Madis2NcConfInfo::read_config(const string &default_filename, - const string &user_filename) + const string &user_filename) { // Read the config file constants diff --git a/src/tools/other/mode_graphics/Makefile.in b/src/tools/other/mode_graphics/Makefile.in index ead2e5dcb4..abc9301ea0 100644 --- a/src/tools/other/mode_graphics/Makefile.in +++ b/src/tools/other/mode_graphics/Makefile.in @@ -245,6 +245,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/mode_graphics/cgraph_font.h b/src/tools/other/mode_graphics/cgraph_font.h index f8dc85b537..f6970fbcbd 100644 --- a/src/tools/other/mode_graphics/cgraph_font.h +++ b/src/tools/other/mode_graphics/cgraph_font.h @@ -130,7 +130,7 @@ class CgFontCollection { //////////////////////////////////////////////////////////////////////// -inline int CgFontCollection::n_fonts() const { return ( Nelements ); } +inline int CgFontCollection::n_fonts() const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_graphics/cgraph_main.h b/src/tools/other/mode_graphics/cgraph_main.h index 6352ee23eb..f7f6ce148b 100644 --- a/src/tools/other/mode_graphics/cgraph_main.h +++ b/src/tools/other/mode_graphics/cgraph_main.h @@ -265,7 +265,7 @@ inline void CgraphBase::stroke() { cairo_stroke (Cr); return; } inline 
void CgraphBase::fill() { cairo_fill (Cr); return; } inline void CgraphBase::clip() { cairo_clip (Cr); return; } -inline const Box & CgraphBase::page() const { return ( PageBox ); } +inline const Box & CgraphBase::page() const { return PageBox; } inline void CgraphBase::setlinecap_butt() { cairo_set_line_cap (Cr, CAIRO_LINE_CAP_BUTT); return; } inline void CgraphBase::setlinecap_round() { cairo_set_line_cap (Cr, CAIRO_LINE_CAP_ROUND); return; } @@ -275,7 +275,7 @@ inline void CgraphBase::setlinejoin_miter() { cairo_set_line_join (Cr, CAIRO_LIN inline void CgraphBase::setlinejoin_round() { cairo_set_line_join (Cr, CAIRO_LINE_JOIN_ROUND); return; } inline void CgraphBase::setlinejoin_bevel() { cairo_set_line_join (Cr, CAIRO_LINE_JOIN_BEVEL); return; } -inline double CgraphBase::c_fudge_y(double __Y__) const { return ( __Y__ ); } +inline double CgraphBase::c_fudge_y(double __Y__) const { return __Y__; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_graphics/color_stack.h b/src/tools/other/mode_graphics/color_stack.h index f225905cd2..4bc5b543d2 100644 --- a/src/tools/other/mode_graphics/color_stack.h +++ b/src/tools/other/mode_graphics/color_stack.h @@ -72,7 +72,7 @@ class ColorStack { //////////////////////////////////////////////////////////////////////// -inline int ColorStack::depth() const { return ( Nelements ); } +inline int ColorStack::depth() const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_graphics/mode_nc_output_file.h b/src/tools/other/mode_graphics/mode_nc_output_file.h index 177c059905..0187f6a169 100644 --- a/src/tools/other/mode_graphics/mode_nc_output_file.h +++ b/src/tools/other/mode_graphics/mode_nc_output_file.h @@ -161,23 +161,23 @@ class ModeNcOutputFile { //////////////////////////////////////////////////////////////////////// -inline int ModeNcOutputFile::nx() const { return ( Nx ); } -inline int 
ModeNcOutputFile::ny() const { return ( Ny ); } +inline int ModeNcOutputFile::nx() const { return Nx; } +inline int ModeNcOutputFile::ny() const { return Ny; } -inline const Grid & ModeNcOutputFile::grid() const { return ( *_Grid ); } +inline const Grid & ModeNcOutputFile::grid() const { return *_Grid; } -inline ConcatString ModeNcOutputFile::filename() const { return ( Filename ); } +inline ConcatString ModeNcOutputFile::filename() const { return Filename; } -inline unixtime ModeNcOutputFile::valid_time() const { return ( ValidTime ); } -inline unixtime ModeNcOutputFile::init_time () const { return ( InitTime ); } +inline unixtime ModeNcOutputFile::valid_time() const { return ValidTime; } +inline unixtime ModeNcOutputFile::init_time () const { return InitTime; } -inline int ModeNcOutputFile::accum_time () const { return ( AccumTime ); } +inline int ModeNcOutputFile::accum_time () const { return AccumTime; } -// inline int ModeNcOutputFile::n_fcst_objs () const { return ( NFcstObjs ); } -// inline int ModeNcOutputFile::n_obs_objs () const { return ( NObsObjs ); } +// inline int ModeNcOutputFile::n_fcst_objs () const { return NFcstObjs; } +// inline int ModeNcOutputFile::n_obs_objs () const { return NObsObjs ; } // -// inline int ModeNcOutputFile::n_fcst_clus () const { return ( NFcstClus ); } -// inline int ModeNcOutputFile::n_obs_clus () const { return ( NObsClus ); } +// inline int ModeNcOutputFile::n_fcst_clus () const { return NFcstClus; } +// inline int ModeNcOutputFile::n_obs_clus () const { return NObsClus ; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_graphics/plot_mode_field.cc b/src/tools/other/mode_graphics/plot_mode_field.cc index 6f5e7fe26c..bf37364728 100644 --- a/src/tools/other/mode_graphics/plot_mode_field.cc +++ b/src/tools/other/mode_graphics/plot_mode_field.cc @@ -920,16 +920,16 @@ void draw_map(Cgraph & plot, const Box & map_box, const Grid & grid) { int j; -Dictionary & s = 
*(sources); -const DictionaryEntry * e = 0; -Dictionary * dict = 0; +Dictionary & s = *sources; +const DictionaryEntry * e = nullptr; +Dictionary * dict = nullptr; for (j=0; jis_dictionary()) ) { + if ( ! e->is_dictionary() ) { mlog << Error << "\n\n " << program_name << ": draw_map() -> non-dictionary found " diff --git a/src/tools/other/mode_time_domain/2d_att.cc b/src/tools/other/mode_time_domain/2d_att.cc index 2ea1ff0844..f65ed2ae7f 100644 --- a/src/tools/other/mode_time_domain/2d_att.cc +++ b/src/tools/other/mode_time_domain/2d_att.cc @@ -7,7 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - //////////////////////////////////////////////////////////////////////// @@ -399,15 +398,7 @@ a.ObjectNumber = obj_number; moments = mask_2d.calc_2d_moments(); -if ( moments.N == 0 ) { - - // mlog << Error << "\n\n calc_2d_single_atts() -> empty object!\n\n"; - - // exit ( 1 ); - - return a; - -} +if ( moments.N == 0 ) return a; a.Xbar = (moments.Sx)/(moments.N); a.Ybar = (moments.Sy)/(moments.N); @@ -430,7 +421,8 @@ values = new float [a.Area]; if ( !values ) { - mlog << Error << "\n\n calc_2d_single_atts() -> memory allocation error\n\n"; + mlog << Error << "\ncalc_2d_single_atts() -> " + << "memory allocation error\n\n"; exit ( 1 ); @@ -626,6 +618,3 @@ return; //////////////////////////////////////////////////////////////////////// - - - diff --git a/src/tools/other/mode_time_domain/2d_att.h b/src/tools/other/mode_time_domain/2d_att.h index 6975969278..75f6260ea1 100644 --- a/src/tools/other/mode_time_domain/2d_att.h +++ b/src/tools/other/mode_time_domain/2d_att.h @@ -162,48 +162,48 @@ class SingleAtt2D { //////////////////////////////////////////////////////////////////////// -inline int SingleAtt2D::object_number() const { return ( ObjectNumber ); } +inline int SingleAtt2D::object_number() const { return ObjectNumber; } -inline int SingleAtt2D::cluster_number() const { return ( ClusterNumber ); } +inline int SingleAtt2D::cluster_number() const { 
return ClusterNumber; } -inline int SingleAtt2D::time_index() const { return ( TimeIndex ); } +inline int SingleAtt2D::time_index() const { return TimeIndex; } inline void SingleAtt2D::set_object_number (int _n) { ObjectNumber = _n; return; } inline void SingleAtt2D::set_cluster_number (int _n) { ClusterNumber = _n; return; } -inline int SingleAtt2D::area() const { return ( Area ); } +inline int SingleAtt2D::area() const { return Area; } inline void SingleAtt2D::set_area(int _A) { Area = _A; return; } inline void SingleAtt2D::set_time_index(int _t) { TimeIndex = _t; return; } -inline double SingleAtt2D::xbar() const { return ( Xbar ); } -inline double SingleAtt2D::ybar() const { return ( Ybar ); } +inline double SingleAtt2D::xbar() const { return Xbar; } +inline double SingleAtt2D::ybar() const { return Ybar; } -inline double SingleAtt2D::centroid_lat() const { return ( CentroidLat ); } -inline double SingleAtt2D::centroid_lon() const { return ( CentroidLon ); } +inline double SingleAtt2D::centroid_lat() const { return CentroidLat; } +inline double SingleAtt2D::centroid_lon() const { return CentroidLon; } -inline double SingleAtt2D::axis() const { return ( AxisAngle ); } +inline double SingleAtt2D::axis() const { return AxisAngle; } -inline double SingleAtt2D::ptile_10() const { return ( Ptile_10 ); } -inline double SingleAtt2D::ptile_25() const { return ( Ptile_25 ); } -inline double SingleAtt2D::ptile_50() const { return ( Ptile_50 ); } -inline double SingleAtt2D::ptile_75() const { return ( Ptile_75 ); } -inline double SingleAtt2D::ptile_90() const { return ( Ptile_90 ); } +inline double SingleAtt2D::ptile_10() const { return Ptile_10; } +inline double SingleAtt2D::ptile_25() const { return Ptile_25; } +inline double SingleAtt2D::ptile_50() const { return Ptile_50; } +inline double SingleAtt2D::ptile_75() const { return Ptile_75; } +inline double SingleAtt2D::ptile_90() const { return Ptile_90; } -inline int SingleAtt2D::ptile_value() const { return ( 
Ptile_Value ); } -inline double SingleAtt2D::ptile_user() const { return ( Ptile_User ); } +inline int SingleAtt2D::ptile_value() const { return Ptile_Value; } +inline double SingleAtt2D::ptile_user() const { return Ptile_User ; } -inline bool SingleAtt2D::is_fcst() const { return ( IsFcst ); } -inline bool SingleAtt2D::is_obs () const { return ( ! IsFcst ); } +inline bool SingleAtt2D::is_fcst() const { return IsFcst; } +inline bool SingleAtt2D::is_obs () const { return !IsFcst; } -inline bool SingleAtt2D::is_cluster () const { return ( Is_Cluster ); } -inline bool SingleAtt2D::is_simple () const { return ( ! Is_Cluster ); } +inline bool SingleAtt2D::is_cluster () const { return Is_Cluster; } +inline bool SingleAtt2D::is_simple () const { return !Is_Cluster; } -inline unixtime SingleAtt2D::valid_time () const { return ( ValidTime ); } +inline unixtime SingleAtt2D::valid_time () const { return ValidTime; } -inline int SingleAtt2D::lead_time () const { return ( Lead_Time ); } +inline int SingleAtt2D::lead_time () const { return Lead_Time; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_time_domain/2d_att_array.cc b/src/tools/other/mode_time_domain/2d_att_array.cc index 86a3f99081..b805304c0f 100644 --- a/src/tools/other/mode_time_domain/2d_att_array.cc +++ b/src/tools/other/mode_time_domain/2d_att_array.cc @@ -7,20 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - -//////////////////////////////////////////////////////////////////////// - - - // - // Warning: This file is machine generated - // - // Do not edit by hand - // - // - // Created by arraygen on September 17, 2015 10:11 am MDT - // - - //////////////////////////////////////////////////////////////////////// @@ -127,13 +113,10 @@ void SingleAtt2DArray::clear() if ( e ) { delete [] e; e = (SingleAtt2D *) nullptr; } - Nelements = 0; Nalloc = 0; -// AllocInc = 50; // don't reset AllocInc - return; @@ -174,7 +157,8 @@ SingleAtt2D 
* u = new SingleAtt2D [N]; if ( !u ) { - mlog << Error << "SingleAtt2DArray::extend(int) -> memory allocation error\n\n"; + mlog << Error << "\nSingleAtt2DArray::extend(int) -> " + << "memory allocation error\n\n"; exit ( 1 ); @@ -238,7 +222,8 @@ void SingleAtt2DArray::set_alloc_inc(int N) if ( N < 0 ) { - mlog << Error << "SingleAtt2DArray::set_alloc_int(int) -> bad value ... " << N << "\n\n"; + mlog << Error << "\nSingleAtt2DArray::set_alloc_int(int) -> " + << "bad value ... " << N << "\n\n"; exit ( 1 ); @@ -299,7 +284,8 @@ SingleAtt2D & SingleAtt2DArray::operator[](int N) const if ( (N < 0) || (N >= Nelements) ) { - mlog << Error << "\n\n SingleAtt2DArray::operator[](int) -> range check error ... " << N << "\n\n"; + mlog << Error << "\nSingleAtt2DArray::operator[](int) -> " + << "range check error ... " << N << "\n\n"; exit ( 1 ); } @@ -344,8 +330,8 @@ unixtime SingleAtt2DArray::valid_time(int index) const if ( (index < 0) || (index >= Nelements) ) { - mlog << Error - << "SingleAtt2DArray::valid_time(int) const -> range check error\n\n"; + mlog << Error << "\nSingleAtt2DArray::valid_time(int) const -> " + << "range check error\n\n"; exit ( 1 ); @@ -365,8 +351,8 @@ int SingleAtt2DArray::lead_time(int index) const if ( (index < 0) || (index >= Nelements) ) { - mlog << Error - << "SingleAtt2DArray::lead_time(int) const -> range check error\n\n"; + mlog << Error << "\nSingleAtt2DArray::lead_time(int) const -> " + << "range check error\n\n"; exit ( 1 ); @@ -386,8 +372,8 @@ int SingleAtt2DArray::time_index(int index) const if ( (index < 0) || (index >= Nelements) ) { - mlog << Error - << "SingleAtt2DArray::time_index(int) const -> range check error\n\n"; + mlog << Error << "\nSingleAtt2DArray::time_index(int) const -> " + << "range check error\n\n"; exit ( 1 ); @@ -401,5 +387,3 @@ return e[index].time_index(); //////////////////////////////////////////////////////////////////////// - - diff --git a/src/tools/other/mode_time_domain/2d_att_array.h 
b/src/tools/other/mode_time_domain/2d_att_array.h index 6963664805..968b917211 100644 --- a/src/tools/other/mode_time_domain/2d_att_array.h +++ b/src/tools/other/mode_time_domain/2d_att_array.h @@ -84,8 +84,8 @@ class SingleAtt2DArray { //////////////////////////////////////////////////////////////////////// -inline int SingleAtt2DArray::n_elements() const { return ( Nelements ); } -inline int SingleAtt2DArray::n () const { return ( Nelements ); } +inline int SingleAtt2DArray::n_elements() const { return Nelements; } +inline int SingleAtt2DArray::n () const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_time_domain/2d_moments.cc b/src/tools/other/mode_time_domain/2d_moments.cc index ed1053d819..fc3cad6289 100644 --- a/src/tools/other/mode_time_domain/2d_moments.cc +++ b/src/tools/other/mode_time_domain/2d_moments.cc @@ -7,7 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - //////////////////////////////////////////////////////////////////////// @@ -25,12 +24,10 @@ using namespace std; //////////////////////////////////////////////////////////////////////// - // // Code for class Mtd_2D_Moments // - //////////////////////////////////////////////////////////////////////// @@ -177,7 +174,8 @@ void Mtd_2D_Moments::centralize() if ( N == 0 ) { - mlog << Error << "\n\n Mtd_2D_Moments::centralize() -> no data!\n\n"; + mlog << Error << "\nMtd_2D_Moments::centralize() -> " + << "no data!\n\n"; exit ( 1 ); @@ -220,7 +218,8 @@ double Mtd_2D_Moments::calc_2D_axis_plane_angle() const if ( ! 
IsCentralized ) { - mlog << Error << "\n\n Mtd_2D_Moments::calc_2D_axis_plane_angle() const -> moments must be centralized first!\n\n"; + mlog << Error << "\nMtd_2D_Moments::calc_2D_axis_plane_angle() const -> " + << "moments must be centralized first!\n\n"; exit ( 1 ); @@ -242,6 +241,3 @@ return angle; //////////////////////////////////////////////////////////////////////// - - - diff --git a/src/tools/other/mode_time_domain/2d_moments.h b/src/tools/other/mode_time_domain/2d_moments.h index 1f379a79f5..3dcf4c995c 100644 --- a/src/tools/other/mode_time_domain/2d_moments.h +++ b/src/tools/other/mode_time_domain/2d_moments.h @@ -57,7 +57,6 @@ class Mtd_2D_Moments { int area() const; - // // do stuff // @@ -66,18 +65,15 @@ class Mtd_2D_Moments { void centralize(); - double calc_2D_axis_plane_angle() const; - - }; //////////////////////////////////////////////////////////////////////// -inline int Mtd_2D_Moments::area() const { return ( N ); } +inline int Mtd_2D_Moments::area() const { return N; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_time_domain/3d_att.cc b/src/tools/other/mode_time_domain/3d_att.cc index 3f30a12786..a347be824e 100644 --- a/src/tools/other/mode_time_domain/3d_att.cc +++ b/src/tools/other/mode_time_domain/3d_att.cc @@ -7,7 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - //////////////////////////////////////////////////////////////////////// @@ -779,8 +778,6 @@ IntersectionVol = 0; IsSimple = true; -// UnionVol = 0; - TimeCentroidDelta = 0.0; SpaceCentroidDist = 0.0; @@ -830,9 +827,6 @@ ObsClusterNumber = a.ObsClusterNumber; IntersectionVol = a.IntersectionVol; -// UnionVol = a.UnionVol; - - TimeCentroidDelta = a.TimeCentroidDelta; SpaceCentroidDist = a.SpaceCentroidDist; @@ -954,20 +948,6 @@ return; } -//////////////////////////////////////////////////////////////////////// - -/* -void PairAtt3D::set_union_volume(int k) - -{ - -UnionVol = k; - -return; - -} -*/ 
- //////////////////////////////////////////////////////////////////////// @@ -1261,7 +1241,7 @@ moments = mask.calc_3d_moments(); if ( moments.N == 0 ) { - mlog << Error << "\n\ncalc_3d_single_atts() -> " + mlog << Error << "\ncalc_3d_single_atts() -> " << "empty object!\n\n"; exit ( 1 ); @@ -1354,7 +1334,7 @@ values = new float [Vol]; if ( !values ) { - mlog << Error << "\n\ncalc_3d_single_atts() -> " + mlog << Error << "\ncalc_3d_single_atts() -> " << "memory allocation error\n\n"; exit ( 1 ); @@ -1453,7 +1433,6 @@ for (x=0; x<(fcst_obj.nx()); ++x) { } p.set_intersection_volume (IV); -// p.set_union_volume (UV); // // centroid distances @@ -1546,188 +1525,6 @@ return p; } -//////////////////////////////////////////////////////////////////////// - -/* -double calc_total_interest(const PairAtt3D & p, const MtdConfigInfo & conf) - -{ - -double t = 0.0; -double num, den; // numerator and denominator in the expression for total interest -double I, w; -PiecewiseLinear * f = 0; - -num = 0.0; -den = 0.0; - - // - // We don't need to use "is_eq" to check whether each weight is - // nonzero, because the MtdConfigInfo::read_config() function has - // already done that. That same function has already tested that - // the weights are not all zero. 
- // - - // - // space centroid dist - // - -w = conf.space_centroid_dist_wt; - -if ( w != 0.0 ) { - - f = conf.space_centroid_dist_if; - - I = (*f)(p.space_centroid_dist()); - - num += w*I; - - den += w; - -} - - // - // time centroid delta - // - -w = conf.time_centroid_delta_wt; - -if ( w != 0.0 ) { - - f = conf.time_centroid_delta_if; - - I = (*f)(p.time_centroid_delta()); - - num += w*I; - - den += w; - -} - - // - // speed delta - // - -w = conf.speed_delta_wt; - -if ( w != 0.0 ) { - - f = conf.speed_delta_if; - - I = (*f)(p.speed_delta()); - - num += w*I; - - den += w; - -} - - // - // direction difference - // - -w = conf.direction_diff_wt; - -if ( w != 0.0 ) { - - f = conf.direction_diff_if; - - I = (*f)(p.direction_difference()); - - num += w*I; - - den += w; - -} - - // - // volume ratio - // - -w = conf.volume_ratio_wt; - -if ( w != 0.0 ) { - - f = conf.volume_ratio_if; - - I = (*f)(p.volume_ratio()); - - num += w*I; - - den += w; - -} - - // - // axis angle difference - // - -w = conf.axis_angle_diff_wt; - -if ( w != 0.0 ) { - - f = conf.axis_angle_diff_if; - - I = (*f)(p.axis_angle_diff()); - - num += w*I; - - den += w; - -} - - // - // start time delta - // - -w = conf.start_time_delta_wt; - -if ( w != 0.0 ) { - - f = conf.start_time_delta_if; - - I = (*f)(p.start_time_delta()); - - num += w*I; - - den += w; - -} - - // - // end time delta - // - -w = conf.end_time_delta_wt; - -if ( w != 0.0 ) { - - f = conf.end_time_delta_if; - - I = (*f)(p.end_time_delta()); - - num += w*I; - - den += w; - -} - - // - // The denominator is just the sum of the weights, - // which, as stated above, we already know is nonzero. 
- // - -t = num/den; - - // - // done - // - -return t; - -} -*/ - //////////////////////////////////////////////////////////////////////// @@ -1738,15 +1535,6 @@ double calc_2d_dist(const double x1_grid, const double y1_grid, double dist; - // - // distance in grid units - // - -// const double dx = x1_grid - x2_grid; -// const double dy = y1_grid - y2_grid; -// -// dist = sqrt ( dx*dx + dy*dy ); - // // great circle distance // @@ -1771,8 +1559,3 @@ return dist; //////////////////////////////////////////////////////////////////////// - - - - - diff --git a/src/tools/other/mode_time_domain/3d_att.h b/src/tools/other/mode_time_domain/3d_att.h index 8d1db58227..d0b6084316 100644 --- a/src/tools/other/mode_time_domain/3d_att.h +++ b/src/tools/other/mode_time_domain/3d_att.h @@ -196,58 +196,58 @@ class SingleAtt3D { //////////////////////////////////////////////////////////////////////// -inline int SingleAtt3D::object_number () const { return ( ObjectNumber ); } -inline int SingleAtt3D::cluster_number () const { return ( ClusterNumber ); } +inline int SingleAtt3D::object_number () const { return ObjectNumber ; } +inline int SingleAtt3D::cluster_number () const { return ClusterNumber; } -inline bool SingleAtt3D::is_fcst () const { return ( IsFcst ); } -inline bool SingleAtt3D::is_obs () const { return ( ! IsFcst ); } +inline bool SingleAtt3D::is_fcst () const { return IsFcst; } +inline bool SingleAtt3D::is_obs () const { return !IsFcst; } -inline bool SingleAtt3D::is_simple () const { return ( IsSimple ); } -inline bool SingleAtt3D::is_cluster () const { return ( ! 
IsSimple ); } +inline bool SingleAtt3D::is_simple () const { return IsSimple; } +inline bool SingleAtt3D::is_cluster () const { return !IsSimple; } inline void SingleAtt3D::set_object_number (int _n) { ObjectNumber = _n; return; } inline void SingleAtt3D::set_cluster_number (int _n) { ClusterNumber = _n; return; } -inline int SingleAtt3D::volume() const { return ( Volume ); } +inline int SingleAtt3D::volume() const { return Volume; } inline void SingleAtt3D::set_volume(int _v) { Volume = _v; return; } -inline double SingleAtt3D::complexity() const { return ( Complexity ); } +inline double SingleAtt3D::complexity() const { return Complexity; } inline void SingleAtt3D::set_complexity(double _v) { Complexity = _v; return; } -inline double SingleAtt3D::xbar() const { return ( Xbar ); } -inline double SingleAtt3D::ybar() const { return ( Ybar ); } -inline double SingleAtt3D::tbar() const { return ( Tbar ); } +inline double SingleAtt3D::xbar() const { return Xbar; } +inline double SingleAtt3D::ybar() const { return Ybar; } +inline double SingleAtt3D::tbar() const { return Tbar; } -inline double SingleAtt3D::centroid_lat() const { return ( Centroid_Lat ); } -inline double SingleAtt3D::centroid_lon() const { return ( Centroid_Lon ); } +inline double SingleAtt3D::centroid_lat() const { return Centroid_Lat; } +inline double SingleAtt3D::centroid_lon() const { return Centroid_Lon; } -inline int SingleAtt3D::xmin() const { return ( Xmin ); } -inline int SingleAtt3D::xmax() const { return ( Xmax ); } +inline int SingleAtt3D::xmin() const { return Xmin; } +inline int SingleAtt3D::xmax() const { return Xmax; } -inline int SingleAtt3D::ymin() const { return ( Ymin ); } -inline int SingleAtt3D::ymax() const { return ( Ymax ); } +inline int SingleAtt3D::ymin() const { return Ymin; } +inline int SingleAtt3D::ymax() const { return Ymax; } -inline int SingleAtt3D::tmin() const { return ( Tmin ); } -inline int SingleAtt3D::tmax() const { return ( Tmax ); } +inline int 
SingleAtt3D::tmin() const { return Tmin; } +inline int SingleAtt3D::tmax() const { return Tmax; } -inline double SingleAtt3D::xdot() const { return ( Xvelocity ); } -inline double SingleAtt3D::ydot() const { return ( Yvelocity ); } +inline double SingleAtt3D::xdot() const { return Xvelocity; } +inline double SingleAtt3D::ydot() const { return Yvelocity; } -inline double SingleAtt3D::spatial_axis() const { return ( SpatialAxisAngle ); } +inline double SingleAtt3D::spatial_axis() const { return SpatialAxisAngle; } -inline double SingleAtt3D::cdist_travelled() const { return ( CdistTravelled ); } +inline double SingleAtt3D::cdist_travelled() const { return CdistTravelled; } -inline double SingleAtt3D::ptile_10() const { return ( Ptile_10 ); } -inline double SingleAtt3D::ptile_25() const { return ( Ptile_25 ); } -inline double SingleAtt3D::ptile_50() const { return ( Ptile_50 ); } -inline double SingleAtt3D::ptile_75() const { return ( Ptile_75 ); } -inline double SingleAtt3D::ptile_90() const { return ( Ptile_90 ); } +inline double SingleAtt3D::ptile_10() const { return Ptile_10; } +inline double SingleAtt3D::ptile_25() const { return Ptile_25; } +inline double SingleAtt3D::ptile_50() const { return Ptile_50; } +inline double SingleAtt3D::ptile_75() const { return Ptile_75; } +inline double SingleAtt3D::ptile_90() const { return Ptile_90; } -inline int SingleAtt3D::ptile_value() const { return ( Ptile_Value ); } -inline double SingleAtt3D::ptile_user() const { return ( Ptile_User ); } +inline int SingleAtt3D::ptile_value() const { return Ptile_Value; } +inline double SingleAtt3D::ptile_user() const { return Ptile_User ; } //////////////////////////////////////////////////////////////////////// @@ -308,9 +308,6 @@ class PairAtt3D { void dump(std::ostream &, int depth = 0) const; - // SingleAtt3D Fcst; - // SingleAtt3D Obs; - // // set stuff // @@ -388,33 +385,32 @@ class PairAtt3D { //////////////////////////////////////////////////////////////////////// -inline int 
PairAtt3D::fcst_obj_number() const { return ( FcstObjectNumber ); } -inline int PairAtt3D::obs_obj_number() const { return ( ObsObjectNumber ); } +inline int PairAtt3D::fcst_obj_number() const { return FcstObjectNumber; } +inline int PairAtt3D::obs_obj_number() const { return ObsObjectNumber; } -inline int PairAtt3D::fcst_cluster_number() const { return ( FcstClusterNumber ); } -inline int PairAtt3D::obs_cluster_number() const { return ( ObsClusterNumber ); } +inline int PairAtt3D::fcst_cluster_number() const { return FcstClusterNumber; } +inline int PairAtt3D::obs_cluster_number() const { return ObsClusterNumber; } -inline int PairAtt3D::intersection_vol () const { return ( IntersectionVol ); } -// inline int PairAtt3D::union_vol () const { return ( UnionVol ); } +inline int PairAtt3D::intersection_vol () const { return IntersectionVol; } -inline double PairAtt3D::time_centroid_delta () const { return ( TimeCentroidDelta ); } -inline double PairAtt3D::space_centroid_dist () const { return ( SpaceCentroidDist ); } +inline double PairAtt3D::time_centroid_delta () const { return TimeCentroidDelta; } +inline double PairAtt3D::space_centroid_dist () const { return SpaceCentroidDist; } -inline double PairAtt3D::direction_difference () const { return ( DirectionDifference ); } -inline double PairAtt3D::speed_delta () const { return ( SpeedDelta ); } +inline double PairAtt3D::direction_difference () const { return DirectionDifference; } +inline double PairAtt3D::speed_delta () const { return SpeedDelta; } -inline double PairAtt3D::volume_ratio () const { return ( VolumeRatio ); } -inline double PairAtt3D::axis_angle_diff () const { return ( AxisDiff ); } +inline double PairAtt3D::volume_ratio () const { return VolumeRatio; } +inline double PairAtt3D::axis_angle_diff () const { return AxisDiff; } -inline int PairAtt3D::start_time_delta () const { return ( StartTimeDelta ); } -inline int PairAtt3D::end_time_delta () const { return ( EndTimeDelta ); } +inline int 
PairAtt3D::start_time_delta () const { return StartTimeDelta; } +inline int PairAtt3D::end_time_delta () const { return EndTimeDelta; } -inline int PairAtt3D::duration_difference () const { return ( DurationDifference ); } +inline int PairAtt3D::duration_difference () const { return DurationDifference; } -inline double PairAtt3D::total_interest () const { return ( TotalInterest ); } +inline double PairAtt3D::total_interest () const { return TotalInterest; } -inline bool PairAtt3D::is_simple () const { return ( IsSimple ); } -inline bool PairAtt3D::is_cluster () const { return ( ! IsSimple ); } +inline bool PairAtt3D::is_simple () const { return IsSimple; } +inline bool PairAtt3D::is_cluster () const { return !IsSimple; } inline void PairAtt3D::set_simple () { IsSimple = true; return; } inline void PairAtt3D::set_cluster () { IsSimple = false; return; } @@ -430,8 +426,6 @@ extern PairAtt3D calc_3d_pair_atts(const Object & _fcst_obj, const SingleAtt3D & _fa, const SingleAtt3D & _oa); -// extern double calc_total_interest(const PairAtt3D &, const MtdConfigInfo &); - //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_time_domain/3d_att_pair_array.cc b/src/tools/other/mode_time_domain/3d_att_pair_array.cc index cd5bcd1ad6..8ff840be1c 100644 --- a/src/tools/other/mode_time_domain/3d_att_pair_array.cc +++ b/src/tools/other/mode_time_domain/3d_att_pair_array.cc @@ -7,7 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - //////////////////////////////////////////////////////////////////////// @@ -113,15 +112,10 @@ void PairAtt3DArray::clear() if ( e ) { delete [] e; e = (PairAtt3D *) nullptr; } - - Nelements = 0; Nalloc = 0; -// AllocInc = 100; // don't reset AllocInc - - return; } @@ -161,7 +155,8 @@ PairAtt3D * u = new PairAtt3D [N]; if ( !u ) { - mlog << Error << "PairAtt3DArray::extend(int) -> memory allocation error\n\n"; + mlog << Error << "\nPairAtt3DArray::extend(int) -> " + << "memory 
allocation error\n\n"; exit ( 1 ); @@ -225,7 +220,8 @@ void PairAtt3DArray::set_alloc_inc(int N) if ( N < 0 ) { - mlog << Error << "PairAtt3DArray::set_alloc_int(int) -> bad value ... " << N << "\n\n"; + mlog << Error << "\nPairAtt3DArray::set_alloc_int(int) -> " + << "bad value ... " << N << "\n\n"; exit ( 1 ); @@ -286,7 +282,8 @@ PairAtt3D & PairAtt3DArray::operator[](int N) const if ( (N < 0) || (N >= Nelements) ) { - mlog << Error << "\n\n PairAtt3DArray::operator[](int) -> range check error ... " << N << "\n\n"; + mlog << Error << "\nPairAtt3DArray::operator[](int) -> " + << "range check error ... " << N << "\n\n"; exit ( 1 ); } @@ -305,7 +302,8 @@ int PairAtt3DArray::fcst_obj_number(int k) const if ( (k < 0) || (k >= Nelements) ) { - mlog << Error << "\n\n PairAtt3DArray::fcst_obj_number(int) -> range check error\n\n"; + mlog << Error << "\nPairAtt3DArray::fcst_obj_number(int) -> " + << "range check error\n\n"; exit ( 1 ); @@ -325,7 +323,8 @@ int PairAtt3DArray::obs_obj_number(int k) const if ( (k < 0) || (k >= Nelements) ) { - mlog << Error << "\n\n PairAtt3DArray::obs_obj_number(int) -> range check error\n\n"; + mlog << Error << "\nPairAtt3DArray::obs_obj_number(int) -> " + << "range check error\n\n"; exit ( 1 ); @@ -345,7 +344,8 @@ int PairAtt3DArray::fcst_cluster_number(int k) const if ( (k < 0) || (k >= Nelements) ) { - mlog << Error << "\n\n PairAtt3DArray::fcst_cluster_number(int) -> range check error\n\n"; + mlog << Error << "\nPairAtt3DArray::fcst_cluster_number(int) -> " + << "range check error\n\n"; exit ( 1 ); @@ -365,7 +365,8 @@ int PairAtt3DArray::obs_cluster_number(int k) const if ( (k < 0) || (k >= Nelements) ) { - mlog << Error << "\n\n PairAtt3DArray::obs_cluster_number(int) -> range check error\n\n"; + mlog << Error << "\nPairAtt3DArray::obs_cluster_number(int) -> " + << "range check error\n\n"; exit ( 1 ); @@ -385,7 +386,8 @@ double PairAtt3DArray::total_interest(int k) const if ( (k < 0) || (k >= Nelements) ) { - mlog << Error << "\n\n 
PairAtt3DArray::total_interest(int) -> range check error\n\n"; + mlog << Error << "\nPairAtt3DArray::total_interest(int) -> " + << "range check error\n\n"; exit ( 1 ); diff --git a/src/tools/other/mode_time_domain/3d_att_pair_array.h b/src/tools/other/mode_time_domain/3d_att_pair_array.h index e933bbd6a9..470324e562 100644 --- a/src/tools/other/mode_time_domain/3d_att_pair_array.h +++ b/src/tools/other/mode_time_domain/3d_att_pair_array.h @@ -92,15 +92,14 @@ class PairAtt3DArray { void patch_cluster_numbers(const MM_Engine &); - }; //////////////////////////////////////////////////////////////////////// -inline int PairAtt3DArray::n_elements() const { return ( Nelements ); } -inline int PairAtt3DArray::n () const { return ( Nelements ); } +inline int PairAtt3DArray::n_elements() const { return Nelements; } +inline int PairAtt3DArray::n () const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_time_domain/3d_att_single_array.cc b/src/tools/other/mode_time_domain/3d_att_single_array.cc index d8fdba1252..dabf30bdeb 100644 --- a/src/tools/other/mode_time_domain/3d_att_single_array.cc +++ b/src/tools/other/mode_time_domain/3d_att_single_array.cc @@ -7,7 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - //////////////////////////////////////////////////////////////////////// @@ -113,14 +112,10 @@ void SingleAtt3DArray::clear() if ( e ) { delete [] e; e = (SingleAtt3D *) nullptr; } - - Nelements = 0; Nalloc = 0; -// AllocInc = 100; // don't reset AllocInc - return; @@ -161,7 +156,8 @@ SingleAtt3D * u = new SingleAtt3D [N]; if ( !u ) { - mlog << Error << "SingleAtt3DArray::extend(int) -> memory allocation error\n\n"; + mlog << Error << "\nSingleAtt3DArray::extend(int) -> " + << "memory allocation error\n\n"; exit ( 1 ); @@ -225,7 +221,8 @@ void SingleAtt3DArray::set_alloc_inc(int N) if ( N < 0 ) { - mlog << Error << "SingleAtt3DArray::set_alloc_int(int) -> bad value ... 
" << N << "\n\n"; + mlog << Error << "\nSingleAtt3DArray::set_alloc_int(int) -> " + << "bad value ... " << N << "\n\n"; exit ( 1 ); @@ -286,7 +283,8 @@ SingleAtt3D & SingleAtt3DArray::operator[](int N) const if ( (N < 0) || (N >= Nelements) ) { - mlog << Error << "\n\n SingleAtt3DArray::operator[](int) -> range check error ... " << N << "\n\n"; + mlog << Error << "\nSingleAtt3DArray::operator[](int) -> " + << "range check error ... " << N << "\n\n"; exit ( 1 ); } diff --git a/src/tools/other/mode_time_domain/3d_att_single_array.h b/src/tools/other/mode_time_domain/3d_att_single_array.h index efe0afd6c1..d501de1d05 100644 --- a/src/tools/other/mode_time_domain/3d_att_single_array.h +++ b/src/tools/other/mode_time_domain/3d_att_single_array.h @@ -77,8 +77,8 @@ class SingleAtt3DArray { //////////////////////////////////////////////////////////////////////// -inline int SingleAtt3DArray::n_elements() const { return ( Nelements ); } -inline int SingleAtt3DArray::n () const { return ( Nelements ); } +inline int SingleAtt3DArray::n_elements() const { return Nelements; } +inline int SingleAtt3DArray::n () const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_time_domain/3d_conv.cc b/src/tools/other/mode_time_domain/3d_conv.cc index d204ee61b0..b095989391 100644 --- a/src/tools/other/mode_time_domain/3d_conv.cc +++ b/src/tools/other/mode_time_domain/3d_conv.cc @@ -154,8 +154,6 @@ struct DataHandle { { - int j; - _out.put('\n'); _out << " DataHandle:\n"; @@ -168,13 +166,13 @@ struct DataHandle { _out << " plane_loaded = ["; - for (j=0; j still doesn't allow for bad data!\n\n" -// << "\n\n"; - - const int Nxy = Nx*Ny; const int Nxyz = Nx*Ny*Nt; @@ -262,11 +253,12 @@ ok_sum_plane_buf = new bool [Nxy]; handle.set_size(Nx, Ny, time_radius); conv_data = new double [Nxyz]; -for (k=0; k memory allocation error\n\n"; + mlog << Error << "\nMtdFloatFile::convolve(const int, const int, const int) const: 
process() -> " + << "memory allocation error\n\n"; exit ( 1 ); @@ -279,11 +271,9 @@ if ( !conv_data ) { min_conv_value = 1.0e100; max_conv_value = -1.0e100; -time_start = time(0); - -// cout << "\n\n n = " << mtd_three_to_one(Nx, Ny, Nt, 88, 397, 0) << "\n\n"; +unixtime time_start = time(nullptr); -for (t=0; t bad size\n\n"; + mlog << Error << "\nDataHandle::set_size() -> " + << "bad size\n\n"; exit ( 1 ); @@ -538,9 +517,6 @@ bool status = false; const int nx = mtd.nx(); const int ny = mtd.ny(); - -// mlog << Debug(5) << "In get_data_plane\n"; - for (y=0; y " + << "unable to write image file: " << filename << "\n\n"; exit ( 1 ); @@ -1044,11 +1009,10 @@ for (int x=0; x " + << "unable to write image file: " << filename << "\n\n"; exit ( 1 ); @@ -1062,7 +1026,3 @@ return; //////////////////////////////////////////////////////////////////////// - - - - diff --git a/src/tools/other/mode_time_domain/3d_moments.cc b/src/tools/other/mode_time_domain/3d_moments.cc index 7c2b4362d4..53e1b8b547 100644 --- a/src/tools/other/mode_time_domain/3d_moments.cc +++ b/src/tools/other/mode_time_domain/3d_moments.cc @@ -7,7 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - //////////////////////////////////////////////////////////////////////// @@ -187,7 +186,8 @@ void Mtd_3D_Moments::centralize() if ( N == 0 ) { - mlog << Error << "\n\n Mtd_3D_Moments::centralize() -> no data!\n\n"; + mlog << Error << "\nMtd_3D_Moments::centralize() -> " + << "no data!\n\n"; exit ( 1 ); @@ -242,7 +242,8 @@ void Mtd_3D_Moments::calc_3d_velocity(double & vx, double & vy) const if ( ! IsCentralized ) { - mlog << Error << "\n\n Mtd_3D_Moments::calc_3d_velocity(double &, double &) const -> moments must be centralized first!\n\n"; + mlog << Error << "\nMtd_3D_Moments::calc_3d_velocity(double &, double &) const -> " + << "moments must be centralized first!\n\n"; exit ( 1 ); @@ -266,7 +267,8 @@ double Mtd_3D_Moments::calc_3d_axis_plane_angle() const if ( ! 
IsCentralized ) { - mlog << Error << "\n\n Mtd_3D_Moments::calc_3d_axis_plane_angle() const -> moments must be centralized first!\n\n"; + mlog << Error << "\nMtd_3D_Moments::calc_3d_axis_plane_angle() const -> " + << "moments must be centralized first!\n\n"; exit ( 1 ); @@ -288,6 +290,3 @@ return angle; //////////////////////////////////////////////////////////////////////// - - - diff --git a/src/tools/other/mode_time_domain/3d_moments.h b/src/tools/other/mode_time_domain/3d_moments.h index 603548be44..7f5f8bb02f 100644 --- a/src/tools/other/mode_time_domain/3d_moments.h +++ b/src/tools/other/mode_time_domain/3d_moments.h @@ -70,20 +70,17 @@ class Mtd_3D_Moments { void centralize(); - void calc_3d_velocity(double & vx, double & vy) const; double calc_3d_axis_plane_angle() const; - - }; //////////////////////////////////////////////////////////////////////// -inline int Mtd_3D_Moments::volume() const { return ( N ); } +inline int Mtd_3D_Moments::volume() const { return N; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_time_domain/Makefile.am b/src/tools/other/mode_time_domain/Makefile.am index 34ad4461ee..b75702d973 100644 --- a/src/tools/other/mode_time_domain/Makefile.am +++ b/src/tools/other/mode_time_domain/Makefile.am @@ -12,8 +12,6 @@ include ${top_srcdir}/Make-include bin_PROGRAMS = mtd mtd_SOURCES = mtd.cc \ mtdfiletype_to_string.cc mtdfiletype_to_string.h \ - nc_utils_local.cc nc_utils_local.h \ - nc_grid.cc nc_grid.h \ 3d_moments.cc 3d_moments.h \ 2d_moments.cc 2d_moments.h \ fo_node.cc fo_node.h \ diff --git a/src/tools/other/mode_time_domain/Makefile.in b/src/tools/other/mode_time_domain/Makefile.in index 3f3d8b446a..532000c57e 100644 --- a/src/tools/other/mode_time_domain/Makefile.in +++ b/src/tools/other/mode_time_domain/Makefile.in @@ -102,7 +102,6 @@ CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" PROGRAMS = $(bin_PROGRAMS) am_mtd_OBJECTS = mtd-mtd.$(OBJEXT) 
mtd-mtdfiletype_to_string.$(OBJEXT) \ - mtd-nc_utils_local.$(OBJEXT) mtd-nc_grid.$(OBJEXT) \ mtd-3d_moments.$(OBJEXT) mtd-2d_moments.$(OBJEXT) \ mtd-fo_node.$(OBJEXT) mtd-fo_node_array.$(OBJEXT) \ mtd-fo_graph.$(OBJEXT) mtd-mtd_config_info.$(OBJEXT) \ @@ -152,8 +151,7 @@ am__depfiles_remade = ./$(DEPDIR)/mtd-2d_att.Po \ ./$(DEPDIR)/mtd-mtd_partition.Po \ ./$(DEPDIR)/mtd-mtd_read_data.Po \ ./$(DEPDIR)/mtd-mtd_txt_output.Po \ - ./$(DEPDIR)/mtd-mtdfiletype_to_string.Po \ - ./$(DEPDIR)/mtd-nc_grid.Po ./$(DEPDIR)/mtd-nc_utils_local.Po + ./$(DEPDIR)/mtd-mtdfiletype_to_string.Po am__mv = mv -f AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) @@ -263,6 +261,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ @@ -371,8 +370,6 @@ top_srcdir = @top_srcdir@ MAINTAINERCLEANFILES = Makefile.in mtd_SOURCES = mtd.cc \ mtdfiletype_to_string.cc mtdfiletype_to_string.h \ - nc_utils_local.cc nc_utils_local.h \ - nc_grid.cc nc_grid.h \ 3d_moments.cc 3d_moments.h \ 2d_moments.cc 2d_moments.h \ fo_node.cc fo_node.h \ @@ -543,8 +540,6 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mtd-mtd_read_data.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mtd-mtd_txt_output.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mtd-mtdfiletype_to_string.Po@am__quote@ # am--include-marker -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mtd-nc_grid.Po@am__quote@ # am--include-marker -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mtd-nc_utils_local.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @@ -594,34 +589,6 @@ mtd-mtdfiletype_to_string.obj: mtdfiletype_to_string.cc @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(mtd_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o mtd-mtdfiletype_to_string.obj `if test -f 'mtdfiletype_to_string.cc'; then $(CYGPATH_W) 'mtdfiletype_to_string.cc'; else $(CYGPATH_W) '$(srcdir)/mtdfiletype_to_string.cc'; fi` -mtd-nc_utils_local.o: nc_utils_local.cc -@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(mtd_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT mtd-nc_utils_local.o -MD -MP -MF $(DEPDIR)/mtd-nc_utils_local.Tpo -c -o mtd-nc_utils_local.o `test -f 'nc_utils_local.cc' || echo '$(srcdir)/'`nc_utils_local.cc -@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/mtd-nc_utils_local.Tpo $(DEPDIR)/mtd-nc_utils_local.Po -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='nc_utils_local.cc' object='mtd-nc_utils_local.o' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(mtd_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o mtd-nc_utils_local.o `test -f 'nc_utils_local.cc' || echo '$(srcdir)/'`nc_utils_local.cc - -mtd-nc_utils_local.obj: nc_utils_local.cc -@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(mtd_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT mtd-nc_utils_local.obj -MD -MP -MF $(DEPDIR)/mtd-nc_utils_local.Tpo -c -o mtd-nc_utils_local.obj `if test -f 'nc_utils_local.cc'; then $(CYGPATH_W) 'nc_utils_local.cc'; else $(CYGPATH_W) '$(srcdir)/nc_utils_local.cc'; fi` -@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/mtd-nc_utils_local.Tpo $(DEPDIR)/mtd-nc_utils_local.Po -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='nc_utils_local.cc' object='mtd-nc_utils_local.obj' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
-@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(mtd_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o mtd-nc_utils_local.obj `if test -f 'nc_utils_local.cc'; then $(CYGPATH_W) 'nc_utils_local.cc'; else $(CYGPATH_W) '$(srcdir)/nc_utils_local.cc'; fi` - -mtd-nc_grid.o: nc_grid.cc -@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(mtd_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT mtd-nc_grid.o -MD -MP -MF $(DEPDIR)/mtd-nc_grid.Tpo -c -o mtd-nc_grid.o `test -f 'nc_grid.cc' || echo '$(srcdir)/'`nc_grid.cc -@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/mtd-nc_grid.Tpo $(DEPDIR)/mtd-nc_grid.Po -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='nc_grid.cc' object='mtd-nc_grid.o' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(mtd_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o mtd-nc_grid.o `test -f 'nc_grid.cc' || echo '$(srcdir)/'`nc_grid.cc - -mtd-nc_grid.obj: nc_grid.cc -@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(mtd_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT mtd-nc_grid.obj -MD -MP -MF $(DEPDIR)/mtd-nc_grid.Tpo -c -o mtd-nc_grid.obj `if test -f 'nc_grid.cc'; then $(CYGPATH_W) 'nc_grid.cc'; else $(CYGPATH_W) '$(srcdir)/nc_grid.cc'; fi` -@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/mtd-nc_grid.Tpo $(DEPDIR)/mtd-nc_grid.Po -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='nc_grid.cc' object='mtd-nc_grid.obj' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(mtd_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o mtd-nc_grid.obj `if test -f 'nc_grid.cc'; then $(CYGPATH_W) 
'nc_grid.cc'; else $(CYGPATH_W) '$(srcdir)/nc_grid.cc'; fi` - mtd-3d_moments.o: 3d_moments.cc @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(mtd_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT mtd-3d_moments.o -MD -MP -MF $(DEPDIR)/mtd-3d_moments.Tpo -c -o mtd-3d_moments.o `test -f '3d_moments.cc' || echo '$(srcdir)/'`3d_moments.cc @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/mtd-3d_moments.Tpo $(DEPDIR)/mtd-3d_moments.Po @@ -1066,8 +1033,6 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/mtd-mtd_read_data.Po -rm -f ./$(DEPDIR)/mtd-mtd_txt_output.Po -rm -f ./$(DEPDIR)/mtd-mtdfiletype_to_string.Po - -rm -f ./$(DEPDIR)/mtd-nc_grid.Po - -rm -f ./$(DEPDIR)/mtd-nc_utils_local.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags @@ -1136,8 +1101,6 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/mtd-mtd_read_data.Po -rm -f ./$(DEPDIR)/mtd-mtd_txt_output.Po -rm -f ./$(DEPDIR)/mtd-mtdfiletype_to_string.Po - -rm -f ./$(DEPDIR)/mtd-nc_grid.Po - -rm -f ./$(DEPDIR)/mtd-nc_utils_local.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic diff --git a/src/tools/other/mode_time_domain/fo_graph.cc b/src/tools/other/mode_time_domain/fo_graph.cc index 48b031813a..2ecdf62c25 100644 --- a/src/tools/other/mode_time_domain/fo_graph.cc +++ b/src/tools/other/mode_time_domain/fo_graph.cc @@ -7,7 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - //////////////////////////////////////////////////////////////////////// @@ -168,7 +167,8 @@ int FO_Graph::f_index(int f_num) const if ( (f_num < 0) || (f_num >= N_fcst) ) { - mlog << Error << "\n\n FO_Graph::f_index(int f_num) const -> range check error!\n\n"; + mlog << Error << "\nFO_Graph::f_index(int f_num) const -> " + << "range check error!\n\n"; exit ( 1 ); @@ -188,7 +188,8 @@ int FO_Graph::o_index(int o_num) const if ( (o_num < 0) || (o_num >= N_obs) ) { - mlog << Error << "\n\n FO_Graph::o_index(int 
o_num) const -> range check error!\n\n"; + mlog << Error << "\nFO_Graph::o_index(int o_num) const -> " + << "range check error!\n\n"; exit ( 1 ); @@ -214,7 +215,8 @@ if ( (n_f == 0) && (n_o == 0) ) trouble = true; if ( trouble ) { - mlog << Error << "\n\n FO_Graph::set_size(int n_f, int n_o) -> bad n_f or n_o value(s)\n\n"; + mlog << Error << "\nFO_Graph::set_size(int n_f, int n_o) -> " + << "bad n_f or n_o value(s)\n\n"; exit ( 1 ); @@ -229,9 +231,6 @@ N_total = N_fcst + N_obs; TheGraph = new FO_Node [N_total*N_total]; - - - // // done // @@ -365,7 +364,8 @@ void FO_Graph::erase_edges() if ( ! TheGraph ) { - mlog << Error << "\n\n FO_Graph::erase_edges() -> empty graph!\n\n"; + mlog << Error << "\nFO_Graph::erase_edges() -> " + << "empty graph!\n\n"; exit ( 1 ); @@ -394,7 +394,8 @@ void FO_Graph::do_dump_table(AsciiTable & table) const if ( ! TheGraph ) { - mlog << Error << "\n\n FO_Graph::dump_as_table() -> empty graph!\n\n"; + mlog << Error << "\nFO_Graph::dump_as_table() -> " + << "empty graph!\n\n"; exit ( 1 ); @@ -410,16 +411,6 @@ const int obs_stop = obs_start + N_obs - 1; table.set_size(N_total + 2, N_total + 2); -// for (r=0; r<(table.nrows()); ++r) { -// -// for (c=0; c<(table.ncols()); ++c) { -// -// table.set_entry(r, c, '.'); -// -// } -// -// } - c = fcst_stop + 1; for (r=1; r<(table.nrows()); ++r) { @@ -499,7 +490,6 @@ for (j=0; j memory allocation error\n\n"; + mlog << Error << "\nFO_Node_Array::extend(int) -> " + << "memory allocation error\n\n"; exit ( 1 ); @@ -225,7 +222,8 @@ void FO_Node_Array::set_alloc_inc(int N) if ( N < 0 ) { - mlog << Error << "FO_Node_Array::set_alloc_int(int) -> bad value ... " << N << "\n\n"; + mlog << Error << "\nFO_Node_Array::set_alloc_int(int) -> " + << "bad value ... " << N << "\n\n"; exit ( 1 ); @@ -286,7 +284,8 @@ FO_Node & FO_Node_Array::operator[](int N) const if ( (N < 0) || (N >= Nelements) ) { - mlog << Error << "\n\n FO_Node_Array::operator[](int) -> range check error ... 
" << N << "\n\n"; + mlog << Error << "\nFO_Node_Array::operator[](int) -> " + << "range check error ... " << N << "\n\n"; exit ( 1 ); } diff --git a/src/tools/other/mode_time_domain/fo_node_array.h b/src/tools/other/mode_time_domain/fo_node_array.h index d778a78e5e..72928b5046 100644 --- a/src/tools/other/mode_time_domain/fo_node_array.h +++ b/src/tools/other/mode_time_domain/fo_node_array.h @@ -74,8 +74,8 @@ class FO_Node_Array { //////////////////////////////////////////////////////////////////////// -inline int FO_Node_Array::n_elements() const { return ( Nelements ); } -inline int FO_Node_Array::n () const { return ( Nelements ); } +inline int FO_Node_Array::n_elements() const { return Nelements; } +inline int FO_Node_Array::n () const { return Nelements; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_time_domain/interest_calc.cc b/src/tools/other/mode_time_domain/interest_calc.cc index f3e98e21c6..630fb92855 100644 --- a/src/tools/other/mode_time_domain/interest_calc.cc +++ b/src/tools/other/mode_time_domain/interest_calc.cc @@ -7,7 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - //////////////////////////////////////////////////////////////////////// @@ -361,6 +360,3 @@ return sum; //////////////////////////////////////////////////////////////////////// - - - diff --git a/src/tools/other/mode_time_domain/interest_calc.h b/src/tools/other/mode_time_domain/interest_calc.h index 120a53137b..7cf4d1362d 100644 --- a/src/tools/other/mode_time_domain/interest_calc.h +++ b/src/tools/other/mode_time_domain/interest_calc.h @@ -84,7 +84,6 @@ class InterestCalculator { double operator()(const PairAtt3D &); - }; diff --git a/src/tools/other/mode_time_domain/mm_engine.cc b/src/tools/other/mode_time_domain/mm_engine.cc index a170f64b64..1928c97594 100644 --- a/src/tools/other/mode_time_domain/mm_engine.cc +++ b/src/tools/other/mode_time_domain/mm_engine.cc @@ -7,7 +7,6 @@ // 
*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - //////////////////////////////////////////////////////////////////////// @@ -208,12 +207,8 @@ for (j=0; j<(graph.n_fcst()); ++j) { if ( ! graph.has_fo_edge(j, k) ) continue; - // mlog << Debug(5) << "\n Merging fcst " << j << ", obs " << k << '\n' << flush; - part.merge_values(f_i, o_i); - // specialzed_dump(graph.n_fcst(), graph.n_obs(), p); - } // for k } // for j diff --git a/src/tools/other/mode_time_domain/mm_engine.h b/src/tools/other/mode_time_domain/mm_engine.h index c36850fd91..628089f281 100644 --- a/src/tools/other/mode_time_domain/mm_engine.h +++ b/src/tools/other/mode_time_domain/mm_engine.h @@ -103,10 +103,10 @@ class MM_Engine { //////////////////////////////////////////////////////////////////////// -inline int MM_Engine::n_fcst_simples () const { return ( graph.n_fcst () ); } -inline int MM_Engine::n_obs_simples () const { return ( graph.n_obs () ); } +inline int MM_Engine::n_fcst_simples () const { return graph.n_fcst(); } +inline int MM_Engine::n_obs_simples () const { return graph.n_obs (); } -inline int MM_Engine::n_composites () const { return ( N_Composites ); } +inline int MM_Engine::n_composites () const { return N_Composites; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_time_domain/mtd.cc b/src/tools/other/mode_time_domain/mtd.cc index 14cd5a18bb..aaad44203d 100644 --- a/src/tools/other/mode_time_domain/mtd.cc +++ b/src/tools/other/mode_time_domain/mtd.cc @@ -151,7 +151,6 @@ default_config_filename = replace_path(default_config_path); config.read_config(default_config_filename.c_str(), local_config_filename.c_str()); - // // determine the input file types // - check the config file for the file_type @@ -263,8 +262,6 @@ fcst_raw.regrid(to_grid, config.fcst_info->regrid()); // ConcatString prefix; -// int year, month, day, hour, minute, second; -// char junk[256]; prefix = make_output_prefix(config, 
obs_raw.start_valid_time()); @@ -394,10 +391,6 @@ MtdIntFile fo, oo; if ( have_pairs ) { - // mlog << Debug(5) << "\n Calculating pair attributes ... (Nf = " - // << (fcst_obj.n_objects()) << ", No = " - // << (obs_obj.n_objects()) << ")\n\n"; - for (j=0; j<(fcst_obj.n_objects()); ++j) { fo = fcst_obj.select(j + 1); @@ -412,9 +405,6 @@ if ( have_pairs ) { p.set_simple(); - // mlog << Debug(5) << " (F_" << j << ", O_" << k << ") " - // << p.total_interest() << '\n'; - pa_simple.add(p); } @@ -424,7 +414,6 @@ if ( have_pairs ) { } // if have_pairs - // // calculate 2d simple attributes // @@ -520,9 +509,6 @@ for (j=0; j<(obs_obj.n_objects()); ++j) { } // for j - - - // // create graph // @@ -554,26 +540,14 @@ if ( have_pairs ) { for (j=0; j 5 ) fcst_cluster_att.dump(cout); - mlog << Debug(2) << "Calculating 3D obs cluster attributes\n"; @@ -622,8 +594,6 @@ if ( have_pairs ) { att_3 = calc_3d_single_atts(mask, obs_raw, config.model.c_str(), config.inten_perc_value); - // if ( att.Xvelocity > 20.0 ) mask.write("w.nc"); - att_3.set_object_number(j + 1); // 1-based att_3.set_obs(); @@ -634,8 +604,6 @@ if ( have_pairs ) { } - // obs_cluster_att.dump(cout); - } // if have_pairs // @@ -670,12 +638,8 @@ if ( have_pairs ) { p.set_cluster(); - // p.set_total_interest(e.calc(p)); p.set_total_interest(-1.0); - // mlog << Debug(5) << " (F_" << j << ", O_" << k << ") " - // << p.total_interest() << '\n'; - pa_cluster.add(p); } @@ -787,9 +751,6 @@ if ( have_pairs ) { } // for j - - - } // if have pairs // diff --git a/src/tools/other/mode_time_domain/mtd_config_info.cc b/src/tools/other/mode_time_domain/mtd_config_info.cc index 97b9d90e29..c23af5a638 100644 --- a/src/tools/other/mode_time_domain/mtd_config_info.cc +++ b/src/tools/other/mode_time_domain/mtd_config_info.cc @@ -191,51 +191,6 @@ conf.read(default_file_name); if ( user_file_name ) conf.read(user_file_name); - // check the fuzzy-engine weights - // calculation of total interest assumes these tests - - // - // Update: 
these tests are not really needed since the InterestCalculator - // class checks this - // - -/* -bool all_zero = true; - -if ( is_eq(space_centroid_dist_wt, 0.0) ) space_centroid_dist_wt = 0.0; -else all_zero = false; - -if ( is_eq(time_centroid_delta_wt, 0.0) ) time_centroid_delta_wt = 0.0; -else all_zero = false; - -if ( is_eq(speed_delta_wt, 0.0) ) speed_delta_wt = 0.0; -else all_zero = false; - -if ( is_eq(direction_diff_wt, 0.0) ) direction_diff_wt = 0.0; -else all_zero = false; - -if ( is_eq(volume_ratio_wt, 0.0) ) volume_ratio_wt = 0.0; -else all_zero = false; - -if ( is_eq(axis_angle_diff_wt, 0.0) ) axis_angle_diff_wt = 0.0; -else all_zero = false; - -if ( is_eq(start_time_delta_wt, 0.0) ) start_time_delta_wt = 0.0; -else all_zero = false; - -if ( is_eq(end_time_delta_wt, 0.0) ) end_time_delta_wt = 0.0; -else all_zero = false; - - -if ( all_zero ) { - - mlog << Error << "\n\n MtdConfigInfo::read_config() -> all the fuzzy engine weights are zero!\n\n"; - - exit ( 1 ); - -} -*/ - // // done // @@ -259,10 +214,6 @@ void MtdConfigInfo::process_config(GrdFileType ftype, GrdFileType otype) bool status = false; double sum; - // Dump the contents of the config file - - // if(mlog.verbosity_level() >= 5) conf.dump(cout); - // Initialize clear(); @@ -298,19 +249,6 @@ void MtdConfigInfo::process_config(GrdFileType ftype, GrdFileType otype) fcst_info->set_dict(*(fcst_dict->lookup_dictionary(conf_key_field))); obs_info->set_dict(*(obs_dict->lookup_dictionary(conf_key_field))); - // Dump the contents of the VarInfo objects -// -// if(mlog.verbosity_level() >= 5) { -// mlog << Debug(5) -// << "Parsed forecast field:\n"; -// fcst_info->dump(cout); -// mlog << Debug(5) -// << "Parsed observation field:\n"; -// obs_info->dump(cout); -// } -// - - // No support for wind direction if(fcst_info->is_wind_direction() || obs_info->is_wind_direction()) { @@ -350,68 +288,6 @@ void MtdConfigInfo::process_config(GrdFileType ftype, GrdFileType otype) fcst_conv_thresh = 
fcst_dict->lookup_thresh(conf_key_conv_thresh); obs_conv_thresh = obs_dict->lookup_thresh(conf_key_conv_thresh); - // Conf: fcst.vld_thresh and obs.vld_thresh - - // fcst_vld_thresh = fcst_dict->lookup_double(conf_key_vld_thresh); - // obs_vld_thresh = obs_dict->lookup_double(conf_key_vld_thresh); - - // Conf: fcst.merge_thresh and obs.merge_thresh - - // fcst_merge_thresh = fcst_dict->lookup_thresh(conf_key_merge_thresh); - // obs_merge_thresh = obs_dict->lookup_thresh(conf_key_merge_thresh); - - // Conf: fcst.merge_flag and obs.merge_flag - - // fcst_merge_flag = int_to_mergetype(fcst_dict->lookup_int(conf_key_merge_flag)); - // obs_merge_flag = int_to_mergetype(obs_dict->lookup_int(conf_key_merge_flag)); - - // Conf: mask_missing_flag - - // mask_missing_flag = int_to_fieldtype(conf.lookup_int(conf_key_mask_missing_flag)); - - // Conf: match_flag - - // match_flag = int_to_matchtype(conf.lookup_int(conf_key_match_flag)); - - // Check that match_flag is set between 0 and 3 -/* - if(match_flag == MatchType::None && - (fcst_merge_flag != MergeType::None || obs_merge_flag != MergeType::None) ) { - mlog << Warning << "\nMtdConfigInfo::process_config() -> " - << "When matching is disabled (match_flag = " - << matchtype_to_string(match_flag) - << ") but merging is requested (fcst_merge_flag = " - << mergetype_to_string(fcst_merge_flag) - << ", obs_merge_flag = " - << mergetype_to_string(obs_merge_flag) - << ") any merging information will be discarded.\n\n"; - } -*/ - // Conf: max_centroid_dist - - // max_centroid_dist = conf.lookup_double(conf_key_max_centroid_dist); - - // Check that max_centroid_dist is > 0 -/* - if(max_centroid_dist <= 0) { - mlog << Warning << "\nMtdConfigInfo::process_config() -> " - << "max_centroid_dist (" << max_centroid_dist - << ") should be set > 0\n\n"; - } - -*/ - // Conf: mask.grid - -/* - mask_grid_name = conf.lookup_string(conf_key_mask_grid); - mask_grid_flag = int_to_fieldtype(conf.lookup_int(conf_key_mask_grid_flag)); - - // Conf: 
mask.poly - - mask_poly_name = conf.lookup_string(conf_key_mask_poly); - mask_poly_flag = int_to_fieldtype(conf.lookup_int(conf_key_mask_poly_flag)); -*/ - // Conf: weight dict = conf.lookup_dictionary(conf_key_weight); @@ -494,20 +370,6 @@ void MtdConfigInfo::process_config(GrdFileType ftype, GrdFileType otype) exit(1); } - // Conf: print_interest_thresh - - // print_interest_thresh = conf.lookup_double(conf_key_print_interest_thresh); - - // Check that print_interest_thresh is between 0 and 1. -/* - if(print_interest_thresh < 0 || print_interest_thresh > 1) { - mlog << Error << "\nMtdConfigInfo::process_config() -> " - << "print_interest_thresh (" << print_interest_thresh - << ") must be set between 0 and 1.\n\n"; - exit(1); - } -*/ - // Conf: nc_pairs_flag parse_nc_info(); @@ -516,10 +378,6 @@ void MtdConfigInfo::process_config(GrdFileType ftype, GrdFileType otype) parse_txt_info(); - // Conf: ct_stats_flag - - // ct_stats_flag = conf.lookup_bool(conf_key_ct_stats_flag); - // Conf: output_prefix output_prefix = conf.lookup_string(conf_key_output_prefix); @@ -564,8 +422,8 @@ e = conf.lookup(conf_key_nc_output); if ( !e ) { - mlog << Error - << "\n\n MtdConfigInfo::parse_nc_info() -> lookup failed for key \"" + mlog << Error << "\nMtdConfigInfo::parse_nc_info() -> " + << "lookup failed for key \"" << conf_key_nc_output << "\"\n\n"; exit ( 1 ); @@ -590,11 +448,9 @@ if ( type == BooleanType ) { if ( type != DictionaryType ) { - mlog << Error - << "\n\n MtdConfigInfo::parse_nc_info() -> bad type (" - << configobjecttype_to_string(type) - << ") for key \"" - << conf_key_nc_pairs_flag << "\"\n\n"; + mlog << Error << "\nMtdConfigInfo::parse_nc_info() -> " + << "bad type (" << configobjecttype_to_string(type) + << ") for key \"" << conf_key_nc_pairs_flag << "\"\n\n"; exit ( 1 ); @@ -608,10 +464,8 @@ Dictionary * d = e->dict_value(); nc_info.do_latlon = d->lookup_bool(conf_key_latlon_flag); nc_info.do_raw = d->lookup_bool(conf_key_raw_flag); -// nc_info.do_object_raw = 
d->lookup_bool(conf_key_object_raw_flag); nc_info.do_object_id = d->lookup_bool(conf_key_object_id_flag); nc_info.do_cluster_id = d->lookup_bool(conf_key_cluster_id_flag); -// nc_info.do_polylines = d->lookup_bool(conf_key_do_polylines_flag); // // done @@ -638,9 +492,8 @@ e = conf.lookup(key); if ( !e ) { - mlog << Error - << "\n\n MtdConfigInfo::parse_txt_info() -> lookup failed for key \"" - << key << "\"\n\n"; + mlog << Error << "\nMtdConfigInfo::parse_txt_info() -> " + << "lookup failed for key \"" << key << "\"\n\n"; exit ( 1 ); @@ -654,11 +507,9 @@ const ConfigObjectType type = e->type(); if ( type != DictionaryType ) { - mlog << Error - << "\n\n MtdConfigInfo::parse_txt_info() -> bad type (" - << configobjecttype_to_string(type) - << ") for key \"" - << key << "\"\n\n"; + mlog << Error << "\nMtdConfigInfo::parse_txt_info() -> " + << "bad type (" << configobjecttype_to_string(type) + << ") for key \"" << key << "\"\n\n"; exit ( 1 ); @@ -851,7 +702,6 @@ bool MtdNcOutInfo::all_false() const { -// bool status = do_latlon || do_raw || do_object_raw || do_object_id || do_cluster_id || do_polylines; bool status = do_latlon || do_raw || do_object_id || do_cluster_id; return !status; @@ -868,10 +718,8 @@ void MtdNcOutInfo::set_all_false() do_latlon = false; do_raw = false; -// do_object_raw = false; do_object_id = false; do_cluster_id = false; -// do_polylines = false; return; @@ -887,10 +735,8 @@ void MtdNcOutInfo::set_all_true() do_latlon = true; do_raw = true; -// do_object_raw = true; do_object_id = true; do_cluster_id = true; -// do_polylines = true; return; diff --git a/src/tools/other/mode_time_domain/mtd_file_base.cc b/src/tools/other/mode_time_domain/mtd_file_base.cc index 8bb5abd972..f46a43392b 100644 --- a/src/tools/other/mode_time_domain/mtd_file_base.cc +++ b/src/tools/other/mode_time_domain/mtd_file_base.cc @@ -7,7 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - 
//////////////////////////////////////////////////////////////////////// @@ -23,11 +22,11 @@ #include "mtd_file.h" #include "mtd_partition.h" #include "mtd_nc_defs.h" -#include "nc_grid.h" -#include "nc_utils_local.h" #include "mtdfiletype_to_string.h" +#include "get_met_grid.h" #include "vx_math.h" +#include "vx_nc_util.h" using namespace std; using namespace netCDF; @@ -179,8 +178,6 @@ if ( G ) { G->dump(out, depth + 1); - // out << prefix << (G->xml_serialize()) << '\n'; - } else { out << prefix << "Grid = 0\n"; @@ -238,7 +235,8 @@ Grid MtdFileBase::grid() const if ( !G ) { - mlog << Error << "\n\n MtdFileBase::grid() const -> no grid!\n\n"; + mlog << Error << "\nMtdFileBase::grid() const -> " + << "no grid!\n\n"; exit ( 1 ); @@ -271,7 +269,8 @@ void MtdFileBase::latlon_to_xy(double lat, double lon, double & x, double & y) c if ( !G ) { - mlog << Error << "\n\n MtdFileBase::latlon_to_xy() -> no grid!\n\n"; + mlog << Error << "\nMtdFileBase::latlon_to_xy() -> " + << "no grid!\n\n"; exit ( 1 ); @@ -333,7 +332,8 @@ void MtdFileBase::xy_to_latlon(double x, double y, double & lat, double & lon) c if ( !G ) { - mlog << Error << "\n\n MtdFileBase::xy_to_latlon() -> no grid!\n\n"; + mlog << Error << "\nMtdFileBase::xy_to_latlon() -> " + << "no grid!\n\n"; exit ( 1 ); @@ -355,7 +355,8 @@ unixtime MtdFileBase::valid_time(int t) const if ( (t < 0) || ( t >= Nt) ) { - mlog << Error << "\n\n MtdFileBase::valid_time(int t) -> range check error\n\n"; + mlog << Error << "\nMtdFileBase::valid_time(int t) -> " + << "range check error\n\n"; exit ( 1 ); @@ -375,7 +376,8 @@ unixtime MtdFileBase::actual_valid_time(int t) const if ( (t < 0) || ( t >= (int)ActualValidTimes.size()) ) { - mlog << Error << "\n\n MtdFileBase::valid_time(int t) -> range check error\n\n"; + mlog << Error << "\nMtdFileBase::valid_time(int t) -> " + << "range check error\n\n"; exit ( 1 ); @@ -395,7 +397,8 @@ int MtdFileBase::lead_time(int index) const if ( (index < 0) || ( index >= Nt) ) { - mlog << Error << 
"\n\n MtdFileBase::lead_time(int t) -> range check error\n\n"; + mlog << Error << "\nMtdFileBase::lead_time(int t) -> " + << "range check error\n\n"; exit ( 1 ); @@ -413,7 +416,6 @@ void MtdFileBase::read(NcFile & f) { -//NcDim * dim = 0; NcDim dim; // Nx, Ny, Nt @@ -427,40 +429,40 @@ Ny = GET_NC_SIZE(dim); dim = get_nc_dim(&f, nt_dim_name); Nt = GET_NC_SIZE(dim); -//dim = 0; - // Grid G = new Grid; -read_nc_grid(f, *G); +read_netcdf_grid(&f, *G); // timestamp info -StartValidTime = parse_start_time(string_att(f, start_time_att_name)); +ConcatString s; + +get_att_value_string(&f, start_time_att_name, s); + +StartValidTime = timestring_to_unix(s.text()); -DeltaT = string_att_as_int (f, delta_t_att_name); +DeltaT = get_att_value_int(&f, delta_t_att_name); // FileType -// ConcatString s = (string)string_att(f, filetype_att_name); -ConcatString s; bool status = false; -s.add(string_att(f, filetype_att_name)); +get_att_value_string(&f, filetype_att_name, s); status = string_to_mtdfiletype(s.text(), FileType); if ( ! 
status ) { - mlog << Error << "\n\n MtdFileBase::read(NcFile &) -> unable to parse filetype string \"" + mlog << Error << "\nMtdFileBase::read(NcFile &) -> " + << "unable to parse filetype string \"" << s << "\"\n\n"; exit ( 1 ); } - // // done // @@ -480,19 +482,20 @@ void MtdFileBase::write(NcFile & f) const char junk[256]; ConcatString s; - // Nx, Ny, Nt + // Add the time dimension -add_dim(&f, nx_dim_name, Nx); -add_dim(&f, ny_dim_name, Ny); add_dim(&f, nt_dim_name, Nt); // Grid -write_nc_grid(f, *G); +NcDim ny_dim; +NcDim nx_dim; + +write_netcdf_proj(&f, *G, ny_dim, nx_dim); // timestamp info -s = start_time_string(StartValidTime); +s = unix_to_yyyymmdd_hhmmss(StartValidTime); add_att(&f, start_time_att_name, s.text()); @@ -508,7 +511,6 @@ s = mtdfiletype_to_string(FileType); add_att(&f, filetype_att_name, s.text()); - // // done // @@ -527,8 +529,8 @@ void MtdFileBase::set_lead_time(int index, int value) if ( (index < 0) || (index >= Nt) ) { - mlog << Error - << "MtdFileBase::set_lead_time(int index, int value) -> range check error on index ... " + mlog << Error << "MtdFileBase::set_lead_time(int index, int value) -> " + << "range check error on index ... 
" << index << "\n\n"; exit ( 1 ); @@ -553,7 +555,3 @@ return; //////////////////////////////////////////////////////////////////////// - - - - diff --git a/src/tools/other/mode_time_domain/mtd_file_base.h b/src/tools/other/mode_time_domain/mtd_file_base.h index 92bc827f82..52407160de 100644 --- a/src/tools/other/mode_time_domain/mtd_file_base.h +++ b/src/tools/other/mode_time_domain/mtd_file_base.h @@ -164,18 +164,18 @@ class MtdFileBase { //////////////////////////////////////////////////////////////////////// -inline int MtdFileBase::nx() const { return ( Nx ); } -inline int MtdFileBase::ny() const { return ( Ny ); } -inline int MtdFileBase::nt() const { return ( Nt ); } +inline int MtdFileBase::nx() const { return Nx; } +inline int MtdFileBase::ny() const { return Ny; } +inline int MtdFileBase::nt() const { return Nt; } -inline int MtdFileBase::nxy () const { return ( Nx*Ny ); } -inline int MtdFileBase::nxyt () const { return ( Nx*Ny*Nt ); } +inline int MtdFileBase::nxy () const { return Nx*Ny ; } +inline int MtdFileBase::nxyt () const { return Nx*Ny*Nt; } -inline unixtime MtdFileBase::start_valid_time() const { return ( StartValidTime ); } +inline unixtime MtdFileBase::start_valid_time() const { return StartValidTime; } -inline int MtdFileBase::delta_t() const { return ( DeltaT ); } +inline int MtdFileBase::delta_t() const { return DeltaT; } -inline MtdFileType MtdFileBase::filetype() const { return ( FileType ); } +inline MtdFileType MtdFileBase::filetype() const { return FileType; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/mode_time_domain/mtd_file_float.cc b/src/tools/other/mode_time_domain/mtd_file_float.cc index f76f349aa0..bdfacb4e46 100644 --- a/src/tools/other/mode_time_domain/mtd_file_float.cc +++ b/src/tools/other/mode_time_domain/mtd_file_float.cc @@ -24,9 +24,9 @@ #include "mtd_file.h" #include "mtd_partition.h" #include "mtd_nc_defs.h" -#include "nc_utils_local.h" #include "vx_math.h" 
+#include "vx_nc_util.h" using namespace std; using namespace netCDF; @@ -168,7 +168,6 @@ if ( f.Data ) { } - // // done // @@ -255,7 +254,8 @@ void MtdFloatFile::set_spatial_radius(int spatial_r) if ( spatial_r < 0 ) { - mlog << Error << "\n\n MtdFloatFile::set_spatial_radius(int) -> bad value ... " << spatial_r << "\n\n"; + mlog << Error << "\nMtdFloatFile::set_spatial_radius(int) -> " + << "bad value ... " << spatial_r << "\n\n"; exit ( 1 ); @@ -277,7 +277,8 @@ void MtdFloatFile::set_time_window(int beg, int end) if ( end < beg ) { - mlog << Error << "\n\n MtdFloatFile::set_time_window(int) -> bad values ... " << beg << " and " << end << "\n\n"; + mlog << Error << "\nMtdFloatFile::set_time_window(int) -> " + << "bad values ... " << beg << " and " << end << "\n\n"; exit ( 1 ); @@ -317,7 +318,8 @@ void MtdFloatFile::put(const DataPlane & plane, const int t) if ( (plane.nx() != Nx) || (plane.ny() != Ny) ) { - mlog << Error << "\n\n MtdFloatFile::put(const DataPlane &, const int) -> plane wrong size!\n\n"; + mlog << Error << "\nMtdFloatFile::put(const DataPlane &, const int) -> " + << "plane wrong size!\n\n"; exit ( 1 ); @@ -325,7 +327,8 @@ if ( (plane.nx() != Nx) || (plane.ny() != Ny) ) { if ( (t < 0) || (t >= Nt) ) { - mlog << Error << "\n\n MtdFloatFile::put(const DataPlane &, const int) -> bad time\n\n"; + mlog << Error << "\nMtdFloatFile::put(const DataPlane &, const int) -> " + << "bad time\n\n"; exit ( 1 ); @@ -359,8 +362,6 @@ for (x=0; x no data!\n\n"; + mlog << Error << "\nMtdFloatFile::threshold(double, MtdIntFile &) const -> " + << "no data!\n\n"; exit ( 1 ); @@ -542,7 +544,8 @@ void MtdFloatFile::threshold(const SingleThresh & t, MtdIntFile & out) const if ( !Data ) { - mlog << Error << "\n\n MtdFloatFile::threshold(double, MtdIntFile &) const -> no data!\n\n"; + mlog << Error << "\nMtdFloatFile::threshold(double, MtdIntFile &) const -> " + << "no data!\n\n"; exit ( 1 ); @@ -592,7 +595,6 @@ out.set_threshold(-9999.0); out.set_filetype(mtd_file_mask); - 
// // done // @@ -629,11 +631,9 @@ void MtdFloatFile::read(NcFile & f) { -//NcVar * var = 0; NcVar var; - // // read the base class stuff // @@ -642,8 +642,8 @@ MtdFileBase::read(f); // DataMin, DataMax -DataMin = string_att_as_double (f, min_value_att_name); -DataMax = string_att_as_double (f, max_value_att_name); +DataMin = (float) get_att_value_double(&f, min_value_att_name); +DataMax = (float) get_att_value_double(&f, max_value_att_name); // Data @@ -651,24 +651,6 @@ set_size(Nx, Ny, Nt); var = get_nc_var(&f, data_field_name); -//if ( !(var->set_cur(0, 0, 0)) ) { -// -// mlog << Error << "\n\n MtdFloatFile::read() -> trouble setting corner\n\n"; -// -// exit ( 1 ); -// -//} -// -//// const time_t t_start = time(0); // for timing the data read operation -// -//if ( ! (var->get(Data, Nt, Ny, Nx)) ) { -// -// mlog << Error << "\n\n MtdFloatFile::read(const char *) -> trouble getting data\n\n"; -// -// exit ( 1 ); -// -//} - LongArray offsets; // {0,0,0}; LongArray lengths; // {Nt, Ny, Nx}; @@ -679,19 +661,15 @@ lengths.add(Nt); lengths.add(Ny); lengths.add(Nx); -//if ( ! get_nc_data(&var, Data, (long *){Nt, Ny, Nx}, (long *){0,0,0}) ) { if ( ! 
get_nc_data(&var, Data, lengths, offsets) ) { - mlog << Error << "\n\n MtdFloatFile::read(const char *) -> trouble getting data\n\n"; + mlog << Error << "\nMtdFloatFile::read(const char *) -> " + << "trouble getting data\n\n"; exit ( 1 ); } -// const time_t t_stop = time(0); // for timing the data read operation - -// mlog << Debug(5) << "\n\n MtdFloatFile::read(): Time to read data = " << (t_stop - t_start) << " seconds\n\n" << flush; - // // done // @@ -708,13 +686,9 @@ void MtdFloatFile::write(NcFile & f) const { -//NcDim * nx_dim = 0; -//NcDim * ny_dim = 0; -//NcDim * nt_dim = 0; NcDim nx_dim; NcDim ny_dim; NcDim nt_dim; -//NcVar * data_var = 0; NcVar data_var ; const char format [] = "%.3f"; char junk[256]; @@ -764,24 +738,6 @@ add_var(&f, data_field_name, ncFloat, nt_dim, ny_dim, nx_dim); data_var = get_nc_var(&f, data_field_name); -//if ( !(data_var->set_cur(0, 0, 0)) ) { -// -// mlog << Error << "\n\n MtdFloatFile::write() -> trouble setting corner on data field\n\n"; -// -// exit ( 1 ); -// -//} -// -//// const time_t t_start = time(0); // for timing the data write operation -// -//if ( !(data_var->put(Data, Nt, Ny, Nx)) ) { -// -// mlog << Error << "\n\n MtdFloatFile::write() -> trouble with put in data field\n\n"; -// -// exit ( 1 ); -// -//} - LongArray offsets; // {0,0,0}; LongArray lengths; // {Nt, Ny, Nx}; @@ -792,19 +748,15 @@ lengths.add(Nt); lengths.add(Ny); lengths.add(Nx); -//if ( ! get_nc_data(&data_var, Data, (long *){Nt, Ny, Nx}, (long *){0,0,0}) ) { if ( ! 
get_nc_data(&data_var, Data, lengths, offsets) ) { - mlog << Error << "\n\n MtdFloatFile::read(const char *) -> trouble getting data\n\n"; + mlog << Error << "\nMtdFloatFile::read(const char *) -> " + << "trouble getting data\n\n"; exit ( 1 ); } -// const time_t t_stop = time(0); // for timing the data write operation - -// mlog << Debug(5) << "\n\n MtdFloatFile::write(): Time to write data = " << (t_stop - t_start) << " seconds\n\n" << flush; - // // done // @@ -825,11 +777,10 @@ NcFile f(_filename, NcFile::replace); if ( IS_INVALID_NC(f) ) { - mlog << Error << "\n\n MtdFloatFile::write(const char *) -> unable to open netcdf output file \"" << _filename << "\"\n\n"; + mlog << Error << "\nMtdFloatFile::write(const char *) -> " + << "unable to open netcdf output file: " << _filename << "\n\n"; - // exit ( 1 ); - - return; + exit ( 1 ); } @@ -853,7 +804,8 @@ MtdFloatFile MtdFloatFile::const_t_slice(int t) const if ( (t < 0) || (t >= Nt) ) { - mlog << Error << "\n\n MtdFloatFile MtdFloatFile::const_t_slice(int) const -> range check error\n\n"; + mlog << Error << "\nMtdFloatFile MtdFloatFile::const_t_slice(int) const -> " + << "range check error\n\n"; exit ( 1 ); @@ -921,7 +873,8 @@ void MtdFloatFile::get_data_plane(const int t, DataPlane & out) if ( (t < 0) || (t >= Nt) ) { - mlog << Error << "\n\n MtdFloatFile::get_data_plane() -> range check error on t\n\n"; + mlog << Error << "\nMtdFloatFile::get_data_plane() -> " + << "range check error on t\n\n"; exit ( 1 ); @@ -956,9 +909,6 @@ for (x=0; x= Nt) ) { - mlog << Error << "\n\n " << method_name << "range check error on t\n\n"; + mlog << Error << "\n" << method_name + << "range check error on t\n\n"; exit ( 1 ); @@ -985,7 +937,8 @@ if ( (t < 0) || (t >= Nt) ) { if ( (d.nx() != Nx) || (d.ny() != Ny) ) { - mlog << Error << "\n\n " << method_name << "data plane is wrong size!\n\n"; + mlog << Error << "\n" << method_name + << "data plane is wrong size!\n\n"; exit ( 1 ); @@ -1002,8 +955,9 @@ for (x=0; x=data_cnt) { - mlog 
<< Debug(4) << method_name << "offset " << n - << " is out of range (" << " from " << x << ", " << y <<", " << t <<")\n"; + mlog << Debug(4) << method_name + << "offset " << n << " is out of range (from " + << x << ", " << y <<", " << t <<")\n"; continue; } @@ -1085,7 +1039,3 @@ return; //////////////////////////////////////////////////////////////////////// - - - - diff --git a/src/tools/other/mode_time_domain/mtd_file_float.h b/src/tools/other/mode_time_domain/mtd_file_float.h index a9935531cf..cfa75b2b8d 100644 --- a/src/tools/other/mode_time_domain/mtd_file_float.h +++ b/src/tools/other/mode_time_domain/mtd_file_float.h @@ -137,21 +137,21 @@ class MtdFloatFile : public MtdFileBase { //////////////////////////////////////////////////////////////////////// -inline int MtdFloatFile::spatial_radius() const { return ( Spatial_Radius ); } +inline int MtdFloatFile::spatial_radius() const { return Spatial_Radius; } -inline int MtdFloatFile::time_beg() const { return ( TimeBeg ); } -inline int MtdFloatFile::time_end() const { return ( TimeEnd ); } +inline int MtdFloatFile::time_beg() const { return TimeBeg; } +inline int MtdFloatFile::time_end() const { return TimeEnd; } -inline float MtdFloatFile::data_min() const { return ( DataMin ); } -inline float MtdFloatFile::data_max() const { return ( DataMax ); } +inline float MtdFloatFile::data_min() const { return DataMin; } +inline float MtdFloatFile::data_max() const { return DataMax; } -inline const float * MtdFloatFile::data() const { return ( Data ); } +inline const float * MtdFloatFile::data() const { return Data; } inline float MtdFloatFile::operator()(int _x, int _y, int _t) const { -return ( Data[mtd_three_to_one(Nx, Ny, Nt, _x, _y, _t)] ); +return Data[mtd_three_to_one(Nx, Ny, Nt, _x, _y, _t)]; } diff --git a/src/tools/other/mode_time_domain/mtd_file_int.cc b/src/tools/other/mode_time_domain/mtd_file_int.cc index 92f6e214a6..2cbe00e400 100644 --- a/src/tools/other/mode_time_domain/mtd_file_int.cc +++ 
b/src/tools/other/mode_time_domain/mtd_file_int.cc @@ -24,9 +24,9 @@ #include "mtd_file.h" #include "mtd_partition.h" #include "mtd_nc_defs.h" -#include "nc_utils_local.h" #include "vx_math.h" +#include "vx_nc_util.h" using namespace std; using namespace netCDF; @@ -302,7 +302,8 @@ void MtdIntFile::set_radius(int r) if ( r < 0 ) { - mlog << Error << "\n MtdIntFile::set_radius(int) -> bad value ... " << r << "\n\n"; + mlog << Error << "\nMtdIntFile::set_radius(int) -> " + << "bad value ... " << r << "\n\n"; exit ( 1 ); @@ -325,7 +326,8 @@ void MtdIntFile::set_time_window(int beg, int end) if ( end < beg ) { - mlog << Error << "\n MtdIntFile::set_time_window(int) -> bad values ... " << beg << " and " << end << "\n\n"; + mlog << Error << "\nMtdIntFile::set_time_window(int) -> " + << "bad values ... " << beg << " and " << end << "\n\n"; exit ( 1 ); @@ -347,14 +349,6 @@ void MtdIntFile::set_threshold(double t) { -// if ( t < 0.0 ) { -// -// mlog << Error << "\n MtdIntFile::set_threshold(double) -> bad value ... " << t << "\n\n"; -// -// exit ( 1 ); -// -// } - Threshold = t; return; @@ -421,11 +415,8 @@ void MtdIntFile::read(NcFile & f) { -//NcVar * var = 0; NcVar var ; - - // // read the base class stuff // @@ -434,8 +425,8 @@ MtdFileBase::read(f); // DataMin, DataMax -DataMin = string_att_as_double (f, min_value_att_name); -DataMax = string_att_as_double (f, max_value_att_name); +DataMin = get_att_value_int(&f, min_value_att_name); +DataMax = get_att_value_int(&f, max_value_att_name); // Data @@ -443,24 +434,6 @@ set_size(Nx, Ny, Nt); var = get_nc_var(&f, data_field_name); -//if ( !(var->set_cur(0, 0, 0)) ) { -// -// mlog << Error << "\n MtdIntFile::read() -> trouble setting corner\n\n"; -// -// exit ( 1 ); -// -//} -// -//// const time_t t_start = time(0); // for timing the data read operation -// -//if ( ! 
(var->get(Data, Nt, Ny, Nx)) ) { -// -// mlog << Error << "\n MtdIntFile::read(const char *) -> trouble getting data\n\n"; -// -// exit ( 1 ); -// -//} - LongArray offsets; // {0,0,0}; LongArray lengths; // {Nt, Ny, Nx}; @@ -471,19 +444,15 @@ lengths.add(Nt); lengths.add(Ny); lengths.add(Nx); -//if ( ! get_nc_data(&var, Data, (long *){Nt, Ny, Nx}, (long *){0,0,0}) ) { if ( ! get_nc_data(&var, Data, lengths, offsets) ) { - mlog << Error << "\n MtdIntFile::read(const char *) -> trouble getting data\n\n"; + mlog << Error << "\nMtdIntFile::read(const char *) -> " + << "trouble getting data\n\n"; exit ( 1 ); } -// const time_t t_stop = time(0); // for timing the data read operation - -// mlog << Debug(5) << "\n MtdIntFile::read(): Time to read data = " << (t_stop - t_start) << " seconds\n\n" << flush; - // // done // @@ -500,23 +469,16 @@ void MtdIntFile::write(NcFile & f) const { -//NcDim * nx_dim = 0; -//NcDim * ny_dim = 0; -//NcDim * nt_dim = 0; -//NcDim * n_obj_dim = 0; -NcDim nx_dim ; -NcDim ny_dim ; -NcDim nt_dim ; +NcDim nx_dim; +NcDim ny_dim; +NcDim nt_dim; NcDim n_obj_dim; -//NcVar * data_var = 0; -//NcVar * volumes_var = 0; -NcVar data_var ; -NcVar volumes_var ; +NcVar data_var; +NcVar volumes_var; const char format [] = "%d"; char junk[256]; const bool is_split = (ObjVolume != 0); - // // write stuff from parent class // @@ -571,8 +533,6 @@ add_att(&f, threshold_att_name, Threshold); data_var = add_var(&f, data_field_name, ncInt, nt_dim, ny_dim, nx_dim); -//data_var = get_nc_var(&f, data_field_name); - LongArray offsets; // {0,0,0}; LongArray lengths; // {Nt, Ny, Nx}; @@ -585,30 +545,13 @@ lengths.add(Nx); if ( ! 
put_nc_data(&data_var, Data, lengths, offsets) ) { - mlog << Error << "\n MtdIntFile::write(const char *) -> trouble getting data\n\n"; + mlog << Error << "\nMtdIntFile::write(const char *) -> " + << "trouble getting data\n\n"; exit ( 1 ); } -//if ( !(data_var->set_cur(0, 0, 0)) ) { -// -// mlog << Error << "\n MtdIntFile::write() -> trouble setting corner on data field\n\n"; -// -// exit ( 1 ); -// -//} -// -//// const time_t t_start = time(0); // for timing the data write operation -// -//if ( !(data_var->put(Data, Nt, Ny, Nx)) ) { -// -// mlog << Error << "\n MtdIntFile::write() -> trouble writing data field\n\n"; -// -// exit ( 1 ); -// -//} - // // volumes, if needed // @@ -617,11 +560,10 @@ if ( is_split ) { volumes_var = add_var(&f, volumes_name, ncInt, n_obj_dim); - //volumes_var = get_nc_var(&f, volumes_name); - if ( !(put_nc_data(&volumes_var, ObjVolume, Nobjects, 0)) ) { - mlog << Error << "\n MtdIntFile::write() -> trouble writing object volumes\n\n"; + mlog << Error << "\nMtdIntFile::write() -> " + << "trouble writing object volumes\n\n"; exit ( 1 ); @@ -629,10 +571,6 @@ if ( is_split ) { } // if is_split -// const time_t t_stop = time(0); // for timing the data write operation - -// mlog << Debug(5) << "\n MtdIntFile::write(): Time to write data = " << (t_stop - t_start) << " seconds\n\n" << flush; - // // done // @@ -653,7 +591,8 @@ NcFile f(_filename, NcFile::replace); if ( IS_INVALID_NC(f) ) { - mlog << Error << "\n MtdIntFile::write(const char *) -> unable to open netcdf output file \"" << _filename << "\"\n\n"; + mlog << Error << "\nMtdIntFile::write(const char *) -> " + << "unable to open netcdf output file: " << _filename << "\n\n"; // exit ( 1 ); @@ -681,7 +620,8 @@ MtdIntFile MtdIntFile::const_t_slice(const int t) const if ( (t < 0) || (t >= Nt) ) { - mlog << Error << "\n MtdIntFile MtdIntFile::const_t_slice(int) const -> range check error\n\n"; + mlog << Error << "\nMtdIntFile MtdIntFile::const_t_slice(int) const -> " + << "range check 
error\n\n"; exit ( 1 ); @@ -750,7 +690,8 @@ MtdIntFile MtdIntFile::const_t_mask(const int t, const int obj_num) const // if ( (t < 0) || (t >= Nt) ) { - mlog << Error << "\n MtdIntFile MtdIntFile::const_t_mask(int) const -> range check error\n\n"; + mlog << Error << "\nMtdIntFile MtdIntFile::const_t_mask(int) const -> " + << "range check error\n\n"; exit ( 1 ); @@ -938,7 +879,8 @@ void MtdIntFile::zero_border(int n) if ( !Data ) { - mlog << Error << "\n MtdIntFile::zero_border(int) -> no data field!\n\n"; + mlog << Error << "\nMtdIntFile::zero_border(int) -> " + << "no data field!\n\n"; exit ( 1 ); @@ -946,7 +888,8 @@ if ( !Data ) { if ( 2*n >= min(Nx, Ny) ) { - mlog << Error << "\n MtdIntFile::zero_border(int) -> border size too large!\n\n"; + mlog << Error << "\nMtdIntFile::zero_border(int) -> " + << "border size too large!\n\n"; exit ( 1 ); @@ -999,7 +942,8 @@ void MtdIntFile::set_to_zeroes() if ( !Data ) { - mlog << Error << "\n MtdIntFile::set_to_zeroes() -> no data!\n\n"; + mlog << Error << "\nMtdIntFile::set_to_zeroes() -> " + << "no data!\n\n"; exit ( 1 ); @@ -1025,7 +969,8 @@ MtdIntFile MtdIntFile::split_const_t(int & n_shapes) const if ( Nt != 1 ) { - mlog << Error << "\n split_const_t(int &) -> not const-time slice!\n\n"; + mlog << Error << "\nsplit_const_t(int &) -> " + << "not const-time slice!\n\n"; exit ( 1 ); @@ -1041,7 +986,6 @@ Mtd_Partition p; const MtdIntFile & id = *this; - d.set_size(Nx, Ny, 1); d.set_grid(*G); @@ -1261,7 +1205,8 @@ int MtdIntFile::volume(int k) const if ( !ObjVolume ) { - mlog << Error << "\n MtdIntFile::volume(int) -> field not split!\n\n"; + mlog << Error << "\nMtdIntFile::volume(int) -> " + << "field not split!\n\n"; exit ( 1 ); @@ -1269,7 +1214,8 @@ if ( !ObjVolume ) { if ( (k < 0) || (k >= Nobjects) ) { - mlog << Error << "\n MtdIntFile::volume(int) -> range check error!\n\n"; + mlog << Error << "\nMtdIntFile::volume(int) -> " + << "range check error!\n\n"; exit ( 1 ); @@ -1290,7 +1236,8 @@ int MtdIntFile::total_volume() 
const if ( !ObjVolume ) { - mlog << Error << "\n MtdIntFile::total_volume() -> field not split!\n\n"; + mlog << Error << "\nMtdIntFile::total_volume() -> " + << "field not split!\n\n"; exit ( 1 ); @@ -1354,21 +1301,8 @@ int j, k; const int n3 = Nx*Ny*Nt; int * old_to_new = (int *) nullptr; int * new_volumes = (int *) nullptr; -// double * new_intensities = (double *) nullptr; int * d = Data; - -// if ( n_new == 0 ) { -// -// mlog << Error << "\n MtdIntFile::sift_objects() -> no objects left!\n\n"; -// -// exit ( 1 ); -// -// } - - - - if ( n_new > 0 ) { old_to_new = new int [Nobjects]; @@ -1381,17 +1315,12 @@ if ( n_new > 0 ) { new_volumes[j] = ObjVolume[new_to_old[j]]; - // new_intensities[j] = MaxConvIntensity[new_to_old[j]]; - } for (j=0; j 0 ) { } -// mlog << Debug(5) << "replace count = " << replace_count << '\n' << flush; -// mlog << Debug(5) << "zero count = " << zero_count << '\n' << flush; - // // rewire // @@ -1539,7 +1465,8 @@ for (x=0; x empty object!\n\n"; + mlog << Error << "\nMtdIntFile::calc_3d_centroid() const -> " + << "empty object!\n\n"; exit ( 1 ); @@ -1587,15 +1514,7 @@ for (x=0; x empty object!\n\n"; - - // exit ( 1 ); - - return; - -} +if ( count == 0 ) return; xbar /= count; ybar /= count; @@ -1641,7 +1560,8 @@ MtdIntFile MtdIntFile::select(int n) const // 1-based if ( (n < 1) || (n > Nobjects) ) { - mlog << Error << "\n MtdIntFile::select(int) -> range check error on n ... " + mlog << Error << "\nMtdIntFile::select(int) -> " + << "range check error on n ... " << "NObjects = " << Nobjects << " ... 
" << "n = " << n << "\n\n"; @@ -1693,7 +1613,8 @@ MtdIntFile MtdIntFile::select_cluster(const IntArray & a) const // 1-based if ( (a.min() < 0) || (a.max() > Nobjects) ) { - mlog << Error << "\n MtdIntFile::select_cluster(const IntArray &) -> range check error\n\n"; + mlog << Error << "\nMtdIntFile::select_cluster(const IntArray &) -> " + << "range check error\n\n"; exit ( 1 ); @@ -1757,7 +1678,8 @@ int MtdIntFile::x_left(const int y) const if ( (y < 0) || (y >= Ny) ) { - mlog << Error << "\n MtdIntFile::x_left(int) -> range check error\n\n"; + mlog << Error << "\nMtdIntFile::x_left(int) -> " + << "range check error\n\n"; exit ( 1 ); @@ -1786,7 +1708,8 @@ int MtdIntFile::x_right(const int y) const if ( (y < 0) || (y >= Ny) ) { - mlog << Error << "\n MtdIntFile::x_right(int) -> range check error\n\n"; + mlog << Error << "\nMtdIntFile::x_right(int) -> " + << "range check error\n\n"; exit ( 1 ); @@ -1875,7 +1798,8 @@ Mtd_2D_Moments MtdIntFile::calc_2d_moments() const if ( Nt != 1 ) { - mlog << Error << "\n MtdIntFile::calc_2d_moments() const -> not a 2D object!\n\n"; + mlog << Error << "\nMtdIntFile::calc_2d_moments() const -> " + << "not a 2D object!\n\n"; exit ( 1 ); @@ -1936,9 +1860,6 @@ MtdIntFile after; MtdIntFile rv; Mtd_Partition p; const int zero_border_size = 2; -// int imin, imax; - - // // find the partition @@ -1979,8 +1900,6 @@ for (j=1; j<(mask.nt()); ++j) { } -// p.dump(cout); - n_shapes = p.n_elements(); // @@ -2017,7 +1936,8 @@ for (t=0; t<(mask.nt()); ++t) { if ( nc < 0 ) { - mlog << Error << "\n split(const MtdIntFile &, int &) -> can't find cell!\n\n"; + mlog << Error << "\nsplit(const MtdIntFile &, int &) -> " + << "can't find cell!\n\n"; exit ( 1 ); @@ -2051,7 +1971,8 @@ void adjust_obj_numbers(MtdIntFile & s, int delta) if ( s.nt() != 1 ) { - mlog << Error << "\n adjust_obj_numbers() -> not const-time slice!\n\n"; + mlog << Error << "\nadjust_obj_numbers() -> " + << "not const-time slice!\n\n"; exit ( 1 ); @@ -2106,8 +2027,6 @@ for (x=0; x 
trouble opening output file \"" - << output_filename << "\"\n\n"; + mlog << Error << "\ndo_mtd_nc_output() -> " + << "trouble opening output file: " + << output_filename << "\n\n"; exit ( 1 ); @@ -68,11 +69,9 @@ const bool have_pairs = (fcst_obj.n_objects() != 0) && ( obs_obj.n_objects() != 0); // - // dimensions + // add time dimension // -nx_dim = add_dim(&out, nx_dim_name, fcst_raw.nx()); -ny_dim = add_dim(&out, ny_dim_name, fcst_raw.ny()); nt_dim = add_dim(&out, nt_dim_name, fcst_raw.nt()); // @@ -82,7 +81,7 @@ nt_dim = add_dim(&out, nt_dim_name, fcst_raw.nt()); write_netcdf_global(&out, output_filename, "MTD", config.model.c_str(), config.obtype.c_str(), config.desc.c_str()); -write_nc_grid(out, fcst_raw.grid()); +write_netcdf_proj(&out, fcst_raw.grid(), ny_dim, nx_dim); // // variables @@ -142,19 +141,18 @@ NcFile out(output_filename, NcFile::replace); if ( IS_INVALID_NC(out) ) { - mlog << Error << "\n\n do_mtd_nc_output[single]() -> trouble opening output file \"" - << output_filename << "\"\n\n"; + mlog << Error << "\ndo_mtd_nc_output[single]() -> " + << "trouble opening output file: " + << output_filename << "\n\n"; exit ( 1 ); } // - // dimensions + // add time dimension // -nx_dim = add_dim(&out, nx_dim_name, raw.nx()); -ny_dim = add_dim(&out, ny_dim_name, raw.ny()); nt_dim = add_dim(&out, nt_dim_name, raw.nt()); // @@ -164,7 +162,7 @@ nt_dim = add_dim(&out, nt_dim_name, raw.nt()); write_netcdf_global(&out, output_filename, "MTD", config.model.c_str(), config.obtype.c_str(), config.desc.c_str()); -write_nc_grid(out, raw.grid()); +write_netcdf_proj(&out, raw.grid(), ny_dim, nx_dim); // // variables @@ -217,12 +215,11 @@ NcVar lon_var = add_var(&out, lon_name, ncFloat, ny_dim, nx_dim); add_att(&lat_var, "long_name", "Latitude"); add_att(&lon_var, "long_name", "Longitude"); -float * lat_data = new float [nx*ny]; -float * lon_data = new float [nx*ny]; +vector lat_data(nx*ny); +vector lon_data(nx*ny); - -Lat = lat_data; -Lon = lon_data; +Lat = 
lat_data.data(); +Lon = lon_data.data(); for (y=0; y out_data(n3); -const char * const name = ( is_fcst ? fcst_clus_id_name : obs_clus_id_name ); +const string name = ( is_fcst ? fcst_clus_id_name : obs_clus_id_name ); NcVar var = add_var(&out, name, ncInt, nt_dim, ny_dim, nx_dim); @@ -367,7 +357,7 @@ add_att(&var, "_FillValue", bad_data_int); const int n_objects = ( is_fcst ? (e.n_fcst_simples()) : (e.n_obs_simples()) ); -remap = new int [n_objects + 1]; +vector remap(n_objects + 1); remap[0] = 0; @@ -382,26 +372,20 @@ for (j=1; j<=n_objects; ++j) { } -op = out_data; - for (j=0; j= Nelements) ) { - mlog << Error << "\n\n EquivalenceClass::element(int) const -> range check error\n\n"; + mlog << Error << "\nEquivalenceClass::element(int) const -> " + << "range check error\n\n"; exit ( 1 ); @@ -529,28 +507,6 @@ return; } -//////////////////////////////////////////////////////////////////////// - -/* -bool Mtd_Partition::has(int k) const - -{ - -int j; -EquivalenceClass ** c = C; - -for (j=0; jhas(k) ) return true; - -} - - -return false; - -} -*/ - //////////////////////////////////////////////////////////////////////// @@ -560,7 +516,8 @@ bool Mtd_Partition::has(int index, int k) const if ( (index < 0) || (index >= Nelements) ) { - mlog << Error << "\n\n Mtd_Partition::has(int index, int k) const -> range check error on index\n\n"; + mlog << Error << "\nMtd_Partition::has(int index, int k) const -> " + << "range check error on index\n\n"; exit ( 1 ); @@ -602,7 +559,8 @@ void Mtd_Partition::merge_classes(int nclass_1, int nclass_2) if ( (nclass_1 < 0) || (nclass_1 >= Nelements) || (nclass_2 < 0) || (nclass_2 >= Nelements) ) { - mlog << Error << "\n\n Mtd_Partition::merge_classes() -> range check error\n\n"; + mlog << Error << "\nMtd_Partition::merge_classes() -> " + << "range check error\n\n"; exit ( 1 ); @@ -662,7 +620,8 @@ nclass_2 = which_class(value_2); if ( (nclass_1 < 0) || (nclass_2 < 0) ) { - mlog << Error << "\n\n Mtd_Partition::merge_values() -> bad values 
... " + mlog << Error << "\nMtd_Partition::merge_values() -> " + << "bad values ... " << "(value_1, value_2) = " << value_1 << ", " << value_2 << " ... " << "(nclass_1, nclass_2) = " << nclass_1 << ", " << nclass_2 << "\n\n"; @@ -711,7 +670,8 @@ const EquivalenceClass * Mtd_Partition::operator()(int k) const if ( (k < 0) || (k >= Nelements) ) { - mlog << Error << "\n\n Mtd_Partition::operator()(int) const -> range check error\n\n"; + mlog << Error << "\nMtd_Partition::operator()(int) const -> " + << "range check error\n\n"; exit ( 1 ); @@ -778,8 +738,6 @@ for (j=0; jhas(k) ) return ( true ); + if ( (*c)->has(k) ) return true; } -return ( false ); +return false; } diff --git a/src/tools/other/mode_time_domain/mtd_read_data.cc b/src/tools/other/mode_time_domain/mtd_read_data.cc index 74733a8de6..a48357b1f6 100644 --- a/src/tools/other/mode_time_domain/mtd_read_data.cc +++ b/src/tools/other/mode_time_domain/mtd_read_data.cc @@ -33,9 +33,12 @@ vector mtd_read_data(MtdConfigInfo & config, VarInfo & varinfo, { +static const char *method_name = "mtd_read_data() -> "; + if ( filenames.n() < 2 ) { - mlog << Error << "\n\n mtd_read_data() -> need at least 2 data files!\n\n"; + mlog << Error << "\n" << method_name + << "need at least 2 data files!\n\n"; exit ( 1 ); @@ -52,14 +55,15 @@ vector valid_times; for (j=0; j<(filenames.n()); ++j) { - mlog << Debug(2) - << "mtd_read_data() -> processing file \"" << filenames[j] << "\"\n"; + mlog << Debug(2) << method_name + << "processing file: " << filenames[j] << "\n"; data_2d_file = factory.new_met_2d_data_file(filenames[j].c_str(), varinfo.file_type()); if ( ! data_2d_file->data_plane(varinfo, plane) ) { - mlog << Error << "\n\n mtd_read_data() -> unable to get data plane at time " << j << "\n\n"; + mlog << Error << "\n" << method_name + << "unable to get data plane at time " << j << "\n\n"; exit ( 1 ); @@ -67,7 +71,8 @@ for (j=0; j<(filenames.n()); ++j) { if ( ! 
data_2d_file->data_plane(varinfo, plane) ) { - mlog << Error << "\n\n mtd_read_data() -> unable to get data plane at time " << j << "\n\n"; + mlog << Error << "\n" << method_name + << "unable to get data plane at time " << j << "\n\n"; exit ( 1 ); @@ -157,9 +162,13 @@ if (variableTimeIncs) { unixtime umean = (unixtime)mean; unixtime uvar = (unixtime)var; unixtime suvar = (unixtime)svar; - mlog << Warning << "\n\n mtd_read_data() -> File time increments are not constant, could be problematic\n"; - mlog << Warning << " mtd_read_data() -> Using MODE of the increments, mode=" << dt_start << "\n"; - mlog << Warning << " mtd_read_data() -> Time increment properties: mean=" << umean << " variance=" << uvar << " sqrt(var)=" << suvar << "\n\n"; + mlog << Warning << "\n" << method_name + << "file time increments are not constant, could be problematic\n\n"; + mlog << Warning << "\n" << method_name + << "using MODE of the increments, mode=" << dt_start << "\n\n"; + mlog << Warning << "\n" << method_name + << "Time increment properties: mean=" << umean << " variance=" << uvar + << " sqrt(var)=" << suvar << "\n\n"; } } diff --git a/src/tools/other/mode_time_domain/mtd_txt_output.cc b/src/tools/other/mode_time_domain/mtd_txt_output.cc index f1ee61aa5d..b3f9ea14b4 100644 --- a/src/tools/other/mode_time_domain/mtd_txt_output.cc +++ b/src/tools/other/mode_time_domain/mtd_txt_output.cc @@ -7,7 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - //////////////////////////////////////////////////////////////////////// @@ -56,13 +55,13 @@ ofstream out; AsciiTable table; const int Nobj = fcst_att.n() + obs_att.n(); - out.open(output_filename); if ( ! 
out ) { - mlog << Error << "\n\n do_3d_single_txt_output() -> unable to open output filename \"" - << output_filename << "\'\n\n"; + mlog << Error << "\ndo_3d_single_txt_output() -> " + << "unable to open output filename: " + << output_filename << "\n\n"; exit ( 1 ); @@ -172,13 +171,13 @@ ofstream out; AsciiTable table; const int Nobj = att.n(); - out.open(output_filename); if ( ! out ) { - mlog << Error << "\n\n do_3d_single_txt_output[single]() -> unable to open output filename \"" - << output_filename << "\'\n\n"; + mlog << Error << "\ndo_3d_single_txt_output[single]() -> " + << "unable to open output filename: " + << output_filename << "\n\n"; exit ( 1 ); @@ -322,8 +321,9 @@ out.open(output_filename); if ( ! out ) { - mlog << Error << "\n\n do_3d_pair_txt_output() -> unable to open output filename \"" - << output_filename << "\'\n\n"; + mlog << Error << "\ndo_3d_pair_txt_output() -> " + << "unable to open output filename: " + << output_filename << "\n\n"; exit ( 1 ); @@ -415,13 +415,13 @@ ofstream out; AsciiTable table; const int n_total = fcst_simple_att.n() + obs_simple_att.n() + fcst_cluster_att.n() + obs_cluster_att.n(); - out.open(output_filename); if ( ! out ) { - mlog << Error << "\n\n do_2d_txt_output() -> unable to open output filename \"" - << output_filename << "\'\n\n"; + mlog << Error << "\ndo_2d_txt_output() -> " + << "unable to open output filename: " + << output_filename << "\n\n"; exit ( 1 ); @@ -613,13 +613,13 @@ ofstream out; AsciiTable table; const int n_total = att.n(); - out.open(output_filename); if ( ! 
out ) { - mlog << Error << "\n\n do_2d_txt_output[single]() -> unable to open output filename \"" - << output_filename << "\'\n\n"; + mlog << Error << "\ndo_2d_txt_output[single]() -> " + << "unable to open output filename: " + << output_filename << "\n\n"; exit ( 1 ); diff --git a/src/tools/other/mode_time_domain/mtdfiletype_to_string.cc b/src/tools/other/mode_time_domain/mtdfiletype_to_string.cc index e7a2ff1a4e..0208bf537e 100644 --- a/src/tools/other/mode_time_domain/mtdfiletype_to_string.cc +++ b/src/tools/other/mode_time_domain/mtdfiletype_to_string.cc @@ -73,6 +73,7 @@ else if ( strcmp(text, "mtd_file_conv" ) == 0 ) { t = mtd_file_conv; re else if ( strcmp(text, "mtd_file_mask" ) == 0 ) { t = mtd_file_mask; return true; } else if ( strcmp(text, "mtd_file_object" ) == 0 ) { t = mtd_file_object; return true; } else if ( strcmp(text, "no_mtd_file_type") == 0 ) { t = no_mtd_file_type; return true; } + // // nope // diff --git a/src/tools/other/mode_time_domain/nc_grid.cc b/src/tools/other/mode_time_domain/nc_grid.cc deleted file mode 100644 index e5865a1c4a..0000000000 --- a/src/tools/other/mode_time_domain/nc_grid.cc +++ /dev/null @@ -1,665 +0,0 @@ -// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* -// ** Copyright UCAR (c) 1992 - 2024 -// ** University Corporation for Atmospheric Research (UCAR) -// ** National Center for Atmospheric Research (NCAR) -// ** Research Applications Lab (RAL) -// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA -// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - - - -//////////////////////////////////////////////////////////////////////// - - -#include -#include -#include -#include -#include -#include - -#include - -#include "nc_utils_local.h" -#include "nc_grid.h" - -using namespace std; -using namespace netCDF; - - -//////////////////////////////////////////////////////////////////////// - - -static bool read_nc_st_grid (NcFile &, Grid &); -static bool read_nc_lc_grid (NcFile &, Grid &); -static bool 
read_nc_latlon_grid (NcFile &, Grid &); - -static void write_nc_st_grid (NcFile &, const StereographicData &); -static void write_nc_lc_grid (NcFile &, const LambertData &); -static void write_nc_latlon_grid (NcFile &, const LatLonData &); - - -//////////////////////////////////////////////////////////////////////// - - -bool read_nc_grid(NcFile & f, Grid & g) - -{ - - bool status = false; - ConcatString proj; - //const ConcatString proj = string_att(f, "Projection"); - get_att_value_string(&f, (string)"Projection", proj); - - - g.clear(); - - - if ( proj == "Polar Stereographic" ) { - - status = read_nc_st_grid(f, g); - - } else if ( proj == "Lambert Conformal" ) { - - status = read_nc_lc_grid(f, g); - - } else if ( proj == "LatLon" ) { - - status = read_nc_latlon_grid(f, g); - - } else { - - mlog << Error << "\n\n read_nc_grid() -> haven't written code to parse \"" << proj << "\" grids yet!\n\n"; - - return false; - - } - - - // - // done - // - -return status; - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool write_nc_grid(NcFile & f, const Grid & g) - -{ - -GridInfo info = g.info(); - -if ( !(info.ok()) ) { - - mlog << Error << "\n\n write_nc_grid(NcFile &, const Grid &) -> can't get information from grid!\n\n"; - - exit ( 1 ); - -} - - if ( info.st ) write_nc_st_grid (f, *(info.st)); -else if ( info.lc ) write_nc_lc_grid (f, *(info.lc)); -else if ( info.ll ) write_nc_latlon_grid (f, *(info.ll)); -else { - - mlog << Error << "\n\n bool write_nc_grid(NcFile &, const Grid &) -> unsupported projection type\n\n"; - - exit ( 1 ); - -} - - // - // done - // - -return false; - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool read_nc_st_grid(NcFile & f, Grid & g) - -{ - - StereographicData data; - ConcatString c; - - // - // name - // - - data.name = "Unknown stereographic"; - - // - // hemisphere - // - - //c = string_att(f, "hemisphere"); - get_att_value_string(&f, 
(string)"hemisphere", c); - -data.hemisphere = c[0]; - - // - // scale latitude - // - -data.scale_lat = string_att_as_double(f, "scale_lat"); - - // - // lat/lon pin - // - -data.lat_pin = string_att_as_double(f, "lat_pin"); -data.lon_pin = string_att_as_double(f, "lon_pin"); - -data.lon_pin *= -1.0; - - // - // x/y pin - // - -data.x_pin = string_att_as_double(f, "x_pin"); -data.y_pin = string_att_as_double(f, "y_pin"); - - // - // orientation longitude - // - -data.lon_orient = string_att_as_double(f, "lon_orient"); - -data.lon_orient *= -1.0; - - // - // D, R - // - -data.d_km = string_att_as_double(f, "d_km"); -data.r_km = string_att_as_double(f, "r_km"); - - // - // Nx, Ny - // - -data.nx = string_att_as_int(f, "nx"); -data.ny = string_att_as_int(f, "ny"); - -data.eccentricity = 0.; -data.false_east = 0.; -data.false_north = 0.; -data.scale_factor = 1.0; -data.dy_km = data.d_km; - - // - // done - // - -g.set(data); - -return true; - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool read_nc_lc_grid(NcFile & f, Grid & g) - -{ - -LambertData data; -g.clear(); - - // - // name - // - -data.name = "Unknown lambert"; - - // - // scale latitudes - // - -data.scale_lat_1 = string_att_as_double(f, "scale_lat_1"); -data.scale_lat_2 = string_att_as_double(f, "scale_lat_2"); - - // - // lat/lon pin - // - -data.lat_pin = string_att_as_double(f, "lat_pin"); -data.lon_pin = string_att_as_double(f, "lon_pin"); - -data.lon_pin *= -1.0; - - // - // x/y pin - // - -data.x_pin = string_att_as_double(f, "x_pin"); -data.y_pin = string_att_as_double(f, "y_pin"); - - // - // orientation longitude - // - -data.lon_orient = string_att_as_double(f, "lon_orient"); - -data.lon_orient *= -1.0; - - // - // D, R - // - -data.d_km = string_att_as_double(f, "d_km"); -data.r_km = string_att_as_double(f, "r_km"); - - // - // Nx, Ny - // - -data.nx = string_att_as_int(f, "nx"); -data.ny = string_att_as_int(f, "ny"); - - // - // Rotation angle - // - 
-data.so2_angle = 0.0; - - // - // done - // - -g.set(data); - -return true; - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool read_nc_latlon_grid(NcFile & f, Grid & g) - -{ - -LatLonData data; - -g.clear(); - - // - // name - // - -data.name = "Unknown latlon"; - - // - // lower-left lat/lon - // - -data.lat_ll = string_att_as_double(f, "lat_ll"); -data.lon_ll = string_att_as_double(f, "lon_ll"); - -data.lon_ll = -(data.lon_ll); - - // - // lat/lon deltas - // - -data.delta_lat = string_att_as_double(f, "delta_lat"); -data.delta_lon = string_att_as_double(f, "delta_lon"); - - // - // grid size - // - -data.Nlat = string_att_as_int(f, "Nlat"); -data.Nlon = string_att_as_int(f, "Nlon"); - - - // - // done - // - -g.set(data); - -return true; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void write_nc_st_grid(NcFile & f, const StereographicData & data) - -{ - -ConcatString junk; -ConcatString j2; - - // - // name - // - -add_att(&f, "Projection", "Polar Stereographic"); - - // - // hemisphere - // - -junk = data.hemisphere; - -add_att(&f, "hemisphere", junk); - - // - // scale latitude - // - - j2.format("%.5f", data.scale_lat); - -fix_float(j2); - - junk.format("%s degrees_north", j2.c_str()); - -add_att(&f, "scale_lat", junk); - - // - // lat/lon pin point - // - -junk.format("%.5f", data.lat_pin); - -fix_float(junk); - -add_att(&f, "lat_pin", junk); - - - junk.format("%.5f", -(data.lon_pin)); - -fix_float(junk); - -add_att(&f, "lon_pin", junk); - - // - // x/y pin point - // - - junk.format("%.5f", data.x_pin); - -fix_float(junk); - -add_att(&f, "x_pin", junk); - - - junk.format("%.5f", data.y_pin); - -fix_float(junk); - -add_att(&f, "y_pin", junk); - - // - // orientation longitude - // - - junk.format("%.5f", -(data.lon_orient)); - -fix_float(junk); - -add_att(&f, "lon_orient", junk); - - // - // D and R - // - - j2.format("%.5f", data.d_km); - -fix_float(j2); - - 
junk.format("%s km", j2.c_str()); - -add_att(&f, "d_km", junk); - - - j2.format("%.5f", data.r_km); - -fix_float(j2); - - junk.format("%s km", j2.c_str()); - -add_att(&f, "r_km", junk); - - // - // nx and ny - // - - junk.format("%d", data.nx); - -add_att(&f, "nx", junk); - - - junk.format("%d", data.ny); - -add_att(&f, "ny", junk); - - - // - // done - // - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void write_nc_lc_grid(NcFile & f, const LambertData & data) - -{ - -ConcatString junk; -ConcatString j2; - - // - // name - // - -add_att(&f, "Projection", "Lambert Conformal"); - - // - // scale latitudes - // - - junk.format("%.5f", data.scale_lat_1); - -fix_float(junk); - -add_att(&f, "scale_lat_1", junk); - - - junk.format("%.5f", data.scale_lat_2); - -fix_float(junk); - -add_att(&f, "scale_lat_2", junk); - - // - // lat/lon pin point - // - - junk.format("%.5f", data.lat_pin); - -fix_float(junk); - -add_att(&f, "lat_pin", junk); - - - junk.format("%.5f", -(data.lon_pin)); - -fix_float(junk); - -add_att(&f, "lon_pin", junk); - - // - // x/y pin point - // - - junk.format("%.5f", data.x_pin); - -fix_float(junk); - -add_att(&f, "x_pin", junk); - - - junk.format("%.5f", data.y_pin); - -fix_float(junk); - -add_att(&f, "y_pin", junk); - - // - // orientation longitude - // - - junk.format("%.5f", -(data.lon_orient)); - -fix_float(junk); - -add_att(&f, "lon_orient", junk); - - // - // D and R - // - - j2.format("%.5f", data.d_km); - -fix_float(j2); - - junk.format("%s km", j2.c_str()); - -add_att(&f, "d_km", junk); - - - j2.format("%.5f", data.r_km); - -fix_float(j2); - - junk.format("%s km", j2.c_str()); - -add_att(&f, "r_km", junk); - - // - // nx and ny - // - - junk.format("%d", data.nx); - -add_att(&f, "nx", junk); - - - junk.format("%d", data.ny); - -add_att(&f, "ny", junk); - - - // - // done - // - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void 
write_nc_latlon_grid (NcFile & f, const LatLonData & data) - -{ - -ConcatString junk; - - - // - // name - // - -add_att(&f, "Projection", "LatLon"); - - // - // lower left point - // - - junk.format("%.5f", data.lat_ll); - -fix_float(junk); - -add_att(&f, "lat_ll", junk); - - - junk.format("%.5f", -(data.lon_ll)); - -fix_float(junk); - -add_att(&f, "lon_ll", junk); - - // - // lat/lon deltas - // - - junk.format("%.5f", data.delta_lat); - -fix_float(junk); - -add_att(&f, "delta_lat", junk); - - - junk.format("%.5f", data.delta_lon); - -fix_float(junk); - -add_att(&f, "delta_lon", junk); - - // - // grid size - // - - junk.format("%d", data.Nlat); - -fix_float(junk); - -add_att(&f, "Nlat", junk); - - - junk.format("%d", data.Nlon); - -fix_float(junk); - -add_att(&f, "Nlon", junk); - - - // - // done - // - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - diff --git a/src/tools/other/mode_time_domain/nc_grid.h b/src/tools/other/mode_time_domain/nc_grid.h deleted file mode 100644 index c130990c8c..0000000000 --- a/src/tools/other/mode_time_domain/nc_grid.h +++ /dev/null @@ -1,41 +0,0 @@ -// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* -// ** Copyright UCAR (c) 1992 - 2024 -// ** University Corporation for Atmospheric Research (UCAR) -// ** National Center for Atmospheric Research (NCAR) -// ** Research Applications Lab (RAL) -// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA -// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - - - -//////////////////////////////////////////////////////////////////////// - - -#ifndef __NETCDF_GRIDS_H__ -#define __NETCDF_GRIDS_H__ - - -//////////////////////////////////////////////////////////////////////// - - -#include "nc_utils.h" -#include "vx_grid.h" - - -//////////////////////////////////////////////////////////////////////// - - -extern bool read_nc_grid(netCDF::NcFile &, Grid &); - -extern bool write_nc_grid(netCDF::NcFile &, const Grid &); - - 
-//////////////////////////////////////////////////////////////////////// - - -#endif /* __NETCDF_GRIDS_H__ */ - - -//////////////////////////////////////////////////////////////////////// - - diff --git a/src/tools/other/mode_time_domain/nc_utils_local.cc b/src/tools/other/mode_time_domain/nc_utils_local.cc deleted file mode 100644 index 00761859cb..0000000000 --- a/src/tools/other/mode_time_domain/nc_utils_local.cc +++ /dev/null @@ -1,191 +0,0 @@ -// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* -// ** Copyright UCAR (c) 1992 - 2024 -// ** University Corporation for Atmospheric Research (UCAR) -// ** National Center for Atmospheric Research (NCAR) -// ** Research Applications Lab (RAL) -// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA -// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - - - -//////////////////////////////////////////////////////////////////////// - - -#include -#include -#include -#include -#include -#include - -#include - -#include "nc_utils.h" -#include "nc_utils_local.h" -#include "vx_log.h" - -using namespace std; -using namespace netCDF; - - -//////////////////////////////////////////////////////////////////////// - - -static const int cbuf_size = 8096; - -static char cbuf [cbuf_size]; - - -//////////////////////////////////////////////////////////////////////// - - -const char * string_att(const NcFile & Nc, const char * name) - -{ - -const char *method_name = "string_att() -> "; -NcGroupAtt *att = get_nc_att(&Nc, (string)name); - -if ( GET_NC_TYPE_ID_P(att) != NcType::nc_CHAR ) { - - mlog << Error << "\n" << method_name << "attribute \"" << name - << "\" is not a character string!\n\n"; - - exit ( 1 ); - -} - -ConcatString value; -get_att_value_chars(att, value); -m_strncpy(cbuf, value.c_str(), cbuf_size - 1, method_name); - -cbuf[cbuf_size - 1] = (char) 0; - -if (att) { delete att; att = nullptr; } - - // - // done - // - -return cbuf; - -} - - 
-//////////////////////////////////////////////////////////////////////// - - -double string_att_as_double (const NcFile & Nc, const char * name) - -{ - -const char * c = string_att(Nc, name); - -double value = atof(c); - -return value; - -} - - -//////////////////////////////////////////////////////////////////////// - - -int string_att_as_int (const NcFile & Nc, const char * name) - -{ - -const char * c = string_att(Nc, name); - -int k = atoi(c); - -return k; - -} - - -//////////////////////////////////////////////////////////////////////// - - -long long string_att_as_ll (const NcFile & Nc, const char * name) - -{ - -const char * c = string_att(Nc, name); - -long long k = atoll(c); - -return k; - -} - - -//////////////////////////////////////////////////////////////////////// - - // - // example: 20100517_010000 - // - -unixtime parse_start_time(const char * text) - -{ - -int k; -int month, day, year, hour, minute, second; -unixtime t; -const int n = m_strlen(text); - -if ( n != 15 ) { - - mlog << Error << "\n\n parse_start_time() -> bad string ... 
\"" << text << "\"\n\n"; - - exit ( 1 ); - -} - -k = atoi(text); - -year = k/10000; -month = (k%10000)/100; -day = k%100; - -k = atoi(text + 9); - -hour = k/10000; -minute = (k%10000)/100; -second = k%100; - -t = mdyhms_to_unix(month, day, year, hour, minute, second); - - - // - // done - // - -return t; - -} - - -//////////////////////////////////////////////////////////////////////// - - -ConcatString start_time_string(const unixtime t) - -{ - -int month, day, year, hour, minute, second; -char junk[256]; - -unix_to_mdyhms(t, month, day, year, hour, minute, second); - -snprintf(junk, sizeof(junk), "%04d%02d%02d_%02d%02d%02d", year, month, day, hour, minute, second); - -return ConcatString(junk); - -} - - -//////////////////////////////////////////////////////////////////////// - - diff --git a/src/tools/other/mode_time_domain/nc_utils_local.h b/src/tools/other/mode_time_domain/nc_utils_local.h deleted file mode 100644 index 2f1f1df055..0000000000 --- a/src/tools/other/mode_time_domain/nc_utils_local.h +++ /dev/null @@ -1,50 +0,0 @@ -// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* -// ** Copyright UCAR (c) 1992 - 2024 -// ** University Corporation for Atmospheric Research (UCAR) -// ** National Center for Atmospheric Research (NCAR) -// ** Research Applications Lab (RAL) -// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA -// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - - - -//////////////////////////////////////////////////////////////////////// - - -#ifndef __MTD_NETCDF_UTILS_H__ -#define __MTD_NETCDF_UTILS_H__ - - -//////////////////////////////////////////////////////////////////////// - - -#include "concat_string.h" -#include "vx_cal.h" -#include "nc_utils.h" - -//////////////////////////////////////////////////////////////////////// - - -extern const char * string_att (const netCDF::NcFile &, const char * name); -extern int string_att_as_int (const netCDF::NcFile &, const char * name); -extern long long string_att_as_ll (const 
netCDF::NcFile &, const char * name); -extern double string_att_as_double (const netCDF::NcFile &, const char * name); - - -//////////////////////////////////////////////////////////////////////// - - -extern unixtime parse_start_time(const char *); - -extern ConcatString start_time_string(const unixtime); - - -//////////////////////////////////////////////////////////////////////// - - -#endif /* __MTD_NETCDF_UTILS_H__ */ - - -//////////////////////////////////////////////////////////////////////// - - diff --git a/src/tools/other/modis_regrid/Makefile.in b/src/tools/other/modis_regrid/Makefile.in index 68ff194a77..5e97e8735a 100644 --- a/src/tools/other/modis_regrid/Makefile.in +++ b/src/tools/other/modis_regrid/Makefile.in @@ -233,6 +233,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/modis_regrid/cloudsat_swath_file.h b/src/tools/other/modis_regrid/cloudsat_swath_file.h index 9853b7c75b..b6be77c59a 100644 --- a/src/tools/other/modis_regrid/cloudsat_swath_file.h +++ b/src/tools/other/modis_regrid/cloudsat_swath_file.h @@ -81,9 +81,9 @@ class SatDimension { //////////////////////////////////////////////////////////////////////// -inline ConcatString SatDimension::name() const { return ( Name ); } +inline ConcatString SatDimension::name() const { return Name; } -inline int SatDimension::size() const { return ( Size ); } +inline int SatDimension::size() const { return Size; } //////////////////////////////////////////////////////////////////////// @@ -167,18 +167,18 @@ class SatAttribute { //////////////////////////////////////////////////////////////////////// -inline ConcatString SatAttribute::name() const { return ( Name ); } +inline ConcatString SatAttribute::name() const { return Name; } -inline int SatAttribute::number_type() const { return ( 
Numbertype ); } +inline int SatAttribute::number_type() const { return Numbertype; } -inline int SatAttribute::bytes() const { return ( Bytes ); } +inline int SatAttribute::bytes() const { return Bytes; } -inline int SatAttribute::n_values() const { return ( Nvalues ); } +inline int SatAttribute::n_values() const { return Nvalues; } -inline int SatAttribute::ival(int n) const { return ( Ival[n] ); } -inline double SatAttribute::dval(int n) const { return ( Dval[n] ); } +inline int SatAttribute::ival(int n) const { return Ival[n]; } +inline double SatAttribute::dval(int n) const { return Dval[n]; } -inline ConcatString SatAttribute::sval() const { return ( Sval ); } +inline ConcatString SatAttribute::sval() const { return Sval; } //////////////////////////////////////////////////////////////////////// @@ -253,12 +253,12 @@ class SwathDataField { //////////////////////////////////////////////////////////////////////// -inline ConcatString SwathDataField::name() const { return ( Name ); } +inline ConcatString SwathDataField::name() const { return Name; } -inline int SwathDataField::get_rank () const { return ( Rank ); } -inline int SwathDataField::numbertype () const { return ( Numbertype ); } +inline int SwathDataField::get_rank () const { return Rank ; } +inline int SwathDataField::numbertype () const { return Numbertype ; } -inline int SwathDataField::n_dimensions () const { return ( Ndimensions ); } +inline int SwathDataField::n_dimensions () const { return Ndimensions; } //////////////////////////////////////////////////////////////////////// @@ -374,15 +374,15 @@ class CloudsatSwath { //////////////////////////////////////////////////////////////////////// -inline ConcatString CloudsatSwath::name() const { return ( Name ); } +inline ConcatString CloudsatSwath::name() const { return Name; } -inline int CloudsatSwath::swath_id() const { return ( SwathId ); } +inline int CloudsatSwath::swath_id() const { return SwathId; } -inline int CloudsatSwath::n_data_fields() 
const { return ( Ndatafields ); } +inline int CloudsatSwath::n_data_fields() const { return Ndatafields; } -inline int CloudsatSwath::n_attributes () const { return ( Nattributes ); } +inline int CloudsatSwath::n_attributes () const { return Nattributes; } -inline int CloudsatSwath::n_geo_fields () const { return ( Ngeofields ); } +inline int CloudsatSwath::n_geo_fields () const { return Ngeofields; } //////////////////////////////////////////////////////////////////////// @@ -448,11 +448,11 @@ class CloudsatSwathFile { //////////////////////////////////////////////////////////////////////// -inline ConcatString CloudsatSwathFile::filename() const { return ( Filename ); } +inline ConcatString CloudsatSwathFile::filename() const { return Filename; } -inline int CloudsatSwathFile::file_id() const { return ( FileId ); } +inline int CloudsatSwathFile::file_id() const { return FileId; } -inline int CloudsatSwathFile::n_swaths() const { return ( Nswaths ); } +inline int CloudsatSwathFile::n_swaths() const { return Nswaths; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/modis_regrid/data_plane_to_netcdf.cc b/src/tools/other/modis_regrid/data_plane_to_netcdf.cc index e13719a321..819a67885b 100644 --- a/src/tools/other/modis_regrid/data_plane_to_netcdf.cc +++ b/src/tools/other/modis_regrid/data_plane_to_netcdf.cc @@ -79,57 +79,57 @@ void write_grid_to_netcdf(const DataPlane & plane, const Grid & grid, const char NcDim lat_dim ; NcDim lon_dim ; NcVar f_var ; - - + + // Create a new NetCDF file and open it f_out = open_ncfile(out_filename, true); - + if(IS_INVALID_NC_P(f_out)) { mlog << Error << "\nwrite_netcdf() -> " - << "trouble opening output file " << out_filename - << "\n\n"; + << "trouble opening output file " << out_filename + << "\n\n"; delete f_out; f_out = (NcFile *) nullptr; exit(1); } - + // Add global attributes const char * program_name = "data_plane_to_netcdf"; write_netcdf_global(f_out, out_filename, 
program_name); - + // Add the projection information write_netcdf_proj(f_out, grid, lat_dim, lon_dim); // Add the lat/lon variables write_netcdf_latlon(f_out, &lat_dim, &lon_dim, grid); - + int deflate_level = get_compress(); //if (deflate_level < 0) deflate_level = 0; - + // Define variable f_var = add_var(f_out, (string)var_info.name(), ncFloat, lat_dim, lon_dim, deflate_level); - + // Add variable attributes add_att(&f_var, "name", (string)var_info.name()); add_att(&f_var, "units", (string)var_info.units()); add_att(&f_var, "long_name", (string)var_info.long_name()); add_att(&f_var, "_FillValue", bad_data_float); - + // Write out the times write_netcdf_var_times(&f_var, plane); - + // Write the data if (!put_nc_data_with_dims(&f_var, plane.data(), plane.ny(), plane.nx())) { mlog << Error << "\nwrite_netcdf() -> " - << "error with f_var->put()\n\n"; + << "error with f_var->put()\n\n"; exit(1); } - + // Close and clean up delete f_out; f_out = (NcFile *) nullptr; - + return; } diff --git a/src/tools/other/modis_regrid/modis_file.h b/src/tools/other/modis_regrid/modis_file.h index 9ef823dfed..245ea3347f 100644 --- a/src/tools/other/modis_regrid/modis_file.h +++ b/src/tools/other/modis_regrid/modis_file.h @@ -137,12 +137,12 @@ class ModisFile { //////////////////////////////////////////////////////////////////////// -inline ConcatString ModisFile::filename() const { return ( Filename ); } +inline ConcatString ModisFile::filename() const { return Filename; } -inline int ModisFile::file_id() const { return ( FileId ); } +inline int ModisFile::file_id() const { return FileId; } -inline int ModisFile::dim0() const { return ( Dim0 ); } -inline int ModisFile::dim1() const { return ( Dim1 ); } +inline int ModisFile::dim0() const { return Dim0; } +inline int ModisFile::dim1() const { return Dim1; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/pb2nc/Makefile.in b/src/tools/other/pb2nc/Makefile.in index 
05629adb61..f10cc0fcae 100644 --- a/src/tools/other/pb2nc/Makefile.in +++ b/src/tools/other/pb2nc/Makefile.in @@ -252,6 +252,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/pb2nc/pb2nc.cc b/src/tools/other/pb2nc/pb2nc.cc index 2561288484..94a7750bad 100644 --- a/src/tools/other/pb2nc/pb2nc.cc +++ b/src/tools/other/pb2nc/pb2nc.cc @@ -58,9 +58,10 @@ // from header files // 019 07/21/23 Prestopnik, J. MET #2615 Add #include to compile // successfully using gcc12 +// 020 08/26/24 Halley Gotway MET #2938 Silence center time warnings +// //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -386,6 +387,7 @@ static bool insert_pbl(float *obs_arr, const float pbl_value, const int pbl_co static int interpolate_by_pressure(int length, float *pres_data, float *var_data); static void interpolate_pqtzuv(float*, float*, float*); static bool is_valid_pb_data(float pb_value); +static void check_fortran_file_id(const int unit, const char *method_name); static void log_merged_tqz_uv(map pqtzuv_map_tq, map pqtzuv_map_uv, map &pqtzuv_map_merged, @@ -431,10 +433,7 @@ void dump_pb_data( msg_typ_ret[i] = keep_message_type(vld_msg_typ_list[i]); } - if (unit > MAX_FORTRAN_FILE_ID || unit < MIN_FORTRAN_FILE_ID) { - mlog << Error << "\n" << method_name - << "Invalid file ID [" << unit << "] between 1 and 99.\n\n"; - } + check_fortran_file_id(unit, method_name); dumppb_(blk_file.c_str(), &unit, dump_dir.c_str(), &len1, prefix.c_str(), &len2, msg_typ_ret); } @@ -469,7 +468,7 @@ int met_main(int argc, char *argv[]) { if (collect_metadata) { // Process each PrepBufr file - for(int i=0; i MAX_FORTRAN_FILE_ID || _file_id < MIN_FORTRAN_FILE_ID) { - mlog << Error << "\nsave_bufr_table_to_file() -> " - << "Invalid file ID [" << 
_file_id << "] between 1 and 99.\n\n"; - } + check_fortran_file_id(_file_id, method_name); openpb_(blk_file, &_file_id); dump_tbl_(blk_file, &_file_id, tbl_filename.c_str(), &len); closepb_(&_file_id); @@ -822,6 +819,7 @@ void get_variable_info(ConcatString blk_file, int unit) { } remove_temp_file(tbl_filename); + return; } @@ -898,8 +896,7 @@ void process_pbfile(int i_pb) { clock_t start_t, end_t, method_start, method_end; start_t = end_t = method_start = method_end = clock(); - IntArray diff_file_times; - int diff_file_time_count; + IntArray unique_msg_ut; StringArray variables_big_nlevels; static const char *method_name_s = "process_pbfile()"; static const char *method_name = "process_pbfile() -> "; @@ -936,7 +933,7 @@ void process_pbfile(int i_pb) { if(dump_flag) { // Check for multiple PrepBufr files - if(pbfile.n_elements() > 1) { + if(pbfile.n() > 1) { mlog << Error << "\n" << method_name << "the \"-dump\" and \"-pbfile\" options may not be " << "used together. Only one Bufr file may be dump " @@ -946,16 +943,12 @@ void process_pbfile(int i_pb) { unit = dump_unit+i_pb; prefix = get_short_name(pbfile[i_pb].c_str()); - dump_pb_data((dump_unit+i_pb), prefix, blk_file, method_name); + dump_pb_data((dump_unit+i_pb), prefix, blk_file, method_name_s); } - // Open the blocked temp PrepBufr file for reading unit = file_unit + i_pb; - if (unit > MAX_FORTRAN_FILE_ID || unit < MIN_FORTRAN_FILE_ID) { - mlog << Error << "\n" << method_name - << "Invalid file ID [" << unit << "] between 1 and 99.\n\n"; - } + check_fortran_file_id(unit, method_name_s); openpb_(blk_file.c_str(), &unit); // Compute the number of PrepBufr records in the current file. 
@@ -1074,15 +1067,14 @@ void process_pbfile(int i_pb) { cape_h = pbl_h = 0; cape_p = pbl_p = bad_data_float; - diff_file_time_count = 0; cycle_minute = missing_cycle_minute; // initialize - // Derive quantities which can be derived from - // P, Q, T, Z, U, V + // Check the number of variables to be derived from: + // P, Q, T, Z, U, V if (n_derive_gc > bufr_derive_cfgs.size()) { - mlog << Debug(3) << "\n" << method_name - << "Skip the derived variables because of not requested (" - << bufr_derive_cfgs.size() << ").\n\n"; + mlog << Debug(3) << method_name + << "No observation variables requested to be derived (" + << bufr_derive_cfgs.size() << ").\n"; } for (int idx=0; idx= debug_level_for_performance) { end_t = clock(); - cout << (end_t-start_t)/double(CLOCKS_PER_SEC) - << " seconds\n"; + cout << (end_t-start_t)/double(CLOCKS_PER_SEC) << " seconds\n"; start_t = clock(); } } @@ -1156,10 +1147,10 @@ void process_pbfile(int i_pb) { << " to " << end_time_str << "\n"; } - else if(file_ut != msg_ut) { - diff_file_time_count++; - if (!diff_file_times.has(msg_ut)) diff_file_times.add(msg_ut); - } + + // Keep track of the unique message reference times, + // searching from newest to oldest + unique_msg_ut.add_uniq(msg_ut, false); // Add minutes by calling IUPVS01(unit, "MINU") if (cycle_minute != missing_cycle_minute) { @@ -1809,7 +1800,7 @@ void process_pbfile(int i_pb) { int n_other_file_obs = 0; int n_other_total_obs = 0; int n_other_hdr_obs = 0; - int var_count = bufr_obs_name_arr.n_elements(); + int var_count = bufr_obs_name_arr.n(); for (int vIdx=0; vIdx= debug_level_for_performance) { end_t = clock(); log_message << (end_t-start_t)/double(CLOCKS_PER_SEC) << " seconds"; - //start_t = clock(); } cout << log_message << "\n"; } - if(0 < diff_file_time_count && 0 < diff_file_times.n_elements()) { - mlog << Warning << "\n" << method_name - << "The observation time should remain the same for " - << "all " << (is_prepbufr ? 
"PrepBufr" : "Bufr") << " messages\n"; - mlog << Warning << method_name << " " - << diff_file_time_count << " messages with different reference time (" - << unix_to_yyyymmdd_hhmmss(file_ut) << "):\n"; - for (int idx=0; idx 1) { + + ConcatString msg_cs; + msg_cs << "Found " << unique_msg_ut.n() << " unique " + << (is_prepbufr ? "PrepBufr" : "Bufr") + << " message reference time(s) from " + << unix_to_yyyymmdd_hhmmss(unique_msg_ut.min()) << " to " + << unix_to_yyyymmdd_hhmmss(unique_msg_ut.max()) << ".\n"; + + // Print warning if the time window was not defined on the command line + if(valid_beg_ut == (unixtime) 0 && + valid_end_ut == (unixtime) 0) { + mlog << Warning << "\n" << method_name + << msg_cs << "\n" + << R"(Set the "-valid_beg" and/or "-valid_end" )" + << "command line options to define the retention " + << "time window.\n\n"; + } + else { + mlog << Debug(3) << msg_cs; } - mlog << Warning << "\n"; } nc_point_obs.write_observation(); - if(mlog.verbosity_level() > 0) cout << "\n" << flush; - mlog << Debug(2) << "Messages processed\t\t\t= " << npbmsg << "\n" << "Rejected based on message type\t\t= " @@ -2039,7 +2037,7 @@ void process_pbfile(int i_pb) { if (cal_cape) { mlog << Debug(3) << "\nDerived CAPE = " << cape_count << "\tZero = " << cape_cnt_zero_values - << "\n\tnot derived: No cape inputs = " << (cape_cnt_no_levels) + << "\n\tnot derived: No cape inputs = " << cape_cnt_no_levels << "\tNo vertical levels = " << cape_cnt_surface_msgs << "\n\tfiltered: " << cape_cnt_missing_values << ", " << cape_cnt_too_big @@ -2072,7 +2070,7 @@ void process_pbfile(int i_pb) { int debug_level = 5; if(mlog.verbosity_level() >= debug_level) { log_message = "Filtered time:"; - for (kk=0; kk MAX_FORTRAN_FILE_ID || unit < MIN_FORTRAN_FILE_ID) { - mlog << Error << "\n" << method_name << " -> " - << "Invalid file ID [" << unit << "] between 1 and 99 for BUFR table.\n\n"; - } + check_fortran_file_id(unit, method_name); get_variable_info(blk_file, unit); // The input PrepBufr 
file is blocked already. unit = dump_unit + i_pb; - if (unit > MAX_FORTRAN_FILE_ID || unit < MIN_FORTRAN_FILE_ID) { - mlog << Error << "\n" << method_name << " -> " - << "Invalid file ID [" << unit << "] between 1 and 99.\n\n"; - } + check_fortran_file_id(unit, method_name); // Compute the number of PrepBufr records in the current file. numpbmsg_new_(blk_file.c_str(), &unit, &npbmsg); @@ -2201,7 +2193,7 @@ void process_pbfile_metadata(int i_pb) { StringArray unchecked_var_list; if (check_all) { - for(i=0; i variable \"" @@ -2401,9 +2393,9 @@ void process_pbfile_metadata(int i_pb) { } } // if (0 == i_read) - if (0 == unchecked_var_list.n_elements()) break; + if (0 == unchecked_var_list.n()) break; - int var_count = unchecked_var_list.n_elements(); + int var_count = unchecked_var_list.n(); for (int vIdx=var_count-1; vIdx>=0; vIdx--) { int nlev2, count; bool has_valid_data; @@ -2454,7 +2446,7 @@ void process_pbfile_metadata(int i_pb) { bool has_prepbufr_vars = false; const char * tmp_var_name; bufr_obs_name_arr.clear(); - for (index=0; index " - << "No PrepBufr messages retained. Nothing to write.\n\n"; + mlog << Warning << "\n" << method_name << "-> " + << "No PrepBufr messages retained. 
No output file written.\n\n"; + // Delete the NetCDF file remove_temp_file(ncfile); - exit(1); + + return; } nc_point_obs.get_obs_vars()->attr_pb2nc = true; @@ -2534,7 +2528,7 @@ void write_netcdf_hdr_data() { StringArray nc_var_unit_arr; StringArray nc_var_desc_arr; map obs_var_map = conf_info.getObsVarMap(); - for(int i=0; i= 0 && (do_all_vars || code < bufr_target_variables.n_elements())); + return(code >= 0 && (do_all_vars || code < bufr_target_variables.n())); } //////////////////////////////////////////////////////////////////////// bool keep_level_category(int category) { - return(conf_info.level_category.n_elements() == 0 || + return(conf_info.level_category.n() == 0 || conf_info.level_category.has(category, false)); } @@ -2822,7 +2816,7 @@ float derive_grib_code(int gc, float *pqtzuv, float *pqtzuv_qty, switch(gc) { // Pressure Reduced to Mean Sea Level - case(prmsl_grib_code): + case prmsl_grib_code: p = (double) pqtzuv[0]; t = (double) pqtzuv[2]; z = (double) pqtzuv[3]; @@ -2833,14 +2827,14 @@ float derive_grib_code(int gc, float *pqtzuv, float *pqtzuv_qty, break; // Humidity mixing ratio - case(mixr_grib_code): + case mixr_grib_code: q = (double) pqtzuv[1]; qty = pqtzuv_qty[1]; result = (float) convert_q_to_w(q); break; // Dewpoint temperature: derived from p and q - case(dpt_grib_code): + case dpt_grib_code: p = (double) pqtzuv[0]; q = (double) pqtzuv[1]; qty = pqtzuv_qty[0]; @@ -2851,7 +2845,7 @@ float derive_grib_code(int gc, float *pqtzuv, float *pqtzuv_qty, break; // Relative humidity - case(rh_grib_code): + case rh_grib_code: p = (double) pqtzuv[0]; q = (double) pqtzuv[1]; t = (double) pqtzuv[2]; @@ -2862,7 +2856,7 @@ float derive_grib_code(int gc, float *pqtzuv, float *pqtzuv_qty, break; // Wind direction (direction wind is coming from): derived from u and v - case(wdir_grib_code): + case wdir_grib_code: u = (double) pqtzuv[4]; v = (double) pqtzuv[5]; qty = pqtzuv_qty[4]; @@ -2871,7 +2865,7 @@ float derive_grib_code(int gc, float *pqtzuv, float 
*pqtzuv_qty, break; // Wind speed: derived from u and v - case(wind_grib_code): + case wind_grib_code: u = (double) pqtzuv[4]; v = (double) pqtzuv[5]; qty = pqtzuv_qty[4]; @@ -2895,8 +2889,8 @@ void display_bufr_variables(const StringArray &all_vars, const StringArray &all_ ConcatString description; ConcatString line_buf; - mlog << Debug(1) << "\n Header variables (" << hdr_arr.n_elements() << ") :\n"; - for(i=0; i pqtzuv_map_tq, selected_levels.add(nint(pqtzuv[0])); } if (start_offset > 0) { - // Replace the interpolated records with common records. - mlog << Error << "\n" << method_name << "Excluded " << start_offset << " records\n"; + + // Replace the interpolated records with common records + mlog << Debug(5) << method_name << "Excluded " << start_offset << " records\n"; + // Find vertical levels with both data float highest_pressure = bad_data_float; for (it = pqtzuv_map_tq.begin(); it!=pqtzuv_map_tq.end(); ++it) { @@ -3318,8 +3314,8 @@ void interpolate_pqtzuv(float *prev_pqtzuv, float *cur_pqtzuv, float *next_pqtzu if ((nint(prev_pqtzuv[0]) == nint(cur_pqtzuv[0])) || (nint(next_pqtzuv[0]) == nint(cur_pqtzuv[0])) || (nint(prev_pqtzuv[0]) == nint(next_pqtzuv[0]))) { - mlog << Error << "\n" << method_name - << " Can't interpolate because of same pressure levels. prev: " + mlog << Debug(9) << method_name + << " can't interpolate because of same pressure levels. 
prev: " << prev_pqtzuv[0] << ", cur: " << cur_pqtzuv[0] << ", next: " << prev_pqtzuv[0] << "\n\n"; } @@ -3359,6 +3355,16 @@ static bool is_valid_pb_data(float pb_value) { //////////////////////////////////////////////////////////////////////// +void check_fortran_file_id(const int unit, const char *method_name) { + if (unit > MAX_FORTRAN_FILE_ID || unit < MIN_FORTRAN_FILE_ID) { + mlog << Error << "\n" << method_name << " -> " + << "Invalid file ID [" << unit << "] between 1 and 99.\n\n"; + exit(1); + } +} + +//////////////////////////////////////////////////////////////////////// + void merge_records(float *first_pqtzuv, map pqtzuv_map_pivot, map pqtzuv_map_aux, map &pqtzuv_map_merged) { @@ -3448,7 +3454,7 @@ void log_tqz_and_uv(map pqtzuv_map_tq, log_array.add(buf.c_str()); } offset = 0; - for (int idx=log_array.n_elements()-1; idx>=0; idx--) { + for (int idx=log_array.n()-1; idx>=0; idx--) { mlog << Debug(PBL_DEBUG_LEVEL) << method_name << "TQZ record: " << offset++ << "\t" << log_array[idx] << "\n"; } @@ -3462,7 +3468,7 @@ void log_tqz_and_uv(map pqtzuv_map_tq, log_array.add(buf.c_str()); } offset = 0; - for (int idx=log_array.n_elements()-1; idx>=0; idx--) { + for (int idx=log_array.n()-1; idx>=0; idx--) { mlog << Debug(PBL_DEBUG_LEVEL) << method_name << " UV record: " << offset++ << "\t" << log_array[idx] << "\n"; } @@ -3495,7 +3501,7 @@ void log_merged_tqz_uv(map pqtzuv_map_tq, log_array.add(buf.c_str()); } int offset = 0; - for (int idx=log_array.n_elements()-1; idx>=0; idx--) { + for (int idx=log_array.n()-1; idx>=0; idx--) { mlog << Debug(PBL_DEBUG_LEVEL) << method_name << " merged: " << offset++ << "\t" << log_array[idx] << "\n"; } @@ -3522,7 +3528,7 @@ void log_pbl_input(int pbl_level, const char *method_name) { int offset = 0; mlog << Debug(PBL_DEBUG_LEVEL) << method_name << "input to calpbl_ (buffer): index, P, Q, T, Z, U, V\n"; - for (int idx=log_array.n_elements()-1; idx>=0; idx--) { + for (int idx=log_array.n()-1; idx>=0; idx--) { mlog << 
Debug(PBL_DEBUG_LEVEL) << method_name << " " << offset++ << "\t" << log_array[idx] << "\n"; } diff --git a/src/tools/other/pb2nc/pb2nc_conf_info.cc b/src/tools/other/pb2nc/pb2nc_conf_info.cc index 2de9d778af..d84a97b038 100644 --- a/src/tools/other/pb2nc/pb2nc_conf_info.cc +++ b/src/tools/other/pb2nc/pb2nc_conf_info.cc @@ -142,9 +142,9 @@ void PB2NCConfInfo::read_config(const char *default_file_name, void PB2NCConfInfo::process_config() { int i; - ConcatString s, mask_name; + ConcatString s; + ConcatString mask_name; StringArray sa; - StringArray * sid_list = 0; Dictionary *dict = (Dictionary *) nullptr; // Dump the contents of the config file @@ -184,10 +184,8 @@ void PB2NCConfInfo::process_config() { // Conf: station_id sa = conf.lookup_string_array(conf_key_station_id); - sid_list = new StringArray [sa.n_elements()]; - for(i=0; i 0) { - - // Parse as a white-space separated string - sa.parse_wsss(plot_grid_string); - - // Search for a named grid - if(sa.n() == 1 && find_grid_by_name(sa[0].c_str(), grid)) { - mlog << Debug(3) << "Use the grid named \"" - << plot_grid_string << "\".\n"; - } - // Parse grid definition - else if(sa.n() > 1 && parse_grid_def(sa, grid)) { - mlog << Debug(3) << "Use the grid defined by string \"" - << plot_grid_string << "\".\n"; - } - // Extract the grid from a gridded data file - else { + if (!build_grid_by_grid_string(plot_grid_string, grid, + "PlotPointObsConfInfo::process_config -> ", false)) { + // Extract the grid from a gridded data file mlog << Debug(3) << "Use the grid defined by file \"" << plot_grid_string << "\".\n"; @@ -499,7 +486,8 @@ void PlotPointObsConfInfo::process_config( // Regrid, if requested if(grid_data_info->regrid().enable) { mlog << Debug(1) << "Regridding field " - << grid_data_info->magic_str() << ".\n"; + << grid_data_info->magic_str() << " using " + << grid_data_info->regrid().get_str() << ".\n"; Grid to_grid(parse_vx_grid(grid_data_info->regrid(), &grid, &grid)); grid_data = met_regrid(grid_data, grid, 
to_grid, diff --git a/src/tools/other/point2grid/Makefile.in b/src/tools/other/point2grid/Makefile.in index 17f72484ab..9c8efa6ea4 100644 --- a/src/tools/other/point2grid/Makefile.in +++ b/src/tools/other/point2grid/Makefile.in @@ -236,6 +236,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/point2grid/point2grid.cc b/src/tools/other/point2grid/point2grid.cc index d11044dc5f..cd24f6bac9 100644 --- a/src/tools/other/point2grid/point2grid.cc +++ b/src/tools/other/point2grid/point2grid.cc @@ -21,15 +21,17 @@ // 001 01-25-21 Halley Gotway MET #1630 Handle zero obs. // 002 07-06-22 Howard Soh METplus-Internal #19 Rename main to met_main // 003 10-03-23 Prestopnik MET #2227 Remove namespace std and netCDF from header files +// 004 05-20-24 Howard Soh MET #2867 Fix -qc ADP bug. +// 005 06-24-24 Halley Gotway MET #2880 Filter obs_quality. +// 006 10-21-24 Halley Gotway MET #3000 Reduce warnings. 
// //////////////////////////////////////////////////////////////////////// - #include #include #include -#include +#include #include "main.h" #include "vx_log.h" @@ -40,6 +42,7 @@ #include "vx_regrid.h" #include "vx_util.h" #include "vx_statistics.h" +#include "var_info_nc_cf.h" #include "nc_obs_util.h" #include "nc_point_obs_in.h" @@ -53,33 +56,29 @@ using namespace std; using namespace netCDF; - //////////////////////////////////////////////////////////////////////// static ConcatString program_name; // Constants -static const int TYPE_UNKNOWN = 0; // Can not process the input file -static const int TYPE_OBS = 1; // MET Point Obs NetCDF (from xxx2nc) -static const int TYPE_NCCF = 2; // CF NetCDF with time and lat/lon variables -static const int TYPE_GOES = 5; -static const int TYPE_GOES_ADP = 6; -static const int TYPE_PYTHON = 7; // MET Point Obs NetCDF from PYTHON - -static const InterpMthd DefaultInterpMthd = InterpMthd::UW_Mean; -static const int DefaultInterpWdth = 2; -static const double DefaultVldThresh = 0.5; +constexpr int TYPE_UNKNOWN = 0; // Can not process the input file +constexpr int TYPE_OBS = 1; // MET Point Obs NetCDF (from xxx2nc) +constexpr int TYPE_NCCF = 2; // CF NetCDF with time and lat/lon variables +constexpr int TYPE_GOES = 5; +constexpr int TYPE_GOES_ADP = 6; +constexpr int TYPE_PYTHON = 7; // MET Point Obs NetCDF from PYTHON -static const float MISSING_LATLON = -999.0; -static const int QC_NA_INDEX = -1; -static const int LEVEL_FOR_PERFORMANCE = 6; +constexpr InterpMthd DefaultInterpMthd = InterpMthd::UW_Mean; +constexpr int DefaultInterpWdth = 2; +constexpr double DefaultVldThresh = 0.5; -static const char * default_config_filename = "MET_BASE/config/Point2GridConfig_default"; +constexpr float MISSING_LATLON = -999.0; +constexpr int QC_NA_INDEX = -1; +constexpr int LEVEL_FOR_PERFORMANCE = 6; -static const string lat_dim_name_list = "x"; // "lat,latitude"; -static const string lon_dim_name_list = "y"; // "lon,longitude"; +constexpr char 
default_config_filename[] = "MET_BASE/config/Point2GridConfig_default"; -static const char * GOES_global_attr_names[] = { +static const vector GOES_global_attr_names = { "naming_authority", "project", "production_site", @@ -110,7 +109,7 @@ static IntArray message_type_list; // Variables for command line arguments static ConcatString InputFilename; static ConcatString OutputFilename; -static ConcatString AdpFilename; +static ConcatString adp_filename; static ConcatString config_filename; static PointToGridConfInfo conf_info; static StringArray FieldSA; @@ -126,6 +125,19 @@ static NcFile *nc_out = (NcFile *) nullptr; static NcDim lat_dim ; static NcDim lon_dim ; +static int adp_qc_high; /* 3 as baseline algorithm, 0 for enterprise algorithm */ +static int adp_qc_medium; /* 1 as baseline algorithm, 1 for enterprise algorithm */ +static int adp_qc_low; /* 0 as baseline algorithm, 2 for enterprise algorithm */ + +enum class GOES_QC { + HIGH = 0, + MEDIUM, + LOW, + NA +}; + +static const ConcatString att_name_values = "flag_values"; +static const ConcatString att_name_meanings = "flag_meanings"; //////////////////////////////////////////////////////////////////////// @@ -134,7 +146,7 @@ static void process_data_file(); static void process_point_file(NcFile *nc_in, MetConfig &config, VarInfo *, const Grid to_grid); #ifdef WITH_PYTHON -static void process_point_python(string python_command, MetConfig &config, +static void process_point_python(const string python_command, MetConfig &config, VarInfo *vinfo, const Grid to_grid, bool use_xarray); #endif static void process_point_nccf_file(NcFile *nc_in, MetConfig &config, @@ -159,9 +171,9 @@ static void set_gaussian_dx(const StringArray &); static void set_gaussian_radius(const StringArray &); static unixtime compute_unixtime(NcVar *time_var, unixtime var_value); -static bool get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, - NcVar var_lat, NcVar var_lon, bool *skip_times); -static bool get_grid_mapping(Grid 
to_grid, IntArray *cellMapping, +static bool get_grid_mapping(const Grid &fr_grid, const Grid &to_grid, IntArray *cellMapping, + NcVar var_lat, NcVar var_lon, vector skip_times); +static bool get_grid_mapping(const Grid &to_grid, IntArray *cellMapping, const IntArray obs_index_array, const int *obs_hids, const float *hdr_lats, const float *hdr_lons); static int get_obs_type(NcFile *nc_in); @@ -171,17 +183,18 @@ static void regrid_nc_variable(NcFile *nc_in, Met2dDataFile *fr_mtddf, static bool keep_message_type(const int mt_index); -static bool has_lat_lon_vars(NcFile *nc_in); +static bool has_lat_lon_vars(const NcFile *nc_in); +static void set_adp_gc_values(NcVar var_adp_qc); //////////////////////////////////////////////////////////////////////// // for GOES 16 // static const int factor_float_to_int = 1000000; -static const char *key_geostationary_data = "MET_GEOSTATIONARY_DATA"; -static const char *dim_name_lat = "lat"; -static const char *dim_name_lon = "lon"; -static const char *var_name_lat = "latitude"; -static const char *var_name_lon = "longitude"; +constexpr char key_geostationary_data[] = "MET_GEOSTATIONARY_DATA"; +constexpr char dim_name_lat[] = "lat"; +constexpr char dim_name_lon[] = "lon"; +constexpr char var_name_lat[] = "latitude"; +constexpr char var_name_lon[] = "longitude"; static const ConcatString vname_dust("Dust"); static const ConcatString vname_smoke("Smoke"); @@ -190,23 +203,22 @@ static IntArray qc_flags; static void process_goes_file(NcFile *nc_in, MetConfig &config, VarInfo *, const Grid fr_grid, const Grid to_grid); static unixtime find_valid_time(NcVar time_var); -static ConcatString get_goes_grid_input(MetConfig config, Grid fr_grid, Grid to_grid); -static void get_grid_mapping(Grid fr_grid, Grid to_grid, - IntArray *cellMapping, ConcatString geostationary_file); -static int get_lat_count(NcFile *); -static int get_lon_count(NcFile *); -static NcVar get_goes_nc_var(NcFile *nc, const ConcatString var_name, +static ConcatString 
get_goes_grid_input(MetConfig config, const Grid fr_grid); +static void get_grid_mapping(const Grid &fr_grid, const Grid &to_grid, + IntArray *cellMapping, const ConcatString &geostationary_file); +static int get_lat_count(const NcFile *); +static int get_lon_count(const NcFile *); +static NcVar get_goes_nc_var(NcFile *nc, const ConcatString &var_name, bool exit_if_error=true); static bool is_time_mismatch(NcFile *nc_in, NcFile *nc_adp); -static ConcatString make_geostationary_filename(Grid fr_grid, Grid to_grid, - ConcatString regrid_name); -static void regrid_goes_variable(NcFile *nc_in, VarInfo *vinfo, +static ConcatString make_geostationary_filename(Grid fr_grid); +static void regrid_goes_variable(NcFile *nc_in, const VarInfo *vinfo, DataPlane &fr_dp, DataPlane &to_dp, Grid fr_grid, Grid to_grid, IntArray *cellMapping, NcFile *nc_adp); static void save_geostationary_data(const ConcatString geostationary_file, const float *latitudes, const float *longitudes, - const GoesImagerData grid_data); -static void set_qc_flags(const StringArray &); + const GoesImagerData &grid_data); +static void set_goes_qc_flags(const StringArray &); //////////////////////////////////////////////////////////////////////// @@ -226,13 +238,13 @@ int met_main(int argc, char *argv[]) { //////////////////////////////////////////////////////////////////////// -const string get_tool_name() { +string get_tool_name() { return "point2grid"; } //////////////////////////////////////////////////////////////////////// -void process_command_line(int argc, char **argv) { +static void process_command_line(int argc, char **argv) { CommandLine cline; static const char *method_name = "process_command_line() -> "; @@ -261,7 +273,9 @@ void process_command_line(int argc, char **argv) { cline.add(set_vld_thresh, "-vld_thresh", 1); cline.add(set_name, "-name", 1); cline.add(set_compress, "-compress", 1); - cline.add(set_qc_flags, "-qc", 1); + cline.add(set_goes_qc_flags, "-goes_qc", 1); + // Also support old 
-qc option + cline.add(set_goes_qc_flags, "-qc", 1); cline.add(set_adp, "-adp", 1); cline.add(set_config, "-config", 1); cline.add(set_prob_cat_thresh, "-prob_cat_thresh", 1); @@ -292,7 +306,7 @@ void process_command_line(int argc, char **argv) { // Check if the input file #ifdef WITH_PYTHON if (use_python) { - int offset = python_command.find("="); + auto offset = python_command.find("="); if (offset == std::string::npos) { mlog << Error << "\n" << method_name << "trouble parsing the python command " << python_command << ".\n\n"; @@ -318,12 +332,10 @@ void process_command_line(int argc, char **argv) { // Check that same variable is required multiple times without -name argument if(VarNameSA.n() == 0) { - VarInfo *vinfo; MetConfig config; - VarInfoFactory v_factory; ConcatString vname; StringArray var_names; - vinfo = v_factory.new_var_info(FileType_NcMet); + VarInfo *vinfo = VarInfoFactory::new_var_info(FileType_NcMet); for(int i=0; iclear(); // Populate the VarInfo object using the config string @@ -365,11 +377,11 @@ void process_command_line(int argc, char **argv) { //////////////////////////////////////////////////////////////////////// -void process_data_file() { - Grid fr_grid, to_grid; +static void process_data_file() { + Grid fr_grid; GrdFileType ftype; ConcatString run_cs; - NcFile *nc_in = (NcFile *) nullptr; + auto nc_in = (NcFile *) nullptr; static const char *method_name = "process_data_file() -> "; // Initialize configuration object @@ -378,24 +390,23 @@ void process_data_file() { config.read_string(FieldSA[0].c_str()); // Note: The command line argument MUST processed before this - if (compress_level < 0) compress_level = config.nc_compression(); + if (compress_level < 0) compress_level = conf_info.conf.nc_compression(); // Get the gridded file type from config string, if present - ftype = parse_conf_file_type(&config); + ftype = parse_conf_file_type(&conf_info.conf); // Open the input file mlog << Debug(1) << "Reading data file: " << InputFilename 
<< "\n"; bool goes_data = false; bool use_python = false; int obs_type; - Met2dDataFileFactory m_factory; - Met2dDataFile *fr_mtddf = (Met2dDataFile *) nullptr; + auto fr_mtddf = (Met2dDataFile *) nullptr; #ifdef WITH_PYTHON string python_command = InputFilename; bool use_xarray = (0 == python_command.find(conf_val_python_xarray)); use_python = use_xarray || (0 == python_command.find(conf_val_python_numpy)); if (use_python) { - int offset = python_command.find("="); + auto offset = python_command.find("="); if (offset == std::string::npos) { mlog << Error << "\n" << method_name << "trouble parsing the python command " << python_command << ".\n\n"; @@ -414,11 +425,11 @@ void process_data_file() { // Get the obs type before opening NetCDF obs_type = get_obs_type(nc_in); goes_data = (obs_type == TYPE_GOES || obs_type == TYPE_GOES_ADP); - + if (obs_type == TYPE_UNKNOWN && ftype == FileType_NcCF) obs_type = TYPE_NCCF; if (obs_type == TYPE_NCCF) setenv(nc_att_met_point_nccf, "yes", 1); // Read the input data file - fr_mtddf = m_factory.new_met_2d_data_file(InputFilename.c_str(), ftype); + fr_mtddf = Met2dDataFileFactory::new_met_2d_data_file(InputFilename.c_str(), ftype); } if(!fr_mtddf) { @@ -431,9 +442,7 @@ void process_data_file() { ftype = fr_mtddf->file_type(); // Setup the VarInfo request object - VarInfoFactory v_factory; - VarInfo *vinfo; - vinfo = v_factory.new_var_info(ftype); + VarInfo *vinfo = VarInfoFactory::new_var_info(ftype); if(!vinfo) { mlog << Error << "\n" << method_name @@ -452,7 +461,7 @@ void process_data_file() { #endif // Determine the "to" grid - to_grid = parse_vx_grid(RGInfo, &fr_grid, &fr_grid); + Grid to_grid = parse_vx_grid(RGInfo, &fr_grid, &fr_grid); mlog << Debug(2) << "Interpolation options: " << "method = " << interpmthd_to_string(RGInfo.method) @@ -460,11 +469,11 @@ void process_data_file() { // Build the run command string run_cs << "Point obs (" << fr_grid.serialize() << ") to " << to_grid.serialize(); - + if (goes_data) { mlog << 
Debug(2) << "Input grid: " << fr_grid.serialize() << "\n"; - ConcatString grid_string = get_goes_grid_input(config, fr_grid, to_grid); - if (grid_string.length() > 0) run_cs << " with " << grid_string; + ConcatString grid_string = get_goes_grid_input(config, fr_grid); + if (!grid_string.empty()) run_cs << " with " << grid_string; } mlog << Debug(2) << "Output grid: " << to_grid.serialize() << "\n"; @@ -497,7 +506,7 @@ void process_data_file() { close_nc(); // Clean up - if(nc_in) { delete nc_in; nc_in = 0; } + if(nc_in) { delete nc_in; nc_in = nullptr; } if(fr_mtddf) { delete fr_mtddf; fr_mtddf = (Met2dDataFile *) nullptr; } if(vinfo) { delete vinfo; vinfo = (VarInfo *) nullptr; } @@ -509,7 +518,7 @@ void process_data_file() { bool get_nc_data_int_array(NcFile *nc, char *var_name, int *data_array, bool stop=true) { bool status = false; - NcVar nc_var = get_nc_var(nc, (char *)var_name, stop); + NcVar nc_var = get_nc_var(nc, var_name, stop); if (IS_INVALID_NC(nc_var)) { if (stop) exit(1); } @@ -536,7 +545,7 @@ bool get_nc_data_int_array(NcFile *nc, const char *var_name, int *data_array, bo // returns true if no error bool get_nc_data_float_array(NcFile *nc, char *var_name, float *data_array) { - NcVar nc_var = get_nc_var(nc, (char *)var_name); + NcVar nc_var = get_nc_var(nc, var_name); if (IS_INVALID_NC(nc_var)) exit(1); bool status = get_nc_data(&nc_var, data_array); @@ -585,24 +594,43 @@ bool get_nc_data_string_array(NcFile *nc, const char *var_name, //////////////////////////////////////////////////////////////////////// -int get_obs_type(NcFile *nc) { +static int get_obs_type(NcFile *nc) { int obs_type = TYPE_UNKNOWN; + MetConfig config; ConcatString att_val_scene_id; ConcatString att_val_project; ConcatString input_type; static const char *method_name = "get_obs_type() -> "; - - bool has_project = get_global_att(nc, (string)"project", att_val_project); - bool has_scene_id = get_global_att(nc, (string)"scene_id", att_val_scene_id); - if( has_scene_id && 
has_project && att_val_project == "GOES" ) { + + bool has_attr_grid = false; + auto vinfo = VarInfoFactory::new_var_info(FileType_NcCF); + for(int i=0; iclear(); + // Populate the VarInfo object using the config string + config.read_string(FieldSA[i].c_str()); + vinfo->set_dict(config); + if (vinfo->grid_attr().is_set()) { + has_attr_grid = true; + break; + } + } + if (vinfo) { delete vinfo; vinfo = (VarInfo *) nullptr; } + + if (has_attr_grid) { + obs_type = TYPE_NCCF; + input_type = "OBS_NCCF"; + } + else if (get_global_att(nc, (string)"scene_id", att_val_scene_id) + && get_global_att(nc, (string)"project", att_val_project) + && att_val_project == "GOES" ) { obs_type = TYPE_GOES; input_type = "GOES"; - if (0 < AdpFilename.length()) { + if (!adp_filename.empty()) { obs_type = TYPE_GOES_ADP; input_type = "GOES_ADP"; - if (!file_exists(AdpFilename.c_str())) { + if (!file_exists(adp_filename.c_str())) { mlog << Error << "\n" << method_name - << "ADP input \"" << AdpFilename << "\" does not exist!\n\n"; + << "ADP input \"" << adp_filename << "\" does not exist!\n\n"; exit(1); } } @@ -624,7 +652,7 @@ int get_obs_type(NcFile *nc) { //////////////////////////////////////////////////////////////////////// // Check the message types -void prepare_message_types(const StringArray hdr_types) { +void prepare_message_types(const StringArray &hdr_types) { static const char *method_name = "prepare_message_types() -> "; message_type_list.clear(); if (0 < conf_info.message_type.n()) { @@ -653,10 +681,9 @@ void prepare_message_types(const StringArray hdr_types) { //////////////////////////////////////////////////////////////////////// -IntArray prepare_qc_array(const IntArray qc_flags, StringArray qc_tables) { +IntArray prepare_qc_array(const StringArray &qc_tables) { IntArray qc_idx_array; - bool has_qc_flags = (qc_flags.n() > 0); - if (has_qc_flags) { + if (qc_flags.n() > 0) { for(int idx=0; idx prepare_qoes_qc_array() { + std::set qc_flags_set; + for(int idx=0; 
idxget_header_data(); + const MetPointHeader *header_data = met_point_obs->get_header_data(); MetPointObsData *obs_data = met_point_obs->get_point_obs_data(); - nhdr = met_point_obs->get_hdr_cnt(); - nobs = met_point_obs->get_obs_cnt(); + int nhdr = met_point_obs->get_hdr_cnt(); + int nobs = met_point_obs->get_obs_cnt(); bool empty_input = (nhdr == 0 && nobs == 0); bool use_var_id = met_point_obs->is_using_var_id(); - float *hdr_lats = new float[nhdr]; - float *hdr_lons = new float[nhdr]; + vector hdr_lats(nhdr, bad_data_float); + vector hdr_lons(nhdr, bad_data_float); IntArray var_index_array; IntArray valid_time_array; StringArray qc_tables = met_point_obs->get_qty_data(); @@ -715,446 +753,440 @@ void process_point_met_data(MetPointData *met_point_obs, MetConfig &config, VarI StringArray hdr_valid_times = header_data->vld_array; hdr_valid_times.sort(); - met_point_obs->get_lats(hdr_lats); - met_point_obs->get_lons(hdr_lons); + met_point_obs->get_lats(hdr_lats.data()); + met_point_obs->get_lons(hdr_lons.data()); // Check the message types prepare_message_types(header_data->typ_array); // Check and read obs_vid and obs_var if exists - bool success_to_read = true; - - if (success_to_read) { - bool has_qc_flags = (qc_flags.n() > 0); - IntArray qc_idx_array = prepare_qc_array(qc_flags, qc_tables); - - // Initialize size and values of output fields - nx = to_grid.nx(); - ny = to_grid.ny(); - to_dp.set_size(nx, ny); - to_dp.set_constant(bad_data_double); - cnt_dp.set_size(nx, ny); - cnt_dp.set_constant(0); - mask_dp.set_size(nx, ny); - mask_dp.set_constant(0); - if (has_prob_thresh || do_gaussian_filter) { - prob_dp.set_size(nx, ny); - prob_dp.set_constant(0); - prob_mask_dp.set_size(nx, ny); - prob_mask_dp.set_constant(0); - } - - // Loop through the requested fields - int obs_count_zero_to, obs_count_non_zero_to; - int obs_count_zero_from, obs_count_non_zero_from; - IntArray *cellMapping = (IntArray *) nullptr; - obs_count_zero_to = obs_count_non_zero_to = 0; - 
obs_count_zero_from = obs_count_non_zero_from = 0; - for(int i=0; i 0); + IntArray qc_idx_array = prepare_qc_array(qc_tables); - var_idx_or_gc = -1; - - // Initialize - vinfo->clear(); + // Initialize size and values of output fields + int nx = to_grid.nx(); + int ny = to_grid.ny(); + to_dp.set_size(nx, ny); + to_dp.set_constant(bad_data_double); + cnt_dp.set_size(nx, ny); + cnt_dp.set_constant(0); + mask_dp.set_size(nx, ny); + mask_dp.set_constant(0); + if (has_prob_thresh || do_gaussian_filter) { + prob_dp.set_size(nx, ny); + prob_dp.set_constant(0); + prob_mask_dp.set_size(nx, ny); + prob_mask_dp.set_constant(0); + } - // Populate the VarInfo object using the config string - config.read_string(FieldSA[i].c_str()); - vinfo->set_dict(config); + // Loop through the requested fields + int obs_count_zero_to = 0; + int obs_count_zero_from = 0; + int obs_count_non_zero_to = 0; + int obs_count_non_zero_from = 0; + vector cellMapping; + for(int i=0; iname(); - bool exit_by_field_name_error = false; - if (vname == "obs_val" || vname == "obs_lvl" || vname == "obs_hgt") { - exit_by_field_name_error = true; - error_msg << "The variable \"" << vname - << "\" exists but is not a valid field name.\n"; + var_idx_or_gc = -1; + + // Initialize + vinfo->clear(); + + // Populate the VarInfo object using the config string + config.read_string(FieldSA[i].c_str()); + vinfo->set_dict(config); + + // Check the variable name + ConcatString error_msg; + vname = vinfo->name(); + bool exit_by_field_name_error = false; + if (vname == "obs_val" || vname == "obs_lvl" || vname == "obs_hgt") { + exit_by_field_name_error = true; + error_msg << "The variable \"" << vname + << "\" exists but is not a valid field name.\n"; + } + else { + if (use_var_id) { + if (!var_names.has(vname, var_idx_or_gc)) { + exit_by_field_name_error = true; + error_msg << "The variable \"" << vname << "\" is not available.\n"; + } } else { - if (use_var_id) { - if (!var_names.has(vname, var_idx_or_gc)) { + const int 
TMP_BUF_LEN = 128; + char grib_code[TMP_BUF_LEN + 1]; + var_idx_or_gc = atoi(vname.c_str()); + snprintf(grib_code, TMP_BUF_LEN, "%d", var_idx_or_gc); + if (vname != grib_code) { + ConcatString var_id = conf_info.get_var_id(vname); + if( var_id.nonempty() ) { + var_idx_or_gc = atoi(var_id.c_str()); + snprintf(grib_code, TMP_BUF_LEN, "%d", var_idx_or_gc); + } + else { exit_by_field_name_error = true; - error_msg << "The variable \"" << vname << "\" is not available.\n"; + error_msg << "Invalid GRIB code [" << vname << "]\n"; } } else { - const int TMP_BUF_LEN = 128; - char grib_code[TMP_BUF_LEN + 1]; - var_idx_or_gc = atoi(vname.c_str()); - snprintf(grib_code, TMP_BUF_LEN, "%d", var_idx_or_gc); - if (vname != grib_code) { - ConcatString var_id = conf_info.get_var_id(vname); - if( var_id.nonempty() ) { - var_idx_or_gc = atoi(var_id.c_str()); - snprintf(grib_code, TMP_BUF_LEN, "%d", var_idx_or_gc); - } - else { - exit_by_field_name_error = true; - error_msg << "Invalid GRIB code [" << vname << "]\n"; + bool not_found_grib_code = true; + for (int idx=0; idxobs_ids[idx]) { + not_found_grib_code = false; + break; } } - else { - bool not_found_grib_code = true; - for (idx=0; idxobs_ids[idx]) { - not_found_grib_code = false; - break; - } - } - if (not_found_grib_code) { - exit_by_field_name_error = true; - error_msg << "No data for the GRIB code [" << vname << "]\n"; - } + if (not_found_grib_code) { + exit_by_field_name_error = true; + error_msg << "No data for the GRIB code [" << vname << "]\n"; } } } + } - if (exit_by_field_name_error) { - ConcatString log_msg; - if (use_var_id) { - for (idx=0; idxobs_ids[idx])) { - grib_codes.add(obs_data->obs_ids[idx]); - if (0 < idx) log_msg << ", "; - log_msg << obs_data->obs_ids[idx]; - } - } - } - if (empty_input) { - mlog << Warning << "\n" << method_name - << error_msg << "\tBut ignored because of empty input\n\n"; - } - else { - mlog << Error << "\n" << method_name - << error_msg - << "Try setting the \"name\" in the \"-field\" 
command line option to one of the available names:\n" - << "\t" << log_msg << "\n\n"; - exit(1); + if (exit_by_field_name_error) { + ConcatString log_msg; + if (use_var_id) { + for (int idx=0; idxvalid(); - if (valid_time == 0) valid_time = conf_info.valid_time; - requested_valid_time = valid_time; - if (0 < valid_time) { - valid_beg_ut = valid_end_ut = valid_time; - if (!is_bad_data(conf_info.beg_ds)) valid_beg_ut += conf_info.beg_ds; - if (!is_bad_data(conf_info.end_ds)) valid_end_ut += conf_info.end_ds; - for(idx=0; idxobs_ids[idx])) { + grib_codes.add(obs_data->obs_ids[idx]); + if (0 < idx) log_msg << ", "; + log_msg << obs_data->obs_ids[idx]; } } - valid_time_array.add(bad_data_int); // added dummy entry + } + if (empty_input) { + mlog << Debug(2) << method_name + << error_msg << "\tBut ignored because of empty input\n"; } else { - valid_time_from_config = false; - // Set the latest available valid time - valid_time = 0; - for(idx=0; idx valid_time) valid_time = obs_time; + mlog << Error << "\n" << method_name + << error_msg + << "Try setting the \"name\" in the \"-field\" command line option to one of the available names:\n" + << "\t" << log_msg << "\n\n"; + exit(1); + } + } + + // Check the time range. Apply the time window + bool valid_time_from_config = true; + unixtime valid_beg_ut; + unixtime valid_end_ut; + unixtime obs_time; + + valid_time_array.clear(); + valid_time = vinfo->valid(); + if (valid_time == 0) valid_time = conf_info.valid_time; + requested_valid_time = valid_time; + if (0 < valid_time) { + valid_beg_ut = valid_end_ut = valid_time; + if (!is_bad_data(conf_info.beg_ds)) valid_beg_ut += conf_info.beg_ds; + if (!is_bad_data(conf_info.end_ds)) valid_end_ut += conf_info.end_ds; + for(int idx=0; idx valid_time) valid_time = obs_time; } + } + mlog << Debug(3) << method_name << "valid_time from " + << (valid_time_from_config ? 
"config" : "input data") << ": " + << unix_to_yyyymmdd_hhmmss(valid_time) << "\n"; - var_index_array.clear(); - // Select output variable name - vname = (VarNameSA.n() == 0) - ? conf_info.get_var_name(vinfo->name()) - : conf_info.get_var_name(VarNameSA[i]); - mlog << Debug(4) << method_name - << "var: " << vname << ", index: " << var_idx_or_gc << ".\n"; - - var_count = var_count2 = to_count = 0; - filtered_by_time = filtered_by_msg_type = filtered_by_qc = 0; - for (idx=0; idx < nobs; idx++) { - if (var_idx_or_gc == obs_data->obs_ids[idx]) { - var_count2++; - hdr_idx = obs_data->obs_hids[idx]; - if (0 < valid_time_array.n() && - !valid_time_array.has(header_data->vld_idx_array[hdr_idx])) { - filtered_by_time++; - continue; - } + to_dp.set_init(valid_time); + to_dp.set_valid(valid_time); + cnt_dp.set_init(valid_time); + cnt_dp.set_valid(valid_time); + mask_dp.set_init(valid_time); + mask_dp.set_valid(valid_time); + if (has_prob_thresh || do_gaussian_filter) { + prob_dp.set_init(valid_time); + prob_dp.set_valid(valid_time); + prob_mask_dp.set_init(valid_time); + prob_mask_dp.set_valid(valid_time); + } - if(!keep_message_type(header_data->typ_idx_array[hdr_idx])) { - filtered_by_msg_type++; - continue; - } + var_index_array.clear(); + // Select output variable name + vname = (VarNameSA.n() == 0) + ? 
conf_info.get_var_name(vinfo->name()) + : conf_info.get_var_name(VarNameSA[i]); + mlog << Debug(4) << method_name + << "var: " << vname << ", index: " << var_idx_or_gc << ".\n"; + + int var_count = 0; + int var_count2 = 0; + int to_count = 0; + int filtered_by_time = 0; + int filtered_by_msg_type = 0; + int filtered_by_qc = 0; + for (int idx=0; idx < nobs; idx++) { + if (var_idx_or_gc == obs_data->obs_ids[idx]) { + var_count2++; + hdr_idx = obs_data->obs_hids[idx]; + if (0 < valid_time_array.n() && + !valid_time_array.has(header_data->vld_idx_array[hdr_idx])) { + filtered_by_time++; + continue; + } - // Filter by QC flag - if (has_qc_flags && !qc_idx_array.has(obs_data->obs_qids[idx])) { - filtered_by_qc++; - continue; - } + if(!keep_message_type(header_data->typ_idx_array[hdr_idx])) { + filtered_by_msg_type++; + continue; + } - var_index_array.add(idx); - var_count++; - if (is_eq(obs_data->obs_vals[idx], 0.)) obs_count_zero_from++; - else obs_count_non_zero_from++; + // Filter by QC flag (-qc command line option) + if (has_qc_flags && !qc_idx_array.has(obs_data->obs_qids[idx])) { + filtered_by_qc++; + continue; } + + // Filter by QC inclusion/exclusion lists (obs_quality_inc/exc config option) + if ((conf_info.obs_qty_inc.n() > 0 && + !conf_info.obs_qty_inc.has(obs_data->get_obs_qty(idx))) || + (conf_info.obs_qty_exc.n() > 0 && + conf_info.obs_qty_exc.has(obs_data->get_obs_qty(idx)))) { + filtered_by_qc++; + continue; + } + + var_index_array.add(idx); + var_count++; + if (is_eq(obs_data->obs_vals[idx], (float)0.)) obs_count_zero_from++; + else obs_count_non_zero_from++; } + } - if (cellMapping) { - for (idx=0; idx<(nx*ny); idx++) cellMapping[idx].clear(); - delete [] cellMapping; + cellMapping.clear(); + cellMapping.resize(nx * ny); + if( get_grid_mapping(to_grid, cellMapping.data(), var_index_array, + obs_data->obs_hids.data(), + hdr_lats.data(), hdr_lons.data()) ) { + int from_index; + IntArray cellArray; + NumArray dataArray; + int offset = 0; + int valid_count 
= 0; + int censored_count = 0; + double data_value; + double from_min_value = 10e10; + double from_max_value = -10e10; + + // Initialize counter and output fields + to_dp.set_constant(bad_data_double); + cnt_dp.set_constant(0); + mask_dp.set_constant(0); + if (has_prob_thresh || do_gaussian_filter) { + prob_dp.set_constant(0); + prob_mask_dp.set_constant(0); } - cellMapping = new IntArray[nx * ny]; - if( get_grid_mapping(to_grid, cellMapping, var_index_array, - obs_data->obs_hids, hdr_lats, hdr_lons) ) { - int from_index; - IntArray cellArray; - NumArray dataArray; - int offset = 0; - int valid_count = 0; - int absent_count = 0; - int censored_count = 0; - int qc_filtered_count = 0; - int adp_qc_filtered_count = 0; - float data_value; - float from_min_value = 10e10; - float from_max_value = -10e10; - - // Initialize counter and output fields - to_count = 0; - to_dp.set_constant(bad_data_double); - cnt_dp.set_constant(0); - mask_dp.set_constant(0); - if (has_prob_thresh || do_gaussian_filter) { - prob_dp.set_constant(0); - prob_mask_dp.set_constant(0); - } - for (int x_idx = 0; x_idxget_obs_val(from_index); - if (is_bad_data(data_value)) continue; - - if(mlog.verbosity_level() >= 4) { - if (from_min_value > data_value) from_min_value = data_value; - if (from_max_value < data_value) from_max_value = data_value; - } + for (int x_idx = 0; x_idxget_obs_val(from_index); + if (is_bad_data(data_value)) continue; + + if(mlog.verbosity_level() >= 4) { + if (from_min_value > data_value) from_min_value = data_value; + if (from_max_value < data_value) from_max_value = data_value; + } - for(int ic=0; iccensor_thresh().n(); ic++) { - // Break out after the first match. - if(vinfo->censor_thresh()[ic].check(data_value)) { - data_value = vinfo->censor_val()[ic]; - censored_count++; - break; - } + for(int ic=0; iccensor_thresh().n(); ic++) { + // Break out after the first match. 
+ if(vinfo->censor_thresh()[ic].check(data_value)) { + data_value = vinfo->censor_val()[ic]; + censored_count++; + break; } + } - dataArray.add(data_value); - valid_count++; + dataArray.add(data_value); + valid_count++; + } + if (0 < valid_count) to_count++; + + int data_count = dataArray.n(); + if (0 < data_count) { + double to_value; + if (RGInfo.method == InterpMthd::Min) to_value = dataArray.min(); + else if (RGInfo.method == InterpMthd::Max) to_value = dataArray.max(); + else if (RGInfo.method == InterpMthd::Median) { + dataArray.sort_array(); + to_value = dataArray[data_count/2]; + if (0 == data_count % 2) + to_value = (float)(to_value + dataArray[(data_count/2)+1])/2; + } + else to_value = dataArray.sum() / data_count; + + if (is_eq(to_value, 0.)) obs_count_zero_to++; + else obs_count_non_zero_to++; + + cnt_dp.set(data_count, x_idx, y_idx); + mask_dp.set(1, x_idx, y_idx); + to_dp.set(to_value, x_idx, y_idx); + if ((has_prob_thresh && prob_cat_thresh.check(to_value)) + || (do_gaussian_filter && !has_prob_thresh)) { + prob_dp.set(1, x_idx, y_idx); + prob_mask_dp.set(1, x_idx, y_idx); } - if (0 < valid_count) to_count++; - - int data_count = dataArray.n(); - if (0 < data_count) { - float to_value; - if (RGInfo.method == InterpMthd::Min) to_value = dataArray.min(); - else if (RGInfo.method == InterpMthd::Max) to_value = dataArray.max(); - else if (RGInfo.method == InterpMthd::Median) { - dataArray.sort_array(); - to_value = dataArray[data_count/2]; - if (0 == data_count % 2) - to_value = (to_value + dataArray[(data_count/2)+1])/2; - } - else to_value = dataArray.sum() / data_count; - - if (is_eq(to_value, 0.)) obs_count_zero_to++; - else obs_count_non_zero_to++; - - cnt_dp.set(data_count, x_idx, y_idx); - mask_dp.set(1, x_idx, y_idx); - to_dp.set(to_value, x_idx, y_idx); - if ((has_prob_thresh && prob_cat_thresh.check(to_value)) - || (do_gaussian_filter && !has_prob_thresh)) { - prob_dp.set(1, x_idx, y_idx); - prob_mask_dp.set(1, x_idx, y_idx); - } - if (1 < 
data_count) { - mlog << Debug(9) << method_name - << " to_value:" << to_value - << " at " << x_idx << "," << y_idx - << ", max: " << dataArray.max() - << ", min: " << dataArray.min() - << ", mean: " << dataArray.sum()/data_count - << " from " << data_count << " data values.\n"; - } - mlog << Debug(8) << method_name << "data at " << x_idx << "," << y_idx - << ", value: " << to_value << "\n"; + if (1 < data_count) { + mlog << Debug(9) << method_name + << " to_value: " << to_value + << " at " << x_idx << "," << y_idx + << ", max: " << dataArray.max() + << ", min: " << dataArray.min() + << ", mean: " << dataArray.sum()/data_count + << " from " << data_count << " data values.\n"; } + mlog << Debug(8) << method_name << "data at " << x_idx << "," << y_idx + << ", value: " << to_value << "\n"; } } } } + } - // Write the regridded data - write_nc(to_dp, to_grid, vinfo, vname.c_str()); + // Write the regridded data + write_nc(to_dp, to_grid, vinfo, vname.c_str()); - vname_cnt = vname; - vname_cnt << "_cnt"; - vname_mask = vname; - vname_mask << "_mask"; + ConcatString vname_cnt = vname; + vname_cnt << "_cnt"; + ConcatString vname_mask = vname; + vname_mask << "_mask"; - ConcatString tmp_long_name; - ConcatString var_long_name = vinfo->long_name(); - ConcatString dim_string = "(*,*)"; + ConcatString var_long_name = vinfo->long_name(); + ConcatString dim_string = "(*,*)"; - tmp_long_name = vname_cnt; - tmp_long_name << dim_string; - vinfo->set_long_name(tmp_long_name.c_str()); - write_nc_int(cnt_dp, to_grid, vinfo, vname_cnt.c_str()); + ConcatString tmp_long_name = vname_cnt; + tmp_long_name << dim_string; + vinfo->set_long_name(tmp_long_name.c_str()); + write_nc_int(cnt_dp, to_grid, vinfo, vname_cnt.c_str()); - tmp_long_name = vname_mask; - tmp_long_name << dim_string; - vinfo->set_long_name(tmp_long_name.c_str()); - write_nc_int(mask_dp, to_grid, vinfo, vname_mask.c_str()); + tmp_long_name = vname_mask; + tmp_long_name << dim_string; + 
vinfo->set_long_name(tmp_long_name.c_str()); + write_nc_int(mask_dp, to_grid, vinfo, vname_mask.c_str()); - if (has_prob_thresh || do_gaussian_filter) { - ConcatString vname_prob = vname; - vname_prob << "_prob_" << prob_cat_thresh.get_abbr_str(); - ConcatString vname_prob_mask = vname_prob; - vname_prob_mask << "_mask"; + if (has_prob_thresh || do_gaussian_filter) { + ConcatString vname_prob = vname; + vname_prob << "_prob_" << prob_cat_thresh.get_abbr_str(); + ConcatString vname_prob_mask = vname_prob; + vname_prob_mask << "_mask"; - if (do_gaussian_filter) interp_gaussian_dp(prob_dp, RGInfo.gaussian, RGInfo.vld_thresh); + if (do_gaussian_filter) interp_gaussian_dp(prob_dp, RGInfo.gaussian, RGInfo.vld_thresh); - tmp_long_name = vname_prob; - tmp_long_name << dim_string; - vinfo->set_long_name(tmp_long_name.c_str()); - write_nc(prob_dp, to_grid, vinfo, vname_prob.c_str()); - if (do_gaussian_filter) { - NcVar prob_var = get_var(nc_out, vname_prob.c_str()); - if (IS_VALID_NC(prob_var)) { - add_att(&prob_var, "gaussian_radius", RGInfo.gaussian.radius); - add_att(&prob_var, "gaussian_dx", RGInfo.gaussian.dx); - add_att(&prob_var, "trunc_factor", RGInfo.gaussian.trunc_factor); - } + tmp_long_name = vname_prob; + tmp_long_name << dim_string; + vinfo->set_long_name(tmp_long_name.c_str()); + write_nc(prob_dp, to_grid, vinfo, vname_prob.c_str()); + if (do_gaussian_filter) { + NcVar prob_var = get_var(nc_out, vname_prob.c_str()); + if (IS_VALID_NC(prob_var)) { + add_att(&prob_var, "gaussian_radius", RGInfo.gaussian.radius); + add_att(&prob_var, "gaussian_dx", RGInfo.gaussian.dx); + add_att(&prob_var, "trunc_factor", RGInfo.gaussian.trunc_factor); } - - tmp_long_name = vname_prob_mask; - tmp_long_name << dim_string; - vinfo->set_long_name(tmp_long_name.c_str()); - write_nc_int(prob_mask_dp, to_grid, vinfo, vname_prob_mask.c_str()); } - vinfo->set_long_name(var_long_name.c_str()); - mlog << Debug(7) << method_name << "obs_count_zero_to: " << obs_count_zero_to - << ", 
obs_count_non_zero_to: " << obs_count_non_zero_to << "\n"; + tmp_long_name = vname_prob_mask; + tmp_long_name << dim_string; + vinfo->set_long_name(tmp_long_name.c_str()); + write_nc_int(prob_mask_dp, to_grid, vinfo, vname_prob_mask.c_str()); + } + vinfo->set_long_name(var_long_name.c_str()); + + mlog << Debug(7) << method_name << "obs_count_zero_to: " << obs_count_zero_to + << ", obs_count_non_zero_to: " << obs_count_non_zero_to << "\n"; - ConcatString log_msg; - log_msg << "Filtered by time: " << filtered_by_time; - if (0 < requested_valid_time) { - log_msg << " [" << unix_to_yyyymmdd_hhmmss(requested_valid_time) << "]"; + ConcatString log_msg; + int filtered_count = filtered_by_msg_type + filtered_by_qc + filtered_by_time; + log_msg << "Filtered " << filtered_count << " of " << var_count2 + << " observations by time: " << filtered_by_time; + if (0 < requested_valid_time) { + log_msg << " [" << unix_to_yyyymmdd_hhmmss(requested_valid_time) << "]"; + } + log_msg << ", by message type: " << filtered_by_msg_type; + if (0 < filtered_by_msg_type) { + log_msg << " [" << write_css(conf_info.message_type) << "]"; + } + log_msg << ", and by QC: " << filtered_by_qc; + if (0 < filtered_by_qc) { + if (0 < qc_flags.n()) { + log_msg << " [-qc " + << write_css(qc_flags) + << "]"; } - log_msg << ", by msg_type: " << filtered_by_msg_type; - if (0 < filtered_by_msg_type) { - log_msg << " ["; - for(idx=0; idx 0) log_msg << ","; - log_msg << conf_info.message_type[idx]; - } - log_msg << "]"; + if (0 < conf_info.obs_qty_inc.n()) { + log_msg << " [obs_quality_inc = " + << write_css(conf_info.obs_qty_inc) + << "]"; } - log_msg << ", by QC: " << filtered_by_qc; - if (0 < filtered_by_qc) { - log_msg << " ["; - for(idx=0; idx 0) log_msg << ","; - log_msg << qc_flags[idx]; - } - log_msg << "]"; + if (0 < conf_info.obs_qty_exc.n()) { + log_msg << " [obs_quality_exc = " + << write_css(conf_info.obs_qty_exc) + << "]"; } - log_msg << ", out of " << var_count2; - int filtered_count = 
filtered_by_msg_type + filtered_by_qc + requested_valid_time; - if (0 == var_count) { - if (0 == filtered_count) { - mlog << Warning << "\n" << method_name - << "No valid data for the variable [" - << vinfo->name() << "]\n\n"; - } - else { - mlog << Warning << "\n" << method_name - << "No valid data after filtering.\n\t" - << log_msg << ".\n\n"; - } + } + log_msg << "."; + if (0 == var_count) { + if (0 == filtered_count) { + mlog << Debug(2) << method_name + << "No valid data for the variable [" + << vinfo->name() << "]\n"; } else { - mlog << Debug(2) << method_name << "var_count=" << var_count - << ", grid: " << to_count << " out of " << (nx * ny) << " " - << (0 < filtered_count ? log_msg.c_str() : " ") << "\n"; + mlog << Debug(2) << method_name + << "No valid data after filtering.\n\t" + << log_msg << "\n"; } - } // end for i - - if (cellMapping) { - delete [] cellMapping; cellMapping = (IntArray *) nullptr; } - } - - delete [] hdr_lats; - delete [] hdr_lons; + else { + mlog << Debug(3) << "Using " << var_count << " " + << vinfo->name() << " observations to populate " << to_count + << " of " << to_grid.nxy() << " grid points.\n"; + if (0 < filtered_count ) mlog << log_msg << "\n"; + } + } // end for i return; } //////////////////////////////////////////////////////////////////////// -void process_point_file(NcFile *nc_in, MetConfig &config, VarInfo *vinfo, - const Grid to_grid) { - ConcatString vname, vname_cnt, vname_mask; - DataPlane fr_dp, to_dp; - DataPlane cnt_dp, mask_dp; - DataPlane prob_dp, prob_mask_dp; - NcVar var_obs_gc, var_obs_var; - +static void process_point_file(NcFile *nc_in, MetConfig &config, VarInfo *vinfo, + const Grid to_grid) { + ConcatString vname; clock_t start_clock = clock(); - - unixtime requested_valid_time, valid_time; static const char *method_name = "process_point_file() -> "; static const char *method_name_s = "process_point_file()"; @@ -1177,7 +1209,7 @@ void process_point_file(NcFile *nc_in, MetConfig &config, VarInfo *vinfo, 
nc_point_obs.close(); mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; } @@ -1185,21 +1217,10 @@ void process_point_file(NcFile *nc_in, MetConfig &config, VarInfo *vinfo, #ifdef WITH_PYTHON -void process_point_python(string python_command, MetConfig &config, VarInfo *vinfo, - const Grid to_grid, bool use_xarray) { - int idx, hdr_idx; - ConcatString vname, vname_cnt, vname_mask; - DataPlane fr_dp, to_dp; - DataPlane cnt_dp, mask_dp; - DataPlane prob_dp, prob_mask_dp; - NcVar var_obs_gc, var_obs_var; - +static void process_point_python(const string python_command, MetConfig &config, + VarInfo *vinfo, const Grid to_grid, bool use_xarray) { clock_t start_clock = clock(); - bool has_prob_thresh = !prob_cat_thresh.check(bad_data_double); - - unixtime requested_valid_time, valid_time; static const char *method_name = "process_point_python() -> "; - static const char *method_name_s = "process_point_python()"; // Check for at least one configuration string if(FieldSA.n() < 1) { @@ -1225,7 +1246,7 @@ void process_point_python(string python_command, MetConfig &config, VarInfo *vin met_point_file.close(); mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; return; } @@ -1233,24 +1254,73 @@ void process_point_python(string python_command, MetConfig &config, VarInfo *vin //////////////////////////////////////////////////////////////////////// -void process_point_nccf_file(NcFile *nc_in, MetConfig &config, - VarInfo *vinfo, Met2dDataFile *fr_mtddf, - const Grid to_grid) { - ConcatString vname, vname_cnt, vname_mask; +static void process_point_nccf_file(NcFile *nc_in, MetConfig &config, + VarInfo *vinfo, Met2dDataFile *fr_mtddf, + const Grid to_grid) { + ConcatString vname; DataPlane fr_dp, to_dp; DataPlane cnt_dp, 
mask_dp; unixtime valid_beg_ut, valid_end_ut; - bool *skip_times = 0; - double *valid_times = 0; + vector skip_times; int filtered_by_time = 0; + int time_from_size = 1; clock_t start_clock = clock(); bool opt_all_attrs = false; Grid fr_grid = fr_mtddf->grid(); - int from_size = fr_grid.nx() * fr_grid.ny(); static const char *method_name = "process_point_nccf_file() -> "; - NcVar var_lat = get_nc_var_lat(nc_in); - NcVar var_lon = get_nc_var_lon(nc_in); + NcVar var_lat; + NcVar var_lon; + bool user_defined_latlon = false; + ConcatString lat_vname = conf_info.get_var_name(conf_key_lat_vname); + ConcatString lon_vname = conf_info.get_var_name(conf_key_lon_vname); + + if (lat_vname != conf_key_lat_vname && lon_vname != conf_key_lon_vname) { + if (has_var(nc_in, lat_vname.c_str())) { + var_lat = get_nc_var(nc_in, lat_vname.c_str()); + } + if (has_var(nc_in, lon_vname.c_str())) { + var_lon = get_nc_var(nc_in, lon_vname.c_str()); + } + if (IS_INVALID_NC(var_lat)) { + mlog << Error << "\n" << method_name + << "can not find the latitude variable (" << lat_vname + << ") from the config file (" << config_filename << ").\n\n"; + exit(1); + } + else if (IS_INVALID_NC(var_lon)) { + mlog << Error << "\n" << method_name + << "can not find the longitude variable (" << lon_vname + << ") from the config file (" << config_filename << ").\n\n"; + exit(1); + } + else user_defined_latlon = true; + } + // Find lat/lon variables from the coordinates attribue + if (0 < FieldSA.n() && !user_defined_latlon) { + ConcatString coordinates_value; + auto var_info = VarInfoNcCF(*(VarInfoNcCF *)vinfo); + // Initialize + var_info.clear(); + // Populate the VarInfo object using the config string + config.read_string(FieldSA[0].c_str()); + var_info.set_dict(config); + NcVar var_data = get_nc_var(nc_in, var_info.name().c_str()); + if (get_nc_att_value(&var_data, coordinates_att_name, coordinates_value)) { + StringArray sa = coordinates_value.split(" "); + ConcatString units; + for (int idx=0; idx 
valid_times(time_from_size, bad_data_double); + if (get_nc_data(&time_var, valid_times.data())) { int sec_per_unit = 0; bool no_leap_year = false; - unixtime ref_ut = (unixtime) 0; + auto ref_ut = (unixtime) 0; unixtime tmp_time; if( conf_info.valid_time > 0 ) { if (!is_bad_data(conf_info.beg_ds)) valid_beg_ut += conf_info.beg_ds; if (!is_bad_data(conf_info.end_ds)) valid_end_ut += conf_info.end_ds; ref_ut = get_reference_unixtime(&time_var, sec_per_unit, no_leap_year); } - for (int i=0; i 0 ) { tmp_time = add_to_unixtime(ref_ut, sec_per_unit, valid_times[i], no_leap_year); skip_times[i] = (valid_beg_ut > tmp_time || tmp_time > valid_end_ut); - if( skip_times[i]) filtered_by_time++; + if( skip_times[i] ) filtered_by_time++; } else skip_times[i] = false; if (max_time < valid_times[i]) max_time = valid_times[i]; @@ -1304,29 +1380,66 @@ void process_point_nccf_file(NcFile *nc_in, MetConfig &config, else valid_time = find_valid_time(time_var); } to_dp.set_size(to_grid.nx(), to_grid.ny()); - IntArray *cellMapping = new IntArray[to_grid.nx() * to_grid.ny()]; - get_grid_mapping(fr_grid, to_grid, cellMapping, var_lat, var_lon, skip_times); - if( skip_times ) delete [] skip_times; - if( valid_times ) delete [] valid_times; + vector var_cell_mapping; + vector cellMapping(to_grid.nx() * to_grid.ny()); + get_grid_mapping(fr_grid, to_grid, cellMapping.data(), var_lat, var_lon, skip_times); // Loop through the requested fields for(int i=0; iclear(); + var_cell_mapping.clear(); // Populate the VarInfo object using the config string config.read_string(FieldSA[i].c_str()); vinfo->set_dict(config); + NcVar var_data = get_nc_var(nc_in, vinfo->name().c_str()); + ConcatString coordinates_value; + if (!user_defined_latlon && get_nc_att_value(&var_data, coordinates_att_name, coordinates_value)) { + StringArray sa = coordinates_value.split(" "); + int count = sa.n_elements(); + if (count >= 2) { + bool match_lat = false; + bool match_lon = false; + for (int idx=0; idx 0 ? 
+ var_cell_mapping.data() : + cellMapping.data())); // List range of data values if(mlog.verbosity_level() >= 2) { - double fr_dmin, fr_dmax, to_dmin, to_dmax; + double fr_dmin; + double fr_dmax; + double to_dmin; + double to_dmax; fr_dp.data_range(fr_dmin, fr_dmax); to_dp.data_range(to_dmin, to_dmax); mlog << Debug(2) << "Range of data (" << FieldSA[i] << ")\n" @@ -1346,11 +1459,11 @@ void process_point_nccf_file(NcFile *nc_in, MetConfig &config, write_nc(to_dp, to_grid, vinfo, vname.c_str()); NcVar to_var = get_nc_var(nc_out, vname.c_str()); - NcVar var_data = get_nc_var(nc_in, vinfo->name().c_str()); bool has_prob_thresh = !prob_cat_thresh.check(bad_data_double); if (has_prob_thresh || do_gaussian_filter) { - DataPlane prob_dp, prob_mask_dp; + DataPlane prob_dp; + DataPlane prob_mask_dp; ConcatString vname_prob = vname; vname_prob << "_prob_" << prob_cat_thresh.get_abbr_str(); int nx = to_dp.nx(); @@ -1361,7 +1474,7 @@ void process_point_nccf_file(NcFile *nc_in, MetConfig &config, prob_dp.set_constant(0); for (int x=0; xgrid(); static const char *method_name = "regrid_nc_variable() -> "; @@ -1420,10 +1531,7 @@ void regrid_nc_variable(NcFile *nc_in, Met2dDataFile *fr_mtddf, << InputFilename << "\"\n\n"; exit(1); } - - int from_lat_cnt = fr_grid.ny(); - int from_lon_cnt = fr_grid.nx(); - int from_data_size = from_lat_cnt * from_lon_cnt; + if(!fr_mtddf->data_plane(*vinfo, fr_dp)) { mlog << Error << "\n" << method_name << "Trouble reading data \"" @@ -1431,120 +1539,123 @@ void regrid_nc_variable(NcFile *nc_in, Met2dDataFile *fr_mtddf, << InputFilename << "\"\n\n"; exit(1); } - else { - bool is_to_north = !fr_grid.get_swap_to_north(); - float *from_data = new float[from_data_size]; - for (int xIdx=0; xIdx from_data(from_data_size, bad_data_float); + for (int xIdx=0; xIdx= 4) { - if (from_min_value > data_value) from_min_value = data_value; - if (from_max_value < data_value) from_max_value = data_value; - } + } + mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << 
"took " + << get_exe_duration(start_clock) << " seconds for read variable\n"; + + int from_index; + int no_map_cnt = 0; + int missing_cnt = 0; + int non_missing_cnt = 0; + double data_value; + IntArray cellArray; + NumArray dataArray; + double from_min_value = 10e10; + double from_max_value = -10e10; + int to_lat_cnt = to_grid.ny(); + int to_lon_cnt = to_grid.nx(); + + missing_cnt = non_missing_cnt = 0; + to_dp.set_constant(bad_data_double); + + for (int xIdx=0; xIdx= 9) { - double to_lat, to_lon; - to_grid.xy_to_latlon(xIdx,yIdx, to_lat, to_lon); - to_lon *= -1; - if (1 == data_cnt) - mlog << Debug(9) << method_name - << "value: " << to_value << " to (" << to_lon << ", " << to_lat - << ") from offset " << from_index << ".\n"; - else - mlog << Debug(9) << method_name - << "value: " << to_value - << ", max: " << dataArray.max() - << ", min: " << dataArray.min() - << ", mean: " << dataArray.sum()/data_cnt - << " from " << data_cnt << " (out of " << cellArray.n() - << ") data values to (" << to_lon << ", " << to_lat << ").\n"; - } + dataArray.add(data_value); + non_missing_cnt++; + if(mlog.verbosity_level() >= 4) { + if (from_min_value > data_value) from_min_value = data_value; + if (from_max_value < data_value) from_max_value = data_value; } } - else { - no_map_cnt++; + + if (0 < dataArray.n()) { + double to_value; + int data_cnt = dataArray.n(); + if (1 == data_cnt) to_value = dataArray[0]; + else if (RGInfo.method == InterpMthd::Min) to_value = dataArray.min(); + else if (RGInfo.method == InterpMthd::Max) to_value = dataArray.max(); + else if (RGInfo.method == InterpMthd::Median) { + dataArray.sort_array(); + to_value = dataArray[data_cnt/2]; + if (0 == data_cnt % 2) + to_value = (to_value + dataArray[(data_cnt/2)+1])/2; + } + else to_value = dataArray.sum() / data_cnt; // UW_Mean + + to_dp.set(to_value, xIdx, yIdx); + to_cell_cnt++; + if(mlog.verbosity_level() >= 9) { + double to_lat; + double to_lon; + to_grid.xy_to_latlon(xIdx,yIdx, to_lat, to_lon); + to_lon *= 
-1; + if (1 == data_cnt) + mlog << Debug(9) << method_name + << "value: " << to_value << " to (" << to_lon << ", " << to_lat + << ") from offset " << from_index << ".\n"; + else + mlog << Debug(9) << method_name + << "value: " << to_value + << ", max: " << dataArray.max() + << ", min: " << dataArray.min() + << ", mean: " << dataArray.sum()/data_cnt + << " from " << data_cnt << " (out of " << cellArray.n() + << ") data values to (" << to_lon << ", " << to_lat << ").\n"; + } } } + else { + no_map_cnt++; + } } - - delete [] from_data; - - mlog << Debug(4) << method_name << "[Count] data cells: " << to_cell_cnt - << ", missing: " << missing_cnt << ", non_missing: " << non_missing_cnt - << ", non mapped cells: " << no_map_cnt - << " out of " << (to_lat_cnt*to_lon_cnt) - << "\n\tRange: data: [" << from_min_value << " - " << from_max_value - << "]\n"; } + mlog << Debug(4) << method_name << "[Count] data cells: " << to_cell_cnt + << ", missing: " << missing_cnt << ", non_missing: " << non_missing_cnt + << ", non mapped cells: " << no_map_cnt + << " out of " << (to_lat_cnt*to_lon_cnt) + << "\n\tRange: data: [" << from_min_value << " - " << from_max_value + << "]\n"; + if (to_cell_cnt == 0) { - mlog << Warning << "\n" << method_name - << " There are no matching cells between input and the target grid.\n\n"; + mlog << Debug(2) << method_name + << "There are no matching cells between input and the target grid.\n"; } mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; } //////////////////////////////////////////////////////////////////////// -void open_nc(const Grid &grid, ConcatString run_cs) { +static void open_nc(const Grid &grid, ConcatString run_cs) { // Create output file nc_out = open_ncfile(OutputFilename.c_str(), true); @@ -1576,7 +1687,7 @@ void open_nc(const Grid &grid, ConcatString run_cs) { void write_nc_data(const DataPlane &dp, const 
Grid &grid, NcVar *data_var) { // Allocate memory to store data values for each grid point - float *data = new float [grid.nx()*grid.ny()]; + vector data(grid.nx()*grid.ny(), bad_data_float); // Store the data int grid_nx = grid.nx(); @@ -1584,20 +1695,17 @@ void write_nc_data(const DataPlane &dp, const Grid &grid, NcVar *data_var) { for(int x=0; x " << "error writing data to the output file.\n\n"; exit(1); } - // Clean up - if(data) { delete [] data; data = (float *) nullptr; } - return; } @@ -1606,7 +1714,7 @@ void write_nc_data(const DataPlane &dp, const Grid &grid, NcVar *data_var) { void write_nc_data_int(const DataPlane &dp, const Grid &grid, NcVar *data_var) { // Allocate memory to store data values for each grid point - int *data = new int [grid.nx()*grid.ny()]; + vector data(grid.nx()*grid.ny(), bad_data_int); // Store the data int grid_nx = grid.nx(); @@ -1619,22 +1727,19 @@ void write_nc_data_int(const DataPlane &dp, const Grid &grid, NcVar *data_var) { } // end for x // Write out the data - if(!put_nc_data_with_dims(data_var, &data[0], grid.ny(), grid.nx())) { + if(!put_nc_data_with_dims(data_var, data.data(), grid.ny(), grid.nx())) { mlog << Error << "\nwrite_nc_data_int() -> " << "error writing data to the output file.\n\n"; exit(1); } - // Clean up - if(data) { delete [] data; data = (int *) nullptr; } - return; } //////////////////////////////////////////////////////////////////////// -void write_nc(const DataPlane &dp, const Grid &grid, - const VarInfo *vinfo, const char *vname) { +static void write_nc(const DataPlane &dp, const Grid &grid, + const VarInfo *vinfo, const char *vname) { int deflate_level = compress_level; if (deflate_level < 0) deflate_level = 0; @@ -1655,8 +1760,8 @@ void write_nc(const DataPlane &dp, const Grid &grid, //////////////////////////////////////////////////////////////////////// -void write_nc_int(const DataPlane &dp, const Grid &grid, - const VarInfo *vinfo, const char *vname) { +static void write_nc_int(const DataPlane 
&dp, const Grid &grid, + const VarInfo *vinfo, const char *vname) { int deflate_level = compress_level; if (deflate_level < 0) deflate_level = 0; @@ -1675,34 +1780,33 @@ void write_nc_int(const DataPlane &dp, const Grid &grid, return; } -// //////////////////////////////////////////////////////////////////////// +// // GOES related modules // - //////////////////////////////////////////////////////////////////////// -void process_goes_file(NcFile *nc_in, MetConfig &config, VarInfo *vinfo, - const Grid fr_grid, const Grid to_grid) { +static void process_goes_file(NcFile *nc_in, MetConfig &config, VarInfo *vinfo, + const Grid fr_grid, const Grid to_grid) { DataPlane fr_dp, to_dp; ConcatString vname; - int global_attr_count; + const size_t global_attr_count = GOES_global_attr_names.size(); bool opt_all_attrs = false; clock_t start_clock = clock(); - NcFile *nc_adp = (NcFile *) nullptr; + auto nc_adp = (NcFile *) nullptr; static const char *method_name = "process_goes_file() -> "; ConcatString tmp_dir = config.get_tmp_dir(); ConcatString geostationary_file(tmp_dir); geostationary_file.add("/"); - geostationary_file.add(make_geostationary_filename(fr_grid, to_grid, RGInfo.name)); + geostationary_file.add(make_geostationary_filename(fr_grid)); // Open ADP file if exists - if (0 < AdpFilename.length() && file_exists(AdpFilename.c_str())) { - nc_adp = open_ncfile(AdpFilename.c_str()); + if (!adp_filename.empty() && file_exists(adp_filename.c_str())) { + nc_adp = open_ncfile(adp_filename.c_str()); if (IS_INVALID_NC_P(nc_adp)) { mlog << Error << "\n" << method_name - << "Can't open the ADP input \"" << AdpFilename << "\"\n\n"; + << "Can't open the ADP input \"" << adp_filename << "\"\n\n"; exit(1); } else if (is_time_mismatch(nc_in, nc_adp)) { @@ -1713,9 +1817,8 @@ void process_goes_file(NcFile *nc_in, MetConfig &config, VarInfo *vinfo, NcVar time_var = get_nc_var_time(nc_in); unixtime valid_time = find_valid_time(time_var); to_dp.set_size(to_grid.nx(), to_grid.ny()); - 
global_attr_count = sizeof(GOES_global_attr_names)/sizeof(*GOES_global_attr_names); - IntArray *cellMapping = new IntArray[to_grid.nx() * to_grid.ny()]; - get_grid_mapping(fr_grid, to_grid, cellMapping, geostationary_file); + vector cellMapping(to_grid.nx() * to_grid.ny()); + get_grid_mapping(fr_grid, to_grid, cellMapping.data(), geostationary_file); // Loop through the requested fields for(int i=0; i= 2) { - double fr_dmin, fr_dmax, to_dmin, to_dmax; + double fr_dmin; + double fr_dmax; + double to_dmin; + double to_dmax; fr_dp.data_range(fr_dmin, fr_dmax); to_dp.data_range(to_dmin, to_dmax); mlog << Debug(2) << "Range of data (" << FieldSA[i] << ")\n" @@ -1757,7 +1863,7 @@ void process_goes_file(NcFile *nc_in, MetConfig &config, VarInfo *vinfo, NcVar to_var = get_nc_var(nc_out, vname.c_str()); NcVar var_data = get_goes_nc_var(nc_in, vinfo->name()); if(IS_VALID_NC(var_data)) { - for (int idx=0; idx mapVar = GET_NC_VARS_P(nc_in); - for (multimap::iterator itVar = mapVar.begin(); - itVar != mapVar.end(); ++itVar) { - if ((*itVar).first == "t" - || string::npos != (*itVar).first.find("time")) { - NcVar from_var = (*itVar).second; + for (const auto &kv : mapVar) { + if (kv.first == "t" || string::npos != kv.first.find("time")) { + NcVar from_var = kv.second; copy_nc_var(nc_out, &from_var); } } - //copy_nc_atts(_nc_in, nc_out, opt_all_attrs); - delete nc_adp; nc_adp = 0; - delete [] cellMapping; cellMapping = (IntArray *) nullptr; + delete nc_adp; nc_adp = nullptr; mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; return; } @@ -1865,6 +1968,23 @@ void check_lat_lon(int data_size, float *latitudes, float *longitudes) { << method_name << "LONG: " << min_lon << " to " << max_lon << "\n"; } +//////////////////////////////////////////////////////////////////////// +// QC flags: 0=high, 1=medium, 2=low +// Enterpise algorithm: 0=high, 1=medium, 
2=low +// Baseline algorithm: 3=high, 1=medium, 0=low (high=12/48, medium=4/16) +// returns bad_data_int if it does not belong to high, mediuam, or low. + +GOES_QC compute_adp_qc_flag(int adp_qc, int shift_bits) { + GOES_QC adp_qc_flag; + int particle_qc = ((adp_qc >> shift_bits) & 0x03); + if (particle_qc == adp_qc_high) adp_qc_flag = GOES_QC::HIGH; + else if (particle_qc == adp_qc_medium) adp_qc_flag = GOES_QC::MEDIUM; + else if (particle_qc == adp_qc_low) adp_qc_flag = GOES_QC::LOW; + else adp_qc_flag = GOES_QC::NA; + + return adp_qc_flag; +} + //////////////////////////////////////////////////////////////////////// static unixtime compute_unixtime(NcVar *time_var, unixtime var_value) { @@ -1885,7 +2005,19 @@ static unixtime compute_unixtime(NcVar *time_var, unixtime var_value) { //////////////////////////////////////////////////////////////////////// -static bool get_grid_mapping(Grid to_grid, IntArray *cellMapping, +GOES_QC convert_aod_qc_flag(int aod_qc) { + GOES_QC aod_qc_flag; + if (0 == aod_qc) aod_qc_flag = GOES_QC::HIGH; + else if (1 == aod_qc) aod_qc_flag = GOES_QC::MEDIUM; + else if (2 == aod_qc) aod_qc_flag = GOES_QC::LOW; + else aod_qc_flag = GOES_QC::NA; + + return aod_qc_flag; +} + +//////////////////////////////////////////////////////////////////////// + +static bool get_grid_mapping(const Grid &to_grid, IntArray *cellMapping, const IntArray obs_index_array, const int *obs_hids, const float *hdr_lats, const float *hdr_lons) { bool status = false; @@ -1894,41 +2026,37 @@ static bool get_grid_mapping(Grid to_grid, IntArray *cellMapping, int obs_count = obs_index_array.n(); if (0 == obs_count) { - mlog << Warning << "\n" << method_name - << "no valid point observation data!\n\n"; + mlog << Debug(2) << method_name + << "no valid point observation data!\n"; return status; } - int hdr_idx, obs_idx; - int count_in_grid; - double x, y; - float lat, lon; + double x; + double y; DataPlane to_dp; - int to_offset, idx_x, idx_y; int to_lat_count = 
to_grid.ny(); int to_lon_count = to_grid.nx(); - - count_in_grid = 0; + int count_in_grid = 0; to_dp.set_size(to_lon_count, to_lat_count); for (int idx=0; idx 0) ? 1.0*count_in_grid/obs_count*100 : 0) << "%)\n"; } mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; return status; } @@ -1946,59 +2074,63 @@ static bool get_grid_mapping(Grid to_grid, IntArray *cellMapping, static void get_grid_mapping_latlon( DataPlane from_dp, DataPlane to_dp, Grid to_grid, IntArray *cellMapping, float *latitudes, float *longitudes, - int from_lat_count, int from_lon_count, bool *skip_times, bool to_north) { - double x, y; - double to_ll_lat, to_ll_lon; - float lat, lon; - int idx_x, idx_y, to_offset; + int from_lat_count, int from_lon_count, vector skip_times, bool to_north, bool is_2d) { + double x; + double y; + double to_ll_lat; + double to_ll_lon; int count_in_grid = 0; clock_t start_clock = clock(); int to_lat_count = to_grid.ny(); int to_lon_count = to_grid.nx(); int to_size = to_lat_count * to_lon_count; int data_size = from_lat_count * from_lon_count; - static const char *method_name = "get_grid_mapping(lats, lons) -> "; + static const char *method_name = "get_grid_mapping_latlon(lats, lons) -> "; - int *to_cell_counts = new int[to_size]; - int *mapping_indices = new int[data_size]; - for (int xIdx=0; xIdx to_cell_counts(to_size, 0); + vector mapping_indices(data_size, bad_data_int); to_grid.xy_to_latlon(0, 0, to_ll_lat, to_ll_lon); - mlog << Debug(5) << method_name << " to_grid ll corner: (" << to_ll_lon << ", " << to_ll_lat << ")\n"; - + mlog << Debug(5) << method_name << "to_grid ll corner: (" << to_ll_lon << ", " << to_ll_lat << ")\n"; + //Count the number of cells to be mapped to TO_GRID //Following the logic at DataPlane::two_to_one(int x, int y) n = y*Nx + x; for (int yIdx=0; yIdx 0 && skip_times[coord_offset] ) continue; + } + float lat = 
latitudes[lat_offset]; + float lon = longitudes[lon_offset]; if( lat < MISSING_LATLON || lon < MISSING_LATLON ) continue; - to_grid.latlon_to_xy(lat, -1.0*lon, x, y); - idx_x = nint(x); - idx_y = nint(y); + to_grid.latlon_to_xy(lat, -1.0*rescale_lon(lon), x, y); + int idx_x = nint(x); + int idx_y = nint(y); if (0 <= idx_x && idx_x < to_lon_count && 0 <= idx_y && idx_y < to_lat_count) { - to_offset = to_dp.two_to_one(idx_x, idx_y); + int to_offset = to_dp.two_to_one(idx_x, idx_y); mapping_indices[coord_offset] = to_offset; to_cell_counts[to_offset] += 1; count_in_grid++; if(mlog.verbosity_level() >= 15) { - double to_lat, to_lon; + double to_lat; + double to_lon; to_grid.xy_to_latlon(idx_x, idx_y, to_lat, to_lon); - mlog << Debug(15) << method_name << " [" << xIdx << "," << yIdx << "] to " << coord_offset - << " (" << lon << ", " << lat << ") to (" << (to_lon*-1) << ", " << to_lat << ")\n"; + mlog << Debug(15) << method_name << "index: [" << xIdx << "," << yIdx << "] to " << coord_offset + << " (" << lon << ", " << lat << ") to (" << rescale_lon(-1*to_lon) << ", " << to_lat << ")\n"; } } } } mlog << Debug(LEVEL_FOR_PERFORMANCE+2) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds for mapping cells\n"; + << get_exe_duration(start_clock) << " seconds for mapping cells\n"; // Count the mapping cells for each to_cell and prepare IntArray int max_count = 0; - clock_t tmp_clock = clock(); + clock_t tmp_clock = clock(); for (int xIdx=0; xIdx 0 ) { @@ -2017,12 +2149,12 @@ static void get_grid_mapping_latlon( } } mlog << Debug(LEVEL_FOR_PERFORMANCE+1) << method_name << "took " - << (clock()-tmp_clock)/double(CLOCKS_PER_SEC) + << get_exe_duration(tmp_clock) << " seconds for extending IntArray (max_cells=" << max_count << ")\n"; // Build cell mapping for (int xIdx=0; xIdx= to_size ) { mlog << Error << "\n" << method_name @@ -2032,21 +2164,20 @@ static void get_grid_mapping_latlon( } else cellMapping[to_offset].add(xIdx); } - delete [] 
to_cell_counts; - delete [] mapping_indices; mlog << Debug(3) << method_name << "within grid: " << count_in_grid << " out of " << data_size << " (" << 1.0*count_in_grid/data_size*100 << "%)\n"; mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; } //////////////////////////////////////////////////////////////////////// -static bool get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, - NcVar var_lat, NcVar var_lon, bool *skip_times) { +static bool get_grid_mapping(const Grid &fr_grid, const Grid &to_grid, IntArray *cellMapping, + NcVar var_lat, NcVar var_lon, vector skip_times) { bool status = false; - DataPlane from_dp, to_dp; + DataPlane from_dp; + DataPlane to_dp; ConcatString cur_coord_name; clock_t start_clock = clock(); static const char *method_name = "get_grid_mapping(var_lat, var_lon) -> "; @@ -2055,7 +2186,14 @@ static bool get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, int to_lon_count = to_grid.nx(); int from_lat_count = fr_grid.ny(); int from_lon_count = fr_grid.nx(); - + if (0 == from_lat_count) { + int dim_offset = get_dim_count(&var_lat) - 1; + if (dim_offset < 0) dim_offset = 0; + from_lat_count = get_dim_size(&var_lat, dim_offset); + dim_offset = get_dim_count(&var_lon) - 2; + if (dim_offset < 0) dim_offset = 0; + from_lon_count = get_dim_size(&var_lon, dim_offset); + } // Override the from nx & ny from NetCDF if exists int data_size = from_lat_count * from_lon_count; mlog << Debug(4) << method_name << "data_size (ny*nx): " << data_size @@ -2078,15 +2216,17 @@ static bool get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, } else if (data_size > 0) { int last_idx = data_size - 1; - float *latitudes = new float[data_size]; - float *longitudes = new float[data_size]; - status = get_nc_data(&var_lat, latitudes); - if( status ) status = get_nc_data(&var_lon, longitudes); + 
int lat_count = get_data_size(&var_lat); + int lon_count = get_data_size(&var_lon); + vector latitudes(lat_count, bad_data_float); + vector longitudes(lon_count, bad_data_float); + status = get_nc_data(&var_lat, latitudes.data()); + if( status ) status = get_nc_data(&var_lon, longitudes.data()); if( status ) { get_grid_mapping_latlon(from_dp, to_dp, to_grid, cellMapping, - latitudes, longitudes, from_lat_count, - from_lon_count, skip_times, - !fr_grid.get_swap_to_north()); + latitudes.data(), longitudes.data(), + from_lat_count, from_lon_count, skip_times, + !fr_grid.get_swap_to_north(), (lon_count==data_size)); if (is_eq(latitudes[0], latitudes[last_idx]) || is_eq(longitudes[0], longitudes[last_idx])) { @@ -2096,11 +2236,9 @@ static bool get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, << longitudes[last_idx] << "\n\n"; } } - if( latitudes ) delete [] latitudes; - if( longitudes ) delete [] longitudes; } // if data_size > 0 mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; return status; } @@ -2112,7 +2250,7 @@ static unixtime find_valid_time(NcVar time_var) { if( IS_VALID_NC(time_var) || get_dim_count(&time_var) < 2) { int time_count = get_dim_size(&time_var, 0); - + double time_values [time_count + 1]; if (get_nc_data(&time_var, time_values)) { valid_time = compute_unixtime(&time_var, time_values[0]); @@ -2136,13 +2274,13 @@ static unixtime find_valid_time(NcVar time_var) { //////////////////////////////////////////////////////////////////////// -ConcatString get_goes_grid_input(MetConfig config, Grid fr_grid, Grid to_grid) { +static ConcatString get_goes_grid_input(MetConfig config, const Grid fr_grid) { ConcatString run_string; ConcatString env_coord_name; ConcatString tmp_dir = config.get_tmp_dir(); ConcatString geostationary_file(tmp_dir); geostationary_file.add("/"); - 
geostationary_file.add(make_geostationary_filename(fr_grid, to_grid, RGInfo.name)); + geostationary_file.add(make_geostationary_filename(fr_grid)); if (get_env(key_geostationary_data, env_coord_name) && env_coord_name.nonempty() && file_exists(env_coord_name.c_str())) { @@ -2157,10 +2295,11 @@ ConcatString get_goes_grid_input(MetConfig config, Grid fr_grid, Grid to_grid) { //////////////////////////////////////////////////////////////////////// -void get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, - ConcatString geostationary_file) { +static void get_grid_mapping(const Grid &fr_grid, const Grid &to_grid, IntArray *cellMapping, + const ConcatString &geostationary_file) { static const char *method_name = "get_grid_mapping() -> "; - DataPlane from_dp, to_dp; + DataPlane from_dp; + DataPlane to_dp; ConcatString cur_coord_name; clock_t start_clock = clock(); @@ -2186,7 +2325,7 @@ void get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, } // Override the from nx & ny from NetCDF if exists - NcFile *coord_nc_in = (NcFile *) nullptr; + auto coord_nc_in = (NcFile *) nullptr; if (has_coord_input) { mlog << Debug(2) << method_name << "Reading coord file: " << cur_coord_name << "\n"; coord_nc_in = open_ncfile(cur_coord_name.c_str()); @@ -2205,33 +2344,33 @@ void get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, to_dp.set_size(to_lon_count, to_lat_count); if (data_size > 0) { - float *latitudes = (float *)nullptr; - float *longitudes = (float *)nullptr; - float *latitudes_buf = (float *)nullptr; - float *longitudes_buf = (float *)nullptr; - int buff_size = data_size*sizeof(float); + int lat_count = data_size; + int lon_count = data_size; + float *latitudes = nullptr; + float *longitudes = nullptr; + vector latitudes_buf(data_size, bad_data_float); + vector longitudes_buf(data_size, bad_data_float); GoesImagerData grid_data; grid_data.reset(); if (has_coord_input) { - latitudes_buf = new float[data_size]; - longitudes_buf = 
new float[data_size]; - - latitudes = latitudes_buf; - longitudes = longitudes_buf; - memset(latitudes, 0, buff_size); - memset(longitudes, 0, buff_size); if (IS_VALID_NC_P(coord_nc_in)) { NcVar var_lat = get_nc_var(coord_nc_in, var_name_lat); NcVar var_lon = get_nc_var(coord_nc_in, var_name_lon); if (IS_VALID_NC(var_lat) && IS_VALID_NC(var_lon)) { + lat_count = get_data_size(&var_lat); + lon_count = get_data_size(&var_lon); + latitudes = latitudes_buf.data(); + longitudes = longitudes_buf.data(); get_nc_data(&var_lat, latitudes); get_nc_data(&var_lon, longitudes); } } else { FILE *pFile = met_fopen ( cur_coord_name.c_str(), "rb" ); + latitudes = latitudes_buf.data(); + longitudes = longitudes_buf.data(); (void) fread (latitudes,sizeof(latitudes[0]),data_size,pFile); (void) fread (longitudes,sizeof(longitudes[0]),data_size,pFile); fclose (pFile); @@ -2246,15 +2385,15 @@ void get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, int lat_mis_matching_count = 0; int lon_matching_count = 0; int lon_mis_matching_count = 0; - float *tmp_lats = grid_data.lat_values; - float *tmp_lons = grid_data.lon_values; + const float *tmp_lats = grid_data.lat_values; + const float *tmp_lons = grid_data.lon_values; for (int idx=0; idx MISSING_LATLON) && (tmp_lats[idx] > MISSING_LATLON)) { if (!is_eq(latitudes[idx], tmp_lats[idx], loose_tol)) { lat_mis_matching_count++; mlog << Warning << "\n" << method_name - << "diff lat at " << idx << " binary-computing: " + << "diff lat at " << idx << " binary-computing: " << latitudes[idx] << " - " << tmp_lats[idx] << " = " << (latitudes[idx]-tmp_lats[idx]) << "\n\n"; } @@ -2265,7 +2404,7 @@ void get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, if (!is_eq(longitudes[idx], tmp_lons[idx], loose_tol)) { lon_mis_matching_count++; mlog << Warning << "\n" << method_name - << "diff lon at " << idx << " binary-computing: " + << "diff lon at " << idx << " binary-computing: " << longitudes[idx] << " - " << tmp_lons[idx] << " 
= " << (longitudes[idx]-tmp_lons[idx]) << "\n\n"; } @@ -2282,36 +2421,33 @@ void get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, } } } - else { - if (fr_grid.info().gi) { - grid_data.copy(fr_grid.info().gi); - grid_data.compute_lat_lon(); - latitudes = grid_data.lat_values; - longitudes = grid_data.lon_values; - if (!file_exists(geostationary_file.c_str())) { - save_geostationary_data(geostationary_file, - latitudes, longitudes, grid_data); - } + else if (fr_grid.info().gi) { + grid_data.copy(fr_grid.info().gi); + grid_data.compute_lat_lon(); + latitudes = grid_data.lat_values; + longitudes = grid_data.lon_values; + if (!file_exists(geostationary_file.c_str())) { + save_geostationary_data(geostationary_file, + latitudes, longitudes, grid_data); } } - if (0 == latitudes) { + if (latitudes == nullptr) { mlog << Error << "\n" << method_name << "Fail to get latitudes!\n\n"; } - else if (0 == longitudes) { + else if (longitudes == nullptr) { mlog << Error << "\n" << method_name << "Fail to get longitudes!\n\n"; } else { check_lat_lon(data_size, latitudes, longitudes); - get_grid_mapping_latlon(from_dp, to_dp, to_grid, cellMapping, latitudes, - longitudes, from_lat_count, from_lon_count, 0, - !fr_grid.get_swap_to_north()); + vector skip_times; + get_grid_mapping_latlon(from_dp, to_dp, to_grid, cellMapping, + latitudes, longitudes, + from_lat_count, from_lon_count, skip_times, + !fr_grid.get_swap_to_north(), (lon_count==data_size)); } - if (latitudes_buf) delete [] latitudes_buf; - if (longitudes_buf) delete [] longitudes_buf; - grid_data.release(); } // if data_size > 0 @@ -2319,12 +2455,12 @@ void get_grid_mapping(Grid fr_grid, Grid to_grid, IntArray *cellMapping, if(coord_nc_in) delete coord_nc_in; mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; } 
//////////////////////////////////////////////////////////////////////// -int get_lat_count(NcFile *_nc) { +static int get_lat_count(const NcFile *_nc) { int lat_count = 0; NcDim dim_lat = get_nc_dim(_nc, dim_name_lat); if(IS_INVALID_NC(dim_lat)) dim_lat = get_nc_dim(_nc, "y"); @@ -2332,7 +2468,9 @@ int get_lat_count(NcFile *_nc) { return lat_count; } -int get_lon_count(NcFile *_nc) { +//////////////////////////////////////////////////////////////////////// + +static int get_lon_count(const NcFile *_nc) { int lon_count = 0; NcDim dim_lon = get_nc_dim(_nc, dim_name_lon); if(IS_INVALID_NC(dim_lon)) dim_lon = get_nc_dim(_nc, "x"); @@ -2342,14 +2480,19 @@ int get_lon_count(NcFile *_nc) { //////////////////////////////////////////////////////////////////////// -static NcVar get_goes_nc_var(NcFile *nc, const ConcatString var_name, +static NcVar get_goes_nc_var(NcFile *nc, const ConcatString &var_name, bool exit_if_error) { - NcVar var_data = get_nc_var(nc, var_name.c_str(), false); + NcVar var_data; + static const char *method_name = "get_goes_nc_var() -> "; + if (has_var(nc, var_name.c_str())) var_data = get_nc_var(nc, var_name.c_str(), false); if (IS_INVALID_NC(var_data)) { - var_data = get_nc_var(nc, var_name.split("_")[0].c_str()); + mlog << Debug(4) << method_name + << "The variable \"" << var_name << "\" does not exist. 
Find \"" + << var_name.split("_")[0] << "\" variable\n"; + var_data = get_nc_var(nc, var_name.split("_")[0].c_str()); } if (IS_INVALID_NC(var_data)) { - mlog << Error << "\nget_goes_nc_var() -> " + mlog << Error << "\n" << method_name << "The variable \"" << var_name << "\" does not exist\n\n"; if (exit_if_error) exit(1); } @@ -2359,8 +2502,7 @@ static NcVar get_goes_nc_var(NcFile *nc, const ConcatString var_name, //////////////////////////////////////////////////////////////////////// -static ConcatString make_geostationary_filename(Grid fr_grid, Grid to_grid, - ConcatString regrid_name) { +static ConcatString make_geostationary_filename(Grid fr_grid) { ConcatString geo_data_filename; GridInfo info = fr_grid.info(); @@ -2420,12 +2562,13 @@ static bool is_time_mismatch(NcFile *nc_in, NcFile *nc_adp) { //////////////////////////////////////////////////////////////////////// -void regrid_goes_variable(NcFile *nc_in, VarInfo *vinfo, +static void regrid_goes_variable(NcFile *nc_in, const VarInfo *vinfo, DataPlane &fr_dp, DataPlane &to_dp, Grid fr_grid, Grid to_grid, IntArray *cellMapping, NcFile *nc_adp) { bool has_qc_var = false; bool has_adp_qc_var = false; + const int log_debug_level = 4; clock_t start_clock = clock(); int to_lat_count = to_grid.ny(); int to_lon_count = to_grid.nx(); @@ -2436,14 +2579,15 @@ void regrid_goes_variable(NcFile *nc_in, VarInfo *vinfo, ConcatString goes_var_sub_name; ConcatString qc_var_name; uchar qc_value; - uchar *qc_data = new uchar[from_data_size]; - uchar *adp_data = new uchar[from_data_size]; - float *from_data = new float[from_data_size]; - unsigned short *adp_qc_data = new unsigned short[from_data_size]; + vector qc_data(from_data_size, -99); + vector adp_data(from_data_size, 1); + vector from_data(from_data_size, bad_data_float); + vector adp_qc_data(from_data_size, 255); static const char *method_name = "regrid_goes_variable() -> "; - // -99 is arbitrary number as invalid QC value - memset(qc_data, -99, 
from_data_size*sizeof(uchar)); + adp_qc_high = 3; /* 3 as baseline algorithm, 0 for enterprise algorithm */ + adp_qc_medium = 1; /* 1 as baseline algorithm, 1 for enterprise algorithm */ + adp_qc_low = 0; /* 0 as baseline algorithm, 2 for enterprise algorithm */ NcVar var_qc; NcVar var_adp; @@ -2452,11 +2596,9 @@ void regrid_goes_variable(NcFile *nc_in, VarInfo *vinfo, bool is_dust_only = false; bool is_smoke_only = false; string actual_var_name = GET_NC_NAME(var_data); - int actual_var_len = actual_var_name.length(); + auto actual_var_len = actual_var_name.length(); bool is_adp_variable = (0 != actual_var_name.compare(vinfo->name().c_str())); - memset(adp_data, 1, from_data_size*sizeof(uchar)); // Default: 1 = data present - memset(adp_qc_data, 255, from_data_size*sizeof(unsigned short)); if (is_adp_variable && IS_VALID_NC_P(nc_adp)) { is_dust_only = (0 == vinfo->name().comparecase((actual_var_len + 1), vname_dust.length(), vname_dust.c_str())); @@ -2467,45 +2609,46 @@ void regrid_goes_variable(NcFile *nc_in, VarInfo *vinfo, else if (is_smoke_only) var_adp = get_goes_nc_var(nc_adp, vname_smoke); if (IS_VALID_NC(var_adp)) { - get_nc_data(&var_adp, adp_data); + get_nc_data(&var_adp, adp_data.data(), true); - //Smoke:ancillary_variables = "DQF" ; ubyte DQF(y, x) ; + //ADP Smoke:ancillary_variables: ubyte DQF(y, x) if (get_att_value_string(&var_adp, (string)"ancillary_variables", qc_var_name)) { var_adp_qc = get_nc_var(nc_adp, qc_var_name.c_str()); if (IS_VALID_NC(var_adp_qc)) { - get_nc_data(&var_adp_qc, adp_qc_data); + get_nc_data(&var_adp_qc, adp_qc_data.data()); + set_adp_gc_values(var_adp_qc); has_adp_qc_var = true; mlog << Debug(5) << method_name << "found QC var: " << qc_var_name << " for " << GET_NC_NAME(var_adp) << ".\n"; } } else { - mlog << Warning << "\n" << method_name + mlog << Debug(2) << method_name << "QC var name (" << qc_var_name << " for " << GET_NC_NAME(var_adp) - << ") does not exist.\n\n"; + << ") does not exist.\n"; } } } mlog << Debug(5) << 
method_name << "is_dust: " << is_dust_only << ", is_smoke: " << is_smoke_only << "\n"; - //AOD:ancillary_variables = "DQF" ; byte DQF(y, x) ; + //AOD ancillary_variables: byte DQF(y, x) if (get_att_value_string(&var_data, (string)"ancillary_variables", qc_var_name)) { var_qc = get_nc_var(nc_in, qc_var_name.c_str()); if (IS_VALID_NC(var_qc)) { - get_nc_data(&var_qc, qc_data); + get_nc_data(&var_qc, qc_data.data()); has_qc_var = true; mlog << Debug(3) << method_name << "found QC var: " << qc_var_name << ".\n"; } else { - mlog << Warning << "\n" << method_name + mlog << Debug(2) << method_name << "QC var name (" << qc_var_name << ") does not exist.\n"; } } - get_nc_data(&var_data, (float *)from_data); + get_nc_data(&var_data, from_data.data()); fr_dp.set_size(from_lon_count, from_lat_count); for (int xIdx=0; xIdx 0); + std::set aod_qc_flags = prepare_qoes_qc_array(); missing_count = non_missing_count = 0; to_dp.set_constant(bad_data_double); + int shift_bits = 2; + if (is_dust_only) shift_bits += 2; + + int cnt_aod_qc_low = 0; + int cnt_aod_qc_high = 0; + int cnt_aod_qc_medium = 0; + int cnt_aod_qc_nr = 0; // no_retrieval_qf + int cnt_adp_qc_low = 0; + int cnt_adp_qc_high = 0; + int cnt_adp_qc_medium = 0; + int cnt_adp_qc_nr = 0; // no_retrieval_qf + int cnt_adp_qc_high_to_low = 0; + int cnt_adp_qc_high_to_medium = 0; + int cnt_adp_qc_medium_to_low = 0; for (int xIdx=0; xIdx= 4) { if (from_min_value > data_value) from_min_value = data_value; if (from_max_value < data_value) from_max_value = data_value; } + // Apply censor threshold + for(int i=0; icensor_thresh().n(); i++) { + // Break out after the first match. 
+ if(vinfo->censor_thresh()[i].check(data_value)) { + data_value = vinfo->censor_val()[i]; + censored_count++; + break; + } + } + + // Check the data existance (always 1 if ADP variable does not exist) + if (0 == adp_data[from_index]) { + absent_count++; + continue; + } + // Filter by QC flag - qc_value = qc_data[from_index]; - if (!has_qc_var || !has_qc_flags || qc_flags.has(qc_value)) { - for(int i=0; icensor_thresh().n(); i++) { - // Break out after the first match. - if(vinfo->censor_thresh()[i].check(data_value)) { - data_value = vinfo->censor_val()[i]; - censored_count++; - break; + if (has_qc_flags && (has_qc_var || has_adp_qc_var)) { + qc_value = qc_data[from_index]; + GOES_QC aod_qc_flag = convert_aod_qc_flag(qc_value); + if (mlog.verbosity_level() >= log_debug_level) { + if (qc_min_value > qc_value) qc_min_value = qc_value; + if (qc_max_value < qc_value) qc_max_value = qc_value; + switch (aod_qc_flag) { + case GOES_QC::HIGH: cnt_aod_qc_high++; break; + case GOES_QC::MEDIUM: cnt_aod_qc_medium++; break; + case GOES_QC::LOW: cnt_aod_qc_low++; break; + default: cnt_aod_qc_nr++; break; } } - if (0 == adp_data[from_index]) { - absent_count++; - continue; - } + if (has_adp_qc_var) { + GOES_QC adp_qc_flag = compute_adp_qc_flag(adp_qc_data[from_index], shift_bits); + if (mlog.verbosity_level() >= log_debug_level) { + switch (adp_qc_flag) { + case GOES_QC::HIGH: cnt_adp_qc_high++; break; + case GOES_QC::MEDIUM: cnt_adp_qc_medium++; break; + case GOES_QC::LOW: cnt_adp_qc_low++; break; + default: cnt_adp_qc_nr++; break; + } + } - if (has_adp_qc_var && has_qc_flags) { - int shift_bits = 2; - if (is_dust_only) shift_bits += 2; - particle_qc = ((adp_qc_data[from_index] >> shift_bits) & 0x03); - int qc_for_flag = 3 - particle_qc; // high = 3, qc_flag for high = 0 - if (!qc_flags.has(qc_for_flag)) { + bool filter_out = GOES_QC::NA == adp_qc_flag; + if (!filter_out) { + /* Adjust the quality by AOD data QC */ + if (GOES_QC::LOW == aod_qc_flag) { + if (GOES_QC::LOW != 
adp_qc_flag) { + if (GOES_QC::HIGH == adp_qc_flag) cnt_adp_qc_high_to_low++; + else if (GOES_QC::MEDIUM == adp_qc_flag) cnt_adp_qc_medium_to_low++; + adp_qc_flag = GOES_QC::LOW; /* high/medium to low quality */ + } + } + else if (GOES_QC::MEDIUM == aod_qc_flag && GOES_QC::HIGH == adp_qc_flag) { + adp_qc_flag = GOES_QC::MEDIUM; /* high to medium quality */ + cnt_adp_qc_high_to_medium++; + } + if (0 == aod_qc_flags.count(adp_qc_flag)) filter_out = true; + } + if (filter_out) { adp_qc_filtered_count++; continue; } } - - dataArray.add(data_value); - if (mlog.verbosity_level() >= 4) { - if (qc_min_value > qc_value) qc_min_value = qc_value; - if (qc_max_value < qc_value) qc_max_value = qc_value; + else if (has_qc_var && 0 == aod_qc_flags.count(aod_qc_flag)) { + qc_filtered_count++; + continue; } } - else { - qc_filtered_count++; - } + dataArray.add(data_value); valid_count++; } if (0 < dataArray.n()) { int data_count = dataArray.n(); - float to_value; + double to_value; if (RGInfo.method == InterpMthd::Min) to_value = dataArray.min(); else if (RGInfo.method == InterpMthd::Max) to_value = dataArray.max(); else if (RGInfo.method == InterpMthd::Median) { @@ -2620,38 +2807,52 @@ void regrid_goes_variable(NcFile *nc_in, VarInfo *vinfo, << data_count << " data values.\n"; } } - else {} } } - delete [] qc_data; - delete [] adp_data; - delete [] from_data; - delete [] adp_qc_data; + int cnt_adjused_low = cnt_adp_qc_low + cnt_adp_qc_high_to_low + cnt_adp_qc_medium_to_low; + int cnt_adjused_high = cnt_adp_qc_high - cnt_adp_qc_high_to_medium - cnt_adp_qc_high_to_low; + int cnt_adjused_medium = cnt_adp_qc_medium + cnt_adp_qc_high_to_medium - cnt_adp_qc_medium_to_low; + int cnt_adjused_total = cnt_adp_qc_high_to_medium + cnt_adp_qc_high_to_low + cnt_adp_qc_medium_to_low; - mlog << Debug(4) << method_name << "Count: actual: " << to_cell_count + mlog << Debug(log_debug_level) << method_name << "Count: actual: " << to_cell_count << ", missing: " << missing_count << ", non_missing: " << 
non_missing_count - << "\n\tFiltered: by QC: " << qc_filtered_count + << "\n Filtered: by QC: " << qc_filtered_count << ", by adp QC: " << adp_qc_filtered_count << ", by absent: " << absent_count << ", total: " << (qc_filtered_count + adp_qc_filtered_count + absent_count) - << "\n\tRange: data: [" << from_min_value << " - " << from_max_value + << "\n Range: data: [" << from_min_value << " - " << from_max_value << "] QC: [" << qc_min_value << " - " << qc_max_value << "]\n"; + if (has_qc_flags) { + mlog << Debug(log_debug_level) + << " AOD QC: high=" << cnt_aod_qc_high + << " medium=" << cnt_aod_qc_medium << ", low=" << cnt_aod_qc_low + << ", no_retrieval=" << cnt_aod_qc_nr + << "\n ADP QC: high=" << cnt_adjused_high << " (" << cnt_adp_qc_high + << "), medium=" << cnt_adjused_medium << " (" << cnt_adp_qc_medium + << "), low=" << cnt_adjused_low << " (" << cnt_adp_qc_low + << "), no_retrieval=" << cnt_adp_qc_nr + << "\n adjusted: high to medium=" << cnt_adp_qc_high_to_medium + << ", high to low=" << cnt_adp_qc_high_to_low + << ", medium to low=" << cnt_adp_qc_medium_to_low + << ", total=" << cnt_adjused_total + << "\n"; + } if (to_cell_count == 0) { - mlog << Warning << "\n" << method_name - << "No valid data!\n\n"; + mlog << Debug(2) << method_name + << "No valid data!\n"; } mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; } //////////////////////////////////////////////////////////////////////// static void save_geostationary_data(const ConcatString geostationary_file, const float *latitudes, const float *longitudes, - const GoesImagerData grid_data) { + const GoesImagerData &grid_data) { bool has_error = false; int deflate_level = 0; clock_t start_clock = clock(); @@ -2674,7 +2875,7 @@ static void save_geostationary_data(const ConcatString geostationary_file, add_att(&lat_var, "units","degrees_south"); } add_att(&lat_var, "dy_rad", 
grid_data.dy_rad); - if(!put_nc_data((NcVar *)&lat_var, latitudes)) { + if(!put_nc_data(&lat_var, latitudes)) { has_error = true; mlog << Warning << "\nsave_geostationary_data() -> " << "Cannot save latitudes!\n\n"; @@ -2690,7 +2891,7 @@ static void save_geostationary_data(const ConcatString geostationary_file, add_att(&lon_var, "units","degrees_west"); } add_att(&lon_var, "dx_rad", grid_data.dx_rad); - if(!put_nc_data((NcVar *)&lon_var, longitudes)) { + if(!put_nc_data(&lon_var, longitudes)) { has_error = true; mlog << Warning << "\nsave_geostationary_data() -> " << "Cannot save longitudes!\n\n"; @@ -2710,15 +2911,15 @@ static void save_geostationary_data(const ConcatString geostationary_file, << geostationary_file << ") was saved\n"; } - delete nc_file; nc_file = 0; + delete nc_file; nc_file = nullptr; mlog << Debug(LEVEL_FOR_PERFORMANCE) << method_name << "took " - << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + << get_exe_duration(start_clock) << " seconds\n"; } //////////////////////////////////////////////////////////////////////// -void close_nc() { +static void close_nc() { // Clean up if(nc_out) { @@ -2734,8 +2935,7 @@ void close_nc() { //////////////////////////////////////////////////////////////////////// -//bool keep_message_type(const char *mt_str) { -bool keep_message_type(const int mt_index) { +static bool keep_message_type(const int mt_index) { bool keep = false; @@ -2747,7 +2947,7 @@ bool keep_message_type(const int mt_index) { //////////////////////////////////////////////////////////////////////// -bool has_lat_lon_vars(NcFile *nc) { +static bool has_lat_lon_vars(const NcFile *nc) { bool has_lat_var = IS_VALID_NC(get_nc_var_lat(nc)); bool has_lon_var = IS_VALID_NC(get_nc_var_lon(nc)); @@ -2758,12 +2958,12 @@ bool has_lat_lon_vars(NcFile *nc) { << " has_lat_var: " << has_lat_var << ", has_lon_var: " << has_lon_var << ", has_time_var: " << has_time_var << "\n"; - return (has_lat_var && has_lon_var && has_time_var); + return 
(has_lat_var && has_lon_var); } //////////////////////////////////////////////////////////////////////// -void usage() { +__attribute__((noreturn)) static void usage() { cout << "\n*** Model Evaluation Tools (MET" << met_version << ") ***\n\n" @@ -2774,7 +2974,8 @@ void usage() { << "\toutput_filename\n" << "\t-field string\n" << "\t[-config file]\n" - << "\t[-qc flags]\n" + << "\t[-goes_qc flags]\n" + << "\t[-adp adp_filename]\n" << "\t[-method type]\n" << "\t[-gaussian_dx n]\n" << "\t[-gaussian_radius n]\n" @@ -2801,21 +3002,26 @@ void usage() { << "\t\t\"-config file\" uses the specified configuration file " << "to generate gridded data (optional).\n" - << "\t\t\"-qc flags\" specifies a comma-separated list of QC flags, for example \"0,1\" (optional).\n" - << "\t\t\tOnly applied if grid_mapping is set to \"goes_imager_projection\" and the QC variable exists.\n" + << "\t\t\"-goes_qc flags\" specifies a comma-separated list of QC flags, " + << "for example \"0,1\" (optional).\n" + << "\t\t\tOnly used if grid_mapping is set to \"goes_imager_projection\" " + << "and the QC variable exists.\n" - << "\t\t\"-adp adp_file_name\" specifies a ADP data input for AOD dataset (ignored if the input is not AOD from GOES16/17).\n" + << "\t\t\"-adp adp_filename\" specifies an ADP input file for the AOD dataset.\n" + << "\t\t\tOnly used if the input is AOD from GOES16/17.\n" - << "\t\t\"-method type\" overrides the default regridding " - << "method (default: " << interpmthd_to_string(RGInfo.method) - << ", optional) to -field variable. 
Additional gaussian smoothing only to the probabililty variable" - << " with additional \"-method GAUSSIAN\" or \"-method MAXGAUSS\".\n" + << "\t\t\"-method type\" specifies the regridding method (default: " + << interpmthd_to_string(RGInfo.method) << ", optional).\n" + << "\t\t\tAdditional Gaussian smoothing only to the probability variable " + << "with additional \"-method GAUSSIAN\" or \"-method MAXGAUSS\".\n" - << "\t\t\"-gaussian_dx n\" specifies a delta distance for Gaussian smoothing." - << " The default is " << RGInfo.gaussian.dx << ". Ignored if not Gaussian method (optional).\n" + << "\t\t\"-gaussian_dx n\" specifies the delta distance for Gaussian smoothing (default: " + << RGInfo.gaussian.dx << ", optional).\n" + << "\t\t\tOnly used for the Gaussian method.\n" - << "\t\t\"-gaussian_radius n\" specifies the radius of influence for Gaussian smoothing." - << " The default is " << RGInfo.gaussian.radius << "). Ignored if not Gaussian method (optional).\n" + << "\t\t\"-gaussian_radius n\" specifies the radius of influence for Gaussian smoothing (default: " + << RGInfo.gaussian.radius << ", optional).\n" + << "\t\t\tOnly used for the Gaussian method (optional).\n" << "\t\t\"-prob_cat_thresh string\" sets the threshold to compute the probability of occurrence (optional).\n" @@ -2839,13 +3045,42 @@ void usage() { //////////////////////////////////////////////////////////////////////// -void set_field(const StringArray &a) { +static void set_adp_gc_values(NcVar var_adp_qc) { + ConcatString att_flag_meanings; + + if (get_nc_att_value(&var_adp_qc, (ConcatString)"flag_meanings", att_flag_meanings)) { + StringArray flag_meanings = to_lower(att_flag_meanings).split(" "); + unsigned short flag_values[flag_meanings.n()+128]; /* reserve enough storage */ + for (int i=0; i> 2) & 0x03; + } + if (flag_meanings.has("medium_confidence_smoke_detection_qf", idx)) { + adp_qc_medium = (flag_values[idx] >> 2) & 0x03; + } + if 
(flag_meanings.has("high_confidence_smoke_detection_qf", idx)) { + adp_qc_high = (flag_values[idx] >> 2) & 0x03; + } + } + mlog << Debug(4) << "set_adp_gc_values() " + << " high_confidence = " << adp_qc_high + << ", medium_confidence = " << adp_qc_medium + << ", low_confidence = " << adp_qc_low << "\n"; + } +} + +//////////////////////////////////////////////////////////////////////// + +static void set_field(const StringArray &a) { FieldSA.add(a[0]); } //////////////////////////////////////////////////////////////////////// -void set_method(const StringArray &a) { +static void set_method(const StringArray &a) { InterpMthd method_id = string_to_interpmthd(a[0].c_str()); if (method_id == InterpMthd::Gaussian || method_id == InterpMthd::MaxGauss ) { do_gaussian_filter = true; @@ -2857,13 +3092,13 @@ void set_method(const StringArray &a) { //////////////////////////////////////////////////////////////////////// -void set_prob_cat_thresh(const StringArray &a) { +static void set_prob_cat_thresh(const StringArray &a) { prob_cat_thresh.set(a[0].c_str()); } //////////////////////////////////////////////////////////////////////// -void set_vld_thresh(const StringArray &a) { +static void set_vld_thresh(const StringArray &a) { RGInfo.vld_thresh = atof(a[0].c_str()); if(RGInfo.vld_thresh > 1 || RGInfo.vld_thresh < 0) { mlog << Error << "\nset_vld_thresh() -> " @@ -2875,25 +3110,25 @@ void set_vld_thresh(const StringArray &a) { //////////////////////////////////////////////////////////////////////// -void set_name(const StringArray & a) { +static void set_name(const StringArray & a) { VarNameSA.add_css(a[0]); } //////////////////////////////////////////////////////////////////////// -void set_config(const StringArray & a) { +static void set_config(const StringArray & a) { config_filename = a[0]; } //////////////////////////////////////////////////////////////////////// -void set_compress(const StringArray & a) { +static void set_compress(const StringArray & a) { compress_level 
= atoi(a[0].c_str()); } //////////////////////////////////////////////////////////////////////// -void set_qc_flags(const StringArray & a) { +static void set_goes_qc_flags(const StringArray & a) { int qc_flag; StringArray sa; @@ -2906,24 +3141,24 @@ void set_qc_flags(const StringArray & a) { //////////////////////////////////////////////////////////////////////// -void set_adp(const StringArray & a) { - AdpFilename = a[0]; - if (!file_exists(AdpFilename.c_str())) { +static void set_adp(const StringArray & a) { + adp_filename = a[0]; + if (!file_exists(adp_filename.c_str())) { mlog << Error << "\nset_adp() -> " - << "\"" << AdpFilename << "\" does not exist\n\n"; + << "\"" << adp_filename << "\" does not exist\n\n"; exit(1); } } //////////////////////////////////////////////////////////////////////// -void set_gaussian_dx(const StringArray &a) { +static void set_gaussian_dx(const StringArray &a) { RGInfo.gaussian.dx = atof(a[0].c_str()); } //////////////////////////////////////////////////////////////////////// -void set_gaussian_radius(const StringArray &a) { +static void set_gaussian_radius(const StringArray &a) { RGInfo.gaussian.radius = atof(a[0].c_str()); } diff --git a/src/tools/other/point2grid/point2grid_conf_info.cc b/src/tools/other/point2grid/point2grid_conf_info.cc index ba2989212f..b05ae88a36 100644 --- a/src/tools/other/point2grid/point2grid_conf_info.cc +++ b/src/tools/other/point2grid/point2grid_conf_info.cc @@ -8,13 +8,11 @@ //////////////////////////////////////////////////////////////////////// - #include "point2grid_conf_info.h" #include "vx_log.h" using namespace std; - //////////////////////////////////////////////////////////////////////// // // Code for class PointToGridConfInfo @@ -49,7 +47,8 @@ void PointToGridConfInfo::clear() { // Initialize values message_type.clear(); beg_ds = end_ds = bad_data_int; - quality_mark_thresh = bad_data_int; + obs_qty_inc.clear(); + obs_qty_exc.clear(); version.clear(); valid_time = 0; 
def_var_name_map.clear(); @@ -85,7 +84,7 @@ void PointToGridConfInfo::read_config(const char *default_file_name, void PointToGridConfInfo::process_config() { ConcatString s; StringArray sa; - Dictionary *dict = (Dictionary *) nullptr; + auto dict = (Dictionary *) nullptr; // Dump the contents of the config file if(mlog.verbosity_level() >= 5) conf.dump(cout); @@ -110,23 +109,18 @@ void PointToGridConfInfo::process_config() { // Conf: var_name_map var_name_map = parse_conf_key_value_map(&conf, conf_key_var_name_map); - // Conf: quality_mark_thresh - quality_mark_thresh = conf.lookup_int(conf_key_quality_mark_thresh); + // Conf: obs_quality_inc + obs_qty_inc = parse_conf_obs_qty_inc(&conf); - // Check the value - if(quality_mark_thresh < 0 || quality_mark_thresh > 15) { - mlog << Warning << "\nPointToGridConfInfo::process_config() -> " - << "the \"" << conf_key_quality_mark_thresh - << "\" entry (" << quality_mark_thresh - << ") should be set between 0 and 15.\n\n"; - } + // Conf: obs_quality_exc + obs_qty_exc = parse_conf_obs_qty_exc(&conf); return; } //////////////////////////////////////////////////////////////////////// -ConcatString PointToGridConfInfo::get_var_id(const ConcatString var_name) { +ConcatString PointToGridConfInfo::get_var_id(const ConcatString &var_name) { ConcatString var_id; map::iterator ptr; @@ -136,7 +130,7 @@ ConcatString PointToGridConfInfo::get_var_id(const ConcatString var_name) { break; } } - + if( var_id.empty() ) { for (ptr=def_var_name_map.begin(); ptr != def_var_name_map.end(); ptr++) { if( ptr->second == var_name ) { @@ -151,17 +145,14 @@ ConcatString PointToGridConfInfo::get_var_id(const ConcatString var_name) { //////////////////////////////////////////////////////////////////////// -ConcatString PointToGridConfInfo::get_var_name(const ConcatString var_name) { +ConcatString PointToGridConfInfo::get_var_name(const ConcatString &var_name) { ConcatString out_var; - ConcatString t_name; - - t_name = var_name_map[var_name]; + 
ConcatString t_name = var_name_map[var_name]; if (t_name.empty()) t_name = def_var_name_map[var_name]; if (t_name.empty()) { - ConcatString tmp_key, tmp_value; - tmp_key = "grib_code_"; + ConcatString tmp_key = "grib_code_"; tmp_key << atoi(var_name.c_str()); - tmp_value = var_name_map[tmp_key]; + ConcatString tmp_value = var_name_map[tmp_key]; if (tmp_value.empty()) tmp_value = def_var_name_map[tmp_key]; if (!tmp_value.empty()) t_name = tmp_value; } diff --git a/src/tools/other/point2grid/point2grid_conf_info.h b/src/tools/other/point2grid/point2grid_conf_info.h index f6dff25b55..725099b89c 100644 --- a/src/tools/other/point2grid/point2grid_conf_info.h +++ b/src/tools/other/point2grid/point2grid_conf_info.h @@ -29,8 +29,6 @@ class PointToGridConfInfo { private: void init_from_scratch(); - - protected: std::map var_name_map; std::map def_var_name_map; @@ -44,7 +42,8 @@ class PointToGridConfInfo { unixtime valid_time; // valid time int beg_ds; // Time range of observations to be retained, int end_ds; // Defined relative to the PrepBufr center time (seconds) - int quality_mark_thresh; // Quality marks to be retained + StringArray obs_qty_inc; // Quality mark strings to be included + StringArray obs_qty_exc; // Quality mark strings to be excluded ConcatString version; // Config file version PointToGridConfInfo(); @@ -54,8 +53,8 @@ class PointToGridConfInfo { void process_config(); void read_config(const char *, const char *); - ConcatString get_var_name(const ConcatString); - ConcatString get_var_id(const ConcatString); + ConcatString get_var_name(const ConcatString &); + ConcatString get_var_id(const ConcatString &); }; //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/regrid_data_plane/Makefile.in b/src/tools/other/regrid_data_plane/Makefile.in index 61c475b1ae..75f0f17630 100644 --- a/src/tools/other/regrid_data_plane/Makefile.in +++ b/src/tools/other/regrid_data_plane/Makefile.in @@ -225,6 +225,7 @@ MET_BUFRLIB = 
@MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/regrid_data_plane/regrid_data_plane.cc b/src/tools/other/regrid_data_plane/regrid_data_plane.cc index 28ec4704ea..0306c670a6 100644 --- a/src/tools/other/regrid_data_plane/regrid_data_plane.cc +++ b/src/tools/other/regrid_data_plane/regrid_data_plane.cc @@ -390,7 +390,7 @@ void open_nc(const Grid &grid, ConcatString run_cs) { void write_nc_data(const DataPlane &dp, const Grid &grid, NcVar *data_var) { // Allocate memory to store data values for each grid point - float *data = new float [grid.nx()*grid.ny()]; + vector data(grid.nx()*grid.ny()); // Store the data int grid_nx = grid.nx(); @@ -403,15 +403,12 @@ void write_nc_data(const DataPlane &dp, const Grid &grid, NcVar *data_var) { } // end for x // Write out the data - if(!put_nc_data_with_dims(data_var, &data[0], grid.ny(), grid.nx())) { + if(!put_nc_data_with_dims(data_var, data.data(), grid.ny(), grid.nx())) { mlog << Error << "\nwrite_nc_data() -> " << "error writing data to the output file.\n\n"; exit(1); } - // Clean up - if(data) { delete [] data; data = (float *) nullptr; } - return; } diff --git a/src/tools/other/shift_data_plane/Makefile.in b/src/tools/other/shift_data_plane/Makefile.in index 4eb7b7f50b..aa38481fea 100644 --- a/src/tools/other/shift_data_plane/Makefile.in +++ b/src/tools/other/shift_data_plane/Makefile.in @@ -225,6 +225,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/shift_data_plane/shift_data_plane.cc b/src/tools/other/shift_data_plane/shift_data_plane.cc index 40e2f742bc..7f5e56da5b 100644 --- 
a/src/tools/other/shift_data_plane/shift_data_plane.cc +++ b/src/tools/other/shift_data_plane/shift_data_plane.cc @@ -336,7 +336,7 @@ void write_netcdf(const DataPlane &dp, const Grid &grid, add_att(&data_var, "smoothing_shape", gtf.enum2String(Shape)); // Allocate memory to store data values for each grid point - float *data = new float [grid.nx()*grid.ny()]; + vector data(grid.nx()*grid.ny()); // Store the data for(int x=0; x " << "error writing data to the output file.\n\n"; exit(1); } // Clean up - if(data) { delete [] data; data = (float *) nullptr; } if(f_out) { delete f_out; f_out = (NcFile *) nullptr; } diff --git a/src/tools/other/wwmca_tool/Makefile.in b/src/tools/other/wwmca_tool/Makefile.in index 972b3f9255..e3be6b2af6 100644 --- a/src/tools/other/wwmca_tool/Makefile.in +++ b/src/tools/other/wwmca_tool/Makefile.in @@ -285,6 +285,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/other/wwmca_tool/af_file.h b/src/tools/other/wwmca_tool/af_file.h index dbcbc0bdf3..eba9dfff68 100644 --- a/src/tools/other/wwmca_tool/af_file.h +++ b/src/tools/other/wwmca_tool/af_file.h @@ -93,12 +93,12 @@ class AFDataFile { //////////////////////////////////////////////////////////////////////// -inline int AFDataFile::nx() const { return ( af_nx ); } -inline int AFDataFile::ny() const { return ( af_ny ); } +inline int AFDataFile::nx() const { return af_nx; } +inline int AFDataFile::ny() const { return af_ny; } -inline unixtime AFDataFile::init () const { return ( Init ); } -inline unixtime AFDataFile::valid () const { return ( Valid ); } -inline char AFDataFile::hemisphere () const { return ( Hemisphere ); } +inline unixtime AFDataFile::init () const { return Init; } +inline unixtime AFDataFile::valid () const { return Valid; } +inline char AFDataFile::hemisphere () 
const { return Hemisphere; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/wwmca_tool/interp_base.h b/src/tools/other/wwmca_tool/interp_base.h index 885623521d..52f7015efe 100644 --- a/src/tools/other/wwmca_tool/interp_base.h +++ b/src/tools/other/wwmca_tool/interp_base.h @@ -119,9 +119,9 @@ class Interpolator { //////////////////////////////////////////////////////////////////////// -inline int Interpolator::width() const { return ( Width ); } +inline int Interpolator::width() const { return Width; } -inline int Interpolator::wm1o2() const { return ( Wm1o2 ); } +inline int Interpolator::wm1o2() const { return Wm1o2; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/wwmca_tool/wwmca_ref.h b/src/tools/other/wwmca_tool/wwmca_ref.h index 9cbee01559..3006612804 100644 --- a/src/tools/other/wwmca_tool/wwmca_ref.h +++ b/src/tools/other/wwmca_tool/wwmca_ref.h @@ -134,7 +134,7 @@ class WwmcaRegridder { //////////////////////////////////////////////////////////////////////// -inline GridHemisphere WwmcaRegridder::hemi() const { return ( Hemi ); } +inline GridHemisphere WwmcaRegridder::hemi() const { return Hemi; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/tc_utils/Makefile.in b/src/tools/tc_utils/Makefile.in index 7c74bba2fa..858a6de0b8 100644 --- a/src/tools/tc_utils/Makefile.in +++ b/src/tools/tc_utils/Makefile.in @@ -232,6 +232,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/tc_utils/rmw_analysis/Makefile.in b/src/tools/tc_utils/rmw_analysis/Makefile.in index 9a89e51057..6d37dbd043 100644 --- a/src/tools/tc_utils/rmw_analysis/Makefile.in +++ b/src/tools/tc_utils/rmw_analysis/Makefile.in @@ -225,6 
+225,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/tc_utils/tc_diag/Makefile.in b/src/tools/tc_utils/tc_diag/Makefile.in index b884c28f85..66b5eb365d 100644 --- a/src/tools/tc_utils/tc_diag/Makefile.in +++ b/src/tools/tc_utils/tc_diag/Makefile.in @@ -227,6 +227,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/tc_utils/tc_diag/python_tc_diag.cc b/src/tools/tc_utils/tc_diag/python_tc_diag.cc index 991295370f..f7791d8bcf 100644 --- a/src/tools/tc_utils/tc_diag/python_tc_diag.cc +++ b/src/tools/tc_utils/tc_diag/python_tc_diag.cc @@ -306,7 +306,7 @@ bool parse_python_diag_data(PyObject *module_obj, // Get the diag_data item PyObject *data_obj = PyDict_GetItem(module_dict_obj, - PyUnicode_FromString(diag_data_dict_name)); + PyUnicode_FromString(diag_data_dict_name)); if(!data_obj || !PyDict_Check(data_obj)) { mlog << Warning << "\n" << method_name @@ -384,7 +384,7 @@ bool parse_python_string_value_map(PyObject *dict, long pos; PyObject *data_obj = PyDict_GetItem(dict, - PyUnicode_FromString(name)); + PyUnicode_FromString(name)); if(!data_obj || !PyDict_Check(data_obj)) { mlog << Warning << "\n" << method_name diff --git a/src/tools/tc_utils/tc_diag/tc_diag.cc b/src/tools/tc_utils/tc_diag/tc_diag.cc index dfe2532d90..00abca8909 100644 --- a/src/tools/tc_utils/tc_diag/tc_diag.cc +++ b/src/tools/tc_utils/tc_diag/tc_diag.cc @@ -867,7 +867,7 @@ void compute_lat_lon(TcrmwGrid& grid, ia * grid.azimuth_delta_deg(), lat, lon); lat_arr[i] = lat; - lon_arr[i] = -lon; // degrees east to west + lon_arr[i] = -lon; // degrees west to east } } @@ -1331,14 +1331,11 
@@ void copy_time_vars(NcFile *to_nc, NcFile *from_nc, int i_time) { } // Allocate buffer - double *buf = new double[buf_size]; + vector buf(buf_size); // Copy the data for this time slice - get_nc_data(&from_var, buf); - to_var.putVar(offsets, counts, buf); - - // Cleanup - if(buf) { delete[] buf; buf = (double *) nullptr; } + get_nc_data(&from_var, buf.data()); + to_var.putVar(offsets, counts, buf.data()); } // end for i @@ -1665,7 +1662,7 @@ void OutFileInfo::write_nc_diag() { // Allocate space int n_prs_data = vld_dim.getSize() * prs_dim.getSize(); - float *prs_data = new float [n_prs_data]; + vector prs_data(n_prs_data); ConcatString diag_name; // Loop over the pressure diagnostic names @@ -1698,7 +1695,7 @@ void OutFileInfo::write_nc_diag() { } // end for j // Write the data - write_nc_diag_prs_vals(prs_diag[i], prs_data); + write_nc_diag_prs_vals(prs_diag[i], prs_data.data()); } // end for i @@ -1708,9 +1705,6 @@ void OutFileInfo::write_nc_diag() { write_nc_diag_vals(*it, diag_custom_map.at(*it)); } - // Clean up - if(prs_data) { delete [] prs_data; prs_data = (float *) nullptr; } - return; } @@ -2198,8 +2192,8 @@ void TmpFileInfo::setup_nc_file(const DomainInfo &di, // Define latitude and longitude arrays TcrmwData d = di.data; int nra = d.range_n * d.azimuth_n; - double *lat_arr = new double[nra]; - double *lon_arr = new double[nra]; + vector lat_arr(nra); + vector lon_arr(nra); // Set grid center d.lat_center = pnt_ptr->lat(); @@ -2262,19 +2256,15 @@ void TmpFileInfo::setup_nc_file(const DomainInfo &di, lat_var, lon_var); // Compute lat and lon coordinate arrays - compute_lat_lon(ra_grid, lat_arr, lon_arr); + compute_lat_lon(ra_grid, lat_arr.data(), lon_arr.data()); // Write coordinate arrays - write_tc_data(tmp_out, ra_grid, 0, lat_var, lat_arr); - write_tc_data(tmp_out, ra_grid, 0, lon_var, lon_arr); + write_tc_data(tmp_out, ra_grid, 0, lat_var, lat_arr.data()); + write_tc_data(tmp_out, ra_grid, 0, lon_var, lon_arr.data()); // Write track point values 
write_tc_track_point(tmp_out, vld_dim, *pnt_ptr); - // Clean up - if(lat_arr) { delete[] lat_arr; lat_arr = (double *) nullptr; } - if(lon_arr) { delete[] lon_arr; lon_arr = (double *) nullptr; } - return; } @@ -2286,9 +2276,8 @@ void TmpFileInfo::write_nc_data(const VarInfo *vi, const DataPlane &dp_in, RegridInfo ri = vi->regrid(); mlog << Debug(4) << "Regridding \"" << vi->magic_str() - << "\" to the \"" << domain << "\" domain using the " - << interpmthd_to_string(ri.method) << "(" << ri.width - << ") interpolation method.\n"; + << "\" to the \"" << domain << "\" domain using " + << ri.get_str() << ".\n"; // Do the cylindrical coordinate transformation if(dp_in.nxy() > 0) { diff --git a/src/tools/tc_utils/tc_dland/Makefile.in b/src/tools/tc_utils/tc_dland/Makefile.in index 66bc377e52..b31ca8a29e 100644 --- a/src/tools/tc_utils/tc_dland/Makefile.in +++ b/src/tools/tc_utils/tc_dland/Makefile.in @@ -235,6 +235,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/tc_utils/tc_dland/tc_dland.cc b/src/tools/tc_utils/tc_dland/tc_dland.cc index 0a563b5e30..97c1e21d25 100644 --- a/src/tools/tc_utils/tc_dland/tc_dland.cc +++ b/src/tools/tc_utils/tc_dland/tc_dland.cc @@ -203,7 +203,6 @@ void process_land_data() { void process_distances() { int n, x, y, c, npts, nlog, imin; double lat, lon; - float *dland = (float *) nullptr; // Instantiate the grid Grid grid(GridData); @@ -250,7 +249,7 @@ void process_distances() { add_att(&dland_var, "_FillValue", bad_data_float); // Allocate memory to store the data values for each grid point - dland = new float [grid.nx()*grid.ny()]; + vector dland(grid.nx()*grid.ny()); // Dump out grid info mlog << Debug(2) @@ -294,16 +293,13 @@ void process_distances() { // Write the computed distances to the output file mlog << Debug(3) << 
"Writing distance to land variable.\n"; - if(!put_nc_data_with_dims(&dland_var, &dland[0], grid.ny(), grid.nx())) { - if(dland) { delete [] dland; dland = (float *) nullptr; } + if(!put_nc_data_with_dims(&dland_var, dland.data(), grid.ny(), grid.nx())) { + delete f_out; mlog << Error << "\nprocess_distances() -> " << "error with dland_var->put\n\n"; exit(1); } - // Delete allocated memory - if(dland) { delete [] dland; dland = (float *) nullptr; } - // Close the output NetCDF file delete f_out; f_out = (NcFile *) nullptr; diff --git a/src/tools/tc_utils/tc_dland/tc_poly.h b/src/tools/tc_utils/tc_dland/tc_poly.h index 55cb053e16..db7d654eb8 100644 --- a/src/tools/tc_utils/tc_dland/tc_poly.h +++ b/src/tools/tc_utils/tc_dland/tc_poly.h @@ -65,7 +65,7 @@ class TCPoly { //////////////////////////////////////////////////////////////////////// -inline ConcatString TCPoly::name() const { return(Name); } +inline ConcatString TCPoly::name() const { return Name; } //////////////////////////////////////////////////////////////////////// @@ -110,7 +110,7 @@ class TCPolyArray { //////////////////////////////////////////////////////////////////////// -inline int TCPolyArray::n_polys() const { return(NPolys); } +inline int TCPolyArray::n_polys() const { return NPolys; } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/tc_utils/tc_gen/Makefile.in b/src/tools/tc_utils/tc_gen/Makefile.in index 97b0b5994f..98881f5a22 100644 --- a/src/tools/tc_utils/tc_gen/Makefile.in +++ b/src/tools/tc_utils/tc_gen/Makefile.in @@ -225,6 +225,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/tc_utils/tc_gen/tc_gen.cc b/src/tools/tc_utils/tc_gen/tc_gen.cc index 04e0b0c408..c02f65e9b4 100644 --- a/src/tools/tc_utils/tc_gen/tc_gen.cc +++ 
b/src/tools/tc_utils/tc_gen/tc_gen.cc @@ -1750,18 +1750,18 @@ void setup_txt_files(int n_model, int max_n_prob, int n_pair) { // 2x2 contingency table output: // 1 header + 2 vx methods * # models * # filters - case(i_fho): - case(i_ctc): - case(i_cts): + case i_fho: + case i_ctc: + case i_cts: n_rows = 1 + 2 * n_model * conf_info.n_vx(); break; // Nx2 probabilistic contingency table output: // 1 header + 1 vx method * # models * # probs * # filters - case(i_pct): - case(i_pstd): - case(i_pjc): - case(i_prc): + case i_pct: + case i_pstd: + case i_pjc: + case i_prc: n_rows = 1 + n_model * max_n_prob * conf_info.n_vx(); break; @@ -1775,19 +1775,19 @@ void setup_txt_files(int n_model, int max_n_prob, int n_pair) { // Compute the number of columns for this line type switch(i) { - case(i_pct): + case i_pct: n_cols = get_n_pct_columns(n_prob) + n_header_columns + 1; break; - case(i_pstd): + case i_pstd: n_cols = get_n_pstd_columns(n_prob) + n_header_columns + 1; break; - case(i_pjc): + case i_pjc: n_cols = get_n_pjc_columns(n_prob) + n_header_columns + 1; break; - case(i_prc): + case i_prc: n_cols = get_n_prc_columns(n_prob) + n_header_columns + 1; break; @@ -1823,19 +1823,19 @@ void setup_txt_files(int n_model, int max_n_prob, int n_pair) { // Write header row switch(i) { - case(i_pct): + case i_pct: write_pct_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_pstd): + case i_pstd: write_pstd_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_pjc): + case i_pjc: write_pjc_header_row(1, n_prob, txt_at[i], 0, 0); break; - case(i_prc): + case i_prc: write_prc_header_row(1, n_prob, txt_at[i], 0, 0); break; @@ -2412,10 +2412,9 @@ void write_nc(GenCTCInfo &gci) { unixtime valid_end = (unixtime) 0; // Allocate memory - float *data = (float *) nullptr; int nx = gci.NcOutGrid->nx(); int ny = gci.NcOutGrid->ny(); - data = new float [nx*ny]; + vector data(nx*ny, 0.0); // Loop over vector of output types for(i=0; i " << "error writing NetCDF variable name " << var_name << 
"\n\n"; @@ -2543,9 +2539,6 @@ void write_nc(GenCTCInfo &gci) { } } - // Deallocate and clean up - if(data) { delete [] data; data = (float *) nullptr; } - return; } diff --git a/src/tools/tc_utils/tc_gen/tc_gen.h b/src/tools/tc_utils/tc_gen/tc_gen.h index fa19075829..13c31a2ae9 100644 --- a/src/tools/tc_utils/tc_gen/tc_gen.h +++ b/src/tools/tc_utils/tc_gen/tc_gen.h @@ -59,7 +59,7 @@ static const char * default_config_filename = "MET_BASE/config/TCGenConfig_default"; // Header columns -static const char **txt_columns[n_txt] = { +static const char * const * txt_columns[n_txt] = { fho_columns, ctc_columns, cts_columns, pct_columns, pstd_columns, pjc_columns, prc_columns, genmpr_columns @@ -73,7 +73,7 @@ static const int n_txt_columns[n_txt] = { }; // Text file abbreviations -static const char *txt_file_abbr[n_txt] = { +static const char * const txt_file_abbr[n_txt] = { "fho", "ctc", "cts", "pct", "pstd", "pjc", "prc", "genmpr" }; diff --git a/src/tools/tc_utils/tc_pairs/Makefile.in b/src/tools/tc_utils/tc_pairs/Makefile.in index 736cb2afe3..f56d6bbcbd 100644 --- a/src/tools/tc_utils/tc_pairs/Makefile.in +++ b/src/tools/tc_utils/tc_pairs/Makefile.in @@ -249,6 +249,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/tc_utils/tc_rmw/Makefile.in b/src/tools/tc_utils/tc_rmw/Makefile.in index 0c5220595a..05430c1fa0 100644 --- a/src/tools/tc_utils/tc_rmw/Makefile.in +++ b/src/tools/tc_utils/tc_rmw/Makefile.in @@ -227,6 +227,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = @MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/tc_utils/tc_rmw/tc_rmw.cc b/src/tools/tc_utils/tc_rmw/tc_rmw.cc index 
d32bc62d81..2dfe8afd6e 100644 --- a/src/tools/tc_utils/tc_rmw/tc_rmw.cc +++ b/src/tools/tc_utils/tc_rmw/tc_rmw.cc @@ -23,7 +23,6 @@ // //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -57,7 +56,6 @@ using namespace std; using namespace netCDF; - //////////////////////////////////////////////////////////////////////// static void usage(); @@ -69,22 +67,21 @@ static bool file_is_ok(const ConcatString &, const GrdFileType); static void process_rmw(); static void process_tracks(TrackInfoArray&); static void get_atcf_files(const StringArray&, - const StringArray&, StringArray&, StringArray&); + const StringArray&, StringArray&, StringArray&); static void process_track_files(const StringArray&, - const StringArray&, TrackInfoArray&); + const StringArray&, TrackInfoArray&); static bool is_keeper(const ATCFLineBase *); static void set_deck(const StringArray&); static void set_atcf_source(const StringArray&, - StringArray&, StringArray&); + StringArray&, StringArray&); static void set_data_files(const StringArray&); static void set_config(const StringArray&); static void set_out(const StringArray&); static void setup_grid(); static void setup_nc_file(); static void build_outfile_name(const ConcatString&, - const char*, ConcatString&); -static void compute_lat_lon(TcrmwGrid&, - double*, double*); + const char*, ConcatString&); +static void compute_lat_lon(TcrmwGrid&, double*, double*); static void process_fields(const TrackInfoArray&); //////////////////////////////////////////////////////////////////////// @@ -552,19 +549,19 @@ void set_out(const StringArray& a) { void setup_grid() { - grid_data.name = "TCRMW"; - grid_data.range_n = conf_info.n_range; - grid_data.azimuth_n = conf_info.n_azimuth; + tcrmw_data.name = "TCRMW"; + tcrmw_data.range_n = conf_info.n_range; + tcrmw_data.azimuth_n = conf_info.n_azimuth; // Define the maximum range in km based on the fixed increment if(is_bad_data(conf_info.rmw_scale)) { - 
grid_data.range_max_km = + tcrmw_data.range_max_km = conf_info.delta_range_km * (conf_info.n_range - 1); } - tcrmw_grid.set_from_data(grid_data); - grid.set(grid_data); + tcrmw_grid.set_from_data(tcrmw_data); + grid_out.set(tcrmw_data); } //////////////////////////////////////////////////////////////////////// @@ -651,8 +648,8 @@ void compute_lat_lon(TcrmwGrid& tcrmw_grid, ir * tcrmw_grid.range_delta_km(), ia * tcrmw_grid.azimuth_delta_deg(), lat, lon); - lat_arr[i] = lat; - lon_arr[i] = -lon; + lat_arr[i] = lat; + lon_arr[i] = -lon; // degrees west to east } } } @@ -693,12 +690,12 @@ void process_fields(const TrackInfoArray& tracks) { << point.lon() << ").\n"; // Set grid center - grid_data.lat_center = point.lat(); - grid_data.lon_center = -1.0*point.lon(); // internal sign change + tcrmw_data.lat_center = point.lat(); + tcrmw_data.lon_center = -1.0*point.lon(); // internal sign change // Define the maximum range in km relative to the radius of maximum winds if(!is_bad_data(conf_info.rmw_scale)) { - grid_data.range_max_km = + tcrmw_data.range_max_km = conf_info.rmw_scale * point.mrd() * tc_km_per_nautical_miles * (conf_info.n_range - 1); @@ -706,9 +703,9 @@ void process_fields(const TrackInfoArray& tracks) { // Re-define the range/azimuth grid tcrmw_grid.clear(); - tcrmw_grid.set_from_data(grid_data); - grid.clear(); - grid.set(grid_data); + tcrmw_grid.set_from_data(tcrmw_data); + grid_out.clear(); + grid_out.set(tcrmw_data); // Compute lat and lon coordinate arrays compute_lat_lon(tcrmw_grid, lat_arr, lon_arr); @@ -727,7 +724,7 @@ void process_fields(const TrackInfoArray& tracks) { for(int i_var = 0; i_var < conf_info.get_n_data(); i_var++) { - // Update with the valid time of the track point + // Update the variable info with the valid time of the track point data_info = conf_info.data_info[i_var]; string sname = data_info->name_attr().string(); @@ -736,38 +733,33 @@ void process_fields(const TrackInfoArray& tracks) { data_info->set_valid(valid_time); // Find 
data for this track point - get_series_entry(i_point, data_info, data_files, ftype, data_dp, latlon_arr); - - // Check data range - double data_min, data_max; - data_dp.data_range(data_min, data_max); - mlog << Debug(4) << "data_min:" << data_min << "\n"; - mlog << Debug(4) << "data_max:" << data_max << "\n"; - - // Regrid data - data_dp = met_regrid(data_dp, latlon_arr, grid, - data_info->regrid()); - data_dp.data_range(data_min, data_max); - mlog << Debug(4) << "data_min:" << data_min << "\n"; - mlog << Debug(4) << "data_max:" << data_max << "\n"; - - // If this is "U", setup everything for matching "V" - // and compute the radial/tangential winds - if(wind_converter.compute_winds_if_input_is_u( - i_point, sname, slevel, valid_time, data_files, ftype, - latlon_arr, lat_arr, lon_arr, grid, data_dp, tcrmw_grid)) { - write_tc_pressure_level_data(nc_out, tcrmw_grid, - pressure_level_indices, data_info->level_attr(), i_point, - data_3d_vars[conf_info.radial_velocity_field_name.string()], - wind_converter.get_wind_r_arr()); - write_tc_pressure_level_data(nc_out, tcrmw_grid, - pressure_level_indices, data_info->level_attr(), i_point, - data_3d_vars[conf_info.tangential_velocity_field_name.string()], - wind_converter.get_wind_t_arr()); + get_series_entry(i_point, data_info, data_files, ftype, data_dp, grid_in); + + // Regrid data and log the range of values before and after + double dmin, dmax, dmin_rgd, dmax_rgd; + data_dp.data_range(dmin, dmax); + data_dp = met_regrid(data_dp, grid_in, grid_out, data_info->regrid()); + data_dp.data_range(dmin_rgd, dmax_rgd); + + mlog << Debug(4) << data_info->magic_str() + << " input range (" << dmin << ", " << dmax + << "), regrid range (" << dmin_rgd << ", " << dmax_rgd << ")\n"; + + // if this is "U", setup everything for matching "V" and compute the radial/tangential + if(wind_converter.compute_winds_if_input_is_u(i_point, sname, slevel, valid_time, data_files, ftype, + grid_in, grid_out, data_dp, tcrmw_grid)) { + 
write_tc_pressure_level_data(nc_out, tcrmw_grid, + pressure_level_indices, data_info->level_attr(), i_point, + data_3d_vars[conf_info.radial_velocity_field_name.string()], + wind_converter.get_wind_r_arr()); + write_tc_pressure_level_data(nc_out, tcrmw_grid, + pressure_level_indices, data_info->level_attr(), i_point, + data_3d_vars[conf_info.tangential_velocity_field_name.string()], + wind_converter.get_wind_t_arr()); } // Write data - if(variable_levels[data_info->name_attr()].size() > 1) { + if(has_pressure_level(variable_levels[data_info->name_attr()])) { write_tc_pressure_level_data(nc_out, tcrmw_grid, pressure_level_indices, data_info->level_attr(), i_point, data_3d_vars[data_info->name_attr()], data_dp.data()); diff --git a/src/tools/tc_utils/tc_rmw/tc_rmw.h b/src/tools/tc_utils/tc_rmw/tc_rmw.h index 7691a2c012..3bd0bfae40 100644 --- a/src/tools/tc_utils/tc_rmw/tc_rmw.h +++ b/src/tools/tc_utils/tc_rmw/tc_rmw.h @@ -84,7 +84,6 @@ static TCRMWConfInfo conf_info; static GrdFileType ftype; static TCRMW_WindConverter wind_converter; - // Optional arguments static ConcatString out_dir; static ConcatString out_prefix; @@ -136,11 +135,10 @@ static std::map pressure_level_indices; // //////////////////////////////////////////////////////////////////////// -static DataPlane dp; -static Grid latlon_arr; -static TcrmwData grid_data; +static Grid grid_in; +static TcrmwData tcrmw_data; static TcrmwGrid tcrmw_grid; -static Grid grid; +static Grid grid_out; // Grid coordinate arrays static double* lat_arr; diff --git a/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.cc b/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.cc index b82f8e30da..58e4b9ea48 100644 --- a/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.cc +++ b/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.cc @@ -17,27 +17,25 @@ // ---- ---- ---- ----------- // 000 05/11/22 Albo Pulled the wind conversion into a class // 001 09/28/22 Prestopnik MET #2227 Remove namespace std from header files +// 002 05/03/24 Halley 
Gotway MET #2841 Fix radial and tangential winds // //////////////////////////////////////////////////////////////////////// - #include "tc_rmw_wind_converter.h" #include "series_data.h" #include "vx_regrid.h" using namespace std; - //////////////////////////////////////////////////////////////////////// -static void wind_ne_to_ra(const TcrmwGrid&, +static void wind_ne_to_rt(const TcrmwGrid&, const DataPlane&, const DataPlane&, - const double*, const double*, double*, double*); + double*, double*); //////////////////////////////////////////////////////////////////////// -void TCRMW_WindConverter::_free_winds_arrays(void) -{ +void TCRMW_WindConverter::_free_winds_arrays(void) { if (_windR != nullptr) { delete [] _windR; _windR = nullptr; @@ -96,6 +94,7 @@ void TCRMW_WindConverter::init(const TCRMWConfInfo *conf) { _vIndexMap[varlevel] = i_var; } } + // test for consistency if (_uIndexMap.size() != _vIndexMap.size()) { mlog << Warning << "Uneven number of u/v wind inputs, no wind conversion will be done:\n" @@ -103,12 +102,16 @@ void TCRMW_WindConverter::init(const TCRMWConfInfo *conf) { << _conf->v_wind_field_name.string() << " has " << _vIndexMap.size() << " inputs\n"; _computeWinds = false; } - map::const_iterator iu, iv; - for (iu=_uIndexMap.begin(), iv=_vIndexMap.begin(); iu!=_uIndexMap.end(); ++iu, ++iv) { - if (iu->first != iv->first) { - mlog << Warning << "Ordering of u/v wind input levels not the same, not implemented, no wind conversions will be done:\n" - << " " << iu->first << " " << iv->first << "\n"; - _computeWinds = false; + if (_computeWinds) { + map::const_iterator iu, iv; + for (iu=_uIndexMap.begin(), iv=_vIndexMap.begin(); iu!=_uIndexMap.end(); ++iu, ++iv) { + if (iu->first != iv->first) { + mlog << Warning << "Ordering of u/v wind input levels not the same, " + << "not implemented, no wind conversions will be done:\n" + << " " << iu->first << " " << iv->first << "\n"; + _computeWinds = false; + break; + } } } } @@ -132,9 +135,7 @@ void 
TCRMW_WindConverter::update_input(const string &variableName, const string void TCRMW_WindConverter::append_nc_output_vars(map > &variable_levels, map &variable_long_names, map &variable_units) { - if (!_computeWinds) { - return; - } + if (!_computeWinds) return; if (_foundUInInput && _foundVInInput) { variable_levels[_conf->tangential_velocity_field_name] = variable_levels[_conf->u_wind_field_name.string()]; @@ -146,14 +147,15 @@ void TCRMW_WindConverter::append_nc_output_vars(map > &va } else { if (!_foundUInInput) { - mlog << Warning << "\nTCWRMW_WindConverter::checkInputs() -> " + mlog << Warning << "\nTCWRMW_WindConverter::append_nc_output_vars() -> " << "field not found in input \"" << _conf->u_wind_field_name << "\"\n\n"; } if (!_foundVInInput) { - mlog << Warning << "\nTCWRMW_WindConverter::checkInputs() -> " + mlog << Warning << "\nTCWRMW_WindConverter::append_nc_output_vars() -> " << "field not found in input \"" << _conf->v_wind_field_name << "\"\n\n"; } - mlog << Warning << "\nNot computing radial and tangential winds\n\n"; + mlog << Warning << "\nTCWRMW_WindConverter::append_nc_output_vars() -> " + << "Not computing radial and tangential winds\n\n"; _computeWinds = false; } } @@ -166,11 +168,9 @@ bool TCRMW_WindConverter::compute_winds_if_input_is_u(int i_point, unixtime valid_time, const StringArray &data_files, const GrdFileType &ftype, - const Grid &latlon_arr, - const double *lat_arr, - const double *lon_arr, - const Grid &grid, - const DataPlane &data_dp, + const Grid &grid_in, + const Grid &grid_out, + const DataPlane &u_wind_dp, const TcrmwGrid &tcrmw_grid) { if (!_computeWinds) { return false; @@ -178,67 +178,75 @@ bool TCRMW_WindConverter::compute_winds_if_input_is_u(int i_point, int uIndex = -1; int vIndex = -1; - VarInfo *data_infoV = (VarInfo *) nullptr; + VarInfo *v_wind_info = (VarInfo *) nullptr; if (varName == _conf->u_wind_field_name.string()) { uIndex = _uIndexMap[varLevel]; vIndex = _vIndexMap[varLevel]; - data_infoV = 
_conf->data_info[vIndex]; - data_infoV->set_valid(valid_time); + v_wind_info = _conf->data_info[vIndex]; + v_wind_info->set_valid(valid_time); } else { // not the U input return false; } - DataPlane data_dpV; - Grid latlon_arrV; - get_series_entry(i_point, data_infoV, data_files, ftype, data_dpV, - latlon_arrV); - double data_min, data_max; - data_dpV.data_range(data_min, data_max); - mlog << Debug(4) << "V data_min:" << data_min << "\n"; - mlog << Debug(4) << "V data_max:" << data_max << "\n"; - data_dpV = met_regrid(data_dpV, latlon_arr, grid, data_infoV->regrid()); - data_dpV.data_range(data_min, data_max); - mlog << Debug(4) << "V data_min:" << data_min << "\n"; - mlog << Debug(4) << "V data_max:" << data_max << "\n"; - - // here's the conversion, at last - wind_ne_to_ra(tcrmw_grid, data_dp, data_dpV, lat_arr, lon_arr, - _windR, _windT); - // _windR and _windT now set + DataPlane v_wind_dp; + Grid v_wind_grid; + get_series_entry(i_point, v_wind_info, data_files, ftype, + v_wind_dp, v_wind_grid); + double dmin, dmax, dmin_rgd, dmax_rgd; + v_wind_dp.data_range(dmin, dmax); + v_wind_dp = met_regrid(v_wind_dp, v_wind_grid, grid_out, v_wind_info->regrid()); + v_wind_dp.data_range(dmin_rgd, dmax_rgd); + + mlog << Debug(4) << v_wind_info->magic_str() + << " input range (" << dmin << ", " << dmax + << "), regrid range (" << dmin_rgd << ", " << dmax_rgd << ")\n"; + + // Compute the radial and tangential winds and store in _windR and _windT + wind_ne_to_rt(tcrmw_grid, u_wind_dp, v_wind_dp, _windR, _windT); + return true; } - //////////////////////////////////////////////////////////////////////// -void wind_ne_to_ra(const TcrmwGrid& tcrmw_grid, +void wind_ne_to_rt(const TcrmwGrid& tcrmw_grid, const DataPlane& u_dp, const DataPlane& v_dp, - const double* lat_arr, const double* lon_arr, double* wind_r_arr, double* wind_t_arr) { - // Transform (u, v) to (radial, azimuthal) - for(int ir = 0; ir < tcrmw_grid.range_n(); ir++) { - for(int ia = 0; ia < tcrmw_grid.azimuth_n(); 
ia++) { - int i = ir * tcrmw_grid.azimuth_n() + ia; - double lat = lat_arr[i]; - double lon = - lon_arr[i]; - double u = u_dp.data()[i]; - double v = v_dp.data()[i]; - double wind_r; - double wind_t; - if(is_bad_data(u) || is_bad_data(v)) { - mlog << Debug(4) << "wind_ne_to_ra: latlon:" << lat << "," << lon << " winds are missing\n"; - wind_r = bad_data_double; - wind_t = bad_data_double; - } else { - tcrmw_grid.wind_ne_to_ra(lat, lon, u, v, wind_r, wind_t); - mlog << Debug(4) << "wind_ne_to_ra: latlon:" << lat << "," << lon << " uv:" << u << "," - << v << ", rt:" << wind_r << "," << wind_t <<"\n"; - } - wind_r_arr[i] = wind_r; - wind_t_arr[i] = wind_t; - } - } + + int n_rng = tcrmw_grid.range_n(); + int n_azi = tcrmw_grid.azimuth_n(); + + // Transform (u, v) to (radial, tangential) winds + for(int ir = 0; ir < n_rng; ir++) { + for(int ia = 0; ia < n_azi; ia++) { + + // Store data in reverse order + int i_rev = (n_rng - ir - 1) * n_azi + ia; + + double azi_deg = ia * tcrmw_grid.azimuth_delta_deg(); + double range_km = ir * tcrmw_grid.range_delta_km(); + + double lat, lon; + tcrmw_grid.range_azi_to_latlon(range_km, azi_deg, lat, lon); + + tcrmw_grid.wind_ne_to_rt(azi_deg, u_dp.data()[i_rev], v_dp.data()[i_rev], + wind_r_arr[i_rev], wind_t_arr[i_rev]); + + mlog << Debug(4) << "wind_ne_to_rt() -> " + << "center lat/lon (" << tcrmw_grid.lat_center_deg() + << ", " << tcrmw_grid.lon_center_deg() + << "), range (km): " << range_km + << ", azimuth (deg): " << azi_deg + << ", point lat/lon (" << lat << ", " << lon + << "), uv (" << u_dp.data()[i_rev] << ", " << v_dp.data()[i_rev] + << "), radial wind: " << wind_r_arr[i_rev] + << ", tangential wind: " << wind_t_arr[i_rev] << "\n"; + } // end for ia + } // end for ir + + return; } +//////////////////////////////////////////////////////////////////////// diff --git a/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.h b/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.h index 86dbc0d802..819dcb999a 100644 --- 
a/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.h +++ b/src/tools/tc_utils/tc_rmw/tc_rmw_wind_converter.h @@ -15,7 +15,7 @@ // // Mod# Date Name Description // ---- ---- ---- ----------- -// 000 05/11/22 DaveAlbo New +// 000 05/11/22 Albo New // //////////////////////////////////////////////////////////////////////// @@ -32,7 +32,6 @@ using std::map; using std::string; - //////////////////////////////////////////////////////////////////////// // // Constants @@ -98,8 +97,8 @@ class TCRMW_WindConverter { // if configured to compute winds, but didn't find U or V, turn off // the wind computations and report an error void append_nc_output_vars(std::map > &variable_levels, - std::map &variable_long_names, - std::map &variable_units); + std::map &variable_long_names, + std::map &variable_units); // Check input varName against U, and if it's a match, lookup V using the // map members, and then compute tangential and radial winds if it is so @@ -109,17 +108,15 @@ class TCRMW_WindConverter { // If true if returned, the winds can be accessed by calls to // get_wind_t_arr() and get_wind_r_arr() bool compute_winds_if_input_is_u(int i_point, - const string &varName, - const string &varLevel, - unixtime valid_time, - const StringArray &data_files, - const GrdFileType &ftype, - const Grid &latlon_arr, - const double *lat_arr, - const double *lon_arr, - const Grid &grid, - const DataPlane &data_dp, - const TcrmwGrid &tcrmw_grid); + const string &varName, + const string &varLevel, + unixtime valid_time, + const StringArray &data_files, + const GrdFileType &ftype, + const Grid &grid_in, + const Grid &grid_out, + const DataPlane &u_wind_dp, + const TcrmwGrid &tcrmw_grid); }; diff --git a/src/tools/tc_utils/tc_stat/Makefile.in b/src/tools/tc_utils/tc_stat/Makefile.in index 24bba4f72e..d6926559a0 100644 --- a/src/tools/tc_utils/tc_stat/Makefile.in +++ b/src/tools/tc_utils/tc_stat/Makefile.in @@ -227,6 +227,7 @@ MET_BUFRLIB = @MET_BUFRLIB@ MET_CAIRO = @MET_CAIRO@ MET_CAIROINC = 
@MET_CAIROINC@ MET_CAIROLIB = @MET_CAIROLIB@ +MET_CXX_STANDARD = @MET_CXX_STANDARD@ MET_ECKIT = @MET_ECKIT@ MET_ECKITINC = @MET_ECKITINC@ MET_ECKITLIB = @MET_ECKITLIB@ diff --git a/src/tools/tc_utils/tc_stat/tc_stat.cc b/src/tools/tc_utils/tc_stat/tc_stat.cc index 28025107aa..a5592c7b15 100644 --- a/src/tools/tc_utils/tc_stat/tc_stat.cc +++ b/src/tools/tc_utils/tc_stat/tc_stat.cc @@ -22,6 +22,7 @@ // 004 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main // 005 09/28/22 Prestopnik MET #2227 Remove namespace std from header files // 006 10/06/22 Halley Gotway MET #392 Incorporate diagnostics +// 007 06/14/24 Halley Gotway MET #2911 Support -set_hdr job command option // //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/tc_utils/tc_stat/tc_stat_job.cc b/src/tools/tc_utils/tc_stat/tc_stat_job.cc index ec8172776c..eb908b928f 100644 --- a/src/tools/tc_utils/tc_stat/tc_stat_job.cc +++ b/src/tools/tc_utils/tc_stat/tc_stat_job.cc @@ -184,6 +184,8 @@ void TCStatJob::init_from_scratch() { ValidMask.set_ignore_case(1); LineType.set_ignore_case(1); TrackWatchWarn.set_ignore_case(1); + ByColumn.set_ignore_case(1); + HdrName.set_ignore_case(1); clear(); @@ -230,6 +232,10 @@ void TCStatJob::clear() { EventEqualLead.clear(); EventEqualCases.clear(); + ByColumn.clear(); + HdrName.clear(); + HdrValue.clear(); + DumpFile.clear(); close_dump_file(); JobOut = (ofstream *) nullptr; @@ -315,6 +321,11 @@ void TCStatJob::assign(const TCStatJob & j) { InitDiagThreshMap = j.InitDiagThreshMap; PrintDiagWarning = j.PrintDiagWarning; + ByColumn = j.ByColumn; + + HdrName = j.HdrName; + HdrValue = j.HdrValue; + DumpFile = j.DumpFile; open_dump_file(); @@ -514,6 +525,15 @@ void TCStatJob::dump(ostream & out, int depth) const { out << prefix << "OutValidMask = " << (OutValidMaskName.nonempty() ? 
OutValidMaskName.text() : na_str) << "\n"; + out << prefix << "ByColumn ...\n"; + ByColumn.dump(out, depth + 1); + + out << prefix << "HdrName ...\n"; + HdrName.dump(out, depth + 1); + + out << prefix << "HdrValue ...\n"; + HdrValue.dump(out, depth + 1); + out << prefix << "DumpFile = " << (DumpFile.nonempty() ? DumpFile.text() : na_str) << "\n"; out << prefix << "StatFile = " << (StatFile.nonempty() ? StatFile.text() : na_str) << "\n"; @@ -1086,6 +1106,9 @@ StringArray TCStatJob::parse_job_command(const char *jobstring) { else if(c.compare("-event_equal_lead" ) == 0) { EventEqualLead.add_css_sec(a[i+1].c_str()); a.shift_down(i, 1); } else if(c.compare("-out_init_mask" ) == 0) { set_out_init_mask(a[i+1].c_str()); a.shift_down(i, 1); } else if(c.compare("-out_valid_mask" ) == 0) { set_out_valid_mask(a[i+1].c_str()); a.shift_down(i, 1); } + else if(c.compare("-by" ) == 0) { ByColumn.add_css(to_upper(a[i+1])); a.shift_down(i, 1); } + else if(c.compare("-set_hdr" ) == 0) { HdrName.add(to_upper(a[i+1])); + HdrValue.add(a[i+2]); a.shift_down(i, 2); } else if(c.compare("-dump_row" ) == 0) { DumpFile = a[i+1]; open_dump_file(); a.shift_down(i, 1); } else if(c.compare("-out_stat" ) == 0) { StatFile = a[i+1]; open_stat_file(); a.shift_down(i, 1); } else { b.add(a[i]); } @@ -1206,7 +1229,8 @@ void TCStatJob::close_stat_file() { //////////////////////////////////////////////////////////////////////// -void TCStatJob::dump_pair(const TrackPairInfo &pair, ofstream *out) { +void TCStatJob::dump_pair(const TrackPairInfo &pair, ofstream *out, + bool do_set_hdr) const { if(!out || pair.n_points() == 0) return; @@ -1258,7 +1282,8 @@ void TCStatJob::dump_pair(const TrackPairInfo &pair, ofstream *out) { // Write the TrackPairInfo object i_row = hdr_row; - write_track_pair_info(tchc, pair, out_at, i_row); + if(do_set_hdr) write_track_pair_info(tchc, pair, out_at, i_row, HdrName, HdrValue); + else write_track_pair_info(tchc, pair, out_at, i_row); // Write the AsciiTable to the file *out 
<< out_at; @@ -1268,11 +1293,23 @@ void TCStatJob::dump_pair(const TrackPairInfo &pair, ofstream *out) { //////////////////////////////////////////////////////////////////////// -void TCStatJob::dump_line(const TCStatLine &line, ofstream *out) { +void TCStatJob::dump_line(const TCStatLine &line, ofstream *out, + bool do_set_hdr) const { if(!out) return; - *out << line; + // Apply -set_hdr options, if requested + if(do_set_hdr) { + TCStatLine line_set_hdr = line; + for(int i=0; i 0) s << "-dump_row " << DumpFile << " "; if(StatFile.length() > 0) @@ -1851,7 +1892,7 @@ void TCStatJobFilter::filter_tracks(TCPointCounts &n) { mlog << Debug(4) << "Processing track pair: " << pair.case_info() << "\n"; - if(DumpOut) dump_pair(pair, DumpOut); + if(DumpOut) dump_pair(pair, DumpOut, true); } } // end while } // end else @@ -1888,7 +1929,7 @@ void TCStatJobFilter::filter_lines(TCPointCounts &n) { // Check if this line should be kept if(!is_keeper_line(line, n)) continue; - if(DumpOut) dump_line(line, DumpOut); + if(DumpOut) dump_line(line, DumpOut, true); } // end while } // end else @@ -1955,7 +1996,6 @@ void TCStatJobSummary::init_from_scratch() { // Ignore case when performing comparisons ReqColumn.set_ignore_case(1); Column.set_ignore_case(1); - ByColumn.set_ignore_case(1); clear(); @@ -1972,7 +2012,6 @@ void TCStatJobSummary::clear() { ReqColumn.clear(); Column.clear(); - ByColumn.clear(); SummaryMap.clear(); // Set to default value @@ -1992,7 +2031,6 @@ void TCStatJobSummary::assign(const TCStatJobSummary & j) { ReqColumn = j.ReqColumn; Column = j.Column; ColumnUnion = j.ColumnUnion; - ByColumn = j.ByColumn; SummaryMap = j.SummaryMap; OutAlpha = j.OutAlpha; FSPThresh = j.FSPThresh; @@ -2026,7 +2064,6 @@ StringArray TCStatJobSummary::parse_job_command(const char *jobstring) { if(c.compare("-column" ) == 0) { ReqColumn.add_css(to_upper(a[i+1])); add_column(a[i+1].c_str()); a.shift_down(i, 1); } else if(c.compare("-column_union") == 0) { ColumnUnion = 
string_to_bool(a[i+1].c_str()); a.shift_down(i, 1); } - else if(c.compare("-by" ) == 0) { ByColumn.add_css(to_upper(a[i+1])); a.shift_down(i, 1); } else if(c.compare("-out_alpha" ) == 0) { OutAlpha = atof(a[i+1].c_str()); a.shift_down(i, 1); } else if(c.compare("-fsp_thresh" ) == 0) { FSPThresh.set(a[i+1].c_str()); a.shift_down(i, 1); } else { b.add(a[i]); } @@ -2090,8 +2127,6 @@ ConcatString TCStatJobSummary::serialize() const { s << "-column " << ReqColumn[i] << " "; if(ColumnUnion != default_column_union) s << "-column_union " << bool_to_string(ColumnUnion) << " "; - for(i=0; i&m) { << it->second.Info.cts.fn_on() << " correct negatives.\n"; // Increment the counts for the existing key - RIRWMap[it->first].Info.cts.set_fy_oy( - RIRWMap[it->first].Info.cts.fy_oy() + - it->second.Info.cts.fy_oy()); - RIRWMap[it->first].Info.cts.set_fy_on( - RIRWMap[it->first].Info.cts.fy_on() + - it->second.Info.cts.fy_on()); - RIRWMap[it->first].Info.cts.set_fn_oy( - RIRWMap[it->first].Info.cts.fn_oy() + - it->second.Info.cts.fn_oy()); - RIRWMap[it->first].Info.cts.set_fn_on( - RIRWMap[it->first].Info.cts.fn_on() + - it->second.Info.cts.fn_on()); + RIRWMap[it->first].Info.cts += it->second.Info.cts; RIRWMap[it->first].Hdr.add_uniq(it->second.Hdr); RIRWMap[it->first].AModel.add_uniq(it->second.AModel); @@ -3846,17 +3861,19 @@ void TCStatJobRIRW::do_stat_output(ostream &out) { if(OutInitMaskName.nonempty()) { cs << OutInitMaskName; } + // Add -out_valid_mask name, if specified if(OutValidMaskName.nonempty()) { if(cs.nonempty()) cs << ","; cs << OutValidMaskName; } + // If neither are specified, use input mask and/or basin names if(cs.empty()) { StringArray sa; sa.add_uniq(it->second.InitMask); sa.add_uniq(it->second.ValidMask); - + // Use the basin names instead if(sa.n() == 1 && sa[0] == na_str) { sa.clear(); @@ -3889,16 +3906,21 @@ void TCStatJobRIRW::do_stat_output(ostream &out) { // c = 0; + // Split the current map key, eliminating the job name in the first entry + StringArray 
ByValue = it->first.split(":"); + ByValue.shift_down(0, 1); + // // CTC output line // if(OutLineType.has(stat_ctc_str)) { shc.set_alpha(bad_data_double); shc.set_line_type(stat_ctc_str); + shc.apply_set_hdr_opts(HdrName, HdrValue, ByColumn, ByValue); write_header_cols(shc, stat_at, stat_row); write_ctc_cols(it->second.Info, stat_at, stat_row++, n_header_columns); } - + // // CTS output line // @@ -3910,12 +3932,12 @@ void TCStatJobRIRW::do_stat_output(ostream &out) { it->second.Info.allocate_n_alpha(1); it->second.Info.alpha[0] = OutAlpha; shc.set_alpha(OutAlpha); - + // // Compute the stats and confidence intervals for this // CTSInfo object // - + it->second.Info.compute_stats(); it->second.Info.compute_ci(); @@ -3923,14 +3945,15 @@ void TCStatJobRIRW::do_stat_output(ostream &out) { // Write the data line // shc.set_line_type(stat_cts_str); + shc.apply_set_hdr_opts(HdrName, HdrValue, ByColumn, ByValue); write_header_cols(shc, stat_at, stat_row); write_cts_cols(it->second.Info, 0, stat_at, stat_row++, n_header_columns); } } // end for it - + // Write the table out << stat_at << "\n" << flush; - + return; } @@ -3981,9 +4004,6 @@ void TCStatJobProbRIRW::init_from_scratch() { TCStatJob::init_from_scratch(); - // Ignore case when performing comparisons - ByColumn.set_ignore_case(1); - clear(); return; @@ -3997,7 +4017,6 @@ void TCStatJobProbRIRW::clear() { JobType = TCStatJobType::ProbRIRW; - ByColumn.clear(); ProbRIRWMap.clear(); // Set to default values @@ -4023,7 +4042,6 @@ void TCStatJobProbRIRW::assign(const TCStatJobProbRIRW & j) { ProbRIRWExact = j.ProbRIRWExact; ProbRIRWBDeltaThresh = j.ProbRIRWBDeltaThresh; ProbRIRWProbThresh = j.ProbRIRWProbThresh; - ByColumn = j.ByColumn; MaxNThresh = j.MaxNThresh; NDumpLines = j.NDumpLines; OutAlpha = j.OutAlpha; @@ -4060,8 +4078,7 @@ StringArray TCStatJobProbRIRW::parse_job_command(const char *jobstring) { } // Check job command options - if(c.compare("-by" ) == 0) { ByColumn.add_css(to_upper(a[i+1])); a.shift_down(i, 
1); } - else if(c.compare("-out_alpha" ) == 0) { OutAlpha = atof(a[i+1].c_str()); a.shift_down(i, 1); } + if(c.compare("-out_alpha" ) == 0) { OutAlpha = atof(a[i+1].c_str()); a.shift_down(i, 1); } else if(c.compare("-out_line_type" ) == 0) { OutLineType.add_css(to_upper(a[i+1])); a.shift_down(i, 1); } else if(c.compare("-probrirw_exact" ) == 0) { ProbRIRWExact = string_to_bool(a[i+1].c_str()); a.shift_down(i, 1); } else if(c.compare("-probrirw_bdelta_thresh") == 0) { ProbRIRWBDeltaThresh.set(a[i+1].c_str()); a.shift_down(i, 1); } @@ -4157,9 +4174,6 @@ ConcatString TCStatJobProbRIRW::serialize() const { s << "-probrirw_prob_thresh " << prob_thresh_to_string(ProbRIRWProbThresh) << " "; // Add ProbRIRW job-specific options - for(i=0; i InitDiagThreshMap; StringArray PrintDiagWarning; + // Store the case information + StringArray ByColumn; + + // Options for -set_hdr output + StringArray HdrName; + StringArray HdrValue; + // Variables to the store the analysis job specification ConcatString DumpFile; // Dump TrackPairInfo used to a file std::ofstream *DumpOut; // Dump output file stream @@ -372,7 +381,7 @@ class TCStatJob { //////////////////////////////////////////////////////////////////////// inline void TCStatJob::set_precision (int p) { Precision = p; return; } -inline int TCStatJob::get_precision () const { return(Precision); } +inline int TCStatJob::get_precision () const { return Precision; } //////////////////////////////////////////////////////////////////////// @@ -448,9 +457,6 @@ class TCStatJobSummary : public TCStatJob { StringArray Column; bool ColumnUnion; - // Store the case information - StringArray ByColumn; - // Confidence interval alpha value double OutAlpha; @@ -504,9 +510,6 @@ class TCStatJobRIRW : public TCStatJob { void do_cts_output (std::ostream &); void do_mpr_output (std::ostream &); void do_stat_output(std::ostream &); - - // Store the case information - StringArray ByColumn; // Confidence interval alpha value double OutAlpha; @@ -555,9 
+558,6 @@ class TCStatJobProbRIRW : public TCStatJob { SingleThresh ProbRIRWBDeltaThresh; // Threshold the BEST track change ThreshArray ProbRIRWProbThresh; // Array of probabilities for PCT bins - // Store the case information - StringArray ByColumn; - // Maximum number of thresholds encountered int MaxNThresh; int NDumpLines;