From c50bc48c86a796f9a39b39248de59c2083c1d400 Mon Sep 17 00:00:00 2001
From: Nancy Collins
Date: Tue, 27 Nov 2018 15:34:34 +0000
Subject: [PATCH] Roll back changes committed to the Manhattan release that
 were supposed to go on a branch. Sorry.
git-svn-id: https://svn-dares-dart.cgd.ucar.edu/DART/releases/Manhattan@12939 dfa8782c-da17-4c45-ba5c-5625b50a00d6
Former-commit-id: 3402e37975e4854e1f06b5e6eae7df12e3532c12
---
.../threed_cartesian/xyz_location_mod.f90 | 621 ++--
.../assimilation/adaptive_inflate_mod.f90 | 70 +-
.../observations/DEFAULT_obs_kind_mod.F90 | 6 -
.../modules/utilities/mpi_utilities_mod.f90 | 357 +-
.../utilities/null_mpi_utilities_mod.f90 | 93 +-
.../modules/utilities/types_mod.f90 | 4 +-
.../closest_member_tool.f90 | 58 +-
.../closest_member_tool.html | 10 +-
.../closest_member_tool.nml | 15 +-
build_templates/mkmf.template | 170 +-
.../work/path_names_closest_member_tool | 2 -
models/mpas_atm/data/namelist.atmosphere | 50 +-
models/mpas_atm/data/streams.atmosphere | 59 +-
models/mpas_atm/exhaustion.f90 | 4 +-
models/mpas_atm/model_mod.f90 | 2969 +++++++++--------
models/mpas_atm/mpas_dart_obs_preprocess.f90 | 451 +--
models/mpas_atm/mpas_dart_obs_preprocess.nml | 13 +-
models/mpas_atm/resample.f90 | 4 +-
.../mpas_atm/shell_scripts/advance_model.csh | 79 +-
.../shell_scripts/advance_model.template | 2 +-
.../shell_scripts/driver_initial_ens.csh | 8 -
.../shell_scripts/driver_mpas_dart.csh | 211 +-
.../shell_scripts/filter.template.pbs | 7 +-
.../mpas_atm/shell_scripts/init_mpas_grib.csh | 15 +-
.../mpas_atm/shell_scripts/setup_params.csh | 72 +-
models/mpas_atm/update_bc.f90 | 427 ---
models/mpas_atm/update_bc.html | 346 --
models/mpas_atm/update_bc.nml | 12 -
models/mpas_atm/update_mpas_states.f90 | 32 +-
models/mpas_atm/work/input.nml | 194 +-
models/mpas_atm/work/obs_seq.1obs | 2 +-
models/mpas_atm/work/path_names_filter | 4 +-
.../mpas_atm/work/path_names_model_mod_check | 4 +-
.../work/path_names_perfect_model_obs | 4 +-
models/mpas_atm/work/quickbuild.csh | 2 +-
models/wrf/model_mod.f90 | 28 +-
.../wrf/work/path_names_closest_member_tool | 4 +-
models/wrf/work/path_names_filter | 4 +-
models/wrf/work/path_names_model_mod_check | 4 +-
models/wrf/work/path_names_perfect_model_obs | 4 +-
models/wrf/work/path_names_wakeup_filter | 2 +-
.../obs_def_dew_point_mod.f90 | 17 +-
.../obs_def_dew_point_mod.html | 4 +-
43 files changed, 2559 insertions(+), 3885 deletions(-)
delete mode 100644 models/mpas_atm/update_bc.f90
delete mode 100644 models/mpas_atm/update_bc.html
delete mode 100644 models/mpas_atm/update_bc.nml
diff --git a/assimilation_code/location/threed_cartesian/xyz_location_mod.f90 b/assimilation_code/location/threed_cartesian/xyz_location_mod.f90
index 09fca56332..9040d017a8 100644
--- a/assimilation_code/location/threed_cartesian/xyz_location_mod.f90
+++ b/assimilation_code/location/threed_cartesian/xyz_location_mod.f90
@@ -21,18 +21,9 @@ module xyz_location_mod
implicit none
private
-public :: xyz_location_type, &
- xyz_get_location, &
- xyz_set_location, &
- xyz_get_close_type, &
- xyz_get_close_init, &
- xyz_get_close_destroy, &
- xyz_find_nearest, &
- xyz_find_nearest_N, &
- xyz_use_great_circle_dist, &
- xyz_get_dist, &
- xyz_get_ll_location, &
- xyz_write_location
+public :: xyz_location_type, xyz_get_location, xyz_set_location, &
+ xyz_get_close_maxdist_init, xyz_get_close_obs_init, xyz_get_close_type, &
+ xyz_find_nearest, xyz_get_close_obs_destroy, xyz_get_dist, xyz_get_ll_location
! version controlled file description for error handling, do not edit
character(len=256), parameter :: source = &
@@ -45,20 +36,34 @@ module xyz_location_mod
real(r8) :: x, y, z
end type xyz_location_type
-! This version supports regularly spaced boxes.
+! This version supports both regularly spaced boxes and octree division
+! of the space. For octrees, divide each dimension in half until there are
+! N filled boxes, or until the octree reaches some maximum depth? Give some
+! threshold so that a box with fewer than N points in it is not divided?
+
+! Contrast with kD-trees (which divide along dimensions, not at points).
+! There are two kinds of octrees: PR (point region) trees, where the regions
+! split at an explicit point, vs. MX trees, where the split is defined to be
+! at the center of the region.
+
+! If the underlying geometry is spherical, uniformly dividing up the space
+! leaves many, many empty boxes and, worse, the existing locations end up
+! clustered in a few boxes.
+
type box_type
private
- integer, pointer :: loc_box(:) ! (nloc); List of loc indices in boxes
- integer, pointer :: count(:, :, :) ! (nx, ny, nz); # of loc in each box
- integer, pointer :: start(:, :, :) ! (nx, ny, nz); Start of list of loc in this box
+ integer, pointer :: obs_box(:) ! (nobs); List of obs indices in boxes
+ integer, pointer :: count(:, :, :) ! (nx, ny, nz); # of obs in each box
+ integer, pointer :: start(:, :, :) ! (nx, ny, nz); Start of list of obs in this box
real(r8) :: bot_x, top_x ! extents in x, y, z
real(r8) :: bot_y, top_y
real(r8) :: bot_z, top_z
real(r8) :: x_width, y_width, z_width ! widths of boxes in x,y,z
+ real(r8) :: nboxes_x, nboxes_y, nboxes_z ! based on maxdist how far to search
end type box_type
-! Type to facilitate efficient computation of locations close to a given location
+! Type to facilitate efficient computation of observations close to a given location
type xyz_get_close_type
private
integer :: num
@@ -68,11 +73,10 @@ module xyz_location_mod
logical, save :: module_initialized = .false.
-real(r8) :: sphere_radius = -1.0_r8
-logical :: line_of_sight_distance = .true. ! alternative: great circle
-
character(len = 512) :: errstring
+!real(r8) :: radius ! used only for converting points on a sphere into x,y,z and back
+
!-----------------------------------------------------------------
! Namelist with default values
@@ -81,14 +85,23 @@ module xyz_location_mod
integer :: ny = 20
integer :: nz = 20
+! tuning options
+integer :: filled = 10 ! threshold at which you quit splitting
+logical :: use_octree = .false. ! if false, use regular boxes
+
+! extensible options - these may be useful for tuning the octree
+! integer :: nboxes = 1000 ! suggestion for max number of nodes
+! integer :: maxdepth = 4 ! suggestion for max tree depth
+
namelist /xyz_location_nml/ &
+ filled, use_octree, &
nx, ny, nz
!-----------------------------------------------------------------
interface xyz_set_location
module procedure set_location_single
- module procedure set_location_array
+! module procedure set_location_array
module procedure set_location_lonlat
end interface xyz_set_location
@@ -117,17 +130,15 @@ subroutine initialize_module
if(do_nml_file()) write(nmlfileunit, nml=xyz_location_nml)
if(do_nml_term()) write( * , nml=xyz_location_nml)
-end subroutine initialize_module
-
-!----------------------------------------------------------------------------
-
-subroutine xyz_use_great_circle_dist(radius)
- real(r8), intent(in) :: radius
+if (filled < 1) then
+ write(errstring,*)'filled sets limit for number of points per box. must be >= 1'
+ call error_handler(E_ERR, 'set_location', errstring, source, revision, revdate)
+endif
-sphere_radius = radius
-line_of_sight_distance = .false.
+! FIXME:
+use_octree = .false. ! if false, use regular boxes
-end subroutine xyz_use_great_circle_dist
+end subroutine initialize_module
!----------------------------------------------------------------------------
@@ -150,37 +161,14 @@ function xyz_get_dist(loc1, loc2, type1, kind2)
real(r8) :: xyz_get_dist
real(r8) :: x_dif, y_dif, z_dif
-real(r8) :: mag1, mag2, norm
if ( .not. module_initialized ) call initialize_module
-if (line_of_sight_distance) then
-
- ! straight line in 3d space
- x_dif = loc1%x - loc2%x
- y_dif = loc1%y - loc2%y
- z_dif = loc1%z - loc2%z
-
- xyz_get_dist = sqrt(x_dif * x_dif + y_dif * y_dif + z_dif * z_dif)
-
- !print *, 'los dist: ', xyz_get_dist
+x_dif = loc1%x - loc2%x
+y_dif = loc1%y - loc2%y
+z_dif = loc1%z - loc2%z
-else
-
- ! great circle distance
- mag1 = sqrt(loc1%x * loc1%x + loc1%y * loc1%y + loc1%z * loc1%z)
- mag2 = sqrt(loc2%x * loc2%x + loc2%y * loc2%y + loc2%z * loc2%z)
- norm = (loc1%x*loc2%x + loc1%y*loc2%y + loc1%z*loc2%z) / (mag1 * mag2)
-
- if (norm >= 1.0_r8) then
- xyz_get_dist = 0.0_r8
- !print *, 'mag1,2,norm,radius,dist: ', mag1, mag2, norm, sphere_radius, xyz_get_dist
- else
- xyz_get_dist = sphere_radius * acos(norm)
- !print *, 'mag1,2,norm,acos,radius,dist: ', mag1, mag2, norm, acos(norm), sphere_radius, xyz_get_dist
- endif
-
-endif
+xyz_get_dist = sqrt(x_dif * x_dif + y_dif * y_dif + z_dif * z_dif)
end function xyz_get_dist
@@ -205,9 +193,10 @@ end function xyz_get_location
subroutine xyz_get_ll_location(loc, radius, lon, lat)
-! Given a cartesian x, y, z coordinate relative to an origin at
-! the center of the sphere, using a fixed radius specified by the
-! caller, return the corresponding lon, lat location in degrees.
+! Given a cartesian x, y, z coordinate relative to the origin
+! at the center of the earth, using a fixed radius specified
+! by MPAS (in the grid generation step), return the corresponding
+! lon, lat location in degrees.
type(xyz_location_type) :: loc
real(r8), intent(in) :: radius
@@ -257,7 +246,7 @@ function set_location_array(list)
if ( .not. module_initialized ) call initialize_module
if (size(list) < 3) then
- write(errstring,*)'requires exactly 3 input values in an array'
+ write(errstring,*)'requires 3 input values'
call error_handler(E_ERR, 'set_location', errstring, source, revision, revdate)
endif
@@ -296,14 +285,29 @@ function set_location_lonlat(lon, lat, height, radius)
end function set_location_lonlat
+!---------------------------------------------------------------------------
+
+subroutine xyz_get_close_obs_init(gc, num, obs)
+
+! Initializes part of get_close accelerator that depends on the particular obs
+
+type(xyz_get_close_type), intent(inout) :: gc
+integer, intent(in) :: num
+type(xyz_location_type), intent(in) :: obs(num)
+
+ call get_close_init_boxes(gc, num, obs)
+
+end subroutine xyz_get_close_obs_init
+
!----------------------------------------------------------------------------
+
+subroutine get_close_init_boxes(gc, num, obs)
-subroutine xyz_get_close_init(gc, maxdist, num, locs)
+! Initializes part of get_close accelerator that depends on the particular obs
type(xyz_get_close_type), intent(inout) :: gc
-real(r8), intent(in) :: maxdist
-integer, intent(in) :: num
-type(xyz_location_type), intent(in) :: locs(num)
+integer, intent(in) :: num
+type(xyz_location_type), intent(in) :: obs(num)
integer :: i, j, k, cum_start, l
integer :: x_box(num), y_box(num), z_box(num)
@@ -311,42 +315,33 @@ subroutine xyz_get_close_init(gc, maxdist, num, locs)
if ( .not. module_initialized ) call initialize_module
-! set the default value.
-gc%maxdist = maxdist
-!print *, 'setting maxdist to ', maxdist
-
-! Allocate the storage for the grid dependent boxes
-allocate(gc%box%count(nx,ny,nz), gc%box%start(nx,ny,nz))
-gc%box%count = -1
-gc%box%start = -1
-
-! Allocate storage for loc number dependent part
-allocate(gc%box%loc_box(num))
-gc%box%loc_box(:) = -1
+! Allocate storage for obs number dependent part
+allocate(gc%box%obs_box(num))
+gc%box%obs_box(:) = -1
-! Set the value of num_loc in the structure
+! Set the value of num_obs in the structure
gc%num = num
! If num == 0, no point in going any further.
if (num == 0) return
-! Determine where the boxes should be for this set of loc and maxdist
-call find_box_ranges(gc, locs, num)
+! Determine where the boxes should be for this set of obs and maxdist
+call find_box_ranges(gc, obs, num)
-! Begin by computing the number of locations in each box in x,y,z
+! Begin by computing the number of observations in each box in x,y,z
gc%box%count = 0
do i = 1, num
-!print *, i, locs(i)%x, locs(i)%y, locs(i)%z
- x_box(i) = floor((locs(i)%x - gc%box%bot_x) / gc%box%x_width) + 1
+!print *, i, obs(i)%x, obs(i)%y, obs(i)%z
+ x_box(i) = floor((obs(i)%x - gc%box%bot_x) / gc%box%x_width) + 1
if(x_box(i) > nx) x_box(i) = nx
if(x_box(i) < 1) x_box(i) = 1
- y_box(i) = floor((locs(i)%y - gc%box%bot_y) / gc%box%y_width) + 1
+ y_box(i) = floor((obs(i)%y - gc%box%bot_y) / gc%box%y_width) + 1
if(y_box(i) > ny) y_box(i) = ny
if(y_box(i) < 1) y_box(i) = 1
- z_box(i) = floor((locs(i)%z - gc%box%bot_z) / gc%box%z_width) + 1
+ z_box(i) = floor((obs(i)%z - gc%box%bot_z) / gc%box%z_width) + 1
if(z_box(i) > nz) z_box(i) = nz
if(z_box(i) < 1) z_box(i) = 1
@@ -369,294 +364,170 @@ subroutine xyz_get_close_init(gc, maxdist, num, locs)
! Now we know how many are in each box, get a list of which are in each box
tstart = gc%box%start
do i = 1, num
- gc%box%loc_box(tstart(x_box(i), y_box(i), z_box(i))) = i
+ gc%box%obs_box(tstart(x_box(i), y_box(i), z_box(i))) = i
tstart(x_box(i), y_box(i), z_box(i)) = tstart(x_box(i), y_box(i), z_box(i)) + 1
end do
-if (.false.) then ! debug only
- do i = 1, nx
- do j = 1, ny
- do k = 1, nz
- !if (gc%box%count(i,j,k) > 0) print *, i,j,k, gc%box%count(i,j,k), gc%box%start(i,j,k)
-
- ! or
- ! do l=1, gc%box%count(i,j,k)
- !print *, l, gc%box%loc_box(l)
- ! enddo
- end do
- end do
- end do
-endif
+do i = 1, nx
+ do j = 1, ny
+ do k = 1, nz
+!if (gc%box%count(i,j,k) > 0) print *, i,j,k, gc%box%count(i,j,k), gc%box%start(i,j,k)
+ do l=1, gc%box%count(i,j,k)
+!print *, l, gc%box%obs_box(l)
+ enddo
+ end do
+ end do
+end do
-end subroutine xyz_get_close_init
+end subroutine get_close_init_boxes
!----------------------------------------------------------------------------
-subroutine xyz_get_close_destroy(gc)
+subroutine xyz_get_close_obs_destroy(gc)
type(xyz_get_close_type), intent(inout) :: gc
-deallocate(gc%box%loc_box, gc%box%count, gc%box%start)
+ call get_close_destroy_boxes(gc)
-end subroutine xyz_get_close_destroy
+end subroutine xyz_get_close_obs_destroy
+
+!----------------------------------------------------------------------------
+
+subroutine get_close_destroy_boxes(gc)
+
+type(xyz_get_close_type), intent(inout) :: gc
+
+deallocate(gc%box%obs_box, gc%box%count, gc%box%start)
+
+end subroutine get_close_destroy_boxes
+
+!----------------------------------------------------------------------------
+
+subroutine xyz_get_close_maxdist_init(gc, maxdist)
+
+type(xyz_get_close_type), intent(inout) :: gc
+real(r8), intent(in) :: maxdist
+
+! set the default value.
+gc%maxdist = maxdist
+!print *, 'setting maxdist to ', maxdist
+
+if (.not. use_octree) then
+ ! Allocate the storage for the grid dependent boxes
+ allocate(gc%box%count(nx,ny,nz), gc%box%start(nx,ny,nz))
+ gc%box%count = -1
+ gc%box%start = -1
+endif
+
+end subroutine xyz_get_close_maxdist_init
!----------------------------------------------------------------------------
-!> Finds boundaries for x,y,z boxes.
subroutine find_box_ranges(gc, locs, num)
+! Finds boundaries for x,y,z boxes.
+! FIXME: ways boxes could be divided:
+! - evenly along each axis
+! - octree-like, divide each axis so roughly half the points are
+! on each side of the dividing plane.
+! - about 100 other schemes
+
type(xyz_get_close_type), intent(inout) :: gc
integer, intent(in) :: num
type(xyz_location_type), intent(in) :: locs(num)
-logical :: old_out
-integer :: i
-
+! integer :: i
! FIXME: this space could be very sparse
-! find the rectangular prism that encloses all the given locations
-! with maxdist boundaries on all sides
-gc%box%bot_x = minval(locs(:)%x) - gc%maxdist
-gc%box%bot_y = minval(locs(:)%y) - gc%maxdist
-gc%box%bot_z = minval(locs(:)%z) - gc%maxdist
+gc%box%bot_x = minval(locs(:)%x)
+gc%box%bot_y = minval(locs(:)%y)
+gc%box%bot_z = minval(locs(:)%z)
-gc%box%top_x = maxval(locs(:)%x) + gc%maxdist
-gc%box%top_y = maxval(locs(:)%y) + gc%maxdist
-gc%box%top_z = maxval(locs(:)%z) + gc%maxdist
+gc%box%top_x = maxval(locs(:)%x)
+gc%box%top_y = maxval(locs(:)%y)
+gc%box%top_z = maxval(locs(:)%z)
+
+!gc%box%bot_x = locs(1)%x
+!gc%box%bot_y = locs(1)%y
+!gc%box%bot_z = locs(1)%z
+!
+!gc%box%top_x = locs(1)%x
+!gc%box%top_y = locs(1)%y
+!gc%box%top_z = locs(1)%z
+!
+!do i=2, num
+! gc%box%bot_x = min(gc%box%bot_x, locs(i)%x)
+! gc%box%bot_y = min(gc%box%bot_y, locs(i)%y)
+! gc%box%bot_z = min(gc%box%bot_z, locs(i)%z)
+!
+! gc%box%top_x = max(gc%box%top_x, locs(i)%x)
+! gc%box%top_y = max(gc%box%top_y, locs(i)%y)
+! gc%box%top_z = max(gc%box%top_z, locs(i)%z)
+!enddo
-! divide the space into boxes
gc%box%x_width = (gc%box%top_x - gc%box%bot_x) / nx
gc%box%y_width = (gc%box%top_y - gc%box%bot_y) / ny
gc%box%z_width = (gc%box%top_z - gc%box%bot_z) / nz
+! FIXME: compute a sphere of radius maxdist and see how
+! many boxes in x, y, z that would include.
+gc%box%nboxes_x = aint((gc%maxdist + (gc%box%x_width-1)) / gc%box%x_width)
+gc%box%nboxes_y = aint((gc%maxdist + (gc%box%y_width-1)) / gc%box%y_width)
+gc%box%nboxes_z = aint((gc%maxdist + (gc%box%z_width-1)) / gc%box%z_width)
+
!print *, 'min xyz = ', gc%box%bot_x, gc%box%bot_y, gc%box%bot_z
!print *, 'max xyz = ', gc%box%top_x, gc%box%top_y, gc%box%top_z
!print *, 'wid xyz = ', gc%box%x_width, gc%box%y_width, gc%box%z_width
!print *, 'nbx xyz = ', nx, ny, nz
+!print *, 'nbx xyz = ', gc%box%nboxes_x, gc%box%nboxes_y, gc%box%nboxes_z
end subroutine find_box_ranges
!----------------------------------------------------------------------------
-!> find the nearest point in the get close list to the specified point
-!> optionally return the exact distance since we have to compute it anyway
-subroutine xyz_find_nearest(gc, base_loc, loc_list, nearest, rc, dist)
+subroutine xyz_find_nearest(gc, base_loc, loc_list, nearest, rc)
type(xyz_get_close_type), intent(in), target :: gc
type(xyz_location_type), intent(in) :: base_loc
type(xyz_location_type), intent(in) :: loc_list(:)
integer, intent(out) :: nearest
integer, intent(out) :: rc
- real(r8), optional, intent(out) :: dist
+! find the nearest point in the get close list to the specified point
-call find_nearest(gc, base_loc, loc_list, nearest, rc, dist)
+ call find_nearest_boxes(gc, base_loc, loc_list, nearest, rc)
end subroutine xyz_find_nearest
!----------------------------------------------------------------------------
-!> find the nearest N points in the get close list to the specified location
-subroutine xyz_find_nearest_N(gc, base_loc, loc_list, n_wanted, nearest, n_found, rc, dist)
- type(xyz_get_close_type), intent(in), target :: gc
- type(xyz_location_type), intent(in) :: base_loc
- type(xyz_location_type), intent(in) :: loc_list(:)
- integer, intent(in) :: n_wanted
- integer, intent(out) :: nearest(:)
- integer, intent(out) :: n_found
- integer, intent(out) :: rc
- real(r8), optional, intent(out) :: dist(:)
-
-call find_nearest_n(gc, base_loc, loc_list, n_wanted, nearest, n_found, rc, dist)
-
-end subroutine xyz_find_nearest_N
-
-!----------------------------------------------------------------------------
-!> search all boxes which are potentially close enough
-
-subroutine find_nearest(gc, base_loc, loc_list, nearest, rc, dist)
+subroutine find_nearest_boxes(gc, base_loc, loc_list, nearest, rc)
type(xyz_get_close_type), intent(in), target :: gc
type(xyz_location_type), intent(in) :: base_loc
type(xyz_location_type), intent(in) :: loc_list(:)
integer, intent(out) :: nearest
integer, intent(out) :: rc
- real(r8), optional, intent(out) :: dist
-
-
-integer :: x_box, y_box, z_box, i, j, k, l
-integer :: start_x, end_x, start_y, end_y, start_z, end_z
-integer :: n_in_box, st, t_ind(1), ghost, n_found
-real(r8) :: this_dist, mindist(1)
-logical :: been_searched(nx,ny,nz)
-
-
-! First, set the intent out arguments to a missing value
-nearest = -99
-t_ind = -99
-rc = -1
-n_found = 0
-mindist = 1e38_r8 ! something big and positive.
-if (present(dist)) dist = 1e38_r8 ! ditto
-
-! the list of locations in the loc_list() argument must be the same
-! as the list of locations passed into get_close_init(), so
-! gc%num and size(loc_list) better be the same. if the list changes
-! you have to destroy the old gc and init a new one.
-if (size(loc_list) /= gc%num) then
- write(errstring,*)'loc_list() array must match one passed to xyz_get_close_init()'
- call error_handler(E_ERR, 'find_nearest', errstring, source, revision, revdate)
-endif
-
-! If num == 0, no point in going any further.
-if (gc%num == 0) return
-
-!--------------------------------------------------------------
-
-! Begin by figuring out which box the base loc is in
-x_box = floor((base_loc%x - gc%box%bot_x) / gc%box%x_width) + 1
-!if(x_box > nx) x_box = nx
-!if(x_box < 1) x_box = 1
-y_box = floor((base_loc%y - gc%box%bot_y) / gc%box%y_width) + 1
-!if(y_box > ny) y_box = ny
-!if(y_box < 1) y_box = 1
-z_box = floor((base_loc%z - gc%box%bot_z) / gc%box%z_width) + 1
-!if(z_box > nz) z_box = nz
-!if(z_box < 1) z_box = 1
-
-!print *, 'base_loc box ', x_box, y_box, z_box
-!print *, 'nx, ny, nz = ', nx, ny, nz
-
-! If it is not in any box, then it is more than the maxdist away from everybody
-if(x_box > nx .or. x_box < 1) return
-if(y_box > ny .or. y_box < 1) return
-if(z_box > nz .or. z_box < 1) return
-
-!print *, 'good box'
-
-! keep track of where we've searched so we don't search the same
-! box multiple times.
-been_searched = .false.
-
-! First, search all points in the box that contains the base loc
-call do_this_box(gc, x_box, y_box, z_box, base_loc, loc_list, 1, t_ind, mindist, n_found)
-been_searched(x_box, y_box, z_box) = .true.
-
-! FIXME: is it worth checking to see if the location is within
-! maxdist of each of the 6 edges and only change the start/end
-! if close enough?
-! if box small enough that no points match, expand search.
-! and for now, it is quite possible that points in a neighboring
-! box are closer than all points in the current box. so search
-! the neighbors until we are far enough away that we know we
-! have the nearest ones.
-ghost = 0
-
-10 continue
-if (ghost == 0 .or. n_found < 1) then
- ghost = ghost + 1
-
- start_x = x_box - ghost
- if (start_x < 1) start_x = 1
- end_x = x_box + ghost
- if (end_x > nx) end_x = nx
-
- start_y = y_box - ghost
- if (start_y < 1) start_y = 1
- end_y = y_box + ghost
- if (end_y > ny) end_y = ny
-
- start_z = z_box - ghost
- if (start_z < 1) start_z = 1
- end_z = z_box + ghost
- if (end_z > nz) end_z = nz
-
- !print *, 'looping from '
- !print *, 'x: ', start_x, end_x
- !print *, 'y: ', start_y, end_y
- !print *, 'z: ', start_z, end_z
-
- ! Next, loop through each box that is close to this box
- do i = start_x, end_x
- do j = start_y, end_y
- do k = start_z, end_z
-
- if (been_searched(i, j, k)) cycle
-
- call do_this_box(gc, i, j, k, base_loc, loc_list, 1, t_ind, mindist, n_found)
- been_searched(i,j,k) = .true.
-
- end do
- end do
- end do
-
- if (n_found < 1) then
- ! if we have searched the entire space, punt.
- if (start_x == 1 .and. end_x == nx .and. &
- start_y == 1 .and. end_y == ny .and. &
- start_z == 1 .and. end_z == nz) return
-
- ! repeat search with larger radius of boxes
- goto 10
- endif
-endif
-
-nearest = t_ind(1)
-rc = 0
-
-! if the caller asked for the actual distance, return it
-if (present(dist)) dist = mindist(1)
-
-end subroutine find_nearest
-
-!----------------------------------------------------------------------------
-
-subroutine find_nearest_n(gc, base_loc, loc_list, n_wanted, nearest, n_found, rc, dist)
- type(xyz_get_close_type), intent(in), target :: gc
- type(xyz_location_type), intent(in) :: base_loc
- type(xyz_location_type), intent(in) :: loc_list(:)
- integer, intent(in) :: n_wanted
- integer, intent(out) :: nearest(:)
- integer, intent(out) :: n_found
- integer, intent(out) :: rc
- real(r8), optional, intent(out) :: dist(:)
-! find the nearest N points in the get close list to the specified point
+! find the nearest point in the get close list to the specified point
integer :: x_box, y_box, z_box, i, j, k, l
integer :: start_x, end_x, start_y, end_y, start_z, end_z
integer :: n_in_box, st, t_ind, ghost
-real(r8) :: this_dist, mindist(n_wanted), largest_dist
-logical :: been_searched(nx, ny, nz)
+real(r8) :: this_dist, dist
! First, set the intent out arguments to a missing value
nearest = -99
-n_found = 0
rc = -1
-mindist = 1e38_r8 ! something big and positive.
-largest_dist = 1e38_r8 ! ditto
-if (present(dist)) dist(:) = 1e38_r8 ! ditto
-
-! make sure they want something - else return or fail?
-if (n_wanted < 1) then
- write(errstring,*)'n_wanted must be 1 or larger'
- call error_handler(E_ERR, 'find_nearest_n', errstring, source, revision, revdate)
-endif
-
-if (present(dist)) then
- if (size(dist) < n_wanted) then
- write(errstring,*)'dist(:) array size must be equal to or larger than n_wanted'
- call error_handler(E_ERR, 'find_nearest_n', errstring, source, revision, revdate)
- endif
-endif
+dist = 1e38_r8 ! something big and positive.
-! the list of locations in the loc_list argument must be the same
-! as the list of locations passed into get_close_init(), so
-! gc%num and size(loc_list better be the same. if the list changes
+! the list of locations in the obs() argument must be the same
+! as the list of locations passed into get_close_obs_init(), so
+! gc%num and size(obs) better be the same. if the list changes,
! you have to destroy the old gc and init a new one.
if (size(loc_list) /= gc%num) then
- write(errstring,*)'loc_list() array must match one passed to xyz_get_close_init()'
- call error_handler(E_ERR, 'find_nearest_n', errstring, source, revision, revdate)
+ write(errstring,*)'obs() array must match one passed to get_close_obs_init()'
+ call error_handler(E_ERR, 'get_close_obs', errstring, source, revision, revdate)
endif
! If num == 0, no point in going any further.
@@ -678,28 +549,41 @@ subroutine find_nearest_n(gc, base_loc, loc_list, n_wanted, nearest, n_found, rc
!print *, 'base_loc box ', x_box, y_box, z_box
!print *, 'nx, ny, nz = ', nx, ny, nz
-! If it is not in any box, then is this an error? what do we do about maxdist? FIXME
+! If it is not in any box, then it is more than the maxdist away from everybody
if(x_box > nx .or. x_box < 1) return
if(y_box > ny .or. y_box < 1) return
if(z_box > nz .or. z_box < 1) return
!print *, 'good box'
-been_searched(:,:,:) = .false.
-! find the closest N in the box that contains the base loc
-call do_this_box(gc, x_box, y_box, z_box, base_loc, loc_list, n_wanted, nearest, mindist, n_found)
-been_searched(x_box, y_box, z_box) = .true.
+! First, search all points in this box.
+
+! Box to search is x_box,y_box,z_box
+n_in_box = gc%box%count(x_box,y_box,z_box)
+st = gc%box%start(x_box,y_box,z_box)
+
+! find the closest one in this box
+do l = 1, n_in_box
-! if box small enough that not enough points match, expand search.
-! also, it is quite possible that points in a neighboring
-! box are closer than some points in the current box. so search
-! the neighbors until we are far enough away that we know we
-! have the nearest ones. FIXME: be smarter about this.
+ t_ind = gc%box%obs_box(st - 1 + l)
+!print *, 'l, t_ind = ', l, t_ind
+
+ this_dist = xyz_get_dist(base_loc, loc_list(t_ind))
+!print *, 'this_dist = ', this_dist
+ ! If this obs' distance is less than current nearest, it's new nearest
+ if(this_dist <= dist) then
+ nearest = t_ind
+ dist = this_dist
+ if (rc < 0) rc = 0
+ endif
+end do
+
+! if box small enough that no points match, expand search
ghost = 0
10 continue
-if (ghost == 0 .or. n_found < n_wanted) then
+if (nearest < 0 .or. ghost == 0) then
ghost = ghost + 1
start_x = x_box - ghost
@@ -727,16 +611,31 @@ subroutine find_nearest_n(gc, base_loc, loc_list, n_wanted, nearest, n_found, rc
do j = start_y, end_y
do k = start_z, end_z
- if (been_searched(i,j,k)) cycle
-
- call do_this_box(gc, i, j, k, base_loc, loc_list, n_wanted, nearest, mindist, n_found)
- been_searched(i,j,k) = .true.
-
+ ! Box to search is i,j,k
+ n_in_box = gc%box%count(i, j, k)
+ st = gc%box%start(i,j,k)
+
+
+ ! Loop to check how close all obs in the box are; add those that are close
+ do l = 1, n_in_box
+
+ t_ind = gc%box%obs_box(st - 1 + l)
+ ! print *, 'l, t_ind = ', l, t_ind
+
+ this_dist = xyz_get_dist(base_loc, loc_list(t_ind))
+ ! print *, 'this_dist = ', this_dist
+ ! If this obs' distance is less than current nearest, it's new nearest
+ if(this_dist <= dist) then
+ nearest = t_ind
+ dist = this_dist
+ if (rc < 0) rc = 0
+ endif
+ end do
end do
end do
end do
- if (n_found < n_wanted) then
+ if (nearest < 0) then
! if we have searched the entire space, punt.
if (start_x == 1 .and. end_x == nx .and. &
start_y == 1 .and. end_y == ny .and. &
@@ -747,86 +646,10 @@ subroutine find_nearest_n(gc, base_loc, loc_list, n_wanted, nearest, n_found, rc
endif
endif
-! if they asked for the explicit distances, return them
-if (present(dist)) dist(:) = mindist(:)
-
-end subroutine find_nearest_n
+end subroutine find_nearest_boxes
!----------------------------------------------------------------------------
-subroutine do_this_box(gc, i, j, k, base_loc, loc_list, n_wanted, nearest, dist, n_found)
- type(xyz_get_close_type), intent(in), target :: gc
- integer, intent(in) :: i, j, k
- type(xyz_location_type), intent(in) :: base_loc
- type(xyz_location_type), intent(in) :: loc_list(:)
- integer, intent(in) :: n_wanted
- integer, intent(inout) :: nearest(:)
- real(r8), intent(inout) :: dist(:)
- integer, intent(inout) :: n_found !< how many we already have
-
-
-integer :: n_in_box, st, l, m, n, t_ind
-real(r8) :: this_dist
-logical :: this_one_is_wanted
-
-! Box to search is i,j,k
-n_in_box = gc%box%count(i, j, k)
-st = gc%box%start(i,j,k)
-
-! Loop to check how close all locs in the box are; add those that are close
-do l = 1, n_in_box
-
- t_ind = gc%box%loc_box(st - 1 + l)
- ! print *, 'l, t_ind = ', l, t_ind
-
- this_dist = xyz_get_dist(base_loc, loc_list(t_ind))
- ! print *, 'this_dist = ', this_dist
-
- ! if we haven't filled up the number of wanted near points,
- ! or this new distance is smaller than the largest one, we are
- ! going to be adding it. make these separate tests since if
- ! n_found is 0, you'll get an out-of-bounds on dist(n_found)
- this_one_is_wanted = .false.
- if(n_found < n_wanted) then
- this_one_is_wanted = .true.
- else if (this_dist < dist(n_found)) then
- this_one_is_wanted = .true.
- endif
-
- if (this_one_is_wanted) then
- if (n_found < n_wanted) n_found = n_found + 1
- do m=1, n_wanted ! updated value
- if (this_dist >= dist(m)) cycle
- if (m == n_found) then
- nearest(m) = t_ind
- dist(m) = this_dist
- exit
- else
- do n=n_found, m+1, -1
- nearest(n) = nearest(n-1)
- dist(n) = dist(n-1)
- enddo
- nearest(m) = t_ind
- dist(m) = this_dist
- exit
- endif
- enddo
- endif
-end do ! n_in_box
-
-end subroutine do_this_box
-
-!----------------------------------------------------------------------------
-
-subroutine xyz_write_location(loc, buf)
- type(xyz_location_type), intent(in) :: loc
- character(len=*), intent(out) :: buf
-
- if (len(buf) < 18*3) print *, 'buffer too short in xyz_write_location'
- write(buf, '(3F18.6)') loc%x, loc%y, loc%z
-
-end subroutine
-
!----------------------------------------------------------------------------
! end of location/threed_cartesian/xyz_location_mod.f90
!----------------------------------------------------------------------------
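For reference (illustrative only, not part of the patch): the restored get_close code above bins every location into a regular grid of boxes with x_box = floor((x - bot_x) / x_width) + 1, clamped to 1..nx, and then looks for the nearest point by searching expanding shells of boxes around the base location's box. The 1-D standalone sketch below reproduces just that binning arithmetic and uses a brute-force scan as the nearest-point check; the program and variable names (demo_boxes, npts, nbx) are invented for this example.

program demo_boxes
   implicit none
   integer, parameter :: r8 = selected_real_kind(12)
   integer, parameter :: npts = 1000, nbx = 20
   real(r8) :: x(npts), bot_x, top_x, x_width, base, this_dist, mindist
   integer  :: x_box(npts), i, b, nearest

   call random_number(x)               ! points in [0,1)

   ! box extents and width, as in find_box_ranges()
   bot_x   = minval(x)
   top_x   = maxval(x)
   x_width = (top_x - bot_x) / nbx

   ! bin each point, clamping to [1,nbx] the same way the module does
   do i = 1, npts
      x_box(i) = floor((x(i) - bot_x) / x_width) + 1
      if (x_box(i) > nbx) x_box(i) = nbx
      if (x_box(i) < 1)   x_box(i) = 1
   end do

   ! which box does an arbitrary base point fall in?
   base = 0.5_r8
   b = floor((base - bot_x) / x_width) + 1
   if (b > nbx) b = nbx
   if (b < 1)   b = 1

   ! brute-force nearest point for comparison; the module instead searches
   ! only box b and then expanding shells of neighboring boxes.
   mindist = huge(1.0_r8)
   nearest = -1
   do i = 1, npts
      this_dist = abs(x(i) - base)
      if (this_dist < mindist) then
         mindist = this_dist
         nearest = i
      end if
   end do

   print *, 'base point is in box', b, 'which holds', count(x_box == b), 'points'
   print *, 'nearest point is index', nearest, 'at distance', mindist
end program demo_boxes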
diff --git a/assimilation_code/modules/assimilation/adaptive_inflate_mod.f90 b/assimilation_code/modules/assimilation/adaptive_inflate_mod.f90
index 7a845349a1..e650a4a7c2 100644
--- a/assimilation_code/modules/assimilation/adaptive_inflate_mod.f90
+++ b/assimilation_code/modules/assimilation/adaptive_inflate_mod.f90
@@ -669,45 +669,45 @@ subroutine bayes_cov_inflate(ens_size, inf_type, x_p, sigma_p_2, y_o, sigma_o_2,
call linear_bayes(dist_2, sigma_p_2, sigma_o_2, lambda_mean, lambda_sd_2, gamma_corr, &
new_cov_inflate)
- ! Bail out to save cost when lower bound is reached on lambda standard deviation
- if(lambda_sd <= sd_lower_bound_in) then
- new_cov_inflate_sd = lambda_sd
- else
- ! Compute by forcing a Gaussian fit at one positive SD
- ! First compute the new_max value for normalization purposes
- new_max = compute_new_density(dist_2, sigma_p_2, sigma_o_2, lambda_mean, lambda_sd, &
- gamma_corr, new_cov_inflate)
-
- ! Find value at a point one OLD sd above new mean value
+! Bail out to save cost when lower bound is reached on lambda standard deviation
+if(lambda_sd <= sd_lower_bound_in) then
+ new_cov_inflate_sd = lambda_sd
+else
+ ! Compute by forcing a Gaussian fit at one positive SD
+! First compute the new_max value for normalization purposes
+ new_max = compute_new_density(dist_2, sigma_p_2, sigma_o_2, lambda_mean, lambda_sd, &
+ gamma_corr, new_cov_inflate)
+
+! Find value at a point one OLD sd above new mean value
new_1_sd = compute_new_density(dist_2, sigma_p_2, sigma_o_2, lambda_mean, lambda_sd, gamma_corr, &
new_cov_inflate + lambda_sd)
-
- ! If either the numerator or denominator of the following computation
- ! of 'ratio' is going to be zero (or almost so), return the original incoming
- ! inflation value. The computation would have resulted in either Inf or NaN.
- if (abs(new_max) <= TINY(0.0_r8) .or. abs(new_1_sd) <= TINY(0.0_r8)) then
- new_cov_inflate_sd = lambda_sd
- return
- endif
-
- ratio = new_1_sd / new_max
-
- ! Another error for numerical issues; if ratio is larger than 0.99, bail out
- if(ratio > 0.99) then
- new_cov_inflate_sd = lambda_sd
- return
- endif
-
- ! Can now compute the standard deviation consistent with this as
- ! sigma = sqrt(-x^2 / (2 ln(r)) where r is ratio and x is lambda_sd (distance from mean)
- new_cov_inflate_sd = sqrt( -1.0_r8 * lambda_sd_2 / (2.0_r8 * log(ratio)))
-
- ! Prevent an increase in the sd of lambda???
- ! For now, this is mostly countering numerical errors in this computation
- if(new_cov_inflate_sd > lambda_sd) new_cov_inflate_sd = lambda_sd
-
+
+ ! If either the numerator or denominator of the following computation
+ ! of 'ratio' is going to be zero (or almost so), return the original incoming
+ ! inflation value. The computation would have resulted in either Inf or NaN.
+ if (abs(new_max) <= TINY(0.0_r8) .or. abs(new_1_sd) <= TINY(0.0_r8)) then
+ new_cov_inflate_sd = lambda_sd
+ return
+ endif
+
+ ratio = new_1_sd / new_max
+
+ ! Another error for numerical issues; if ratio is larger than 0.99, bail out
+ if(ratio > 0.99) then
+ new_cov_inflate_sd = lambda_sd
+ return
endif
+ ! Can now compute the standard deviation consistent with this as
+ ! sigma = sqrt(-x^2 / (2 ln(r)) where r is ratio and x is lambda_sd (distance from mean)
+ new_cov_inflate_sd = sqrt( -1.0_r8 * lambda_sd_2 / (2.0_r8 * log(ratio)))
+
+ ! Prevent an increase in the sd of lambda???
+ ! For now, this is mostly countering numerical errors in this computation
+ if(new_cov_inflate_sd > lambda_sd) new_cov_inflate_sd = lambda_sd
+
+endif
+
else if (inf_type == GHA2017) then
! Transform Gaussian prior to Inverse Gamma
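A worked step behind the restored comments in bayes_cov_inflate above (illustrative, not part of the patch): forcing a Gaussian fit through the density maximum new_max at the new mean and the value new_1_sd at a point x = lambda_sd above the mean gives ratio r = new_1_sd / new_max = exp(-x^2 / (2 sigma^2)), so ln(r) = -x^2 / (2 sigma^2) and sigma = sqrt(-x^2 / (2 ln(r))), which is exactly the expression assigned to new_cov_inflate_sd (with x^2 = lambda_sd_2). Since ln(r) -> 0 as r -> 1, sigma blows up for ratios near one, which is why the code keeps the incoming lambda_sd when ratio > 0.99, and likewise when new_max or new_1_sd is essentially zero, where the ratio would be Inf or NaN.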
diff --git a/assimilation_code/modules/observations/DEFAULT_obs_kind_mod.F90 b/assimilation_code/modules/observations/DEFAULT_obs_kind_mod.F90
index f38ebcdf66..96fa4a1759 100644
--- a/assimilation_code/modules/observations/DEFAULT_obs_kind_mod.F90
+++ b/assimilation_code/modules/observations/DEFAULT_obs_kind_mod.F90
@@ -274,10 +274,6 @@ module obs_kind_mod
QTY_FPAR = 129, &
QTY_TOTAL_WATER_STORAGE = 130
-! apparently we never had a surface vapor pressure quantity?
-integer, parameter, public :: &
- QTY_2M_VAPOR_MIXING_RATIO = 131
-
! kinds for NOAH (Tim Hoar)
integer, parameter, public :: &
QTY_NEUTRON_INTENSITY = 140, &
@@ -721,8 +717,6 @@ subroutine initialize_module
obs_kind_names(129) = obs_kind_type(QTY_FPAR ,'QTY_FPAR')
obs_kind_names(130) = obs_kind_type(QTY_TOTAL_WATER_STORAGE ,'QTY_TOTAL_WATER_STORAGE')
-obs_kind_names(131) = obs_kind_type(QTY_2M_VAPOR_MIXING_RATIO,'QTY_2M_VAPOR_MIXING_RATIO')
-
obs_kind_names(140) = obs_kind_type(QTY_NEUTRON_INTENSITY ,'QTY_NEUTRON_INTENSITY')
obs_kind_names(141) = obs_kind_type(QTY_CANOPY_WATER ,'QTY_CANOPY_WATER')
obs_kind_names(142) = obs_kind_type(QTY_GROUND_HEAT_FLUX ,'QTY_GROUND_HEAT_FLUX')
diff --git a/assimilation_code/modules/utilities/mpi_utilities_mod.f90 b/assimilation_code/modules/utilities/mpi_utilities_mod.f90
index 8e05ce6793..56dbfadc2a 100644
--- a/assimilation_code/modules/utilities/mpi_utilities_mod.f90
+++ b/assimilation_code/modules/utilities/mpi_utilities_mod.f90
@@ -39,9 +39,11 @@ module mpi_utilities_mod
use mpi
! the NAG compiler needs these special definitions enabled
+! but we don't preprocess this file (why?) so you have to
+! edit this by hand for NAG.
-! !!NAG_BLOCK_EDIT START COMMENTED_OUT
-! use F90_unix_proc, only : sleep, system, exit
+!#ifdef __NAG__
+ !use F90_unix_proc, only : sleep, system, exit
!! block for NAG compiler
! PURE SUBROUTINE SLEEP(SECONDS,SECLEFT)
! INTEGER,INTENT(IN) :: SECONDS
@@ -55,7 +57,7 @@ module mpi_utilities_mod
! SUBROUTINE EXIT(STATUS)
! INTEGER,OPTIONAL :: STATUS
!! end block
-! !!NAG_BLOCK_EDIT END COMMENTED_OUT
+!#endif
implicit none
private
@@ -70,16 +72,16 @@ module mpi_utilities_mod
! this directory. It is a sed script that comments in and out the interface
! block below. Please leave the BLOCK comment lines unchanged.
- !!SYSTEM_BLOCK_EDIT START COMMENTED_IN
- ! interface block for getting return code back from system() routine
- interface
- function system(string)
- character(len=*) :: string
- integer :: system
- end function system
- end interface
- ! end block
- !!SYSTEM_BLOCK_EDIT END COMMENTED_IN
+! !!SYSTEM_BLOCK_EDIT START COMMENTED_OUT
+! ! interface block for getting return code back from system() routine
+! interface
+! function system(string)
+! character(len=*) :: string
+! integer :: system
+! end function system
+! end interface
+! ! end block
+! !!SYSTEM_BLOCK_EDIT END COMMENTED_OUT
! allow global sum to be computed for integers, r4, and r8s
@@ -753,6 +755,26 @@ subroutine receive_from(src_id, destarray, time, label)
end subroutine receive_from
+
+!-----------------------------------------------------------------------------
+! TODO: do i need to overload this for both integer and real?
+! do i need to handle 1D, 2D, 3D inputs?
+
+subroutine transpose_array
+
+! not implemented here yet. will have arguments -- several of them.
+
+if ( .not. module_initialized ) then
+ write(errstring, *) 'initialize_mpi_utilities() must be called first'
+ call error_handler(E_ERR,'transpose_array', errstring, source, revision, revdate)
+endif
+
+write(errstring, *) 'not implemented yet'
+call error_handler(E_ERR,'transpose_array', errstring, source, revision, revdate)
+
+end subroutine transpose_array
+
+
!-----------------------------------------------------------------------------
! TODO: do i need to overload this for both integer and real?
! do i need to handle 2D inputs?
@@ -841,6 +863,123 @@ subroutine array_broadcast(array, root)
end subroutine array_broadcast
+!-----------------------------------------------------------------------------
+! TODO: do i need to overload this for both integer and real?
+! do i need to handle 2D inputs?
+
+subroutine array_distribute(srcarray, root, dstarray, dstcount, how, which)
+ real(r8), intent(in) :: srcarray(:)
+ integer, intent(in) :: root
+ real(r8), intent(out) :: dstarray(:)
+ integer, intent(out) :: dstcount
+ integer, intent(in) :: how
+ integer, intent(out) :: which(:)
+
+! 'srcarray' on the root task will be distributed across all the tasks
+! into 'dstarray'. dstarray must be large enough to hold each task's share
+! of the data. The actual number of values returned on each task will be
+! passed back in the 'count' argument. 'how' is a flag to select how to
+! distribute the data (round-robin, contiguous chunks, etc). 'which' is an
+! integer index array which lists which of the original values were selected
+! and put into 'dstarray'.
+
+real(r8), allocatable :: localchunk(:)
+integer :: srccount, leftover
+integer :: i, tag, errcode
+logical :: iamroot
+integer :: status(MPI_STATUS_SIZE)
+
+if ( .not. module_initialized ) then
+ write(errstring, *) 'initialize_mpi_utilities() must be called first'
+ call error_handler(E_ERR,'array_distribute', errstring, source, revision, revdate)
+endif
+
+! simple idiotproofing
+if ((root < 0) .or. (root >= total_tasks)) then
+ write(errstring, '(a,i8,a,i8)') "root task id ", root, &
+ "must be >= 0 and < ", total_tasks
+ call error_handler(E_ERR,'array_broadcast', errstring, source, revision, revdate)
+endif
+
+iamroot = (root == myrank)
+tag = 1
+
+srccount = size(srcarray)
+
+! TODO: right now this code does contig chunks only
+! TODO: it should select on the 'how' argument
+dstcount = srccount / total_tasks
+leftover = srccount - (dstcount * total_tasks)
+if (myrank == total_tasks-1) dstcount = dstcount + leftover
+
+
+! idiotproofing, continued...
+if (size(dstarray) < dstcount) then
+ write(errstring, '(a,i8,a,i8)') "size of dstarray is", size(dstarray), &
+ " but must be >= ", dstcount
+ call error_handler(E_ERR,'array_broadcast', errstring, source, revision, revdate)
+endif
+if (size(which) < dstcount) then
+ write(errstring, '(a,i8,a,i8)') "size of which is", size(which), &
+ " but must be >= ", dstcount
+ call error_handler(E_ERR,'array_broadcast', errstring, source, revision, revdate)
+endif
+
+! TODO: this code is separate from the 'dstcount' computation because we
+! need to test to be sure the user has passed us in arrays large enough to
+! hold the data, but then this section needs to have a select (how) and set
+! the corresponding index numbers accordingly.
+which(1:dstcount) = (/ (i, i= myrank *dstcount, (myrank+1)*dstcount - 1) /)
+if (size(which) > dstcount) which(dstcount+1:) = -1
+
+
+if (.not.iamroot) then
+
+ ! my task is receiving data.
+ call MPI_Recv(dstarray, dstcount, datasize, root, MPI_ANY_TAG, &
+ my_local_comm, status, errcode)
+ if (errcode /= MPI_SUCCESS) then
+ write(errstring, '(a,i8)') 'MPI_Recv returned error code ', errcode
+ call error_handler(E_ERR,'array_broadcast', errstring, source, revision, revdate)
+ endif
+
+else
+ ! my task must send to everyone else and copy to myself.
+ allocate(localchunk(dstcount), stat=errcode)
+ if (errcode /= 0) then
+ write(errstring, *) 'allocation error of allocatable array'
+ call error_handler(E_ERR,'array_broadcast', errstring, source, revision, revdate)
+ endif
+
+ do i=0, total_tasks-1
+ ! copy correct data from srcarray to localchunk for each destination
+ if (i == myrank) then
+ ! this is my task, so do a copy from localchunk to dstarray
+ dstarray(1:dstcount) = localchunk(1:dstcount)
+ else
+ ! call MPI to send the data to the remote task
+ call MPI_Ssend(localchunk, dstcount, datasize, i, tag, &
+ my_local_comm, errcode)
+ if (errcode /= MPI_SUCCESS) then
+ write(errstring, '(a,i8)') 'MPI_Ssend returned error code ', errcode
+ call error_handler(E_ERR,'array_broadcast', errstring, source, revision, revdate)
+ endif
+ endif
+ tag = tag + 1
+ enddo
+
+ deallocate(localchunk, stat=errcode)
+ if (errcode /= 0) then
+ write(errstring, *) 'deallocation error of allocatable array'
+ call error_handler(E_ERR,'array_broadcast', errstring, source, revision, revdate)
+ endif
+endif
+
+! zero out any additional space in dstarray which wasn't filled above.
+if (size(dstarray) > dstcount) dstarray(dstcount+1:) = 0.0
+
+end subroutine array_distribute
+
!-----------------------------------------------------------------------------
! DART-specific cover utilities
!-----------------------------------------------------------------------------
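Illustrative only, not part of the patch: the contiguous-chunk bookkeeping that array_distribute() above performs (dstcount = srccount / total_tasks, with any leftover values going to the last task) can be seen in isolation in the serial sketch below. The program and names (demo_chunks, ntasks, nvals) are invented for this example; the real routine additionally ships each chunk to its task with MPI_Ssend/MPI_Recv and records the selected indices in which(:) using its own indexing.

program demo_chunks
   implicit none
   integer, parameter :: ntasks = 4, nvals = 10
   integer :: rank, dstcount, leftover, first, last

   do rank = 0, ntasks-1
      dstcount = nvals / ntasks
      leftover = nvals - (dstcount * ntasks)
      ! as in the module, the last task absorbs any leftover values
      if (rank == ntasks-1) dstcount = dstcount + leftover

      ! 1-based index range of the source values this task would receive
      ! under the same contiguous-chunk scheme
      first = rank * (nvals / ntasks) + 1
      last  = first + dstcount - 1
      print '(a,i2,a,i3,a,i4,a,i4)', 'task ', rank, ' gets ', dstcount, &
            ' values: indices ', first, ' ..', last
   end do
end program demo_chunks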
@@ -1244,8 +1383,7 @@ end subroutine unpackscalar
!-----------------------------------------------------------------------------
! overloaded global reduce routines
-! The external32 representations of the datatypes returned by
-! MPI_TYPE_CREATE_F90_REAL/COMPLEX/INTEGER are given by the following rules.
+! The external32 representations of the datatypes returned by MPI_TYPE_CREATE_F90_REAL/COMPLEX/INTEGER are given by the following rules.
! For MPI_TYPE_CREATE_F90_REAL:
!
! if (p > 33) or (r > 4931) then external32 representation
@@ -1354,7 +1492,6 @@ subroutine sum_across_tasks_real(addend, sum)
end subroutine sum_across_tasks_real
-
!-----------------------------------------------------------------------------
! pipe-related utilities
!-----------------------------------------------------------------------------
@@ -1401,12 +1538,12 @@ subroutine block_task()
if (async4_verbose) then
write(*,*) 'checking master task host'
- call do_system('echo master task running on host `hostname`', rc)
+ rc = system('echo master task running on host `hostname`'//' '//char(0))
if (rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
endif
if (async4_verbose .or. print4status) write(*,*) 'MPI job telling script to advance model'
- call do_system('echo advance > '//trim(non_pipe), rc)
+ rc = system('echo advance > '//trim(non_pipe)//' '//char(0))
if (async4_verbose .and. rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
endif
@@ -1415,16 +1552,16 @@ subroutine block_task()
if (async4_verbose) then
write(*,*) 'checking master task host'
- call do_system('echo master task running on host `hostname`', rc)
+ rc = system('echo master task running on host `hostname`'//' '//char(0))
if (rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
endif
if (async4_verbose .or. print4status) write(*,*) 'MPI job telling script to advance model'
- call do_system('echo advance > '//trim(filter_to_model), rc)
+ rc = system('echo advance > '//trim(filter_to_model)//' '//char(0))
if (async4_verbose .and. rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
if (async4_verbose) write(*,*) 'MPI job now waiting to read from lock file'
- call do_system('cat < '//trim(model_to_filter)//'> /dev/null', rc)
+ rc = system('cat < '//trim(model_to_filter)//'> /dev/null '//char(0))
if (async4_verbose .and. rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
else
@@ -1436,24 +1573,24 @@ subroutine block_task()
if (async4_verbose) then
write(*,*) 'checking slave task host'
- call do_system('echo '//trim(fifo_name)//' accessed from host `hostname`', rc)
+ rc = system('echo '//trim(fifo_name)//' accessed from host `hostname`'//' '//char(0))
if (rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
endif
if (async4_verbose) write(*,*) 'removing any previous lock file: '//trim(fifo_name)
- call do_system('rm -f '//trim(fifo_name), rc)
+ rc = system('rm -f '//trim(fifo_name)//' '//char(0))
if (async4_verbose .and. rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
if (async4_verbose) write(*,*) 'made fifo, named: '//trim(fifo_name)
- call do_system('mkfifo '//trim(fifo_name), rc)
+ rc = system('mkfifo '//trim(fifo_name)//' '//char(0))
if (async4_verbose .and. rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
if (async4_verbose) write(*,*) 'ready to read from lock file: '//trim(fifo_name)
- call do_system('cat < '//trim(fifo_name)//'> /dev/null ', rc)
+ rc = system('cat < '//trim(fifo_name)//'> /dev/null '//char(0))
if (async4_verbose .and. rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
if (async4_verbose) write(*,*) 'got response, removing lock file: '//trim(fifo_name)
- call do_system('rm -f '//trim(fifo_name), rc)
+ rc = system('rm -f '//trim(fifo_name)//' '//char(0))
if (async4_verbose .and. rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
endif
@@ -1501,12 +1638,12 @@ subroutine restart_task()
if ((myrank == head_task) .and. .not. separate_node_sync) then
if (async4_verbose) then
- call do_system('echo master task running on host `hostname`', rc)
+ rc = system('echo master task running on host `hostname`'//' '//char(0))
if (rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
endif
if (async4_verbose .or. print4status) write(*,*) 'script telling MPI job ok to restart'
- call do_system('echo restart > '//trim(model_to_filter), rc)
+ rc = system('echo restart > '//trim(model_to_filter)//' '//char(0))
if (async4_verbose .and. rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
else
@@ -1517,12 +1654,12 @@ subroutine restart_task()
write(fifo_name,"(a,i5.5)") "filter_lock", myrank
if (async4_verbose) then
- call do_system('echo '//trim(fifo_name)//' accessed from host `hostname`', rc)
+ rc = system('echo '//trim(fifo_name)//' accessed from host `hostname`'//' '//char(0))
if (rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
endif
if (async4_verbose) write(*,*) 'ready to write to lock file: '//trim(fifo_name)
- call do_system('echo restart > '//trim(fifo_name), rc)
+ rc = system('echo restart > '//trim(fifo_name)//' '//char(0))
if (async4_verbose .and. rc /= 0) write(*, *) 'system command returned nonzero rc, ', rc
endif
@@ -1554,9 +1691,9 @@ subroutine finished_task(async)
if (print4status .or. verbose) write(*,*) 'MPI task telling script we are done'
if (separate_node_sync) then
- call do_system('echo finished > '//trim(non_pipe), rc)
+ rc = system('echo finished > '//trim(non_pipe)//' '//char(0))
else
- call do_system('echo finished > '//trim(filter_to_model), rc)
+ rc = system('echo finished > '//trim(filter_to_model)//' '//char(0))
endif
@@ -1586,113 +1723,94 @@ function shell_execute(execute_string, serialize)
integer :: errcode, dummy(1)
integer :: status(MPI_STATUS_SIZE)
-if (verbose) async2_verbose = .true.
+ if (verbose) async2_verbose = .true.
-! default to everyone running concurrently, but if set and not true,
-! serialize the calls to system() so they do not step on each other.
-if (present(serialize)) then
- all_at_once = .not. serialize
-else
- all_at_once = .TRUE.
-endif
+ ! default to everyone running concurrently, but if set and not true,
+ ! serialize the calls to system() so they do not step on each other.
+ if (present(serialize)) then
+ all_at_once = .not. serialize
+ else
+ all_at_once = .TRUE.
+ endif
-if (async2_verbose) write(*,*) "PE", myrank, ": system string is: ", trim(execute_string)
-shell_execute = -1
+ if (async2_verbose) write(*,*) "PE", myrank, ": system string is: ", trim(execute_string)
+ shell_execute = -1
-! this is the normal (default) case
-if (all_at_once) then
+ ! this is the normal (default) case
+ if (all_at_once) then
- ! all tasks call system at the same time
- call do_system(execute_string, shell_execute)
- if (async2_verbose) write(*,*) "PE", myrank, ": execution returns, rc = ", shell_execute
+ ! all tasks call system at the same time
+ !shell_execute = system(trim(execute_string)//' '//char(0))
+ shell_execute = system(trim(shell_name)//' '//trim(execute_string)//' '//char(0))
+ if (async2_verbose) write(*,*) "PE", myrank, ": execution returns, rc = ", shell_execute
- return
-endif
+ return
+ endif
-! only one task at a time calls system, and all wait their turn by
-! making each task wait for a message from the (N-1)th task.
+ ! only one task at a time calls system, and all wait their turn by
+ ! making each task wait for a message from the (N-1)th task.
+
+ ! this is used only to signal; the value it contains is unused.
+ dummy = 0
-! this is used only to signal; the value it contains is unused.
-dummy = 0
+ if (myrank == 0) then
-if (myrank == 0) then
+ ! my turn to execute
+ shell_execute = system(trim(shell_name)//' '//trim(execute_string)//' '//char(0))
+ if (async2_verbose) write(*,*) "PE", myrank, ": execution returns, rc = ", shell_execute
+
+ if (total_tasks > 1) then
+ ! tell next task it can continue
+ call MPI_Send(dummy, 1, MPI_INTEGER, 1, 1, my_local_comm, errcode)
+ if (errcode /= MPI_SUCCESS) then
+ write(errstring, '(a,i8)') 'MPI_Send returned error code ', &
+ errcode
+ call error_handler(E_ERR,'shell_execute', errstring, source, &
+ revision, revdate)
+ endif
+ endif
+
+ else if (myrank /= (total_tasks-1)) then
+ ! wait for (me-1) to tell me it is my turn
+ call MPI_Recv(dummy, 1, MPI_INTEGER, myrank-1, myrank, &
+ my_local_comm, status, errcode)
+ if (errcode /= MPI_SUCCESS) then
+ write(errstring, '(a,i8)') 'MPI_Recv returned error code ', errcode
+ call error_handler(E_ERR,'shell_execute', errstring, source, &
+ revision, revdate)
+ endif
- ! my turn to execute
- call do_system(execute_string, shell_execute)
- if (async2_verbose) write(*,*) "PE", myrank, ": execution returns, rc = ", shell_execute
+ ! my turn to execute
+ shell_execute = system(trim(shell_name)//' '//trim(execute_string)//' '//char(0))
+ if (async2_verbose) write(*,*) "PE", myrank, ": execution returns, rc = ", shell_execute
- if (total_tasks > 1) then
- ! tell next task it can continue
- call MPI_Send(dummy, 1, MPI_INTEGER, 1, 1, my_local_comm, errcode)
+ ! and now tell (me+1) to go
+ call MPI_Send(dummy, 1, MPI_INTEGER, myrank+1, myrank+1, my_local_comm, errcode)
if (errcode /= MPI_SUCCESS) then
write(errstring, '(a,i8)') 'MPI_Send returned error code ', &
errcode
call error_handler(E_ERR,'shell_execute', errstring, source, &
revision, revdate)
endif
- endif
-
-else if (myrank /= (total_tasks-1)) then
- ! wait for (me-1) to tell me it is my turn
- call MPI_Recv(dummy, 1, MPI_INTEGER, myrank-1, myrank, &
- my_local_comm, status, errcode)
- if (errcode /= MPI_SUCCESS) then
- write(errstring, '(a,i8)') 'MPI_Recv returned error code ', errcode
- call error_handler(E_ERR,'shell_execute', errstring, source, &
- revision, revdate)
- endif
+ else
+ ! last task, no one else to send to.
+ call MPI_Recv(dummy, 1, MPI_INTEGER, myrank-1, myrank, &
+ my_local_comm, status, errcode)
+ if (errcode /= MPI_SUCCESS) then
+ write(errstring, '(a,i8)') 'MPI_Recv returned error code ', errcode
+ call error_handler(E_ERR,'shell_execute', errstring, source, &
+ revision, revdate)
+ endif
- ! my turn to execute
- call do_system(execute_string, shell_execute)
- if (async2_verbose) write(*,*) "PE", myrank, ": execution returns, rc = ", shell_execute
+ ! my turn to execute
+ shell_execute = system(trim(shell_name)//' '//trim(execute_string)//' '//char(0))
+ if (async2_verbose) write(*,*) "PE", myrank, ": execution returns, rc = ", shell_execute
- ! and now tell (me+1) to go
- call MPI_Send(dummy, 1, MPI_INTEGER, myrank+1, myrank+1, my_local_comm, errcode)
- if (errcode /= MPI_SUCCESS) then
- write(errstring, '(a,i8)') 'MPI_Send returned error code ', &
- errcode
- call error_handler(E_ERR,'shell_execute', errstring, source, &
- revision, revdate)
- endif
-else
- ! last task, no one else to send to.
- call MPI_Recv(dummy, 1, MPI_INTEGER, myrank-1, myrank, &
- my_local_comm, status, errcode)
- if (errcode /= MPI_SUCCESS) then
- write(errstring, '(a,i8)') 'MPI_Recv returned error code ', errcode
- call error_handler(E_ERR,'shell_execute', errstring, source, &
- revision, revdate)
endif
-
- ! my turn to execute
- call do_system(execute_string, shell_execute)
- if (async2_verbose) write(*,*) "PE", myrank, ": execution returns, rc = ", shell_execute
-endif
-
+
end function shell_execute
-!-----------------------------------------------------------------------------
-
-!> wrapper so you only have to make this work in a single place
-!> 'shell_name' is a namelist item and normally is the null string.
-!> on at least on cray system, the compute nodes only had one type
-!> of shell and you had to specify it.
-
-subroutine do_system(execute, rc)
-
-character(len=*), intent(in) :: execute
-integer, intent(out) :: rc
-
-! !!NAG_BLOCK_EDIT START COMMENTED_OUT
-! call system(trim(shell_name)//' '//trim(execute)//' '//char(0), errno=rc)
-! !!NAG_BLOCK_EDIT END COMMENTED_OUT
-! !!OTHER_BLOCK_EDIT START COMMENTED_IN
- rc = system(trim(shell_name)//' '//trim(execute)//' '//char(0))
-! !!OTHER_BLOCK_EDIT END COMMENTED_IN
-
-end subroutine do_system
-
!-----------------------------------------------------------------------------
subroutine sleep_seconds(naplength)
real(r8), intent(in) :: naplength
@@ -1873,8 +1991,8 @@ subroutine get_from_mean(owner, window, mindex, x)
! => Don't do anything with x in between mpi_get and mpi_win_lock
! Note to programmer: openmpi 1.10.0 does not
-! allow scalars in mpi calls. openmpi 1.10.1 fixes this.
-
+! allow scalars in mpi calls. openmpi 1.10.1 fixes
+! this.
target_disp = (mindex - 1)
call mpi_win_lock(MPI_LOCK_SHARED, owner, 0, window, errcode)
call mpi_get(x, 1, datasize, owner, target_disp, 1, datasize, window, errcode)
@@ -1899,6 +2017,7 @@ subroutine get_from_fwd(owner, window, mindex, num_rows, x)
! to have occured until the call to mpi_win_unlock.
! => Don't do anything with x in between mpi_get and mpi_win_lock
+
target_disp = (mindex - 1)*num_rows
call mpi_win_lock(MPI_LOCK_SHARED, owner, 0, window, errcode)
call mpi_get(x, num_rows, datasize, owner, target_disp, num_rows, datasize, window, errcode)
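Illustrative only, not part of the patch: the serialized branch of shell_execute() restored above makes the tasks take turns by passing a token -- task 0 runs the command and signals task 1, task 1 runs and signals task 2, and so on, with each task receiving on its own rank as the tag. A minimal standalone sketch of that token ring is below, with the system() call replaced by a print; the program name demo_serialize is invented for this example.

program demo_serialize
   use mpi
   implicit none
   integer :: rank, ntasks, ierr, dummy
   integer :: status(MPI_STATUS_SIZE)

   call MPI_Init(ierr)
   call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
   call MPI_Comm_size(MPI_COMM_WORLD, ntasks, ierr)

   dummy = 0    ! used only to signal; the value is unused

   ! wait for my predecessor's token (rank 0 starts immediately)
   if (rank > 0) call MPI_Recv(dummy, 1, MPI_INTEGER, rank-1, rank, &
                               MPI_COMM_WORLD, status, ierr)

   ! "my turn": stand-in for the call to system() in shell_execute()
   print *, 'task', rank, 'doing its serialized work'

   ! pass the token to my successor, if any
   if (rank < ntasks-1) call MPI_Send(dummy, 1, MPI_INTEGER, rank+1, rank+1, &
                                      MPI_COMM_WORLD, ierr)

   call MPI_Finalize(ierr)
end program demo_serialize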
diff --git a/assimilation_code/modules/utilities/null_mpi_utilities_mod.f90 b/assimilation_code/modules/utilities/null_mpi_utilities_mod.f90
index 80d402b151..de7a5a7724 100644
--- a/assimilation_code/modules/utilities/null_mpi_utilities_mod.f90
+++ b/assimilation_code/modules/utilities/null_mpi_utilities_mod.f90
@@ -17,8 +17,8 @@ module mpi_utilities_mod
use time_manager_mod, only : time_type, set_time
-! !!NAG_BLOCK_EDIT START COMMENTED_OUT
-! use F90_unix_proc, only : sleep, system, exit
+!#ifdef __NAG__
+ !use F90_unix_proc, only : sleep, system, exit
!! block for NAG compiler
! PURE SUBROUTINE SLEEP(SECONDS,SECLEFT)
! INTEGER,INTENT(IN) :: SECONDS
@@ -32,7 +32,7 @@ module mpi_utilities_mod
! SUBROUTINE EXIT(STATUS)
! INTEGER,OPTIONAL :: STATUS
!! end block
-! !!NAG_BLOCK_EDIT END COMMENTED_OUT
+!#endif
implicit none
@@ -46,16 +46,16 @@ module mpi_utilities_mod
! this directory. It is a sed script that comments in and out the interface
! block below. Please leave the BLOCK comment lines unchanged.
- !!SYSTEM_BLOCK_EDIT START COMMENTED_IN
- ! interface block for getting return code back from system() routine
- interface
- function system(string)
- character(len=*) :: string
- integer :: system
- end function system
- end interface
- ! end block
- !!SYSTEM_BLOCK_EDIT END COMMENTED_IN
+! !!SYSTEM_BLOCK_EDIT START COMMENTED_OUT
+! ! interface block for getting return code back from system() routine
+! interface
+! function system(string)
+! character(len=*) :: string
+! integer :: system
+! end function system
+! end interface
+! ! end block
+! !!SYSTEM_BLOCK_EDIT END COMMENTED_OUT
interface sum_across_tasks
@@ -288,6 +288,23 @@ subroutine receive_from(src_id, destarray, time, label)
end subroutine receive_from
+
+!-----------------------------------------------------------------------------
+! TODO: do i need to overload this for both integer and real?
+! do i need to handle 1D, 2D, 3D inputs?
+
+subroutine transpose_array
+
+! not implemented here yet. will have arguments -- several of them.
+
+if ( .not. module_initialized ) call initialize_mpi_utilities()
+
+write(errstring, *) 'not implemented yet'
+call error_handler(E_ERR,'transpose_array', errstring, source, revision, revdate)
+
+end subroutine transpose_array
+
+
!-----------------------------------------------------------------------------
! TODO: do i need to overload this for both integer and real?
! do i need to handle 2D inputs?
@@ -315,6 +332,43 @@ subroutine array_broadcast(array, root)
end subroutine array_broadcast
+!-----------------------------------------------------------------------------
+! TODO: do i need to overload this for both integer and real?
+! do i need to handle 2D inputs?
+
+subroutine array_distribute(srcarray, root, dstarray, dstcount, how, which)
+real(r8), intent(in) :: srcarray(:)
+integer, intent(in) :: root
+real(r8), intent(out) :: dstarray(:)
+integer, intent(out) :: dstcount
+integer, intent(in) :: how
+integer, intent(out) :: which(:)
+
+! 'srcarray' on the root task will be distributed across all the tasks
+! into 'dstarray'. dstarray must be large enough to hold each task's share
+! of the data. The actual number of values returned on each task will be
+! passed back in the 'count' argument. 'how' is a flag to select how to
+! distribute the data (round-robin, contiguous chunks, etc). 'which' is an
+! integer index array which lists which of the original values were selected
+! and put into 'dstarray'.
+
+integer :: i
+
+if ( .not. module_initialized ) call initialize_mpi_utilities()
+
+! simple idiotproofing
+if ((root < 0) .or. (root >= total_tasks)) then
+ write(errstring, '(a,i8,a,i8)') "root task id ", root, &
+ " must be >= 0 and < ", total_tasks
+ call error_handler(E_ERR,'array_distribute', errstring, source, revision, revdate)
+endif
+
+dstarray = srcarray
+dstcount = size(srcarray)
+which = (/ ((i), i=1,size(srcarray)) /)
+
+end subroutine array_distribute
+
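
A hypothetical call to the array_distribute cover routine added above, just to show the calling convention. It assumes array_distribute is on the module's public list; the 'how' value is a placeholder since the distribution methods are not defined in this single-task version, which simply copies everything and selects all indices.

program distribute_example
   use types_mod,         only : r8
   use mpi_utilities_mod, only : initialize_mpi_utilities, finalize_mpi_utilities, &
                                 array_distribute
   implicit none
   real(r8) :: src(10), dst(10)
   integer  :: nvals, which(10), i

   call initialize_mpi_utilities()
   src = (/ (real(i, r8), i = 1, 10) /)
   ! 'how=1' is a placeholder flag; this null-MPI version ignores it anyway
   call array_distribute(src, root=0, dstarray=dst, dstcount=nvals, how=1, which=which)
   ! here: nvals == 10, dst == src, which == (/ 1, 2, ..., 10 /)
   call finalize_mpi_utilities()
end program distribute_example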
!-----------------------------------------------------------------------------
! DART-specific cover utilities
!-----------------------------------------------------------------------------
@@ -468,14 +522,8 @@ function shell_execute(execute_string, serialize)
!print *, "about to run: ", trim(doit)
!print *, "input string length = ", len(trim(doit))
-! !!NAG_BLOCK_EDIT START COMMENTED_OUT
-! call system(doit, status=rc)
-! shell_execute = rc
-! !!NAG_BLOCK_EDIT END COMMENTED_OUT
- !!OTHER_BLOCK_EDIT START COMMENTED_IN
- shell_execute = system(doit)
- !!OTHER_BLOCK_EDIT END COMMENTED_IN
- !print *, "execution returns, rc = ", shell_execute
+ shell_execute = system(doit)
+ print *, "execution returns, rc = ", shell_execute
end function shell_execute
@@ -670,9 +718,6 @@ end module mpi_utilities_mod
!-----------------------------------------------------------------------------
subroutine exit_all(exit_code)
-! !!NAG_BLOCK_EDIT START COMMENTED_OUT
-! use F90_unix_proc, only : exit
-! !!NAG_BLOCK_EDIT END COMMENTED_OUT
integer, intent(in) :: exit_code
! Call exit with the specified code.
diff --git a/assimilation_code/modules/utilities/types_mod.f90 b/assimilation_code/modules/utilities/types_mod.f90
index 5e8fd0194e..f72501f716 100644
--- a/assimilation_code/modules/utilities/types_mod.f90
+++ b/assimilation_code/modules/utilities/types_mod.f90
@@ -79,8 +79,8 @@ module types_mod
! TO RUN WITH REDUCED PRECISION REALS (and use correspondingly less memory)
! comment OUT the r8 definition below and use the second one:
integer, parameter :: r4 = SELECTED_REAL_KIND(6,30)
-!integer, parameter :: r8 = SELECTED_REAL_KIND(12) ! real r8
-integer, parameter :: r8 = r4 ! alias r8 to r4
+integer, parameter :: r8 = SELECTED_REAL_KIND(12) ! real r8
+!integer, parameter :: r8 = r4 ! alias r8 to r4
! complex precision:
integer, parameter :: c4 = SELECTED_REAL_KIND(6,30)
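
The reduced-precision switch above works only because every DART real carries an explicit kind, so aliasing r8 to r4 silently halves the storage everywhere. A tiny standalone check (not DART code) shows what the two kind definitions actually buy, and what the alias gives up:

program kind_check
   implicit none
   integer, parameter :: r4 = selected_real_kind(6,30)
   integer, parameter :: r8 = selected_real_kind(12)   ! set this to r4 to mimic reduced precision
   real(r4) :: a
   real(r8) :: b

   print *, 'r4: precision =', precision(a), ' range =', range(a), ' bits =', storage_size(a)
   print *, 'r8: precision =', precision(b), ' range =', range(b), ' bits =', storage_size(b)
end program kind_check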
diff --git a/assimilation_code/programs/closest_member_tool/closest_member_tool.f90 b/assimilation_code/programs/closest_member_tool/closest_member_tool.f90
index 58931c7abc..bb2411469e 100644
--- a/assimilation_code/programs/closest_member_tool/closest_member_tool.f90
+++ b/assimilation_code/programs/closest_member_tool/closest_member_tool.f90
@@ -4,30 +4,28 @@
!
! $Id$
-!> @mainpage
-!> @{
-!> @brief this program selects the member closest to the ensemble mean.
-!>
-!>
-!> this program has options to compute distance in several different ways
-!> and returns the ensemble member which has the smallest total distance from
-!> the ensemble mean.
-!> @}
-!>
-!>
+!>@todo FIXME changed so it compiles, but this IS NOT WORKING CODE YET
+!> it needs to read in an ensemble (perhaps become an mpi program)
+!> and do all ensemble members at the same time - also handle the
+!> layout when only part of a state vector is on a single task.
+!> would have to do a reduce to add up the total differences.
+
+!>@todo FIXME the html needs to be made consistent with the namelist once the namelist
+!> is fleshed out.
program closest_member_tool
+! Program to select the ensemble member which is closest to the ensemble mean.
+
use types_mod, only : r8, i8, obstypelength, MAX_NUM_DOMS, MAX_FILES
-use time_manager_mod, only : time_type, set_time_missing, &
- operator(/=), print_time
+use time_manager_mod, only : time_type, set_time_missing, operator(/=), &
+ print_time
use utilities_mod, only : register_module, find_namelist_in_file, &
error_handler, nmlfileunit, E_MSG, E_ERR, &
check_namelist_read, do_nml_file, do_nml_term, &
- open_file, close_file, set_multiple_filename_lists, &
- get_next_filename
+ open_file, close_file, set_multiple_filename_lists
use location_mod, only : location_type
@@ -39,7 +37,7 @@ program closest_member_tool
use assim_model_mod, only : static_init_assim_model, get_model_size, &
get_state_meta_data
-use state_vector_io_mod, only : read_state
+use state_vector_io_mod, only : read_state, write_state
use io_filenames_mod, only : file_info_type, io_filenames_init, &
set_io_copy_flag, set_file_metadata, &
@@ -51,7 +49,7 @@ program closest_member_tool
use mpi_utilities_mod, only : initialize_mpi_utilities, task_count, &
finalize_mpi_utilities, my_task_id, &
- send_sum_to, sum_across_tasks
+ send_sum_to
use ensemble_manager_mod, only : ensemble_type, init_ensemble_manager, compute_copy_mean, &
get_my_vars, get_my_num_vars, end_ensemble_manager
@@ -64,7 +62,7 @@ program closest_member_tool
character(len=32 ), parameter :: revision = "$Revision$"
character(len=128), parameter :: revdate = "$Date$"
-integer :: iunit, io, ens, i, j, total_j, qtyindex
+integer :: iunit, io, ens, i, j, qtyindex
integer :: num_qtys, stype
integer(i8) :: ii, model_size
integer, allocatable :: index_list(:)
@@ -162,7 +160,7 @@ program closest_member_tool
num_domains = get_num_domains()
-! Given either a vector of input_state_files or a text file containing
+! Given either a vector of in/output_state_files or a text file containing
! a list of files, return a vector of files containing the filenames.
call set_multiple_filename_lists(input_restart_files(:), &
input_restart_file_list(:), &
@@ -178,11 +176,10 @@ program closest_member_tool
file_array_input = RESHAPE(input_restart_files, (/ens_size, num_domains/))
! read in the ensemble and the mean - always in a separate file
-call io_filenames_init(ens_file_info, &
- ncopies = ens_size, &
- cycling = single_restart_file_in, &
- single_file = single_restart_file_in, &
- restart_files = file_array_input)
+call io_filenames_init(ens_file_info, ens_size, &
+ cycling=single_restart_file_in, &
+ single_file=single_restart_file_in, &
+ restart_files=file_array_input)
do imem = 1, ens_size
write(my_base,'(A,I0.2)') 'inens_', imem
@@ -285,9 +282,8 @@ program closest_member_tool
endif
enddo
- ! compute the total across all members
- call sum_across_tasks(j, total_j)
- write(msgstring, *) 'using ', total_j, ' of ', model_size, ' items in the state vector'
+ !>@todo JOHNNY should do a sum_all_variables then print
+ write(msgstring, *) 'using ', j, ' of ', model_size, ' items in the state vector'
call error_handler(E_MSG,'closest_member_tool', msgstring)
else
! use everything.
@@ -318,11 +314,9 @@ program closest_member_tool
iunit = open_file(output_file_name, 'formatted', 'write')
if (single_restart_file_in) then
- write(iunit, "(I6)") index_list(1)
+ write(iunit, "(I4)") index_list(1)
else
- !> @todo FIXME is this domain by domain? if so, need to loop over domains?
- msgstring = get_next_filename(input_restart_file_list(1), index_list(1))
- write(iunit, "(A)") trim(msgstring)
+ write(iunit, "(A,A,I4.4)") trim(input_restart_file_list(1)), '.', index_list(1)
endif
call close_file(iunit)
@@ -340,7 +334,7 @@ program closest_member_tool
if (.not. allqtys) deallocate(useqty)
call end_ensemble_manager(ens_handle)
-call finalize_mpi_utilities()
+call finalize_mpi_utilities() ! now closes log file, too
!----------------------------------------------------------------
!----------------------------------------------------------------
diff --git a/assimilation_code/programs/closest_member_tool/closest_member_tool.html b/assimilation_code/programs/closest_member_tool/closest_member_tool.html
index d2939370b3..6f46f0188f 100644
--- a/assimilation_code/programs/closest_member_tool/closest_member_tool.html
+++ b/assimilation_code/programs/closest_member_tool/closest_member_tool.html
@@ -36,8 +36,8 @@
PROGRAM closest_member_tool
Overview
-Utility program to compare the ensemble mean to an ensemble of
-restart files, which can now be run in parallel.
+Utility program to compare the ensemble mean to a group of ensemble
+member restart files, which can now be run in parallel.
The program prints out a sorted order of which
members are 'closest' to the mean, where the method used to determine
'close' is selectable by namelist option. It also creates a file with
@@ -85,7 +85,7 @@
Overview
members to run a free model forecast forward in time after
the assimilation is finished. Each member is an equally likely
representation of the model state. Using the ensemble mean
-may not be the best choice since the mean may not have
+may not be the best since the mean may not have
self-consistent fine-scale structures in the data.
In addition to printing out data about all members to both
@@ -93,8 +93,8 @@
Overview
a single output file containing information about the closest member.
If the input restart data is in a single file, the output
file 'closest_restart' contains a single number which is
-the ensemble member number.
-If the input restart data is in separate files, the output file
+the ensemble member number. If the
+input restart data is in separate files, the output file
contains the full filename of the closest member, e.g.
'filter_restart.0004' if member 4 is closest. For scripting
the contents of this file can be used to copy the corresponding
diff --git a/assimilation_code/programs/closest_member_tool/closest_member_tool.nml b/assimilation_code/programs/closest_member_tool/closest_member_tool.nml
index 72a72a2d18..76d0210d22 100644
--- a/assimilation_code/programs/closest_member_tool/closest_member_tool.nml
+++ b/assimilation_code/programs/closest_member_tool/closest_member_tool.nml
@@ -1,3 +1,4 @@
+
# different methods to compute 'distance' from mean:
# 1 = simple absolute difference
# 2 = normalized absolute difference
@@ -5,12 +6,12 @@
# 4 = normalized rmse difference
&closest_member_tool_nml
- input_restart_files = ''
- input_restart_file_list = '',
- output_file_name = 'closest_restart',
- ens_size = 1
- difference_method = 4,
- use_only_qtys = '',
- single_restart_file_in = .false.,
+ input_restart_file_list = '',
+ mean_restart_file_list = '',
+ output_file_name = 'closest_restart',
+ ens_size = 1
+ difference_method = 4,
+ use_only_kinds = '',
+ single_restart_file_in = .false.,
/
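
As a rough illustration of the distance methods numbered in the comments above (1 = simple absolute difference, 4 = normalized rmse difference), here is a sketch of what such metrics can look like. It is not the tool's actual code, and the choice to normalize by the mean value is an assumption made only for this example.

! sketch: distance of one ensemble member from the ensemble mean
function member_distance(member, mean, method) result(dist)
   implicit none
   integer, parameter :: r8 = selected_real_kind(12)
   real(r8), intent(in) :: member(:), mean(:)
   integer,  intent(in) :: method
   real(r8) :: dist

   select case (method)
   case (1)      ! simple absolute difference
      dist = sum(abs(member - mean))
   case (4)      ! normalized rmse difference (normalization choice is illustrative)
      dist = sqrt(sum(((member - mean) / merge(mean, 1.0_r8, mean /= 0.0_r8))**2) / &
                  real(size(member), r8))
   case default  ! methods 2 and 3 omitted from this sketch
      dist = -1.0_r8
   end select
end function member_distance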
diff --git a/build_templates/mkmf.template b/build_templates/mkmf.template
index a8583e4474..beef252c49 100755
--- a/build_templates/mkmf.template
+++ b/build_templates/mkmf.template
@@ -1,4 +1,4 @@
-# Template for Intel ifort and Mac OSX
+# Template for GNU gfortran on Linux or Mac OSX
#
# DART software - Copyright UCAR. This open source software is provided
# by UCAR, "as is", without charge, subject to all terms of use at
@@ -9,108 +9,57 @@
# typical use with mkmf
# mkmf -t mkmf.template.xxxx ...
#
-# FFLAGS useful for DEBUGGING. NOTE: The intel compiler can provide a lot more
-# information if you LEAVE the object and module files intact.
-# Do not remove the *.o and *.mod files when debugging code.
-#
-# -g include debugging information. these are all synonyms.
-# -debug full
-# -debug all
-# -O0 setting -g will make this the default (no optimization).
-# it is possible to set -g and then explicitly set -O2 if
-# the behavior being debugged depends on optimization changes.
-# -ftrapuv traps if a local variable is used before being set
-# -C enables all runtime checks. -C and -check all are synonyms.
-# -check all
-# -check enables/disables more specific runtime checks.
-# keywords: [arg_temp_created,bounds,overflow,format,pointers,uninit]
-# -warn the level of warning messages issued.
-# keywords: [alignments, argument_checking, declarations,
-# errors, fileopt, general, ignore_loc,
-# stderrors, truncated_source, uncalled,
-# uninitialized, unused, usage, all]
-# -fp-stack-check catches conditions where the FP stack is not correct.
-# Typically this is when a real function is called as if it were a
-# subroutine, OR a subroutine is called as if it were a function (return
-# values left of FP stack OR too much data is taken off the FP stack)
-# -vec-reportN controls how much diagnostic output is printed about
-# loops vectorized by the compiler. N = 0 is silent,
-# N can have values up to 5.
-# -traceback tells the compiler to generate extra information in the
-# object file to provide source file traceback information
-# when a severe error occurs at run time
-#
-# FFLAGS useful for bitwise reproducibility and accuracy control
-# (these will slow down performance to various degrees)
-# -fp-model precise control how floating point roundoff is done so it is
-# reproducible from run to run. in simple tests this
-# flag alone was enough to create bitwise reproducible
-# code but slowed execution significantly.
-# -ftz 'flush to zero' underflows result in zero. set by default if
-# any -O other than -O0 set, or if -fpe0 or -fpe1 set.
-# -fpeN controls floating point exception handling. -fpe0 rounds underflow
-# to zero and traps on any other exception type.
-# -pc80 set internal FPU precision to 64 bit significand
-# (default is -pc64 with 53 internal bits)
-#
-# FFLAGS useful for production
-# -O2 default. optimize without too much unrepeatable numerical games
-# -O3 more aggressive optimizations. check numerical differences
-# before using this indiscriminately.
-# -O1 if you get compile-time errors about out of memory or unable to
-# complete compilation because of complexity, try lowering the
-# optimization level on the offending source files.
-# -ipo enable optimizations between routines in separate source files
-# -heap-arrays 10 allocate large arrays from the heap instead of putting them
-# on the stack. the number is the limit in KB for when arrays
-# move from the stack to the heap. this can help if you get stack
-# overflow errors and cannot increase the stack size more.
-# allocating from the stack is faster, but it's usually a smaller
-# size than the heap.
-# -x, -m, -ax, -mcode, -march all these flags tell the compiler to generate
-# processor-specific or vector instructions. either 'man ifort' or
-# ifort --help to see what the current list of options are and
-# which have priority over the others.
-# (for those running on yellowstone, -axavx will enable the advanced
-# vector instructions available on the sandy bridge processors.)
-# -assume buffered_io allows the runtime library to buffer up individual
-# writes before calling the operating system. in particular, we
-# write our observation sequence files as a series of many individual
-# calls to the write() routine. when debugging you do not want to
-# buffer so you can see the last output before the program dies.
-# for production, however, you want to batch up writes into larger
-# blocks before stopping to do i/o to disk. an alternative at
-# runtime is to set FORT_BUFFERED to 'true' in your environment.
-# (e.g. csh family: setenv FORT_BUFFERED true or
-# ksh family: export FORT_BUFFERED=true).
-#
-#
-# FFLAGS possibly useful, not normally used by DART
-# -fpp run Fortran preprocessor on source files prior to compilation
-# -free interpret source as free-format, regardless of file extension
-# -r8 specify default real size. note that for DART we use explicit
-# types on all our real values so this will not change anything
-# inside DART. see DART/common/types_mod.f90 if you must run
-# with single precision reals.
-# -convert big_endian useful if you're on the wrong architecture.
-# however this controls both reading and writing so you can't
-# use it as a conversion mechanism unless you write files out
-# in ascii format. applies to all unformatted fortran i/o.
-# -assume byterecl ... more 'industry-standard' direct-access behavior
-# controls what units the RECL (record length) specifier returns.
-#
-# Runtime environment variables that influence the compiler behavior:
-#
-# Make output lines for fortran write statements longer without wrapping:
-# setenv FORT_FMT_RECL 512 (or any length)
-#
-#
-# IF YOU HAVE MORE CURRENT COMPILER INFORMATION, PLEASE SHARE IT WITH US.
+# Suggested (perhaps required) flags:
+# -ffree-line-length-none handles "long" lines - i.e. longer than the default free-form limit of 132 chars
+#
+# Suggested debugging flags:
+# -g add debugging information to executable
+# -Wuninitialized catch uninitialized variables
+# -Wunused issue warning for unused variables - keeps code clean
+# -fbacktrace on runtime errors, try to print out a stack backtrace
+# -fbounds-check add runtime-checking for out-of-range array indices
+# -ffpe-trap=invalid,zero,overflow
+# stop on floating point errors
+#
+# earlier versions of this mkmf file listed 'precision' as one of the options on
+# the fpe-trap line. this is not recommended anymore as some of the gfortran internal
+# routines will trap, as well as some perfectly valid DART computations.
+#
+# Generally not needed but possibly useful for non-DART code:
+#
+# -ffree-form forces input file to be read as free format regardless
+# of what file extension it has.
+#
+# -fdefault-real-8 force all real variables to be real*8. not needed for DART code
+# since all real variables in DART are declared with a size.
+#
+# NOTE: Some previous versions of the gfortran compiler (including 4.1.2)
+# changed the record marker size to 8 bytes, which made binary files written
+# with the 'unformatted' option unreadable by programs compiled with other
+# compilers. Later versions of the compiler changed the default back to 4 bytes.
+# If you have a version that defaults to 8 byte record markers, use this
+# compile time flag:
+# -frecord-marker=4
+# If you have files written by older versions of the gfortran compiler that
+# cannot be read now, try this compile time flag:
+# -frecord-marker=8
+#
+# Convert big-endian or little-endian binary files at runtime:
+#
+# setenv GFORTRAN_CONVERT_UNIT 'big_endian'
+# Treats ALL units as big_endian
+#
+# setenv GFORTRAN_CONVERT_UNIT 'big_endian;native:10-20'
+# Treats units 10-20 as big_endian but the rest are 'native'
+#
+# for more information on unit conversions:
+# http://gcc.gnu.org/onlinedocs/gfortran/GFORTRAN_005fCONVERT_005fUNIT.html#GFORTRAN_005fCONVERT_005fUNIT
+#
MPIFC = mpif90
MPILD = mpif90
-FC = ifort
-LD = ifort
+FC = gfortran
+LD = gfortran
# DISCUSSION ABOUT NETCDF. DART works with both V3 and V4 flavors of netCDF.
# Some V4 installations also require the HDF5 libraries. Some don't.
@@ -121,7 +70,7 @@ LD = ifort
# LIBS = -L$(NETCDF)/lib -lnetcdf
# LIBS = -L$(NETCDF)/lib -lnetcdff -lnetcdf
# LIBS = -L$(NETCDF)/lib -lnetcdf -lcurl -lhdf5_hl -lhdf5 -lz -lm
-# LIBS = -L$(NETCDF)/lib -lnetcdf -lnetcdff -lcurl -lhdf5_hl -lhdf5 -lz -lm
+# LIBS = -L$(NETCDF)/lib -lnetcdff -lnetcdf -lcurl -lhdf5_hl -lhdf5 -lz -lm
#
# If you get an error "ld: library not found for -lnetcdff" (note 2 f's),
# remove it from the LIBS line. The same is true for any library. If 'ld'
@@ -135,22 +84,11 @@ LD = ifort
INCS = -I$(NETCDF)/include
LIBS = -L$(NETCDF)/lib -lnetcdff -lnetcdf
+FFLAGS = -O2 -ffree-line-length-none $(INCS)
+LDFLAGS = $(FFLAGS) $(LIBS)
-# for Intel 9.x:
-#FFLAGS = $(INCS) -O2
-#LDFLAGS = $(INCS) -Wl,-stack_size,10000000 $(LIBS)
-
-# for Intel 10.x and beyond:
-#FFLAGS = $(INCS) -O2 -m64 -heap-arrays -assume buffered_io
-LDFLAGS = $(INCS) $(LIBS)
-
-# for development or debugging, use this instead:
- FFLAGS = -g -C -check noarg_temp_created -fpe0 \
- -fp-model precise -ftrapuv -traceback $(INCS)
-#
-# If you get this error: libimf.so: warning: warning: feupdateenv is not implemented
-# try adding: -limf -lm to your LIBS line.
-
+# FFLAGS = -g -Wuninitialized -Wunused -ffree-line-length-none -fbounds-check \
+# -fbacktrace -ffpe-trap=invalid,zero,overflow $(INCS)
#
# $URL$
diff --git a/models/cam-fv/work/path_names_closest_member_tool b/models/cam-fv/work/path_names_closest_member_tool
index 278fc2f588..933e33f22f 100644
--- a/models/cam-fv/work/path_names_closest_member_tool
+++ b/models/cam-fv/work/path_names_closest_member_tool
@@ -29,9 +29,7 @@ assimilation_code/modules/utilities/time_manager_mod.f90
assimilation_code/modules/utilities/types_mod.f90
assimilation_code/modules/utilities/utilities_mod.f90
assimilation_code/programs/closest_member_tool/closest_member_tool.f90
-models/cam-fv/chem_tables_mod.f90
models/cam-fv/model_mod.f90
models/utilities/default_model_mod.f90
-models/utilities/quad_utils_mod.f90
observations/forward_operators/obs_def_mod.f90
observations/forward_operators/obs_def_utilities_mod.f90
diff --git a/models/mpas_atm/data/namelist.atmosphere b/models/mpas_atm/data/namelist.atmosphere
index d735a83a4e..019990da37 100644
--- a/models/mpas_atm/data/namelist.atmosphere
+++ b/models/mpas_atm/data/namelist.atmosphere
@@ -1,10 +1,10 @@
&nhyd_model
config_time_integration_order = 2
- config_dt = 1200.0
- config_start_time = '2017-04-26_00:00:00'
- config_run_duration = '0_01:00:00'
+ config_dt = 1200.
+ config_start_time = '2012-06-20_09:00:00'
+ config_run_duration = '0_09:00:00'
config_split_dynamics_transport = true
- config_number_of_sub_steps = 2
+ config_number_of_sub_steps = 2
config_dynamics_split_steps = 3
config_h_mom_eddy_visc2 = 0.0
config_h_mom_eddy_visc4 = 0.0
@@ -13,7 +13,7 @@
config_h_theta_eddy_visc4 = 0.0
config_v_theta_eddy_visc2 = 0.0
config_horiz_mixing = '2d_smagorinsky'
- config_len_disp = 240000.0
+ config_len_disp = 240000.0
config_visc4_2dsmag = 0.05
config_w_adv_order = 3
config_theta_adv_order = 3
@@ -27,45 +27,45 @@
config_monotonic = true
config_coef_3rd_order = 0.25
config_epssm = 0.1
- config_smdiv = 0.1
+ config_smdiv = 0.05
+ config_smp_forward = 0.2
/
+
&damping
config_zd = 22000.0
config_xnutr = 0.2
/
+
&io
- config_pio_num_iotasks = 0
- config_pio_stride = 1
+ config_pio_num_iotasks = 0
+ config_pio_stride = 1
/
+
&decomposition
- config_block_decomp_file_prefix = 'graph.info.part.'
+ config_block_decomp_file_prefix = 'x1.10242.graph.info.part.'
/
+
&restart
- config_do_restart = false
- config_do_DAcycling = false
+ config_do_restart = true
+ config_do_DAcycling = true
/
+
&printout
config_print_global_minmax_vel = true
- config_print_detailed_minmax_vel = false
/
+
&IAU
- config_IAU_option = 'off'
- config_IAU_window_length_s = 21600.
+ config_IAU_option = 'off'
+ config_IAU_window_length_s = 21600.
/
+
&physics
- config_sst_update = true
- config_sstdiurn_update = false
+ config_sst_update = true
+ config_sstdiurn_update = false
config_deepsoiltemp_update = false
config_radtlw_interval = '00:30:00'
config_radtsw_interval = '00:30:00'
- config_bucket_update = 'none'
+ config_bucket_update = '0_06:00:00'
config_physics_suite = 'mesoscale_reference'
- config_o3climatology = true
- config_gwdo_scheme = 'bl_ysu_gwdo'
-/
-&diagnostics
- config_num_diag_levels = 16
-/
-&soundings
- config_sounding_interval = 'none'
/
+
diff --git a/models/mpas_atm/data/streams.atmosphere b/models/mpas_atm/data/streams.atmosphere
index 34bf970cc9..f2d0ae32c8 100644
--- a/models/mpas_atm/data/streams.atmosphere
+++ b/models/mpas_atm/data/streams.atmosphere
@@ -1,64 +1,55 @@
  [streams.atmosphere hunk: the XML stream definitions were garbled in extraction
   (the element markup was lost). Attributes still recoverable from the replacement
   file: filename_template="x1.10242.init.nc", input_interval="initial_only",
   clobber_mode="truncate", precision="single",
   output_interval="06:00:00" / "6:00:00" / "none", input_interval="01_00:00:00"]
diff --git a/models/mpas_atm/exhaustion.f90 b/models/mpas_atm/exhaustion.f90
index cbaea39ff1..8dc845f080 100644
--- a/models/mpas_atm/exhaustion.f90
+++ b/models/mpas_atm/exhaustion.f90
@@ -39,7 +39,7 @@ program exhaustion
operator(-)
use model_mod, only : static_init_model, get_model_size, get_state_meta_data, &
model_interpolate, get_analysis_time, &
- get_init_template_filename, analysis_file_to_statevector, &
+ get_model_analysis_filename, analysis_file_to_statevector, &
statevector_to_analysis_file, get_analysis_time, &
write_model_time, get_grid_dims
@@ -108,7 +108,7 @@ program exhaustion
type(time_type) :: model_time, adv_to_time
real(r8), allocatable :: statevector(:)
-character(len=129) :: mpas_input_file ! set with get_init_template_filename() if needed
+character(len=129) :: mpas_input_file ! set with get_model_analysis_filename() if needed
type(location_type) :: loc
diff --git a/models/mpas_atm/model_mod.f90 b/models/mpas_atm/model_mod.f90
index ceb149660c..3af2c43d80 100644
--- a/models/mpas_atm/model_mod.f90
+++ b/models/mpas_atm/model_mod.f90
@@ -14,21 +14,8 @@ module model_mod
! be used by converters and utilities and those interfaces can be anything
! that is useful to other pieces of code.
-! This revision of the model_mod supports both a global MPAS grid and
-! a regional grid. For the regional grid only observations which
-! are completely inside the interior will be assimilated, meaning obs
-! which need interpolation information from the boundary cells
-! (in any of the 7 boundary layers) will be rejected. However, during the
-! assimilation phase all locations in the local grid will be impacted,
-! even locations in the boundary layers if there are obs close to the
-! boundaries. A post-processing step will smooth the GFS external
-! values with the values updated by the assimilation in the boundary layers.
-
-! Note that to reject obs during interpolation requires the model_interpolate()
-! routine to check and return an error, but during the vertical conversion and
-! get_close routines all state point operations must succeed, even those
-! in the boundary layers. Pay close attention to which internal routines are used
-! by each to make sure the intended actions are what happens.
+
+! Routines in other modules that are used here.
use types_mod, only : r4, r8, i8, digits12, SECPERDAY, MISSING_R8, &
rad2deg, deg2rad, PI, MISSING_I, obstypelength
@@ -50,23 +37,17 @@ module model_mod
use netcdf_utilities_mod, only : nc_add_global_attribute, nc_synchronize_file, &
nc_add_global_creation_time, nc_check, &
- nc_begin_define_mode, nc_end_define_mode, &
- nc_open_file_readonly, nc_close_file, &
- nc_add_attribute_to_variable, nc_define_dimension, &
- nc_define_unlimited_dimension, nc_define_character_variable, &
- nc_define_real_variable, nc_get_variable, nc_put_variable, &
- nc_get_dimension_size, nc_variable_exists, nc_dimension_exists, &
- nc_define_integer_variable
+ nc_begin_define_mode, nc_end_define_mode
use location_io_mod, only : nc_write_location_atts, nc_write_location
-use default_model_mod, only : nc_write_model_vars, adv_1step, &
- init_time => fail_init_time, &
- init_conditions => fail_init_conditions
+use default_model_mod, only : nc_write_model_vars, init_time, init_conditions, &
+ adv_1step
-use xyz_location_mod, only : xyz_location_type, xyz_set_location, xyz_get_location, &
- xyz_get_close_type, xyz_get_close_init, xyz_get_close_destroy, &
- xyz_find_nearest, xyz_use_great_circle_dist
+use xyz_location_mod, only : xyz_location_type, xyz_get_close_maxdist_init, &
+ xyz_get_close_type, xyz_set_location, xyz_get_location, &
+ xyz_get_close_obs_init, xyz_get_close_obs_destroy, &
+ xyz_find_nearest
use utilities_mod, only : register_module, error_handler, &
E_ERR, E_WARN, E_MSG, logfileunit, get_unit, &
@@ -76,35 +57,33 @@ module model_mod
file_to_text, close_file, do_nml_file, &
do_nml_term, scalar
-use obs_kind_mod, only : get_index_for_quantity, &
- get_name_for_quantity, &
- get_quantity_for_type_of_obs, &
- QTY_SURFACE_ELEVATION, &
- QTY_SURFACE_PRESSURE, &
- QTY_10M_U_WIND_COMPONENT, &
- QTY_10M_V_WIND_COMPONENT, &
- QTY_2M_TEMPERATURE, &
- QTY_2M_SPECIFIC_HUMIDITY, &
- QTY_VERTICAL_VELOCITY, &
- QTY_POTENTIAL_TEMPERATURE, &
- QTY_EDGE_NORMAL_SPEED, &
- QTY_TEMPERATURE, &
- QTY_U_WIND_COMPONENT, &
- QTY_V_WIND_COMPONENT, &
- QTY_PRESSURE, &
- QTY_DENSITY, &
- QTY_VAPOR_MIXING_RATIO, &
- QTY_2M_VAPOR_MIXING_RATIO, &
- QTY_CLOUDWATER_MIXING_RATIO, &
- QTY_RAINWATER_MIXING_RATIO, &
- QTY_ICE_MIXING_RATIO, &
- QTY_SNOW_MIXING_RATIO, &
- QTY_GRAUPEL_MIXING_RATIO, &
- QTY_SPECIFIC_HUMIDITY, &
- QTY_GEOPOTENTIAL_HEIGHT, &
+use obs_kind_mod, only : get_index_for_quantity, &
+ get_name_for_quantity, &
+ QTY_SURFACE_ELEVATION, &
+ QTY_SURFACE_PRESSURE, &
+ QTY_10M_U_WIND_COMPONENT, &
+ QTY_10M_V_WIND_COMPONENT, &
+ QTY_2M_TEMPERATURE, &
+ QTY_2M_SPECIFIC_HUMIDITY, &
+ QTY_VERTICAL_VELOCITY, &
+ QTY_POTENTIAL_TEMPERATURE, &
+ QTY_EDGE_NORMAL_SPEED, &
+ QTY_TEMPERATURE, &
+ QTY_U_WIND_COMPONENT, &
+ QTY_V_WIND_COMPONENT, &
+ QTY_PRESSURE, &
+ QTY_DENSITY, &
+ QTY_VAPOR_MIXING_RATIO, &
+ QTY_CLOUDWATER_MIXING_RATIO, &
+ QTY_RAINWATER_MIXING_RATIO, &
+ QTY_ICE_MIXING_RATIO, &
+ QTY_SNOW_MIXING_RATIO, &
+ QTY_GRAUPEL_MIXING_RATIO, &
+ QTY_SPECIFIC_HUMIDITY, &
+ QTY_GEOPOTENTIAL_HEIGHT, &
QTY_PRECIPITABLE_WATER
-use mpi_utilities_mod, only: my_task_id, broadcast_minmax
+use mpi_utilities_mod, only: my_task_id, all_reduce_min_max, task_count
use random_seq_mod, only: random_seq_type, init_random_seq, random_gaussian
@@ -163,21 +142,18 @@ module model_mod
! generally useful routines for various support purposes.
! the interfaces here can be changed as appropriate.
-public :: get_init_template_filename, &
+public :: get_model_analysis_filename, &
+ get_grid_definition_filename, &
analysis_file_to_statevector, &
statevector_to_analysis_file, &
get_analysis_time, &
get_grid_dims, &
get_xland, &
- get_cell_center_coords, &
- get_bdy_mask, &
print_variable_ranges, &
find_closest_cell_center, &
find_triangle, &
read_2d_from_nc_file, &
- find_height_bounds, &
- cell_ok_to_interpolate, &
- is_global_grid
+ find_height_bounds
! version controlled file description for error handling, do not edit
character(len=256), parameter :: source = &
@@ -190,26 +166,24 @@ module model_mod
character(len=256) :: string1, string2, string3, locstring
logical, save :: module_initialized = .false.
-! length of an mpas (also wrf) time string: YYYY-MM-DD_hh:mm:ss
-integer, parameter :: TIMELEN = 19
-
! Real (physical) constants as defined exactly in MPAS.
! redefined here for consistency with the model.
real(r8), parameter :: rgas = 287.0_r8
-real(r8), parameter :: rv = 461.6_r8
real(r8), parameter :: cp = 1003.0_r8
real(r8), parameter :: cv = 716.0_r8
real(r8), parameter :: p0 = 100000.0_r8
real(r8), parameter :: rcv = rgas/(cp-rgas)
-real(r8), parameter :: rvord = rv/rgas
! earth radius; needed to convert lat/lon to x,y,z cartesian coords.
-! for the highest accuracy this should match what the model uses.
+! FIXME: one of the example ocean files had a global attr with 6371220.0
+! instead of 6371229.0. ??
real(r8), parameter :: radius = 6371229.0 ! meters
-! roundoff error for single precision and double
-! set in init code to 1e-5 or 1e-12 depending.
-real(r8) :: roundoff = 1.0e-5_r8
+! roundoff error for single precision
+!real(r8), parameter :: roundoff = 1.0e-5_r8
+
+! r8 r4
+real(r8), parameter :: roundoff = 1.0e-12_r8
! Storage for a random sequence for perturbing a single initial state
type(random_seq_type) :: random_seq
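
The radius constant above is what maps cell-center latitude/longitude pairs onto 3-d Cartesian points for the xyz_location_mod nearest-cell searches. The mapping is the standard spherical-to-Cartesian conversion, shown here as an illustrative helper (not the module's own routine), assuming lat/lon are already in radians:

! illustrative only: lat/lon in radians on a sphere of radius 'radius' -> x,y,z in meters
subroutine latlon_to_xyz(lat, lon, radius, x, y, z)
   implicit none
   integer, parameter :: r8 = selected_real_kind(12)
   real(r8), intent(in)  :: lat, lon, radius
   real(r8), intent(out) :: x, y, z

   x = radius * cos(lat) * cos(lon)
   y = radius * cos(lat) * sin(lon)
   z = radius * sin(lat)
end subroutine latlon_to_xyz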
@@ -229,22 +203,18 @@ module model_mod
logical :: add_static_data_to_diags = .false.
! variables which are in the module namelist
-character(len=256) :: init_template_filename = 'mpas_init.nc'
integer :: vert_localization_coord = VERTISHEIGHT
integer :: assimilation_period_days = 0
integer :: assimilation_period_seconds = 21600
real(r8) :: model_perturbation_amplitude = 0.0001 ! tiny amounts
-logical :: log_p_vert_interp = .true. ! if true, interpolate vertical pressure in log space
-character(len=32) :: calendar = 'Gregorian'
real(r8) :: highest_obs_pressure_mb = 100.0_r8 ! do not assimilate obs higher than this level.
real(r8) :: sfc_elev_max_diff = -1.0_r8 ! do not assimilate if |model - station| height is larger than this [m].
-integer :: xyzdebug = 0
+logical :: log_p_vert_interp = .true. ! if true, interpolate vertical pressure in log space
integer :: debug = 0 ! turn up for more and more debug messages
-
-! this is not in the namelist or supported generally.
-! (setting this to true avoids the surface elevation max diff
-! test for elevation and surface pressure.)
-logical :: always_assim_surf_altimeters = .false.
+integer :: xyzdebug = 0
+character(len=32) :: calendar = 'Gregorian'
+character(len=256) :: model_analysis_filename = 'mpas_init.nc'
+character(len=256) :: grid_definition_filename = 'mpas_init.nc'
integer :: domid ! For state_structure_mod access
@@ -273,23 +243,9 @@ module model_mod
real(r8) :: outside_grid_level_tolerance = -1.0_r8
logical :: extrapolate = .false.
-! if the calling code updates an existing file it simply writes the state variable
-! data. if it needs to create a file from scratch it calls nc_write_model_atts()
-! to optionally add grid info or any other non-state variables or attributes.
-! setting this to .true. adds the mpas grid info to the file. .false. does
-! not and results in smaller diag/output files.
-logical :: write_grid_to_diag_files = .true.
-
-! in converting to scale height for the vertical, set this to .false. to
-! use simply the log of the pressure. to normalize by the surface pressure
-! (backwards compatible with previous code), set this to .true.
-logical :: no_normalization_of_scale_heights = .true.
-
-! for regional MPAS
-real(r8) :: dxmax ! max distance between two adjacent cell centers in the mesh (in meters)
-
namelist /model_nml/ &
- init_template_filename, &
+ model_analysis_filename, &
+ grid_definition_filename, &
vert_localization_coord, &
assimilation_period_days, &
assimilation_period_seconds, &
@@ -305,9 +261,7 @@ module model_mod
highest_obs_pressure_mb, &
outside_grid_level_tolerance, &
extrapolate, &
- sfc_elev_max_diff, &
- write_grid_to_diag_files, &
- no_normalization_of_scale_heights
+ sfc_elev_max_diff
! DART state vector contents are specified in the input.nml:&mpas_vars_nml namelist.
integer, parameter :: max_state_variables = 80
@@ -319,20 +273,19 @@ module model_mod
namelist /mpas_vars_nml/ mpas_state_variables, mpas_state_bounds
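
For orientation, the &mpas_vars_nml entries are pairs of a netCDF variable name from the analysis file and the DART quantity it maps to. The fragment below is purely illustrative: the variable names, and especially the mpas_state_bounds syntax, are assumptions and should be checked against the shipped input.nml and the model documentation.

&mpas_vars_nml
   mpas_state_variables = 'theta',                  'QTY_POTENTIAL_TEMPERATURE',
                          'rho',                    'QTY_DENSITY',
                          'qv',                     'QTY_VAPOR_MIXING_RATIO',
                          'uReconstructZonal',      'QTY_U_WIND_COMPONENT',
                          'uReconstructMeridional', 'QTY_V_WIND_COMPONENT',
   mpas_state_bounds    = 'qv', '0.0', 'NULL', 'CLAMP',
   /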
+! FIXME: this shouldn't be a global. the progvar array
+! should be allocated at run time and nfields should be part
+! of a larger derived type that includes nfields and an array
+! of progvartypes.
integer :: nfields
-!>@todo FIXME - REMOVE AS MUCH OF THIS AS POSSIBLE.
-!> some of this information is in the state structure now.
-!> the duplicate progvar stuff should be removed and the
-!> state routines used instead. this duplicates work and
-!> makes us keep up code in 2 different places.
-
-! original code:
! Everything needed to describe a variable
type progvartype
private
character(len=NF90_MAX_NAME) :: varname
+ character(len=NF90_MAX_NAME) :: long_name
+ character(len=NF90_MAX_NAME) :: units
character(len=NF90_MAX_NAME), dimension(NF90_MAX_VAR_DIMS) :: dimname
integer, dimension(NF90_MAX_VAR_DIMS) :: dimlens
integer :: xtype ! netCDF variable type (NF90_double, etc.)
@@ -384,7 +337,6 @@ module model_mod
real(r8), allocatable :: latEdge(:) ! edge longitudes (degrees, original radians in file)
real(r8), allocatable :: lonCell(:) ! cell center longitudes (degrees, original radians in file)
real(r8), allocatable :: latCell(:) ! cell center latitudes (degrees, original radians in file)
-real(r8), allocatable :: dcEdge(:) ! distance between two adjacent cell centers (in meters)
real(r8), allocatable :: xland(:) ! LAND MASK (1 FOR LAND, 2 FOR WATER)
real(r8), allocatable :: zGridFace(:,:) ! geometric height at cell faces (nVertLevelsP1,nCells)
real(r8), allocatable :: zGridCenter(:,:) ! geometric height at cell centers (nVertLevels, nCells)
@@ -403,8 +355,8 @@ module model_mod
! Boundary information might be needed ... regional configuration?
! Read if available.
-integer, allocatable :: bdyMaskCell(:)
-integer, allocatable :: bdyMaskEdge(:)
+integer, allocatable :: boundaryEdge(:,:)
+integer, allocatable :: boundaryVertex(:,:)
integer, allocatable :: maxLevelCell(:)
real(r8), allocatable :: ens_mean(:) ! needed to convert vertical distances consistently
@@ -525,17 +477,16 @@ subroutine static_init_model()
integer :: cel1, cel2
logical :: both
real(r8) :: variable_bounds(max_state_variables, 2)
-character(len=*), parameter :: routine = 'static_init_model'
if ( module_initialized ) return ! only need to do this once.
+! Print module information to log file and stdout.
+call register_module(source, revision, revdate)
+
! Since this routine calls other routines that could call this routine
! we'll say we've been initialized pretty dang early.
module_initialized = .true.
-! Print module information to log file and stdout.
-call register_module(source, revision, revdate)
-
! Read the DART namelist for this model
call find_namelist_in_file('input.nml', 'model_nml', iunit)
read(iunit, nml = model_nml, iostat = io)
@@ -564,25 +515,23 @@ subroutine static_init_model()
call get_time(model_timestep,ss,dd) ! set_time() assures the seconds [0,86400)
-write(string1,*)'assimilation window is ',dd,' days ',ss,' seconds'
-call error_handler(E_MSG,routine,string1,source,revision,revdate)
+write(string1,*)'assimilation period is ',dd,' days ',ss,' seconds'
+call error_handler(E_MSG,'static_init_model',string1,source,revision,revdate)
!---------------------------------------------------------------
! 1) get grid dimensions
! 2) allocate space for the grids
! 3) read them from the analysis file
-ncid = nc_open_file_readonly(init_template_filename, routine)
-
-! get sizes
-call read_grid_dims(ncid)
+! read_grid_dims() fills in the following module global variables:
+! nCells, nVertices, nEdges, maxEdges, nVertLevels, nVertLevelsP1, vertexDegree, nSoilLevels
+call read_grid_dims()
allocate(latCell(nCells), lonCell(nCells))
allocate(zGridFace(nVertLevelsP1, nCells))
allocate(zGridCenter(nVertLevels, nCells))
allocate(cellsOnVertex(vertexDegree, nVertices))
-allocate(dcEdge(nEdges))
allocate(nEdgesOnCell(nCells))
allocate(xland(nCells))
allocate(edgesOnCell(maxEdges, nCells))
@@ -601,15 +550,10 @@ subroutine static_init_model()
allocate(latEdge(nEdges), lonEdge(nEdges))
endif
-! is this a global or regional grid? determined by the
-! existance of bdyMaskCells or bdyMaskEdges.
-! if regional, allocate and read in the boundry info here.
-call set_global_grid(ncid)
-
-! fill in the grid values
-call get_grid(ncid)
+! this reads in latCell, lonCell, zGridFace, cellsOnVertex
+call get_grid()
-! vertical faces are in the input file. compute vertical center locations here.
+! read in vert cell face locations and then compute vertical center locations
do kloc=1, nCells
do iloc=1, nVertLevels
zGridCenter(iloc,kloc) = (zGridFace(iloc,kloc) + zGridFace(iloc+1,kloc))*0.5_r8
@@ -640,13 +584,21 @@ subroutine static_init_model()
!---------------------------------------------------------------
! Compile the list of model variables to use in the creation
-! of the DART state vector.
+! of the DART state vector. Required to determine model_size.
!
-! THIS CODE SHOULD BE REMOVED - it is done by the add_domain code.
+! Verify all variables are in the model analysis file
!
+! Compute the offsets into the state vector for the start of each
+! different variable type. Requires reading shapes from the model
+! analysis file. As long as TIME is the LAST dimension, we're OK.
+!
+! Record the extent of the data type in the state vector.
+
+call nc_check( nf90_open(trim(model_analysis_filename), NF90_NOWRITE, ncid), &
+ 'static_init_model', 'open '//trim(model_analysis_filename))
-call verify_state_variables( mpas_state_variables, ncid, init_template_filename, &
+call verify_state_variables( mpas_state_variables, ncid, model_analysis_filename, &
nfields, variable_table)
TimeDimID = FindTimeDimension( ncid )
@@ -657,7 +609,7 @@ subroutine static_init_model()
endif
call nc_check(nf90_Inquire(ncid,nDimensions,nVariables,nAttributes,unlimitedDimID), &
- 'static_init_model', 'inquire '//trim(init_template_filename))
+ 'static_init_model', 'inquire '//trim(model_analysis_filename))
if ( (TimeDimID > 0) .and. (unlimitedDimID > 0) .and. (TimeDimID /= unlimitedDimID)) then
write(string1,*)'IF Time is not the unlimited dimension, I am lost.'
@@ -679,7 +631,7 @@ subroutine static_init_model()
progvar(ivar)%numcells = MISSING_I
progvar(ivar)%numedges = MISSING_I
- string2 = trim(init_template_filename)//' '//trim(varname)
+ string2 = trim(model_analysis_filename)//' '//trim(varname)
call nc_check(nf90_inq_varid(ncid, trim(varname), VarID), &
'static_init_model', 'inq_varid '//trim(string2))
@@ -687,6 +639,23 @@ subroutine static_init_model()
call nc_check(nf90_inquire_variable(ncid, VarID, xtype=progvar(ivar)%xtype, &
dimids=dimIDs, ndims=numdims), 'static_init_model', 'inquire '//trim(string2))
+ ! If the long_name and/or units attributes are set, get them.
+ ! They are not REQUIRED to exist but are nice to use if they are present.
+
+ if( nf90_inquire_attribute( ncid, VarID, 'long_name') == NF90_NOERR ) then
+ call nc_check( nf90_get_att(ncid, VarID, 'long_name' , progvar(ivar)%long_name), &
+ 'static_init_model', 'get_att long_name '//trim(string2))
+ else
+ progvar(ivar)%long_name = varname
+ endif
+
+ if( nf90_inquire_attribute( ncid, VarID, 'units') == NF90_NOERR ) then
+ call nc_check( nf90_get_att(ncid, VarID, 'units' , progvar(ivar)%units), &
+ 'static_init_model', 'get_att units '//trim(string2))
+ else
+ progvar(ivar)%units = 'unknown'
+ endif
+
! Since we are not concerned with the TIME dimension, we need to skip it.
! When the variables are read, only a single timestep is ingested into
! the DART state vector.
@@ -741,7 +710,8 @@ subroutine static_init_model()
enddo
-call nc_close_file(ncid, routine)
+call nc_check( nf90_close(ncid), &
+ 'static_init_model', 'close '//trim(model_analysis_filename))
model_size = progvar(nfields)%indexN
@@ -758,8 +728,8 @@ subroutine static_init_model()
write(logfileunit, *)'static_init_model: grid is a global grid '
write( * , *)'static_init_model: grid is a global grid '
else
- write(logfileunit, *)'static_init_model: grid is NOT a global grid. Lateral boundaries exist '
- write( * , *)'static_init_model: grid is NOT a global grid. Lateral boundaries exist '
+ write(logfileunit, *)'static_init_model: grid has boundaries '
+ write( * , *)'static_init_model: grid has boundaries '
endif
if ( all_levels_exist_everywhere ) then
write(logfileunit, *)'static_init_model: all cells have same number of vertical levels '
@@ -845,7 +815,7 @@ subroutine static_init_model()
variable_bounds(1:nfields, 1) = progvar(1:nfields)%range(1)
variable_bounds(1:nfields, 2) = progvar(1:nfields)%range(2)
-domid = add_domain( trim(init_template_filename), nfields, &
+domid = add_domain( trim(model_analysis_filename), nfields, &
var_names = variable_table (1:nfields,1), &
clamp_vals = variable_bounds(1:nfields,:) )
@@ -854,14 +824,6 @@ subroutine static_init_model()
! tell the location module how we want to localize in the vertical
call set_vertical_localization_coord(vert_localization_coord)
-! set an appropriate value for roundoff tests based
-! on this code being compiled single or double precision.
-if (r8 == digits12) then
- roundoff = 1.0e-12_r8
-else
- roundoff = 1.0e-5_r8
-endif
-
end subroutine static_init_model
@@ -880,7 +842,9 @@ subroutine get_state_meta_data(index_in, location, var_type)
! Local variables
-integer :: nzp, iloc, vloc, nf, ndim
+integer :: i, j, k ! Indices into variable (note k is not used in MPAS)
+integer :: nzp, iloc, vloc, nf
+integer :: istatus
real(r8) :: height
type(location_type) :: new_location
@@ -888,7 +852,17 @@ subroutine get_state_meta_data(index_in, location, var_type)
! get the local indices and type from dart index. kloc is a dummy variable for this subroutine
-call find_mpas_indices(index_in, iloc, vloc, ndim, nf)
+call get_model_variable_indices(index_in, i, j, k, var_id=nf)
+
+if (progvar(nf)%numdims == 2) then ! variable(vcol, iloc)
+ vloc = i
+ iloc = j
+elseif (progvar(nf)%numdims == 1) then ! variable(iloc)
+ iloc = i
+ vloc = 1
+else
+ call error_handler(E_ERR, 'get_state_meta_data ', 'expecting 1D or 2D variable')
+endif
nzp = progvar(nf)%numvertical
@@ -933,6 +907,19 @@ subroutine get_state_meta_data(index_in, location, var_type)
! cannot do vertical conversion here. assim_tools will call vertical conversion
! on the obs and on the state.
+!! Let us return the vert location with the requested vertical localization coordinate
+!! hoping that the code can run faster when same points are close to many obs
+!! since vert_convert does not need to be called repeatedly in get_close_obs any more.
+!! FIXME: we should test this to see if it's a win (in computation time). for obs
+!! which are not dense relative to the grid, this might be slower than doing the
+! conversions on demand in the localization code (in get_close_obs()).
+!
+!if ( vertical_localization_on() .and. vert_localization_coord /= VERTISHEIGHT ) then
+! new_location = location
+! call vert_convert(state_handle, new_location, progvar(nf)%dart_kind, istatus)
+! if(istatus == 0) location = new_location
+!endif
+
if (debug > 20) then
write(*,'("INDEX_IN / IVAR : ",(i10,2x),(i5,2x))') index_in, nf
@@ -947,41 +934,6 @@ subroutine get_state_meta_data(index_in, location, var_type)
end subroutine get_state_meta_data
-!------------------------------------------------------------------
-!> given an index into the state vector, return with the cellid
-!> and the vertical level if this is a 2d variable. also return
-!> the dimensionality, and optionally the progvar index.
-
-subroutine find_mpas_indices(index_in, cellid, vert_level, ndim, nf)
-integer(i8), intent(in) :: index_in
-integer, intent(out) :: cellid
-integer, intent(out) :: vert_level
-integer, intent(out), optional :: ndim
-integer, intent(out), optional :: nf
-
-integer :: i, j, k ! Indices into variable (note k is not used in MPAS)
-integer :: nzp, iloc, vloc, nnf
-
-if ( .not. module_initialized ) call static_init_model
-
-! get the local indicies and type from dart index. 'k' is a dummy variable for this subroutine
-
-call get_model_variable_indices(index_in, i, j, k, var_id=nnf)
-
-if (progvar(nnf)%numdims == 2) then ! variable(vcol, iloc)
- vert_level = i
- cellid = j
-elseif (progvar(nnf)%numdims == 1) then ! variable(iloc)
- cellid = i
- vert_level = 1
-else
- call error_handler(E_ERR, 'find_mpas_indices ', 'expecting 1D or 2D variable')
-endif
-
-if (present(ndim)) ndim = progvar(nnf)%numdims
-if (present(nf)) nf = nnf
-
-end subroutine find_mpas_indices
!------------------------------------------------------------------
subroutine model_interpolate(state_handle, ens_size, location, obs_type, expected_obs, istatus)
@@ -994,14 +946,9 @@ subroutine model_interpolate(state_handle, ens_size, location, obs_type, expecte
!
! ISTATUS = 99: general error in case something terrible goes wrong...
! ISTATUS = 88: this kind is not in the state vector
-! ISTATUS = 82: Unsupported vertical type (VERTISUNDEF)
-! ISTATUS = 81: Vertical location too high
-! ISTATUS = 80: Vertical location too low
-! ISTATUS = 11: Could not find the closest cell center that contains this lat/lon
-! ISTATUS = 12: Surface obs too far away from model elevation
+! ISTATUS = 11: Could not find a triangle that contains this lat/lon
+! ISTATUS = 12: Height vertical coordinate out of model range.
! ISTATUS = 13: Missing value in interpolation.
-! ISTATUS = 14: Could not find the other two cell centers of the triangle that contains this lat/lon
-! ISTATUS = 15: Cell centers of the triangle fall in the lateral boundary zone
! ISTATUS = 16: Don't know how to do vertical velocity for now
! ISTATUS = 17: Unable to compute pressure values
! ISTATUS = 18: altitude illegal
@@ -1026,7 +973,7 @@ subroutine model_interpolate(state_handle, ens_size, location, obs_type, expecte
integer :: ivar, obs_kind
integer :: tvars(3)
integer :: cellid
-logical :: goodkind, surface_obs
+logical :: goodkind
real(r8) :: lpres(ens_size), values(3, ens_size)
real(r8) :: llv(3) ! lon/lat/vert
integer :: e, verttype
@@ -1045,62 +992,31 @@ subroutine model_interpolate(state_handle, ens_size, location, obs_type, expecte
llv = get_location(location)
verttype = nint(query_location(location))
-surface_obs = (verttype == VERTISSURFACE)
if (debug > 10) &
- print *, 'task ', my_task_id(), ' model_interpolate: obs_kind ', trim(get_name_for_quantity(obs_kind)),' at', trim(locstring)
-
-! this routine returns the cellid for a global mpas grid, same as
-! find_closest_cell_center().
-! for a regional grid it only returns a good cellid if the closest cell center
-! AND the other 2 triangle points surrounding this location are completely inside
-! the grid and none of the vertices are in the boundary region.
-cellid = cell_ok_to_interpolate(location)
-if (debug > 10) print *, ' model_interpolate: lon/lat, cellid ', llv(1), llv(2), cellid ! SYHA
-if (cellid < 1) then
- !print *, 'model_interpolate: lon/lat is outside the domain: ', llv(1), llv(2)
- istatus = 11
- goto 100
-endif
+print *, 'task ', my_task_id(), ' model_interpolate: obs_kind', obs_kind,' at', trim(locstring)
! Reject obs if the station height is far way from the model terrain.
+! HK is this the same across the ensemble?
if(is_vertical(location, "SURFACE").and. sfc_elev_max_diff >= 0) then
+ cellid = find_closest_cell_center(llv(2), llv(1))
+ if (cellid < 1) then
+ if(debug > 0) print *, 'no closest cell center for lat/lon: ', llv(1), llv(2), cellid
+ goto 100
+ endif
if(abs(llv(3) - zGridFace(1,cellid)) > sfc_elev_max_diff) then
- ! allow experimentation with always accepting surface altimeters
- ! by setting this module global variable to .true. at top of file.
- if (always_assim_surf_altimeters .and. &
- (obs_kind == QTY_SURFACE_PRESSURE .or. obs_kind == QTY_SURFACE_ELEVATION)) then
- istatus = 0
- else
+ !Soyoung: No threshold for surface altimeter
+ !if(obs_kind == QTY_SURFACE_PRESSURE .or. obs_kind == QTY_SURFACE_ELEVATION) then
+ ! istatus = 0
+ !else
istatus = 12
goto 100
- endif
+ !endif
endif
endif
-! check for quantities that have both a surface field (often diagnostic)
-! and a full volume field. if the incoming location has VERTISSURFACE
-! as the vertical coordinate type, make sure the quantity to be
-! interpolated is the corresponding surface field quantity.
-if (surface_obs) then
- select case (obs_kind)
- case (QTY_TEMPERATURE)
- obs_kind = QTY_2M_TEMPERATURE
- case (QTY_PRESSURE)
- obs_kind = QTY_SURFACE_PRESSURE
- case (QTY_SPECIFIC_HUMIDITY)
- obs_kind = QTY_2M_SPECIFIC_HUMIDITY
- case (QTY_VAPOR_MIXING_RATIO)
- obs_kind = QTY_2M_VAPOR_MIXING_RATIO
- case (QTY_U_WIND_COMPONENT)
- obs_kind = QTY_10M_U_WIND_COMPONENT
- case (QTY_V_WIND_COMPONENT)
- obs_kind = QTY_10M_V_WIND_COMPONENT
- end select
-endif
-
-! see if observation quantity is in the state vector. this sets an
+! see if observation kind is in the state vector. this sets an
! error code and returns without a fatal error if answer is no.
! exceptions: the state vector has potential temp, but we can
! compute sensible temperature from pot_temp, rho, and qv.
@@ -1112,26 +1028,37 @@ subroutine model_interpolate(state_handle, ens_size, location, obs_type, expecte
! we have because we require potential temp, mixing ratio, and
! density to be in the state vector in all cases.)
-! is this field in the state?
ivar = get_progvar_index_from_kind(obs_kind)
if (ivar > 0) then
- goodkind = .true. ! yes
-
+ goodkind = .true.
else
- goodkind = .false. ! but check for exceptions
-
+ goodkind = .false.
! exceptions if the kind isn't directly
! a field in the state vector:
select case (obs_kind)
- case (QTY_TEMPERATURE, QTY_2M_TEMPERATURE)
+ case (QTY_TEMPERATURE)
+ goodkind = .true.
+ case (QTY_SURFACE_ELEVATION)
+ goodkind = .true.
+ case (QTY_SURFACE_PRESSURE)
+ goodkind = .true.
+ case (QTY_PRESSURE)
+ goodkind = .true.
+ case (QTY_GEOPOTENTIAL_HEIGHT)
+ goodkind = .true.
+ case (QTY_SPECIFIC_HUMIDITY)
goodkind = .true.
- case (QTY_SURFACE_ELEVATION, QTY_GEOPOTENTIAL_HEIGHT)
+ case (QTY_PRECIPITABLE_WATER)
goodkind = .true.
- case (QTY_PRESSURE) ! surface pressure should be in the state
+ case (QTY_10M_U_WIND_COMPONENT)
goodkind = .true.
- case (QTY_SPECIFIC_HUMIDITY, QTY_2M_SPECIFIC_HUMIDITY)
+ case (QTY_10M_V_WIND_COMPONENT)
goodkind = .true.
- case (QTY_U_WIND_COMPONENT, QTY_V_WIND_COMPONENT)
+ case (QTY_2M_TEMPERATURE)
+ goodkind = .true.
+ case (QTY_2M_SPECIFIC_HUMIDITY)
+ goodkind = .true.
+ case (QTY_U_WIND_COMPONENT,QTY_V_WIND_COMPONENT)
! if the reconstructed winds at the cell centers aren't there,
! we can use the edge normal winds, if the user allows it.
if (get_progvar_index_from_kind(QTY_EDGE_NORMAL_SPEED) > 0 &
@@ -1147,31 +1074,7 @@ subroutine model_interpolate(state_handle, ens_size, location, obs_type, expecte
goto 100
endif
-! Reject obs above a user specified pressure level.
-! this is expensive - only do it if users want to reject observations
-! at the top of the model. negative values mean ignore this test.
-
-if (highest_obs_pressure_mb > 0.0) then
- call compute_pressure_at_loc(state_handle, ens_size, location, lpres, istatus)
- where (lpres < highest_obs_pressure_mb * 100.0_r8)
- ! Exclude from assimilation the obs above a user specified level
- istatus(:) = 201
- end where
-
- if (debug > 10) then
- do e = 1, ens_size
- if (istatus(e) == 201) print *, 'ens ', e, ' rejected, pressure < upper limit', lpres(e), highest_obs_pressure_mb
- enddo
- endif
- if (all(istatus /= 0)) goto 100 ! if everyone has failed, we can quit
-
-endif
-
-if (debug > 10) then
- print *, my_task_id(), ' passed high pressure test, ready to interpolate kind ', obs_kind
-endif
-
-! Not prepared to do W interpolation at this time
+! Not prepared to do w interpolation at this time
if(obs_kind == QTY_VERTICAL_VELOCITY) then
if (debug > 4) print *, 'model_interpolate: code does not handle vertical velocity yet'
istatus(:) = 16
@@ -1199,10 +1102,12 @@ subroutine model_interpolate(state_handle, ens_size, location, obs_type, expecte
tvars(3) = get_progvar_index_from_kind(QTY_VAPOR_MIXING_RATIO)
call compute_scalar_with_barycentric(state_handle, ens_size, location, 3, tvars, values, istatus)
+ where (istatus /= 0) expected_obs = missing_r8 ! FIXME: this might not be necessary
if (all(istatus /= 0)) goto 100
! convert pot_temp, density, vapor mixing ratio into sensible temperature
expected_obs(:) = theta_to_tk(ens_size, values(1, :), values(2, :), values(3, :), istatus(:))
+ where (istatus /= 0) expected_obs = missing_r8
if (debug > 10) &
print *, 'model_interpolate: TEMPERATURE ', istatus, expected_obs, trim(locstring)
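! For reference, the textbook relation behind this conversion (an
! illustrative sketch only, NOT the exact formula used by theta_to_tk
! in this module): with the ideal-gas law p = rho * Rd * T supplying
! the pressure, the Poisson equation gives sensible temperature from
! potential temperature as
!
!    T = theta * (p / p0)**(Rd/cp),   with p0 = 100000 Pa
!
! theta_to_tk also accounts for the vapor mixing ratio qv, which is
! why all three fields are interpolated above.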
@@ -1213,18 +1118,22 @@ subroutine model_interpolate(state_handle, ens_size, location, obs_type, expecte
if (debug > 10) &
print *, 'model_interpolate: PRESSURE ', istatus, expected_obs, trim(locstring)
+ if (all(istatus /= 0)) goto 100
+
else if (obs_kind == QTY_GEOPOTENTIAL_HEIGHT) then
location_tmp = location
call convert_vert_distrib(state_handle, ens_size, location_tmp, QTY_GEOPOTENTIAL_HEIGHT, VERTISHEIGHT, istatus)
+ where (istatus /= 0) expected_obs = MISSING_R8
do e = 1, ens_size
if(istatus(e) == 0) expected_obs(e) = query_location(location_tmp(e), 'VLOC')
enddo
-else if (obs_kind == QTY_VAPOR_MIXING_RATIO .or. obs_kind == QTY_2M_VAPOR_MIXING_RATIO) then
- tvars(1) = get_progvar_index_from_kind(obs_kind)
+else if (obs_kind == QTY_VAPOR_MIXING_RATIO) then
+ tvars(1) = get_progvar_index_from_kind(QTY_VAPOR_MIXING_RATIO)
call compute_scalar_with_barycentric(state_handle, ens_size, location, 1, tvars, values, istatus)
expected_obs = values(1, :)
+ where (istatus /= 0) expected_obs = missing_r8 ! FIXME: this might not be necessary
if ( all(istatus /= 0 ) ) goto 100
! Don't accept negative moisture
@@ -1235,14 +1144,11 @@ subroutine model_interpolate(state_handle, ens_size, location, obs_type, expecte
if (debug > 10) &
print *, 'model_interpolate: VAPOR_MIXING_RATIO', istatus, expected_obs, trim(locstring)
-else if (obs_kind == QTY_SPECIFIC_HUMIDITY .or. obs_kind == QTY_2M_SPECIFIC_HUMIDITY) then
- if (obs_kind == QTY_SPECIFIC_HUMIDITY) then
- tvars(1) = get_progvar_index_from_kind(QTY_VAPOR_MIXING_RATIO)
- else
- tvars(1) = get_progvar_index_from_kind(QTY_2M_VAPOR_MIXING_RATIO)
- endif
+else if (obs_kind == QTY_SPECIFIC_HUMIDITY) then
+ tvars(1) = get_progvar_index_from_kind(QTY_VAPOR_MIXING_RATIO)
call compute_scalar_with_barycentric(state_handle, ens_size, location, 1, tvars, values, istatus)
expected_obs = values(1, :)
+ where (istatus /= 0) expected_obs = missing_r8 ! FIXME: this might not be necessary
if ( all(istatus /= 0 ) ) goto 100
! compute vapor pressure, then: sh = vp / (1.0 + vp)
@@ -1257,50 +1163,78 @@ subroutine model_interpolate(state_handle, ens_size, location, obs_type, expecte
enddo
if (debug > 10) &
- print *, 'model_interpolate: SH/SH2 ', istatus, expected_obs, trim(locstring)
+ print *, 'model_interpolate: SH ', istatus, expected_obs, trim(locstring)
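! The conversion used above, written out as a stand-alone sketch (the
! function name below is hypothetical and not part of this module).
! The interpolated quantity is QTY_VAPOR_MIXING_RATIO, so for a
! water-vapor mixing ratio w (kg/kg) the specific humidity is
!
!    q = w / (1.0 + w)
!
!    elemental function mixrat_to_spechum(w) result(q)
!       real(r8), intent(in) :: w
!       real(r8)             :: q
!       q = w / (1.0_r8 + w)
!    end function mixrat_to_spechum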
else if (obs_kind == QTY_SURFACE_ELEVATION) then
call compute_elevation_with_barycentric(location, expected_obs(1), istatus(1))
- expected_obs(2:ens_size) = expected_obs(1)
istatus(2:ens_size) = istatus(1)
+ if (istatus(1) /= 0) then
+ expected_obs = missing_r8
+ goto 100
+ endif
+
+ expected_obs(2:ens_size) = expected_obs(1)
if (debug > 10) &
print *, 'model_interpolate: SURFACE_ELEVATION', istatus, expected_obs, trim(locstring)
+!> @todo check against trunk; it does QTY_PRECIPITABLE_WATER and QTY_SURFACE_PRESSURE in the same if
+!> statement
+else if (obs_kind == QTY_PRECIPITABLE_WATER) then
+ tvars(1) = ivar
+ call compute_scalar_with_barycentric(state_handle, ens_size, location, 1, tvars, values, istatus)
+ expected_obs = values(1, :)
+ where (istatus /= 0) expected_obs = missing_r8 ! FIXME: this might not be necessary
+ if ( all(istatus /= 0) ) goto 100
+
else
- ! all other kinds come here.
- ! direct interpolation: kind is in the state vector and no clamping or other conversions needed
+ ! direct interpolation, kind is in the state vector
tvars(1) = ivar
call compute_scalar_with_barycentric(state_handle, ens_size, location, 1, tvars, values, istatus)
expected_obs = values(1, :)
+ where (istatus /= 0) expected_obs = missing_r8 ! FIXME: this might not be necessary
+ if ( all(istatus /= 0) ) goto 100
if (debug > 10) &
- print *, 'model_interpolate: generic interpolation: ', obs_kind, ' istatus ',istatus, &
- ' expected_obs ', expected_obs
+ print *, 'model_interpolate: generic interpolation: ', obs_kind, istatus, expected_obs
endif
+! Reject obs above a user specified pressure level.
+! this is expensive - only do it if users want to reject observations
+! at the top of the model. negative values mean ignore this test.
+if (highest_obs_pressure_mb > 0.0) then
+ call compute_pressure_at_loc(state_handle, ens_size, location, lpres, istatus)
+ where (lpres < highest_obs_pressure_mb * 100.0_r8)
+ ! Exclude from assimilation the obs above a user specified level
+ expected_obs = MISSING_R8
+ istatus = 201
+ ! goto 100
+ end where
+
+ if (debug > 10) then
+ do e = 1, ens_size
+ if (istatus(e) == 201) print *, 'ens ', e, ' rejected, pressure < upper limit', lpres(e), highest_obs_pressure_mb
+ enddo
+ endif
+endif
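! Usage note (illustrative values; the namelist group shown is an
! assumption about where this item is normally set):
!
!    &model_nml
!       highest_obs_pressure_mb = 100.0   ! reject obs above the 100 hPa level
!    /
!
! any negative value (e.g. -1.0) disables the test entirely.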
100 continue
-! this is for debugging - when we're confident the code is returning
-! consistent values and rc codes, both these tests can be removed for speed.
-! also optionally check for generated NaNs for now.
-
+! this is for debugging - when we're confident the code is
+! returning consistent values and rc codes, both these tests can
+! be removed for speed. FIXME.
do e = 1, ens_size
- if ((istatus(e) < 0) .or. &
- (istatus(e) /= 0 .and. expected_obs(e) /= MISSING_R8) .or. &
+ if ((istatus(e) /= 0 .and. expected_obs(e) /= MISSING_R8) .or. &
(istatus(e) == 0 .and. expected_obs(e) == MISSING_R8)) then
- write(string2,*) 'member ',e,' obs_kind', obs_kind,' value = ', expected_obs(e), ' istatus = ', istatus(e), ' cellid:',cellid,' location ', trim(locstring)
+ write(string2,*) 'member ',e,' obs_kind', obs_kind,' value = ', expected_obs(e), ' istatus = ', istatus(e)
write(string3,*) 'at location ', trim(locstring)
- if (istatus(e) < 0) then
- write(string1,*) 'interp routine returned a negative status which is an illegal value'
- else if (istatus(e) /= 0 .and. expected_obs(e) /= MISSING_R8) then
+ if (istatus(e) /= 0 .and. expected_obs(e) /= MISSING_R8) then
write(string1,*) 'interp routine returned a bad status but not a MISSING_R8 value'
else
write(string1,*) 'interp routine returned a good status but set value to MISSING_R8'
@@ -1310,15 +1244,6 @@ subroutine model_interpolate(state_handle, ens_size, location, obs_type, expecte
text2=string2, text3=string3)
endif
- ! the only portable, reliable test for NaNs we know - if some number is neither
- ! less than nor equal to/greater than 0, it must be a NaN. all numerical comparisons
- ! fail if one or more of the operands are NaN.
-
- if (.not. expected_obs(e) < 0 .and. .not. expected_obs(e) >= 0) then
- write(string1,*) 'member ', e, ' expected obs may be NaN: ', expected_obs(e)
- call error_handler(E_ERR,'model_interpolate', string1, source,revision,revdate)
- endif
-
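! Note on the NaN test removed above: the comparison trick works because
! every ordered comparison involving a NaN evaluates to false. A sketch
! of an equivalent check using the Fortran 2003 intrinsic module (not
! used by this code) would be:
!
!    use, intrinsic :: ieee_arithmetic, only : ieee_is_nan
!    if (ieee_is_nan(expected_obs(e))) then
!       write(string1,*) 'member ', e, ' expected obs is NaN'
!       call error_handler(E_ERR,'model_interpolate', string1, source,revision,revdate)
!    endif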
if (debug > 10) then
write(string2,*) 'Completed for member ',e,' obs_kind', obs_kind,' expected_obs = ', expected_obs(e)
write(string3,*) 'istatus = ', istatus(e), ' at ', trim(locstring)
@@ -1336,18 +1261,50 @@ subroutine nc_write_model_atts(ncid, domain_id)
integer, intent(in) :: ncid
integer, intent(in) :: domain_id
-character(len=*), parameter :: routine = 'nc_write_model_atts'
+! for the dimensions and coordinate variables
+integer :: nCellsDimID
+integer :: nEdgesDimID, maxEdgesDimID
+integer :: nVerticesDimID
+integer :: VertexDegreeDimID
+integer :: nSoilLevelsDimID
+integer :: nVertLevelsDimID
+integer :: nVertLevelsP1DimID
+
+
+integer :: VarID, mpasFileID
+
+integer, dimension(NF90_MAX_VAR_DIMS) :: mydimids
+integer :: myndims
+
+character(len=128) :: filename
+
+real(r8), allocatable, dimension(:) :: data1d
+
+
+if ( .not. module_initialized ) call static_init_model
-real(r8), allocatable :: data1d(:)
-integer :: ncid2
+!--------------------------------------------------------------------
+! we only have a netcdf handle here so we do not know the filename
+! or the fortran unit number. but construct a string with at least
+! the netcdf handle, so in case of error we can trace back to see
+! which netcdf file is involved.
+!--------------------------------------------------------------------
+write(filename,*) 'ncid', ncid
!-------------------------------------------------------------------------------
-! put file into define mode.
+! make sure ncid refers to an open netCDF file,
+! and then put into define mode.
!-------------------------------------------------------------------------------
call nc_begin_define_mode(ncid)
+!-------------------------------------------------------------------------------
+! We need the dimension ID for the number of copies/ensemble members, and
+! we might as well check to make sure that Time is the Unlimited dimension.
+! Our job is to create the 'model size' dimension.
+!-------------------------------------------------------------------------------
+
!-------------------------------------------------------------------------------
! Write Global Attributes
!-------------------------------------------------------------------------------
@@ -1362,92 +1319,153 @@ subroutine nc_write_model_atts(ncid, domain_id)
!----------------------------------------------------------------------------
-! if not adding grid info, return here
+! Define the new dimensions IDs
!----------------------------------------------------------------------------
-if (.not. write_grid_to_diag_files) then
- call nc_end_define_mode(ncid)
- call nc_synchronize_file(ncid)
- return
-endif
+call nc_check(nf90_def_dim(ncid, name='nCells', &
+ len = nCells, dimid = nCellsDimID),'nc_write_model_atts', 'nCells def_dim '//trim(filename))
-!----------------------------------------------------------------------------
-! Everything below here is static grid info
-!----------------------------------------------------------------------------
+call nc_check(nf90_def_dim(ncid, name='nEdges', &
+ len = nEdges, dimid = nEdgesDimID),'nc_write_model_atts', 'nEdges def_dim '//trim(filename))
+
+call nc_check(nf90_def_dim(ncid, name='nVertLevels', &
+ len = nVertLevels, dimid = NVertLevelsDimID),'nc_write_model_atts', &
+ 'nVertLevels def_dim '//trim(filename))
+
+call nc_check(nf90_def_dim(ncid, name='nVertLevelsP1', &
+ len = nVertLevelsP1, dimid = NVertLevelsP1DimID),'nc_write_model_atts', &
+ 'nVertLevelsP1 def_dim '//trim(filename))
+call nc_check(nf90_def_dim(ncid, name='nSoilLevels', &
+ len = nSoilLevels, dimid = nSoilLevelsDimID),'nc_write_model_atts', &
+ 'nSoilLevels def_dim '//trim(filename))
+
+
+if (add_static_data_to_diags) then
!----------------------------------------------------------------------------
-! Dimensions
+ ! Dimensions needed only if you are writing out static grid information
!----------------------------------------------------------------------------
+ call nc_check(nf90_def_dim(ncid, name='maxEdges', &
+ len = maxEdges, dimid = maxEdgesDimID),'nc_write_model_atts', 'maxEdges def_dim '//trim(filename))
-call nc_define_dimension(ncid, 'nCells', nCells, routine)
-call nc_define_dimension(ncid, 'nEdges', nEdges, routine)
-call nc_define_dimension(ncid, 'nVertLevels', nVertLevels, routine)
-call nc_define_dimension(ncid, 'nVertLevelsP1', nVertLevelsP1, routine)
-call nc_define_dimension(ncid, 'nSoilLevels', nSoilLevels, routine)
+ call nc_check(nf90_def_dim(ncid, name='nVertices', &
+ len = nVertices, dimid = nVerticesDimID),'nc_write_model_atts', &
+ 'nVertices def_dim '//trim(filename))
-call nc_define_dimension(ncid, 'maxEdges', maxEdges, routine)
-call nc_define_dimension(ncid, 'nVertices', nVertices, routine)
-call nc_define_dimension(ncid, 'VertexDegree', VertexDegree, routine)
+ call nc_check(nf90_def_dim(ncid, name='VertexDegree', &
+ len = VertexDegree, dimid = VertexDegreeDimID),'nc_write_model_atts', &
+ 'VertexDegree def_dim '//trim(filename))
!----------------------------------------------------------------------------
-! Coordinate Variables and the Attributes
+ ! Create the (empty) Coordinate Variables and the Attributes
!----------------------------------------------------------------------------
! Cell Longitudes
-call nc_define_real_variable(ncid, 'lonCell', 'nCells', routine)
-call nc_add_attribute_to_variable(ncid, 'lonCell', 'long_name', 'cell center longitudes', routine)
-call nc_add_attribute_to_variable(ncid, 'lonCell', 'units', 'degrees_east', routine)
-call nc_add_attribute_to_variable(ncid, 'lonCell', 'valid_range', (/ 0.0_r8, 360.0_r8 /), routine)
+ call nc_check(nf90_def_var(ncid,name='lonCell', xtype=nf90_double, &
+ dimids=nCellsDimID, varid=VarID),&
+ 'nc_write_model_atts', 'lonCell def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'cell center longitudes'), &
+ 'nc_write_model_atts', 'lonCell long_name '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'units', 'degrees_east'), &
+ 'nc_write_model_atts', 'lonCell units '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'valid_range', (/ 0.0_r8, 360.0_r8 /)), &
+ 'nc_write_model_atts', 'lonCell valid_range '//trim(filename))
! Cell Latitudes
-call nc_define_real_variable(ncid, 'latCell', 'nCells', routine)
-call nc_add_attribute_to_variable(ncid, 'latCell', 'long_name', 'cell center latitudes', routine)
-call nc_add_attribute_to_variable(ncid, 'latCell', 'units', 'degrees_north', routine)
-call nc_add_attribute_to_variable(ncid, 'latCell', 'valid_range', (/ -90.0_r8, 90.0_r8 /), routine)
+ call nc_check(nf90_def_var(ncid,name='latCell', xtype=nf90_double, &
+ dimids=nCellsDimID, varid=VarID),&
+ 'nc_write_model_atts', 'latCell def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'cell center latitudes'), &
+ 'nc_write_model_atts', 'latCell long_name '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'units', 'degrees_north'), &
+ 'nc_write_model_atts', 'latCell units '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID,'valid_range',(/ -90.0_r8, 90.0_r8 /)), &
+ 'nc_write_model_atts', 'latCell valid_range '//trim(filename))
+
+ call nc_check(nf90_def_var(ncid,name='xCell', xtype=nf90_double, &
+ dimids=nCellsDimID, varid=VarID),&
+ 'nc_write_model_atts', 'xCell def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'cell center x cartesian coordinates'), &
+ 'nc_write_model_atts', 'xCell long_name '//trim(filename))
+
+ call nc_check(nf90_def_var(ncid,name='yCell', xtype=nf90_double, &
+ dimids=nCellsDimID, varid=VarID),&
+ 'nc_write_model_atts', 'yCell def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'cell center y cartesian coordinates'), &
+ 'nc_write_model_atts', 'yCell long_name '//trim(filename))
+
+ call nc_check(nf90_def_var(ncid,name='zCell', xtype=nf90_double, &
+ dimids=nCellsDimID, varid=VarID),&
+ 'nc_write_model_atts', 'zCell def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'cell center z cartesian coordinates'), &
+ 'nc_write_model_atts', 'zCell long_name '//trim(filename))
! Grid vertical information
-call nc_define_real_variable(ncid, 'zgrid', (/ 'nVertLevelsP1', 'nCells ' /), routine)
-call nc_add_attribute_to_variable(ncid, 'zgrid', 'long_name', 'grid zgrid', routine)
-call nc_add_attribute_to_variable(ncid, 'zgrid', 'units', 'meters', routine)
-call nc_add_attribute_to_variable(ncid, 'zgrid', 'positive', 'up', routine)
-call nc_add_attribute_to_variable(ncid, 'zgrid', 'cartesian_axis', 'Z', routine)
-
-! Grid relationship information
-call nc_define_integer_variable(ncid, 'nEdgesOnCell', 'nCells', routine)
-call nc_add_attribute_to_variable(ncid, 'nEdgesOnCell', 'long_name', 'grid nEdgesOnCell', routine)
-
-call nc_define_integer_variable(ncid, 'cellsOnVertex', (/ 'VertexDegree', 'nVertices ' /), routine)
-call nc_add_attribute_to_variable(ncid, 'cellsOnVertex', 'long_name', 'grid cellsOnVertex', routine)
+ call nc_check(nf90_def_var(ncid,name='zgrid',xtype=nf90_double, &
+ dimids=(/ nVertLevelsP1DimID, nCellsDimID /) ,varid=VarID), &
+ 'nc_write_model_atts', 'zgrid def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'grid zgrid'), &
+ 'nc_write_model_atts', 'zgrid long_name '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'units', 'meters'), &
+ 'nc_write_model_atts', 'zgrid units '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'positive', 'up'), &
+ 'nc_write_model_atts', 'zgrid units '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'cartesian_axis', 'Z'), &
+ 'nc_write_model_atts', 'zgrid cartesian_axis '//trim(filename))
+
+ ! Vertex Longitudes
+ call nc_check(nf90_def_var(ncid,name='lonVertex', xtype=nf90_double, &
+ dimids=nVerticesDimID, varid=VarID),&
+ 'nc_write_model_atts', 'lonVertex def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'vertex longitudes'), &
+ 'nc_write_model_atts', 'lonVertex long_name '//trim(filename))
+
+ ! Vertex Latitudes
+ call nc_check(nf90_def_var(ncid,name='latVertex', xtype=nf90_double, &
+ dimids=nVerticesDimID, varid=VarID),&
+ 'nc_write_model_atts', 'latVertex def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'vertex latitudes'), &
+ 'nc_write_model_atts', 'latVertex long_name '//trim(filename))
-call nc_define_integer_variable(ncid, 'verticesOnCell', (/ 'maxEdges', 'nCells ' /), routine)
-call nc_add_attribute_to_variable(ncid, 'verticesOnCell', 'long_name', 'grid verticesOnCell', routine)
-
-! Cartesian coordinates for the same cells
-call nc_define_real_variable(ncid, 'xCell', 'nCells', routine)
-call nc_add_attribute_to_variable(ncid, 'xCell', 'long_name', 'cell center x cartesian coordinates', routine)
-
-call nc_define_real_variable(ncid, 'yCell', 'nCells', routine)
-call nc_add_attribute_to_variable(ncid, 'yCell', 'long_name', 'cell center y cartesian coordinates', routine)
-
-call nc_define_real_variable(ncid, 'zCell', 'nCells', routine)
-call nc_add_attribute_to_variable(ncid, 'zCell', 'long_name', 'cell center z cartesian coordinates', routine)
+ if(data_on_edges) then
+ ! Edge Longitudes
+ call nc_check(nf90_def_var(ncid,name='lonEdge', xtype=nf90_double, &
+ dimids=nEdgesDimID, varid=VarID),&
+ 'nc_write_model_atts', 'lonEdge def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'edge longitudes'), &
+ 'nc_write_model_atts', 'lonEdge long_name '//trim(filename))
+
+ ! Edge Latitudes
+ call nc_check(nf90_def_var(ncid,name='latEdge', xtype=nf90_double, &
+ dimids=nEdgesDimID, varid=VarID),&
+ 'nc_write_model_atts', 'latEdge def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'edge latitudes'), &
+ 'nc_write_model_atts', 'latEdge long_name '//trim(filename))
+ endif
-! Vertex Longitudes and latitudes
-call nc_define_real_variable(ncid, 'lonVertex', 'nVertices', routine)
-call nc_add_attribute_to_variable(ncid, 'lonVertex', 'long_name', 'vertex longitudes', routine)
+ ! Grid relationship information
+ call nc_check(nf90_def_var(ncid,name='nEdgesOnCell',xtype=nf90_int, &
+ dimids=nCellsDimID ,varid=VarID), &
+ 'nc_write_model_atts', 'nEdgesOnCell def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'grid nEdgesOnCell'), &
+ 'nc_write_model_atts', 'nEdgesOnCell long_name '//trim(filename))
-call nc_define_real_variable(ncid, 'latVertex', 'nVertices', routine)
-call nc_add_attribute_to_variable(ncid, 'latVertex', 'long_name', 'vertex latitudes', routine)
+ call nc_check(nf90_def_var(ncid,name='cellsOnVertex',xtype=nf90_int, &
+ dimids=(/ VertexDegreeDimID, nVerticesDimID /) ,varid=VarID), &
+ 'nc_write_model_atts', 'cellsOnVertex def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'grid cellsOnVertex'), &
+ 'nc_write_model_atts', 'cellsOnVertex long_name '//trim(filename))
-! Edge Longitudes and latitudes
- if(data_on_edges) then
- call nc_define_real_variable(ncid, 'lonEdge', 'nEdges', routine)
- call nc_add_attribute_to_variable(ncid, 'lonEdge', 'long_name', 'edge longitudes', routine)
+ call nc_check(nf90_def_var(ncid,name='verticesOnCell',xtype=nf90_int, &
+ dimids=(/ maxEdgesDimID, nCellsDimID /) ,varid=VarID), &
+ 'nc_write_model_atts', 'verticesOnCell def_var '//trim(filename))
+ call nc_check(nf90_put_att(ncid, VarID, 'long_name', 'grid verticesOnCell'), &
+ 'nc_write_model_atts', 'verticesOnCell long_name '//trim(filename))
- call nc_define_real_variable(ncid, 'latEdge', 'nEdges', routine)
- call nc_add_attribute_to_variable(ncid, 'latEdge', 'long_name', 'edge latitudes', routine)
-endif
+ call nc_check(nf90_def_var(ncid,name='areaCell', xtype=nf90_double, &
+ dimids=nCellsDimID, varid=VarID),&
+ 'nc_write_model_atts', 'areaCell def_var '//trim(filename))
-call nc_define_real_variable(ncid, 'areaCell', 'nCells', routine)
+endif ! add_static_data_to_diags
!----------------------------------------------------------------------------
! Finished with dimension/variable definitions, must end 'define' mode to fill.
@@ -1455,57 +1473,123 @@ subroutine nc_write_model_atts(ncid, domain_id)
call nc_end_define_mode(ncid)
+if (add_static_data_to_diags) then
!----------------------------------------------------------------------------
-! Fill the coordinate variables
+ ! Fill the coordinate variables that DART needs and has locally
!----------------------------------------------------------------------------
-call nc_put_variable(ncid, 'lonCell', lonCell, routine)
-call nc_put_variable(ncid, 'latCell', latCell, routine)
+ call nc_check(NF90_inq_varid(ncid, 'lonCell', VarID), &
+ 'nc_write_model_atts', 'lonCell inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, lonCell ), &
+ 'nc_write_model_atts', 'lonCell put_var '//trim(filename))
-call nc_put_variable(ncid, 'zgrid', zGridFace, routine)
+ call nc_check(NF90_inq_varid(ncid, 'latCell', VarID), &
+ 'nc_write_model_atts', 'latCell inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, latCell ), &
+ 'nc_write_model_atts', 'latCell put_var '//trim(filename))
if(data_on_edges) then
- call nc_put_variable(ncid, 'lonEdge', lonEdge, routine)
- call nc_put_variable(ncid, 'latEdge', latEdge, routine)
-endif
+ call nc_check(NF90_inq_varid(ncid, 'lonEdge', VarID), &
+ 'nc_write_model_atts', 'lonEdge inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, lonEdge ), &
+ 'nc_write_model_atts', 'lonEdge put_var '//trim(filename))
+
+ call nc_check(NF90_inq_varid(ncid, 'latEdge', VarID), &
+ 'nc_write_model_atts', 'latEdge inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, latEdge ), &
+ 'nc_write_model_atts', 'latEdge put_var '//trim(filename))
+ endif
+
+ call nc_check(NF90_inq_varid(ncid, 'zgrid', VarID), &
+ 'nc_write_model_atts', 'zgrid inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, zGridFace ), &
+ 'nc_write_model_atts', 'zgrid put_var '//trim(filename))
+
+ call nc_check(NF90_inq_varid(ncid, 'nEdgesOnCell', VarID), &
+ 'nc_write_model_atts', 'nEdgesOnCell inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, nEdgesOnCell ), &
+ 'nc_write_model_atts', 'nEdgesOnCell put_var '//trim(filename))
+
+ call nc_check(NF90_inq_varid(ncid, 'verticesOnCell', VarID), &
+ 'nc_write_model_atts', 'verticesOnCell inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, verticesOnCell ), &
+ 'nc_write_model_atts', 'verticesOnCell put_var '//trim(filename))
-call nc_put_variable(ncid, 'nEdgesOnCell', nEdgesOnCell, routine)
-call nc_put_variable(ncid, 'verticesOnCell', verticesOnCell, routine)
-call nc_put_variable(ncid, 'cellsOnVertex', cellsOnVertex, routine)
+ call nc_check(NF90_inq_varid(ncid, 'cellsOnVertex', VarID), &
+ 'nc_write_model_atts', 'cellsOnVertex inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, cellsOnVertex ), &
+ 'nc_write_model_atts', 'cellsOnVertex put_var '//trim(filename))
!----------------------------------------------------------------------------
+ ! Fill the coordinate variables needed for plotting only.
! DART has not read these in, so we have to read them from the input file
-! and copy them to the DART output file.
+ ! and parrot them to the DART output file.
!----------------------------------------------------------------------------
-ncid2 = nc_open_file_readonly(init_template_filename, routine)
+ call nc_check(nf90_open(trim(grid_definition_filename), NF90_NOWRITE, mpasFileID), &
+ 'nc_write_model_atts','open '//trim(grid_definition_filename))
allocate(data1d(nCells))
-
-call nc_get_variable(ncid2, 'xCell', data1d, routine)
-call nc_put_variable(ncid, 'xCell', data1d, routine)
-
-call nc_get_variable(ncid2, 'yCell', data1d, routine)
-call nc_put_variable(ncid, 'yCell', data1d, routine)
-
-call nc_get_variable(ncid2, 'zCell', data1d, routine)
-call nc_put_variable(ncid, 'zCell', data1d, routine)
-
-call nc_get_variable(ncid2, 'areaCell', data1d, routine)
-call nc_put_variable(ncid, 'areaCell', data1d, routine)
-
+ call nc_check(nf90_inq_varid(mpasFileID, 'xCell', VarID), &
+ 'nc_write_model_atts', 'xCell inq_varid ')
+ call nc_check(nf90_get_var(mpasFileID, VarID, data1d ), &
+ 'nc_write_model_atts', 'xCell get_var ')
+ call nc_check(nf90_inq_varid(ncid, 'xCell', VarID), &
+ 'nc_write_model_atts', 'xCell inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, data1d ), &
+ 'nc_write_model_atts', 'xCell put_var '//trim(filename))
+
+ call nc_check(nf90_inq_varid(mpasFileID, 'yCell', VarID), &
+ 'nc_write_model_atts', 'yCell inq_varid ')
+ call nc_check(nf90_get_var(mpasFileID, VarID, data1d ), &
+ 'nc_write_model_atts', 'yCell get_var ')
+ call nc_check(nf90_inq_varid(ncid, 'yCell', VarID), &
+ 'nc_write_model_atts', 'yCell inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, data1d ), &
+ 'nc_write_model_atts', 'yCell put_var '//trim(filename))
+
+ call nc_check(nf90_inq_varid(mpasFileID, 'zCell', VarID), &
+ 'nc_write_model_atts', 'zCell inq_varid ')
+ call nc_check(nf90_get_var(mpasFileID, VarID, data1d ), &
+ 'nc_write_model_atts', 'zCell get_var ')
+ call nc_check(nf90_inq_varid(ncid, 'zCell', VarID), &
+ 'nc_write_model_atts', 'zCell inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, data1d ), &
+ 'nc_write_model_atts', 'zCell put_var '//trim(filename))
+
+ call nc_check(nf90_inq_varid(mpasFileID, 'areaCell', VarID), &
+ 'nc_write_model_atts', 'areaCell inq_varid ')
+ call nc_check(nf90_get_var(mpasFileID, VarID, data1d ), &
+ 'nc_write_model_atts', 'areaCell get_var ')
+ call nc_check(nf90_inq_varid(ncid, 'areaCell', VarID), &
+ 'nc_write_model_atts', 'areaCell inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, data1d ), &
+ 'nc_write_model_atts', 'areaCell put_var '//trim(filename))
deallocate(data1d)
allocate(data1d(nVertices))
-call nc_get_variable(ncid2, 'lonVertex', data1d, routine)
-call nc_put_variable(ncid, 'lonVertex', data1d, routine)
-
-call nc_get_variable(ncid2, 'latVertex', data1d, routine)
-call nc_put_variable(ncid, 'latVertex', data1d, routine)
+ call nc_check(nf90_inq_varid(mpasFileID, 'lonVertex', VarID), &
+ 'nc_write_model_atts', 'lonVertex inq_varid ')
+ call nc_check(nf90_get_var(mpasFileID, VarID, data1d ), &
+ 'nc_write_model_atts', 'lonVertex get_var ')
+ call nc_check(nf90_inq_varid(ncid, 'lonVertex', VarID), &
+ 'nc_write_model_atts', 'lonVertex inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, data1d ), &
+ 'nc_write_model_atts', 'lonVertex put_var '//trim(filename))
+
+ call nc_check(nf90_inq_varid(mpasFileID, 'latVertex', VarID), &
+ 'nc_write_model_atts', 'latVertex inq_varid ')
+ call nc_check(nf90_get_var(mpasFileID, VarID, data1d ), &
+ 'nc_write_model_atts', 'latVertex get_var ')
+ call nc_check(nf90_inq_varid(ncid, 'latVertex', VarID), &
+ 'nc_write_model_atts', 'latVertex inq_varid '//trim(filename))
+ call nc_check(nf90_put_var(ncid, VarID, data1d ), &
+ 'nc_write_model_atts', 'latVertex put_var '//trim(filename))
+ deallocate(data1d)
-deallocate(data1d)
+ call nc_check(nf90_close(mpasFileID),'nc_write_model_atts','close '//trim(grid_definition_filename))
-call nc_close_file(ncid2)
+endif ! add_static_data_to_diags
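! The inq_varid/get_var/put_var triplets above all follow the same
! pattern; a hypothetical helper (sketch only, not part of this module)
! that copies one nCells-sized real variable from the grid file to the
! diagnostic file could look like:
!
!    subroutine copy_real_1d_variable(fromid, toid, varname, n)
!       integer,          intent(in) :: fromid, toid, n
!       character(len=*), intent(in) :: varname
!       integer  :: VarID
!       real(r8) :: data1d(n)
!       call nc_check(nf90_inq_varid(fromid, varname, VarID), 'copy_real_1d_variable', 'inq_varid '//trim(varname))
!       call nc_check(nf90_get_var(fromid, VarID, data1d),    'copy_real_1d_variable', 'get_var '//trim(varname))
!       call nc_check(nf90_inq_varid(toid, varname, VarID),   'copy_real_1d_variable', 'inq_varid '//trim(varname))
!       call nc_check(nf90_put_var(toid, VarID, data1d),      'copy_real_1d_variable', 'put_var '//trim(varname))
!    end subroutine copy_real_1d_variable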
!-------------------------------------------------------------------------------
! Flush the buffer and leave netCDF file open
@@ -1567,7 +1651,6 @@ subroutine end_model()
if (allocated(lonCell)) deallocate(lonCell)
if (allocated(zGridFace)) deallocate(zGridFace)
if (allocated(zGridCenter)) deallocate(zGridCenter)
-if (allocated(dcEdge)) deallocate(dcEdge)
if (allocated(cellsOnVertex)) deallocate(cellsOnVertex)
if (allocated(xland)) deallocate(xland)
if (allocated(nEdgesOnCell)) deallocate(nEdgesOnCell)
@@ -1647,7 +1730,7 @@ subroutine pert_model_copies(ens_handle, ens_size, pert_amp, interf_provided)
enddo
! get global min/max for each variable
-call broadcast_minmax(min_var, max_var, num_variables)
+call all_reduce_min_max(min_var, max_var, num_variables)
deallocate(within_range)
call init_random_seq(random_seq, my_task_id()+1)
@@ -1698,12 +1781,10 @@ subroutine get_close_obs(gc, base_loc, base_type, locs, loc_qtys, loc_types, &
integer :: ztypeout
-integer :: t_ind, istatus1, istatus2, k, istat_arr(1)
-integer :: base_which, local_obs_which, base_qty
+integer :: t_ind, istatus1, istatus2, k
+integer :: base_which, local_obs_which
real(r8), dimension(3) :: base_llv, local_obs_llv ! lon/lat/vert
-type(location_type) :: local_obs_loc, location_arr(1)
-! timing
-real(digits12) :: t_base, t_base2, interval
+type(location_type) :: local_obs_loc
real(r8) :: hor_dist
hor_dist = 1.0e9_r8
@@ -1726,12 +1807,8 @@ subroutine get_close_obs(gc, base_loc, base_type, locs, loc_qtys, loc_types, &
if (vertical_localization_on()) then
if (base_llv(3) == MISSING_R8) then
istatus1 = 1
- else if (base_which /= vert_localization_coord .and. base_which /= VERTISUNDEF) then
- base_qty = get_quantity_for_type_of_obs(base_type)
- location_arr(1) = base_loc
- call convert_vert_distrib(state_handle, 1, location_arr, base_qty, vert_localization_coord, istat_arr)
- istatus1 = istat_arr(1)
- base_loc = location_arr(1)
+ else if (base_which /= vert_localization_coord) then
+ call vert_convert(state_handle, base_loc, base_type, istatus1)
if(debug > 5) then
call write_location(0,base_loc,charstring=string1)
call error_handler(E_MSG, 'get_close_obs: base_loc',string1,source, revision, revdate)
@@ -1758,11 +1835,9 @@ subroutine get_close_obs(gc, base_loc, base_type, locs, loc_qtys, loc_types, &
! This should only be necessary for obs priors, as state location information already
! contains the correct vertical coordinate (filter_assim's call to get_state_meta_data).
if (vertical_localization_on()) then
- if (local_obs_which /= vert_localization_coord .and. local_obs_which /= VERTISUNDEF) then
- location_arr(1) = local_obs_loc
- call convert_vert_distrib(state_handle, 1, location_arr, loc_qtys(t_ind), vert_localization_coord, istat_arr)
- istatus2 = istat_arr(1)
- locs(t_ind) = location_arr(1)
+ if (local_obs_which /= vert_localization_coord) then
+ call vert_convert(state_handle, local_obs_loc, loc_qtys(t_ind), istatus2)
+ locs(t_ind) = local_obs_loc
else
istatus2 = 0
endif
@@ -1776,7 +1851,16 @@ subroutine get_close_obs(gc, base_loc, base_type, locs, loc_qtys, loc_types, &
(local_obs_llv(3) == MISSING_R8)) .or. (istatus2 /= 0) ) then
dist(k) = 1.0e9_r8
else
- dist(k) = get_dist(base_loc, locs(t_ind), base_type, loc_qtys(t_ind))
+ dist(k) = get_dist(base_loc, local_obs_loc, base_type, loc_qtys(t_ind))
+ ! if ((debug > 4) .and. (k < 100) .and. do_output()) then
+ ! print *, 'calling get_dist'
+ ! call write_location(0,base_loc,charstring=string2)
+ ! call error_handler(E_MSG, 'get_close_obs: base_loc',string2,source, revision, revdate)
+ ! call write_location(0,local_obs_loc,charstring=string2)
+ ! call error_handler(E_MSG, 'get_close_obs: local_obs_loc',string2,source, revision, revdate)
+ ! hor_dist = get_dist(base_loc, local_obs_loc, base_type, loc_qtys(t_ind), no_vert=.true.)
+ ! print *, 'hor/3d_dist for k =', k, ' is ', hor_dist,dist(k)
+ ! endif
endif
endif
@@ -1812,13 +1896,10 @@ subroutine get_close_state(gc, base_loc, base_type, locs, loc_qtys, loc_indx, &
integer :: ztypeout
-integer :: t_ind, istatus1, istatus2, k, istat_arr(1)
-integer :: base_which, local_obs_which, base_qty
+integer :: t_ind, istatus1, istatus2, k
+integer :: base_which, local_obs_which
real(r8), dimension(3) :: base_llv, local_obs_llv ! lon/lat/vert
-type(location_type) :: local_obs_loc, location_arr(1)
-! timing
-real(digits12) :: t_base, t_base2, interval
-
+type(location_type) :: local_obs_loc
real(r8) :: hor_dist
hor_dist = 1.0e9_r8
@@ -1841,12 +1922,8 @@ subroutine get_close_state(gc, base_loc, base_type, locs, loc_qtys, loc_indx, &
if (vertical_localization_on()) then
if (base_llv(3) == MISSING_R8) then
istatus1 = 1
- else if (base_which /= vert_localization_coord .and. base_which /= VERTISUNDEF) then
- base_qty = get_quantity_for_type_of_obs(base_type)
- location_arr(1) = base_loc
- call convert_vert_distrib(state_handle, 1, location_arr, base_qty, vert_localization_coord, istat_arr)
- istatus1 = istat_arr(1)
- base_loc = location_arr(1)
+ else if (base_which /= vert_localization_coord) then
+ call vert_convert(state_handle, base_loc, base_type, istatus1)
if(debug > 5) then
call write_location(0,base_loc,charstring=string1)
call error_handler(E_MSG, 'get_close_state: base_loc',string1,source, revision, revdate)
@@ -1873,12 +1950,9 @@ subroutine get_close_state(gc, base_loc, base_type, locs, loc_qtys, loc_indx, &
! This should only be necessary for obs priors, as state location information already
! contains the correct vertical coordinate (filter_assim's call to get_state_meta_data).
if (vertical_localization_on()) then
- if (local_obs_which /= vert_localization_coord .and. local_obs_which /= VERTISUNDEF) then
- location_arr(1) = local_obs_loc
- call convert_vert_distrib_state(state_handle, 1, location_arr, loc_qtys(t_ind), &
- loc_indx(t_ind), vert_localization_coord, istat_arr)
- istatus2 = istat_arr(1)
- locs(t_ind) = location_arr(1)
+ if (local_obs_which /= vert_localization_coord) then
+ call vert_convert(state_handle, local_obs_loc, loc_qtys(t_ind), istatus2)
+ locs(t_ind) = local_obs_loc
else
istatus2 = 0
endif
@@ -1892,7 +1966,16 @@ subroutine get_close_state(gc, base_loc, base_type, locs, loc_qtys, loc_indx, &
(local_obs_llv(3) == MISSING_R8)) .or. (istatus2 /= 0) ) then
dist(k) = 1.0e9_r8
else
- dist(k) = get_dist(base_loc, locs(t_ind), base_type, loc_qtys(t_ind))
+ dist(k) = get_dist(base_loc, local_obs_loc, base_type, loc_qtys(t_ind))
+ ! if ((debug > 4) .and. (k < 100) .and. do_output()) then
+ ! print *, 'calling get_dist'
+ ! call write_location(0,base_loc,charstring=string2)
+ ! call error_handler(E_MSG, 'get_close_state: base_loc',string2,source, revision, revdate)
+ ! call write_location(0,local_obs_loc,charstring=string2)
+ ! call error_handler(E_MSG, 'get_close_state: local_obs_loc',string2,source, revision, revdate)
+ ! hor_dist = get_dist(base_loc, local_obs_loc, base_type, loc_qtys(t_ind), no_vert=.true.)
+ ! print *, 'hor/3d_dist for k =', k, ' is ', hor_dist,dist(k)
+ ! endif
endif
endif
@@ -1912,27 +1995,38 @@ end subroutine get_close_state
! (these are not required by dart but are used by other programs)
!==================================================================
-subroutine get_init_template_filename( filename )
-! return the name of the template filename that was set
-! in the model_nml namelist (ex. init.nc)
+subroutine get_model_analysis_filename( filename )
+
+! return the name of the analysis filename that was set
+! in the model_nml namelist
character(len=*), intent(OUT) :: filename
if ( .not. module_initialized ) call static_init_model
-filename = trim(init_template_filename)
+filename = trim(model_analysis_filename)
-end subroutine get_init_template_filename
+end subroutine get_model_analysis_filename
!-------------------------------------------------------------------
-!>@todo FIXME: these need to be replaced by calls to the state structure.
-!> at the moment they are only used by the postprocessing program that
-!> moves the wind increments from cell centers to edges - it's not needed
-!> by anything in filter. it's a holdover from code before the state structure
-!> was a general facility.
+subroutine get_grid_definition_filename( filename )
+
+! return the name of the grid_definition filename that was set
+! in the model_nml namelist
+
+character(len=*), intent(out) :: filename
+
+if ( .not. module_initialized ) call static_init_model
+
+filename = trim(grid_definition_filename)
+
+end subroutine get_grid_definition_filename
+
+
+!-------------------------------------------------------------------
subroutine analysis_file_to_statevector(filename, state_vector, model_time)
@@ -1944,6 +2038,7 @@ subroutine analysis_file_to_statevector(filename, state_vector, model_time)
type(time_type), intent(out) :: model_time
! temp space to hold data while we are reading it
+integer :: ndim1, ndim2, ndim3
integer :: i, ivar
real(r8), allocatable, dimension(:) :: data_1d_array
real(r8), allocatable, dimension(:,:) :: data_2d_array
@@ -1954,7 +2049,6 @@ subroutine analysis_file_to_statevector(filename, state_vector, model_time)
integer :: VarID, ncNdims, dimlen
integer :: ncid, TimeDimID, TimeDimLength
character(len=256) :: myerrorstring
-character(len=*), parameter :: routine = 'analysis_file_to_statevector'
if ( .not. module_initialized ) call static_init_model
@@ -1967,10 +2061,17 @@ subroutine analysis_file_to_statevector(filename, state_vector, model_time)
call error_handler(E_ERR,'analysis_file_to_statevector',string1,source,revision,revdate)
endif
-ncid = nc_open_file_readonly(filename, routine)
+call nc_check(nf90_open(trim(filename), NF90_NOWRITE, ncid), &
+ 'analysis_file_to_statevector','open '//trim(filename))
model_time = get_analysis_time(ncid, filename)
+! let the calling program print out the time information it wants.
+!if (do_output()) &
+! call print_time(model_time,'time in restart file '//trim(filename))
+!if (do_output()) &
+! call print_date(model_time,'date in restart file '//trim(filename))
+
! Start counting and filling the state vector one item at a time,
! repacking the Nd arrays into a single 1d list of numbers.
@@ -2012,6 +2113,11 @@ subroutine analysis_file_to_statevector(filename, state_vector, model_time)
call nc_check(nf90_inquire_dimension(ncid, dimIDs(i), len=dimlen), &
'analysis_file_to_statevector', string1)
+ if ( dimlen /= progvar(ivar)%dimlens(i) ) then
+ write(string1,*) trim(myerrorstring),' dim/dimlen ',i,dimlen,' not ',progvar(ivar)%dimlens(i)
+ call error_handler(E_ERR,'analysis_file_to_statevector',string1,source,revision,revdate)
+ endif
+
mycount(i) = dimlen
enddo DimCheck
@@ -2028,7 +2134,8 @@ subroutine analysis_file_to_statevector(filename, state_vector, model_time)
! If the single dimension is TIME, we only need a scalar.
! Pretty sure this cannot happen ...
- allocate(data_1d_array(mycount(1)))
+ ndim1 = mycount(1)
+ allocate(data_1d_array(ndim1))
call nc_check(nf90_get_var(ncid, VarID, data_1d_array, &
start=mystart(1:ncNdims), count=mycount(1:ncNdims)), &
'analysis_file_to_statevector', 'get_var '//trim(varname))
@@ -2038,7 +2145,9 @@ subroutine analysis_file_to_statevector(filename, state_vector, model_time)
elseif (ncNdims == 2) then
- allocate(data_2d_array(mycount(1), mycount(2)))
+ ndim1 = mycount(1)
+ ndim2 = mycount(2)
+ allocate(data_2d_array(ndim1, ndim2))
call nc_check(nf90_get_var(ncid, VarID, data_2d_array, &
start=mystart(1:ncNdims), count=mycount(1:ncNdims)), &
'analysis_file_to_statevector', 'get_var '//trim(varname))
@@ -2048,7 +2157,10 @@ subroutine analysis_file_to_statevector(filename, state_vector, model_time)
elseif (ncNdims == 3) then
- allocate(data_3d_array(mycount(1), mycount(2), mycount(3)))
+ ndim1 = mycount(1)
+ ndim2 = mycount(2)
+ ndim3 = mycount(3)
+ allocate(data_3d_array(ndim1, ndim2, ndim3))
call nc_check(nf90_get_var(ncid, VarID, data_3d_array, &
start=mystart(1:ncNdims), count=mycount(1:ncNdims)), &
'analysis_file_to_statevector', 'get_var '//trim(varname))
@@ -2064,20 +2176,20 @@ subroutine analysis_file_to_statevector(filename, state_vector, model_time)
enddo
-call nc_close_file(ncid, routine)
+call nc_check(nf90_close(ncid), &
+ 'analysis_file_to_statevector','close '//trim(filename))
end subroutine analysis_file_to_statevector
!-------------------------------------------------------------------
-subroutine statevector_to_analysis_file(state_vector, ncid, filename, statetime)
+subroutine statevector_to_analysis_file(state_vector, filename, statetime)
! Writes the current time and state variables from a dart state
! vector (1d array) into a mpas netcdf analysis file.
real(r8), intent(in) :: state_vector(:)
-integer, intent(in) :: ncid
character(len=*), intent(in) :: filename
type(time_type), intent(in) :: statetime
@@ -2090,12 +2202,50 @@ subroutine statevector_to_analysis_file(state_vector, ncid, filename, statetime)
integer, dimension(NF90_MAX_VAR_DIMS) :: dimIDs, mystart, mycount
character(len=NF90_MAX_NAME) :: varname
integer :: VarID, ncNdims, dimlen
-integer :: TimeDimID, TimeDimLength
+integer :: ncid, TimeDimID, TimeDimLength
logical :: done_winds
type(time_type) :: model_time
if ( .not. module_initialized ) call static_init_model
+! Check that the output file exists ...
+
+if ( .not. file_exist(filename) ) then
+ write(string1,*) 'cannot open file ', trim(filename),' for writing.'
+ call error_handler(E_ERR,'statevector_to_analysis_file',string1,source,revision,revdate)
+endif
+
+call nc_check(nf90_open(trim(filename), NF90_WRITE, ncid), &
+ 'statevector_to_analysis_file','open '//trim(filename))
+
+! make sure the time in the file is the same as the time on the data
+! we are trying to insert. we are only updating part of the contents
+! of the mpas analysis file, and state vector contents from a different
+! time won't be consistent with the rest of the file.
+
+model_time = get_analysis_time(ncid, filename)
+
+if ( model_time /= statetime ) then
+ call print_time( statetime,'DART current time',logfileunit)
+ call print_time(model_time,'mpas current time',logfileunit)
+ call print_time( statetime,'DART current time')
+ call print_time(model_time,'mpas current time')
+ write(string1,*)trim(filename),' current time must equal model time'
+ call error_handler(E_ERR,'statevector_to_analysis_file',string1,source,revision,revdate)
+endif
+
+! let the calling program print out the time information it wants.
+!if (do_output()) &
+! call print_time(statetime,'time of DART file '//trim(filename))
+!if (do_output()) &
+! call print_date(statetime,'date of DART file '//trim(filename))
+
+! The DART prognostic variables are only defined for a single time.
+! We already checked the assumption that variables are xy2d or xyz3d ...
+! IF the netCDF variable has a TIME dimension, it must be the last dimension,
+! and we need to read the LAST timestep and effectively squeeze out the
+! singleton dimension when we stuff it into the DART state vector.
+
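! In terms of the mystart/mycount arrays used below, that means the
! entry for the Time dimension selects a single (the last) record,
! e.g. (sketch only; 'timedim' stands for the position of the Time
! dimension among the variable's dimensions):
!
!    mystart(timedim) = TimeDimLength   ! last record
!    mycount(timedim) = 1               ! one timestep only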
TimeDimID = FindTimeDimension( ncid )
if ( TimeDimID > 0 ) then
@@ -2117,7 +2267,7 @@ subroutine statevector_to_analysis_file(state_vector, ncid, filename, statetime)
! this routine updates the edge winds from both the zonal and meridional
! fields, so only call it once.
- call update_wind_components(ncid, filename, state_vector, use_increments_for_u_update)
+ call update_wind_components(ncid, state_vector, use_increments_for_u_update)
done_winds = .true.
cycle PROGVARLOOP
endif
@@ -2147,6 +2297,13 @@ subroutine statevector_to_analysis_file(state_vector, ncid, filename, statetime)
call nc_check(nf90_inquire_dimension(ncid, dimIDs(i), len=dimlen), &
'statevector_to_analysis_file', string1)
+ if ( dimlen /= progvar(ivar)%dimlens(i) ) then
+ write(string1,*) trim(string2),' dim/dimlen ',i,dimlen,' not ',progvar(ivar)%dimlens(i)
+ write(string2,*)' but it should be.'
+ call error_handler(E_ERR, 'statevector_to_analysis_file', string1, &
+ source, revision, revdate, text2=string2)
+ endif
+
mycount(i) = dimlen
enddo DimCheck
@@ -2222,6 +2379,8 @@ subroutine statevector_to_analysis_file(state_vector, ncid, filename, statetime)
enddo PROGVARLOOP
+call nc_check(nf90_close(ncid), &
+ 'statevector_to_analysis_file','close '//trim(filename))
end subroutine statevector_to_analysis_file
@@ -2456,6 +2615,9 @@ end function get_analysis_time_ncid
function get_analysis_time_fname(filename)
+! HK: I don't understand this comment; it is identical to the comment
+! in get_analysis_time_ncid, but there is no netcdf access here.
+
! The analysis netcdf files have the start time of the experiment.
! The time array contains the time trajectory since then.
! This routine returns the start time of the experiment.
@@ -2468,21 +2630,19 @@ function get_analysis_time_fname(filename)
if ( .not. module_initialized ) call static_init_model
-! why do we care? we aren't opening this file, just taking the time
-! string from the filename itself.
if ( .not. file_exist(filename) ) then
- write(string1,*) 'file ', trim(filename),' does not exist.'
+ write(string1,*) 'cannot open file ', trim(filename),' for reading.'
call error_handler(E_ERR,'get_analysis_time',string1,source,revision,revdate)
endif
-! find the first digit and use that as the start of the string conversion
+! find the first number and use that as the start of the string conversion
i = scan(filename, "0123456789")
if (i <= 0) then
write(string1,*) 'cannot find time string in name ', trim(filename)
call error_handler(E_ERR,'get_analysis_time',string1,source,revision,revdate)
endif
-get_analysis_time_fname = string_to_time(filename(i:i+TIMELEN-1))
+get_analysis_time_fname = string_to_time(filename(i:i+19))
end function get_analysis_time_fname
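! A worked example with a hypothetical file name:
!
!    filename = 'mpas_init.2010-10-23_03:00:00.nc'
!    i = scan(filename, "0123456789")     ! i = 11, the '2' of the year
!
! so the YYYY-MM-DD_hh:mm:ss timestamp handed to string_to_time starts
! at character 11 of the name.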
@@ -2495,7 +2655,7 @@ subroutine write_model_time_file(time_filename, model_time, adv_to_time)
type(time_type), intent(in), optional :: adv_to_time
integer :: iunit
-character(len=TIMELEN) :: timestring
+character(len=19) :: timestring
type(time_type) :: deltatime
iunit = open_file(time_filename, action='write')
@@ -2522,35 +2682,7 @@ subroutine write_model_time_restart(ncid, dart_time)
integer, intent(in) :: ncid !< netcdf file handle
type(time_type), intent(in) :: dart_time
-integer :: year, month, day, hour, minute, second
-character(len=64) :: timestring
-character(len=*), parameter :: routine = 'write_model_time_restart'
-
-call get_date(dart_time, year, month, day, hour, minute, second)
-call set_wrf_date(timestring, year, month, day, hour, minute, second)
-
-! Define xtime variable if it does not already exist
-if (.not. nc_variable_exists(ncid, 'xtime')) then
-
- call nc_begin_define_mode(ncid)
-
- ! check to see if there are Time and date_string_length dimensions
- if (.not. nc_dimension_exists(ncid, 'Time')) &
- call nc_define_unlimited_dimension(ncid, 'Time', routine)
-
- if (.not. nc_dimension_exists(ncid, 'StrLen')) &
- call nc_define_dimension(ncid, 'StrLen', len(timestring), routine)
-
- ! make xtime(Time, StrLen)
- call nc_define_character_variable(ncid, 'xtime', (/ 'StrLen', 'Time ' /), routine)
- call nc_add_attribute_to_variable(ncid, 'xtime', 'units', "YYYY-MM-DD_hh:mm:ss", routine)
- call nc_add_attribute_to_variable(ncid, 'xtime', 'long_name', 'Model valid time', routine)
-
-call nc_end_define_mode(ncid)
-
-endif
-
-call nc_put_variable(ncid, 'xtime', timestring, routine)
+call error_handler(E_MSG, 'write_model_time', 'no routine for mpas_atm write model time')
end subroutine write_model_time_restart
@@ -2589,7 +2721,7 @@ subroutine get_xland(Cells,LandOrNot)
integer, intent(in) :: Cells
real(r8), allocatable, intent(out) :: LandOrNot(:)
-if ( .not. module_initialized ) call static_init_model()
+if ( .not. module_initialized ) call static_init_model
allocate(LandOrNot(Cells))
@@ -2597,63 +2729,34 @@ subroutine get_xland(Cells,LandOrNot)
end subroutine get_xland
-!------------------------------------------------------------------
-
-subroutine get_cell_center_coords(Cells,Lats,Lons)
-
-! public routine for returning cell center coordinates
-
-integer, intent(in) :: Cells
-real(r8), allocatable, intent(out) :: Lats(:), Lons(:)
-
-if ( .not. module_initialized ) call static_init_model()
-
-allocate(Lats(Cells), Lons(Cells))
-
-Lats = latCell
-Lons = lonCell
-
-end subroutine get_cell_center_coords
-
-!------------------------------------------------------------------
-
-subroutine get_bdy_mask(Cells,Mask)
-
-! public routine for returning mask for boundary cells
-
-integer, intent(in) :: Cells
-integer, allocatable, intent(out) :: Mask(:)
-
-if ( .not. module_initialized ) call static_init_model()
-
-allocate(Mask(Cells))
-
-Mask = bdyMaskCell
-
-end subroutine get_bdy_mask
-
-
!==================================================================
! The (model-specific) private interfaces come last
!==================================================================
!------------------------------------------------------------------
-!> convert time type into a character string with the
-!> format of YYYY-MM-DD_hh:mm:ss
function time_to_string(t, interval)
+! convert time type into a character string with the
+! format of YYYY-MM-DD_hh:mm:ss
+
+! passed variables
+ character(len=19) :: time_to_string
type(time_type), intent(in) :: t
logical, intent(in), optional :: interval
-character(len=TIMELEN) :: time_to_string
+
+! local variables
integer :: iyear, imonth, iday, ihour, imin, isec
integer :: ndays, nsecs
logical :: dointerval
+if (present(interval)) then
+ dointerval = interval
+else
dointerval = .false.
-if (present(interval)) dointerval = interval
+endif
! for interval output, output the number of days, then hours, mins, secs
! for date output, use the calendar routine to get the year/month/day hour:min:sec
@@ -2710,33 +2813,94 @@ function set_model_time_step()
if ( .not. module_initialized ) call static_init_model
! these are from the namelist
+!FIXME: sanity check these for valid ranges?
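! one possible range check (illustrative sketch only, not present in
! this routine):
!
!    if (assimilation_period_seconds < 0 .or. assimilation_period_days < 0) then
!       write(string1,*) 'assimilation_period_seconds and assimilation_period_days must be non-negative'
!       call error_handler(E_ERR, 'set_model_time_step', string1, source, revision, revdate)
!    endif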
set_model_time_step = set_time(assimilation_period_seconds, assimilation_period_days)
end function set_model_time_step
!------------------------------------------------------------------
-!> Read the grid dimensions from the MPAS netcdf file.
-!>
-subroutine read_grid_dims(ncid)
-integer, intent(in) :: ncid
+subroutine read_grid_dims()
-character(len=*), parameter :: routine = 'read_grid_dims'
+! Read the grid dimensions from the MPAS netcdf file.
+!
+! The file name comes from module storage ... namelist.
-nCells = nc_get_dimension_size(ncid, 'nCells', routine)
-nVertices = nc_get_dimension_size(ncid, 'nVertices', routine)
-nEdges = nc_get_dimension_size(ncid, 'nEdges', routine)
-maxEdges = nc_get_dimension_size(ncid, 'maxEdges', routine)
-nVertLevels = nc_get_dimension_size(ncid, 'nVertLevels', routine)
-nVertLevelsP1 = nc_get_dimension_size(ncid, 'nVertLevelsP1', routine)
-vertexDegree = nc_get_dimension_size(ncid, 'vertexDegree', routine)
-nSoilLevels = nc_get_dimension_size(ncid, 'nSoilLevels', routine)
+integer :: grid_id, dimid
-if (debug > 4 .and. do_output()) then
- write(*,*)
- write(*,*)'read_grid_dims: nCells is ', nCells
- write(*,*)'read_grid_dims: nVertices is ', nVertices
+if ( .not. module_initialized ) call static_init_model
+
+! get the ball rolling ...
+
+call nc_check( nf90_open(trim(grid_definition_filename), NF90_NOWRITE, grid_id), &
+ 'read_grid_dims', 'open '//trim(grid_definition_filename))
+
+! nCells : get dimid for 'nCells' and then get value
+
+call nc_check(nf90_inq_dimid(grid_id, 'nCells', dimid), &
+ 'read_grid_dims','inq_dimid nCells '//trim(grid_definition_filename))
+call nc_check(nf90_inquire_dimension(grid_id, dimid, len=nCells), &
+ 'read_grid_dims','inquire_dimension nCells '//trim(grid_definition_filename))
+
+! nVertices : get dimid for 'nVertices' and then get value
+
+call nc_check(nf90_inq_dimid(grid_id, 'nVertices', dimid), &
+ 'read_grid_dims','inq_dimid nVertices '//trim(grid_definition_filename))
+call nc_check(nf90_inquire_dimension(grid_id, dimid, len=nVertices), &
+ 'read_grid_dims','inquire_dimension nVertices '//trim(grid_definition_filename))
+
+! nEdges : get dimid for 'nEdges' and then get value
+
+call nc_check(nf90_inq_dimid(grid_id, 'nEdges', dimid), &
+ 'read_grid_dims','inq_dimid nEdges '//trim(grid_definition_filename))
+call nc_check(nf90_inquire_dimension(grid_id, dimid, len=nEdges), &
+ 'read_grid_dims','inquire_dimension nEdges '//trim(grid_definition_filename))
+
+! maxEdges : get dimid for 'maxEdges' and then get value
+
+call nc_check(nf90_inq_dimid(grid_id, 'maxEdges', dimid), &
+ 'read_grid_dims','inq_dimid maxEdges '//trim(grid_definition_filename))
+call nc_check(nf90_inquire_dimension(grid_id, dimid, len=maxEdges), &
+ 'read_grid_dims','inquire_dimension maxEdges '//trim(grid_definition_filename))
+
+! nVertLevels : get dimid for 'nVertLevels' and then get value
+
+call nc_check(nf90_inq_dimid(grid_id, 'nVertLevels', dimid), &
+ 'read_grid_dims','inq_dimid nVertLevels '//trim(grid_definition_filename))
+call nc_check(nf90_inquire_dimension(grid_id, dimid, len=nVertLevels), &
+ 'read_grid_dims','inquire_dimension nVertLevels '//trim(grid_definition_filename))
+
+! nVertLevelsP1 : get dimid for 'nVertLevelsP1' and then get value
+
+call nc_check(nf90_inq_dimid(grid_id, 'nVertLevelsP1', dimid), &
+ 'read_grid_dims','inq_dimid nVertLevelsP1 '//trim(grid_definition_filename))
+call nc_check(nf90_inquire_dimension(grid_id, dimid, len=nVertLevelsP1), &
+ 'read_grid_dims','inquire_dimension nVertLevelsP1 '//trim(grid_definition_filename))
+
+! vertexDegree : get dimid for 'vertexDegree' and then get value
+
+call nc_check(nf90_inq_dimid(grid_id, 'vertexDegree', dimid), &
+ 'read_grid_dims','inq_dimid vertexDegree '//trim(grid_definition_filename))
+call nc_check(nf90_inquire_dimension(grid_id, dimid, len=vertexDegree), &
+ 'read_grid_dims','inquire_dimension vertexDegree '//trim(grid_definition_filename))
+
+! nSoilLevels : get dimid for 'nSoilLevels' and then get value
+
+call nc_check(nf90_inq_dimid(grid_id, 'nSoilLevels', dimid), &
+ 'read_grid_dims','inq_dimid nSoilLevels '//trim(grid_definition_filename))
+call nc_check(nf90_inquire_dimension(grid_id, dimid, len=nSoilLevels), &
+ 'read_grid_dims','inquire_dimension nSoilLevels '//trim(grid_definition_filename))
+
+! tidy up
+
+call nc_check(nf90_close(grid_id), &
+ 'read_grid_dims','close '//trim(grid_definition_filename) )
+
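! Each dimension above is fetched with the same two-call pattern; a
! hypothetical helper (sketch only, not part of this module) would be:
!
!    function get_dim_len(ncid, dimname) result(dimlen)
!       integer,          intent(in) :: ncid
!       character(len=*), intent(in) :: dimname
!       integer :: dimlen, dimid
!       call nc_check(nf90_inq_dimid(ncid, dimname, dimid), &
!                     'get_dim_len', 'inq_dimid '//trim(dimname))
!       call nc_check(nf90_inquire_dimension(ncid, dimid, len=dimlen), &
!                     'get_dim_len', 'inquire_dimension '//trim(dimname))
!    end function get_dim_len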
+if (debug > 4 .and. do_output()) then
+ write(*,*)
+ write(*,*)'read_grid_dims: nCells is ', nCells
+ write(*,*)'read_grid_dims: nVertices is ', nVertices
write(*,*)'read_grid_dims: nEdges is ', nEdges
write(*,*)'read_grid_dims: maxEdges is ', maxEdges
write(*,*)'read_grid_dims: nVertLevels is ', nVertLevels
@@ -2749,67 +2913,154 @@ end subroutine read_grid_dims
!------------------------------------------------------------------
-!> Read the grid values in from the MPAS netcdf file.
-!>
-subroutine get_grid(ncid)
-integer, intent(in) :: ncid
+subroutine get_grid()
+
+! Read the actual grid values in from the MPAS netcdf file.
+!
+! The file name comes from module storage ... namelist.
+! This reads in the following arrays (all in module global storage):
+! latCell, lonCell, zGridFace, cellsOnVertex, xland, edgeNormalVectors,
+! and the cell/edge connectivity arrays read below.
+
+
+integer :: ncid, VarID
+
+! Read the netcdf file data
-character(len=*), parameter :: routine = 'get_grid'
+call nc_check(nf90_open(trim(grid_definition_filename), nf90_nowrite, ncid), 'get_grid', 'open '//trim(grid_definition_filename))
+! Read the variables
-call nc_get_variable(ncid, 'lonCell', lonCell, routine)
-call nc_get_variable(ncid, 'latCell', latCell, routine)
+call nc_check(nf90_inq_varid(ncid, 'latCell', VarID), &
+ 'get_grid', 'inq_varid latCell '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, latCell), &
+ 'get_grid', 'get_var latCell '//trim(grid_definition_filename))
-! MPAS locations are in radians - at this point DART needs degrees.
-! watch out for tiny rounding errors and clamp to exactly +/- 90
+call nc_check(nf90_inq_varid(ncid, 'lonCell', VarID), &
+ 'get_grid', 'inq_varid lonCell '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, lonCell), &
+ 'get_grid', 'get_var lonCell '//trim(grid_definition_filename))
+
+call nc_check(nf90_inq_varid(ncid, 'zgrid', VarID), &
+ 'get_grid', 'inq_varid zgrid '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, zGridFace), &
+ 'get_grid', 'get_var zgrid '//trim(grid_definition_filename))
+
+call nc_check(nf90_inq_varid(ncid, 'cellsOnVertex', VarID), &
+ 'get_grid', 'inq_varid cellsOnVertex '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, cellsOnVertex), &
+ 'get_grid', 'get_var cellsOnVertex '//trim(grid_definition_filename))
+
+call nc_check(nf90_inq_varid(ncid, 'xland', VarID), &
+ 'get_grid', 'inq_varid xland '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, xland), &
+ 'get_grid', 'get_var xland '//trim(grid_definition_filename))
+
+! MPAS analysis files store lat/lon in radians - at this point DART needs degrees.
latCell = latCell * rad2deg
lonCell = lonCell * rad2deg
where (latCell > 90.0_r8) latCell = 90.0_r8
where (latCell < -90.0_r8) latCell = -90.0_r8
-call nc_get_variable(ncid, 'dcEdge', dcEdge, routine)
-call nc_get_variable(ncid, 'zgrid', zGridFace, routine)
-call nc_get_variable(ncid, 'cellsOnVertex', cellsOnVertex, routine)
-call nc_get_variable(ncid, 'xland', xland, routine)
+! Read the variables
-dxmax = maxval(dcEdge) ! max grid resolution in meters
+call nc_check(nf90_inq_varid(ncid, 'edgeNormalVectors', VarID), &
+ 'get_grid', 'inq_varid edgeNormalVectors '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, edgeNormalVectors), &
+ 'get_grid', 'get_var edgeNormalVectors '//trim(grid_definition_filename))
-call nc_get_variable(ncid, 'edgeNormalVectors', edgeNormalVectors, routine)
-call nc_get_variable(ncid, 'nEdgesOnCell', nEdgesOnCell, routine)
-call nc_get_variable(ncid, 'edgesOnCell', edgesOnCell, routine)
-call nc_get_variable(ncid, 'cellsOnEdge', cellsOnEdge, routine)
-call nc_get_variable(ncid, 'verticesOnCell', verticesOnCell, routine)
+call nc_check(nf90_inq_varid(ncid, 'nEdgesOnCell', VarID), &
+ 'get_grid', 'inq_varid nEdgesOnCell '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, nEdgesOnCell), &
+ 'get_grid', 'get_var nEdgesOnCell '//trim(grid_definition_filename))
+
+call nc_check(nf90_inq_varid(ncid, 'edgesOnCell', VarID), &
+ 'get_grid', 'inq_varid edgesOnCell '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, edgesOnCell), &
+ 'get_grid', 'get_var edgesOnCell '//trim(grid_definition_filename))
+
+call nc_check(nf90_inq_varid(ncid, 'cellsOnEdge', VarID), &
+ 'get_grid', 'inq_varid cellsOnEdge '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, cellsOnEdge), &
+ 'get_grid', 'get_var cellsOnEdge '//trim(grid_definition_filename))
if(data_on_edges) then
- call nc_get_variable(ncid, 'lonEdge', lonEdge, routine)
- call nc_get_variable(ncid, 'latEdge', latEdge, routine)
+ call nc_check(nf90_inq_varid(ncid, 'latEdge', VarID), &
+ 'get_grid', 'inq_varid latEdge '//trim(grid_definition_filename))
+ call nc_check(nf90_get_var( ncid, VarID, latEdge), &
+ 'get_grid', 'get_var latEdge '//trim(grid_definition_filename))
+
+ call nc_check(nf90_inq_varid(ncid, 'lonEdge', VarID), &
+ 'get_grid', 'inq_varid lonEdge '//trim(grid_definition_filename))
+ call nc_check(nf90_get_var( ncid, VarID, lonEdge), &
+ 'get_grid', 'get_var lonEdge '//trim(grid_definition_filename))
latEdge = latEdge * rad2deg
lonEdge = lonEdge * rad2deg
- where (latEdge > 90.0_r8) latEdge = 90.0_r8
- where (latEdge < -90.0_r8) latEdge = -90.0_r8
- call nc_get_variable(ncid, 'xEdge', xEdge, routine)
- call nc_get_variable(ncid, 'yEdge', yEdge, routine)
- call nc_get_variable(ncid, 'zEdge', zEdge, routine)
+ call nc_check(nf90_inq_varid(ncid, 'xEdge', VarID), &
+ 'get_grid', 'inq_varid xEdge '//trim(grid_definition_filename))
+ call nc_check(nf90_get_var( ncid, VarID, xEdge), &
+ 'get_grid', 'get_var xEdge '//trim(grid_definition_filename))
+
+ call nc_check(nf90_inq_varid(ncid, 'yEdge', VarID), &
+ 'get_grid', 'inq_varid yEdge '//trim(grid_definition_filename))
+ call nc_check(nf90_get_var( ncid, VarID, yEdge), &
+ 'get_grid', 'get_var yEdge '//trim(grid_definition_filename))
+
+ call nc_check(nf90_inq_varid(ncid, 'zEdge', VarID), &
+ 'get_grid', 'inq_varid zEdge '//trim(grid_definition_filename))
+ call nc_check(nf90_get_var( ncid, VarID, zEdge), &
+ 'get_grid', 'get_var zEdge '//trim(grid_definition_filename))
endif
-call nc_get_variable(ncid, 'xVertex', xVertex, routine)
-call nc_get_variable(ncid, 'yVertex', yVertex, routine)
-call nc_get_variable(ncid, 'zVertex', zVertex, routine)
+call nc_check(nf90_inq_varid(ncid, 'xVertex', VarID), &
+ 'get_grid', 'inq_varid xVertex '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, xVertex), &
+ 'get_grid', 'get_var xVertex '//trim(grid_definition_filename))
+
+call nc_check(nf90_inq_varid(ncid, 'yVertex', VarID), &
+ 'get_grid', 'inq_varid yVertex '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, yVertex), &
+ 'get_grid', 'get_var yVertex '//trim(grid_definition_filename))
+
+call nc_check(nf90_inq_varid(ncid, 'zVertex', VarID), &
+ 'get_grid', 'inq_varid zVertex '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, zVertex), &
+ 'get_grid', 'get_var zVertex '//trim(grid_definition_filename))
+
+call nc_check(nf90_inq_varid(ncid, 'verticesOnCell', VarID), &
+ 'get_grid', 'inq_varid verticesOnCell '//trim(grid_definition_filename))
+call nc_check(nf90_get_var( ncid, VarID, verticesOnCell), &
+ 'get_grid', 'get_var verticesOnCell '//trim(grid_definition_filename))
! Get the boundary information if available.
! Assuming the existence of this variable is sufficient to determine if
! the grid is defined everywhere or not.
-if (nc_variable_exists(ncid, 'maxLevelCell')) then
+if ( nf90_inq_varid(ncid, 'boundaryEdge', VarID) == NF90_NOERR ) then
+ allocate(boundaryEdge(nVertLevels,nEdges))
+ call nc_check(nf90_get_var( ncid, VarID, boundaryEdge), &
+ 'get_grid', 'get_var boundaryEdge '//trim(grid_definition_filename))
+ global_grid = .false.
+endif
+
+if ( nf90_inq_varid(ncid, 'boundaryVertex', VarID) == NF90_NOERR ) then
+ allocate(boundaryVertex(nVertLevels,nVertices))
+ call nc_check(nf90_get_var( ncid, VarID, boundaryVertex), &
+ 'get_grid', 'get_var boundaryVertex '//trim(grid_definition_filename))
+ global_grid = .false.
+endif
+
+if ( nf90_inq_varid(ncid, 'maxLevelCell', VarID) == NF90_NOERR ) then
allocate(maxLevelCell(nCells))
- call nc_get_variable(ncid, 'maxLevelCell', maxlevelCell, routine)
+ call nc_check(nf90_get_var( ncid, VarID, maxLevelCell), &
+ 'get_grid', 'get_var maxLevelCell '//trim(grid_definition_filename))
all_levels_exist_everywhere = .false.
endif
+call nc_check(nf90_close(ncid), 'get_grid','close '//trim(grid_definition_filename) )
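+
+! Illustrative sketch only (the helper name and the rank-1 real case are
+! assumed, not part of the DART sources): the repeated inq_varid / get_var /
+! nc_check idiom above could be collected into a small helper, e.g.
+!
+!   subroutine get_grid_real_1d(ncid, varname, values)
+!   use netcdf
+!   integer,          intent(in)  :: ncid
+!   character(len=*), intent(in)  :: varname
+!   real(r8),         intent(out) :: values(:)
+!   integer :: VarID
+!   ! look up the variable id, then read the full array, checking both calls
+!   call nc_check(nf90_inq_varid(ncid, varname, VarID), &
+!                 'get_grid_real_1d', 'inq_varid '//trim(varname))
+!   call nc_check(nf90_get_var(ncid, VarID, values), &
+!                 'get_grid_real_1d', 'get_var '//trim(varname))
+!   end subroutine get_grid_real_1d
+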
! A little sanity check
@@ -2836,10 +3087,10 @@ subroutine get_grid(ncid)
write(*,*)'yVertex range ',minval(yVertex), maxval(yVertex)
write(*,*)'zVertex range ',minval(zVertex), maxval(zVertex)
write(*,*)'verticesOnCell range ',minval(verticesOnCell), maxval(verticesOnCell)
- if (allocated(bdyMaskCell)) &
- write(*,*)'bdyMaskCell range ',minval(bdyMaskCell), maxval(bdyMaskCell)
- if (allocated(bdyMaskEdge)) &
- write(*,*)'bdyMaskEdge range ',minval(bdyMaskEdge), maxval(bdyMaskEdge)
+ if (allocated(boundaryEdge)) &
+ write(*,*)'boundaryEdge range ',minval(boundaryEdge), maxval(boundaryEdge)
+ if (allocated(boundaryVertex)) &
+ write(*,*)'boundaryVertex range ',minval(boundaryVertex), maxval(boundaryVertex)
if (allocated(maxLevelCell)) &
write(*,*)'maxLevelCell range ',minval(maxLevelCell), maxval(maxLevelCell)
@@ -2850,10 +3101,9 @@ end subroutine get_grid
!------------------------------------------------------------------
-subroutine update_wind_components(ncid, filename, state_vector, use_increments_for_u_update)
+subroutine update_wind_components(ncid, state_vector, use_increments_for_u_update)
- integer, intent(in) :: ncid
- character(len=*), intent(in) :: filename
+ integer, intent(in) :: ncid ! netCDF handle for model_analysis_filename
real(r8), intent(in) :: state_vector(:)
logical, intent(in) :: use_increments_for_u_update
@@ -2918,9 +3168,9 @@ subroutine update_wind_components(ncid, filename, state_vector, use_increments_f
! and uReconstructMeridional fields from the mpas analysis netcdf file.
if (use_increments_for_u_update) then
- call read_2d_from_nc_file(ncid, filename, 'u', u)
- call read_2d_from_nc_file(ncid, filename, 'uReconstructZonal', ucell)
- call read_2d_from_nc_file(ncid, filename, 'uReconstructMeridional', vcell)
+ call read_2d_from_nc_file(ncid, 'u', u)
+ call read_2d_from_nc_file(ncid, 'uReconstructZonal', ucell)
+ call read_2d_from_nc_file(ncid, 'uReconstructMeridional', vcell)
if ((debug > 8) .and. do_output()) then
write(*,*)
@@ -2977,7 +3227,7 @@ subroutine update_wind_components(ncid, filename, state_vector, use_increments_f
! Write back to the mpas analysis file.
-call put_u(ncid, filename, u)
+call put_u(ncid, u)
deallocate(ucell, vcell, u)
@@ -2986,9 +3236,8 @@ end subroutine update_wind_components
!------------------------------------------------------------------
-subroutine read_2d_from_nc_file(ncid, filename, varname, data)
+subroutine read_2d_from_nc_file(ncid, varname, data)
integer, intent(in) :: ncid
- character(len=*), intent(in) :: filename
character(len=*), intent(in) :: varname
real(r8), intent(out) :: data(:,:)
@@ -3003,17 +3252,17 @@ subroutine read_2d_from_nc_file(ncid, filename, varname, data)
call nc_check(nf90_inq_varid(ncid, varname, VarID), &
'read_from_nc_file', &
- 'inq_varid '//trim(varname)//' '//trim(filename))
+ 'inq_varid '//trim(varname)//' '//trim(model_analysis_filename))
call nc_check(nf90_inquire_variable(ncid, VarID, dimids=dimIDs, ndims=numdims), &
'read_from_nc_file', &
- 'inquire '//trim(varname)//' '//trim(filename))
+ 'inquire '//trim(varname)//' '//trim(model_analysis_filename))
do i=1, numdims
write(string1,*)'inquire length for dimension ',i
call nc_check(nf90_inquire_dimension(ncid, dimIDs(i), len=dimlen, name=dimname), &
'read_2d_from_nc_file', &
- trim(string1)//' '//trim(filename))
+ trim(string1)//' '//trim(model_analysis_filename))
if (trim(dimname) == 'Time') then
mystart(i) = dimlen
mycount(numdims) = 1
@@ -3026,16 +3275,15 @@ subroutine read_2d_from_nc_file(ncid, filename, varname, data)
call nc_check( nf90_get_var(ncid, VarID, data, &
start=mystart(1:numdims), count=mycount(1:numdims)), &
'read_2d_from_nc_file', &
- 'get_var u '//trim(filename))
+ 'get_var '//trim(varname)//' '//trim(model_analysis_filename))
end subroutine read_2d_from_nc_file
!------------------------------------------------------------------
-subroutine put_u(ncid, filename, u)
+subroutine put_u(ncid, u)
integer, intent(in) :: ncid
- character(len=*), intent(in) :: filename
real(r8), intent(in) :: u(:,:) ! u(nVertLevels, nEdges)
! Put the newly updated 'u' field back into the netcdf file.
@@ -3048,20 +3296,20 @@ subroutine put_u(ncid, filename, u)
call nc_check(nf90_Inquire(ncid,nDimensions,nVariables,nAttributes,unlimitedDimID), &
- 'put_u', 'inquire '//trim(filename))
+ 'put_u', 'inquire '//trim(model_analysis_filename))
call nc_check(nf90_inquire_dimension(ncid, unlimitedDimID, len=ntimes), &
- 'put_u', 'inquire time dimension length '//trim(filename))
+ 'put_u', 'inquire time dimension length '//trim(model_analysis_filename))
call nc_check(nf90_inq_varid(ncid, 'u', VarID), &
- 'put_u', 'inq_varid u '//trim(filename))
+ 'put_u', 'inq_varid u '//trim(model_analysis_filename))
call nc_check(nf90_inquire_variable(ncid, VarID, dimids=dimIDs, ndims=numdims), &
- 'put_u', 'inquire u '//trim(filename))
+ 'put_u', 'inquire u '//trim(model_analysis_filename))
do i=1, numdims
call nc_check(nf90_inquire_dimension(ncid, dimIDs(i), len=numu(i)), &
- 'put_u', 'inquire U dimension length '//trim(filename))
+ 'put_u', 'inquire U dimension length '//trim(model_analysis_filename))
enddo
! for all but the time dimension, read all the values.
@@ -3072,7 +3320,7 @@ subroutine put_u(ncid, filename, u)
mycount(numdims) = 1
call nc_check(nf90_put_var(ncid, VarID, u, start=mystart, count=mycount), &
- 'put_u', 'put_var u '//trim(filename))
+ 'put_u', 'put_var u '//trim(model_analysis_filename))
! A little sanity check
@@ -3098,12 +3346,24 @@ subroutine vector_to_1d_prog_var(x, ivar, data_1d_array)
integer, intent(in) :: ivar
real(r8), dimension(:), intent(out) :: data_1d_array
-integer :: start_offset, end_offset
+integer :: idim1,ii
+
+if ( .not. module_initialized ) call static_init_model
+
+ii = progvar(ivar)%index1
-start_offset = progvar(ivar)%index1
-end_offset = start_offset + size(data_1d_array) - 1
+do idim1 = 1, size(data_1d_array, 1)
+ data_1d_array(idim1) = x(ii)
+ ii = ii + 1
+enddo
-data_1d_array = x(start_offset:end_offset)
+ii = ii - 1
+if ( ii /= progvar(ivar)%indexN ) then
+ write(string1, *)'Variable '//trim(progvar(ivar)%varname)//' filled wrong.'
+ write(string2, *)'Should have ended at ',progvar(ivar)%indexN,' actually ended at ',ii
+ call error_handler(E_ERR,'vector_to_1d_prog_var', string1, &
+ source, revision, revdate, text2=string2)
+endif
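+
+! Worked example (index values assumed for illustration): if
+! progvar(ivar)%index1 = 10 and size(data_1d_array) = 10, the loop copies
+! x(10:19) into data_1d_array and leaves ii = 20; after the decrement
+! ii = 19, which must match progvar(ivar)%indexN for a correctly sized
+! variable.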
end subroutine vector_to_1d_prog_var
@@ -3119,12 +3379,26 @@ subroutine vector_to_2d_prog_var(x, ivar, data_2d_array)
integer, intent(in) :: ivar
real(r8), dimension(:,:), intent(out) :: data_2d_array
-integer :: start_offset, end_offset
+integer :: idim1,idim2,ii
+
+if ( .not. module_initialized ) call static_init_model
+
+ii = progvar(ivar)%index1
-start_offset = progvar(ivar)%index1
-end_offset = start_offset + size(data_2d_array) - 1
+do idim2 = 1,size(data_2d_array, 2)
+ do idim1 = 1,size(data_2d_array, 1)
+ data_2d_array(idim1,idim2) = x(ii)
+ ii = ii + 1
+ enddo
+enddo
-data_2d_array = reshape(x(start_offset:end_offset), shape(data_2d_array))
+ii = ii - 1
+if ( ii /= progvar(ivar)%indexN ) then
+ write(string1, *)'Variable '//trim(progvar(ivar)%varname)//' filled wrong.'
+ write(string2, *)'Should have ended at ',progvar(ivar)%indexN,' actually ended at ',ii
+ call error_handler(E_ERR,'vector_to_2d_prog_var', string1, &
+ source, revision, revdate, text2=string2)
+endif
end subroutine vector_to_2d_prog_var
@@ -3140,12 +3414,28 @@ subroutine vector_to_3d_prog_var(x, ivar, data_3d_array)
integer, intent(in) :: ivar
real(r8), dimension(:,:,:), intent(out) :: data_3d_array
-integer :: start_offset, end_offset
+integer :: idim1,idim2,idim3,ii
-start_offset = progvar(ivar)%index1
-end_offset = start_offset + size(data_3d_array) - 1
+if ( .not. module_initialized ) call static_init_model
-data_3d_array = reshape(x(start_offset:end_offset), shape(data_3d_array))
+ii = progvar(ivar)%index1
+
+do idim3 = 1,size(data_3d_array, 3)
+ do idim2 = 1,size(data_3d_array, 2)
+ do idim1 = 1,size(data_3d_array, 1)
+ data_3d_array(idim1,idim2,idim3) = x(ii)
+ ii = ii + 1
+ enddo
+ enddo
+enddo
+
+ii = ii - 1
+if ( ii /= progvar(ivar)%indexN ) then
+ write(string1, *)'Variable '//trim(progvar(ivar)%varname)//' filled wrong.'
+ write(string2, *)'Should have ended at ',progvar(ivar)%indexN,' actually ended at ',ii
+ call error_handler(E_ERR,'vector_to_3d_prog_var', string1, &
+ source, revision, revdate, text2=string2)
+endif
end subroutine vector_to_3d_prog_var
@@ -3161,12 +3451,24 @@ subroutine prog_var_1d_to_vector(data_1d_array, x, ivar)
real(r8), dimension(:), intent(inout) :: x
integer, intent(in) :: ivar
-integer :: start_offset, end_offset
+integer :: idim1,ii
-start_offset = progvar(ivar)%index1
-end_offset = start_offset + size(data_1d_array) - 1
+if ( .not. module_initialized ) call static_init_model
+
+ii = progvar(ivar)%index1
+
+do idim1 = 1, size(data_1d_array, 1)
+ x(ii) = data_1d_array(idim1)
+ ii = ii + 1
+enddo
-x(start_offset:end_offset) = data_1d_array
+ii = ii - 1
+if ( ii /= progvar(ivar)%indexN ) then
+ write(string1, *)'Variable '//trim(progvar(ivar)%varname)//' read wrong.'
+ write(string2, *)'Should have ended at ',progvar(ivar)%indexN,' actually ended at ',ii
+ call error_handler(E_ERR,'prog_var_1d_to_vector', string1, &
+ source, revision, revdate, text2=string2)
+endif
end subroutine prog_var_1d_to_vector
@@ -3182,12 +3484,26 @@ subroutine prog_var_2d_to_vector(data_2d_array, x, ivar)
real(r8), dimension(:), intent(inout) :: x
integer, intent(in) :: ivar
-integer :: start_offset, end_offset
+integer :: idim1,idim2,ii
+
+if ( .not. module_initialized ) call static_init_model
+
+ii = progvar(ivar)%index1
-start_offset = progvar(ivar)%index1
-end_offset = start_offset + size(data_2d_array) - 1
+do idim2 = 1,size(data_2d_array, 2)
+ do idim1 = 1,size(data_2d_array, 1)
+ x(ii) = data_2d_array(idim1,idim2)
+ ii = ii + 1
+ enddo
+enddo
-x(start_offset:end_offset) = reshape(data_2d_array, (/ size(data_2d_array) /) )
+ii = ii - 1
+if ( ii /= progvar(ivar)%indexN ) then
+ write(string1, *)'Variable '//trim(progvar(ivar)%varname)//' read wrong.'
+ write(string2, *)'Should have ended at ',progvar(ivar)%indexN,' actually ended at ',ii
+ call error_handler(E_ERR,'prog_var_2d_to_vector', string1, &
+ source, revision, revdate, text2=string2)
+endif
end subroutine prog_var_2d_to_vector
@@ -3203,20 +3519,34 @@ subroutine prog_var_3d_to_vector(data_3d_array, x, ivar)
real(r8), dimension(:), intent(inout) :: x
integer, intent(in) :: ivar
-integer :: start_offset, end_offset
+integer :: idim1,idim2,idim3,ii
+
+if ( .not. module_initialized ) call static_init_model
+
+ii = progvar(ivar)%index1
-start_offset = progvar(ivar)%index1
-end_offset = start_offset + size(data_3d_array) - 1
+do idim3 = 1,size(data_3d_array, 3)
+ do idim2 = 1,size(data_3d_array, 2)
+ do idim1 = 1,size(data_3d_array, 1)
+ x(ii) = data_3d_array(idim1,idim2,idim3)
+ ii = ii + 1
+ enddo
+ enddo
+enddo
-x(start_offset:end_offset) = reshape(data_3d_array, (/ size(data_3d_array) /))
+ii = ii - 1
+if ( ii /= progvar(ivar)%indexN ) then
+ write(string1, *)'Variable '//trim(progvar(ivar)%varname)//' read wrong.'
+ write(string2, *)'Should have ended at ',progvar(ivar)%indexN,' actually ended at ',ii
+ call error_handler(E_ERR,'prog_var_3d_to_vector', string1, &
+ source, revision, revdate, text2=string2)
+endif
end subroutine prog_var_3d_to_vector
!------------------------------------------------------------------
-!>@todo fill in the inputs we need for the add_domain() routine
-
subroutine verify_state_variables( state_variables, ncid, filename, ngood, table )
character(len=*), dimension(:), intent(in) :: state_variables
@@ -3321,6 +3651,11 @@ subroutine verify_state_variables( state_variables, ncid, filename, ngood, table
call error_handler(E_MSG,'verify_state_variables',string1,source,revision,revdate,text2=string2)
endif
+! TJH FIXME need to add a check so they cannot have both normal winds and reconstructed winds in
+! the DART state vector. nsc - not sure that should be illegal; which one is
+! updated in the restart file is controlled by the namelist, and both could be in
+! the state vector for testing.
+
end subroutine verify_state_variables
@@ -3373,6 +3708,8 @@ subroutine dump_progvar(ivar)
!%! type progvartype
!%! private
!%! character(len=NF90_MAX_NAME) :: varname
+!%! character(len=NF90_MAX_NAME) :: long_name
+!%! character(len=NF90_MAX_NAME) :: units
!%! integer, dimension(NF90_MAX_VAR_DIMS) :: dimlens
!%! integer :: xtype ! netCDF variable type (NF90_double, etc.)
!%! integer :: numdims ! number of dims - excluding TIME
@@ -3399,6 +3736,10 @@ subroutine dump_progvar(ivar)
write( * ,*)
write(logfileunit,*) 'variable number ',ivar,' is ',trim(progvar(ivar)%varname)
write( * ,*) 'variable number ',ivar,' is ',trim(progvar(ivar)%varname)
+write(logfileunit,*) ' long_name ',trim(progvar(ivar)%long_name)
+write( * ,*) ' long_name ',trim(progvar(ivar)%long_name)
+write(logfileunit,*) ' units ',trim(progvar(ivar)%units)
+write( * ,*) ' units ',trim(progvar(ivar)%units)
write(logfileunit,*) ' xtype ',progvar(ivar)%xtype
write( * ,*) ' xtype ',progvar(ivar)%xtype
write(logfileunit,*) ' dimlens ',progvar(ivar)%dimlens(1:progvar(ivar)%numdims)
@@ -3615,6 +3956,45 @@ subroutine get_variable_bounds(bounds_table, ivar)
end subroutine get_variable_bounds
+!------------------------------------------------------------
+
+subroutine define_var_dims(ncid,ivar, memberdimid, unlimiteddimid, ndims, dimids)
+
+! set the dimids array needed to augment the natural shape of the variable
+! with the two additional dimids needed by the DART diagnostic output.
+integer, intent(in) :: ncid
+integer, intent(in) :: ivar
+integer, intent(in) :: memberdimid, unlimiteddimid
+integer, intent(out) :: ndims
+integer, dimension(:), intent(out) :: dimids
+
+integer :: i,mydimid
+
+ndims = 0
+dimids = 0
+
+do i = 1,progvar(ivar)%numdims
+
+ ! Each of these dimension names (originally from the MPAS analysis file)
+ ! must exist in the DART diagnostic netcdf files.
+
+ call nc_check(nf90_inq_dimid(ncid, trim(progvar(ivar)%dimname(i)), mydimid), &
+ 'define_var_dims','inq_dimid '//trim(progvar(ivar)%dimname(i)))
+
+ ndims = ndims + 1
+
+ dimids(ndims) = mydimid
+
+enddo
+
+ndims = ndims + 1
+dimids(ndims) = memberdimid
+ndims = ndims + 1
+dimids(ndims) = unlimiteddimid
+
+end subroutine define_var_dims
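+
+! Example (dimension names assumed for illustration): for a variable with
+! numdims = 2 and dimnames 'nVertLevels' and 'nCells', this returns
+! ndims = 4 and dimids = (/ dimid(nVertLevels), dimid(nCells), memberdimid,
+! unlimiteddimid /), i.e. the diagnostic variable is dimensioned
+! (nVertLevels, nCells, copy, time).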
+
+
!------------------------------------------------------------
subroutine get_index_range_string(string,index1,indexN)
@@ -3736,14 +4116,8 @@ subroutine compute_pressure_at_loc(state_handle, ens_size, location, ploc, istat
integer, intent(out) :: istatus(ens_size)
! convert the vertical coordinate at the given location
-! to a pressure. if the vertical type is "undefined" then
-! return 2001 mb. this routine is currently only used to
-! test for and reject obs which are above the upper limit
-! (and for a 'VERTISUNDEF' obs it needs to return a large
-! value with no error code). it's also used if the
-! interpolation type is QTY_PRESSURE. we don't explicitly
-! prevent someone from creating a synthetic obs that has
-! a VERTISUNDEF type, but it doesn't make sense in that case.
+! to a pressure. if the vertical is undefined,
+! return a fixed, unrealistically large 2001 mb (200100 Pa).
real(r8), dimension(3) :: llv, llv_new ! lon/lat/vert
real(r8), dimension(3, ens_size) :: values
@@ -3754,19 +4128,19 @@ subroutine compute_pressure_at_loc(state_handle, ens_size, location, ploc, istat
! default is failure
ploc = MISSING_R8
-istatus = 99
! base location
llv = get_location(location)
if(is_vertical(location, "PRESSURE")) then
- ploc(:) = llv(3)
- istatus(:) = 0
+ do e = 1, ens_size
+ ploc(e) = llv(3)
+ enddo
else if(is_vertical(location, "HEIGHT") .or. is_vertical(location, "LEVEL")) then
new_location = location
- ! the quantity doesn't matter for this call but we have to pass in something.
+ ! FIXME: an obs_kind must be passed here; QTY_TEMPERATURE is used as a placeholder.
call convert_vert_distrib(state_handle, ens_size, new_location, QTY_TEMPERATURE, VERTISPRESSURE, istatus)
do e = 1, ens_size
@@ -3779,39 +4153,35 @@ subroutine compute_pressure_at_loc(state_handle, ens_size, location, ploc, istat
else if(is_vertical(location, "SURFACE")) then
ivars(1) = get_progvar_index_from_kind(QTY_SURFACE_PRESSURE)
+
if ( ivars(1) >= 0 ) then
call compute_scalar_with_barycentric(state_handle, ens_size, location, 1, ivars, values, istatus)
- where (istatus == 0) ploc(:) = values(1,:)
+ if ( all(istatus/= 0) ) return
+ do e = 1, ens_size
+ ploc(e) = values(1,e)
+ enddo
else
- istatus = 88 ! required quantity not in state vector
- return
-
-!%! !>original code:
-!%! !>@todo FIXME: do we really want to do this if the vert is surface and
-!%! !> the surface pressure field is not in the state? this is going to return
-!%! !> the pressure at the midpoint of the first level, is it not?
-!%!
-!%! new_location(1) = set_location(llv(1), llv(2), 1.0_r8, VERTISLEVEL)
-!%!
-!%! ! Need to get base offsets for the potential temperature, density, and water
-!%! ! vapor mixing fields in the state vector
-!%! ivars(1) = get_progvar_index_from_kind(QTY_POTENTIAL_TEMPERATURE)
-!%! ivars(2) = get_progvar_index_from_kind(QTY_DENSITY)
-!%! ivars(3) = get_progvar_index_from_kind(QTY_VAPOR_MIXING_RATIO)
-!%!
-!%! call compute_scalar_with_barycentric (state_handle, ens_size, new_location(1), 3, ivars, values, istatus)
-!%! if ( all(istatus /= 0) ) return
-!%!
-!%! ! Convert surface theta, rho, qv into pressure
-!%! call compute_full_pressure(ens_size, values(1, :), values(2, :), values(3, :), ploc(:), tk(:), istatus(:) )
+
+ new_location(1) = set_location(llv(1), llv(2), 1.0_r8, VERTISLEVEL)
+
+ ! Need to get base offsets for the potential temperature, density, and water
+ ! vapor mixing fields in the state vector
+ ivars(1) = get_progvar_index_from_kind(QTY_POTENTIAL_TEMPERATURE)
+ ivars(2) = get_progvar_index_from_kind(QTY_DENSITY)
+ ivars(3) = get_progvar_index_from_kind(QTY_VAPOR_MIXING_RATIO)
+
+ call compute_scalar_with_barycentric (state_handle, ens_size, new_location(1), 3, ivars, values, istatus)
+ if ( all(istatus/= 0) ) return
+
+ ! Convert surface theta, rho, qv into pressure
+ call compute_full_pressure(ens_size, values(1, :), values(2, :), values(3, :), ploc(:), tk(:), istatus(:) )
endif
else if(is_vertical(location, "UNDEFINED")) then ! not error, but no exact vert loc either
ploc(:) = 200100.0_r8 ! this is an unrealistic pressure value to indicate no known pressure.
- istatus(:) = 0 ! see comment at top of this routine for why this is ok.
else
call error_handler(E_ERR, 'compute_pressure:', 'internal error: unknown type of vertical', &
@@ -3826,7 +4196,6 @@ subroutine compute_pressure_at_loc(state_handle, ens_size, location, ploc, istat
end subroutine compute_pressure_at_loc
!------------------------------------------------------------------
-!> convert the vertical type for one or more observation locations.
subroutine convert_vertical_obs(state_handle, num, locs, loc_qtys, loc_types, &
which_vert, status)
@@ -3847,7 +4216,6 @@ subroutine convert_vertical_obs(state_handle, num, locs, loc_qtys, loc_types, &
end subroutine convert_vertical_obs
!--------------------------------------------------------------------
-!> convert the vertical type for one or more state locations.
subroutine convert_vertical_state(state_handle, num, locs, loc_qtys, loc_indx, &
which_vert, istatus)
@@ -3862,19 +4230,12 @@ subroutine convert_vertical_state(state_handle, num, locs, loc_qtys, loc_indx, &
integer :: i, status(1)
-!> we are using code from get_state_meta_data to get
-!> the indices for cell id and vert level. this calls a
-!> modified convert_vert_distrib() routine that doesn't
-!> have to search for the cell centers.
-
-istatus = 0
-
do i=1, num
- call convert_vert_distrib_state(state_handle, 1, locs(i:i), loc_qtys(i), &
- loc_indx(i), which_vert, status)
-
- ! save the first error we see - but continue to convert the rest
- if (istatus == 0 .and. status(1) /= 0) istatus = status(1)
+ call convert_vert_distrib(state_handle, 1, locs(i:i), loc_qtys(i), which_vert, status)
+ if (status(1) /= 0) then
+ istatus = status(1)
+ return
+ endif
enddo
@@ -3882,11 +4243,10 @@ end subroutine convert_vertical_state
!------------------------------------------------------------------
-!> code to convert an observation location's vertical coordinate type.
subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztypeout, istatus)
-! This subroutine converts a given obs vertical coordinate to
+! This subroutine converts a given ob/state vertical coordinate to
! the vertical localization coordinate type requested through the
! model_mod namelist.
!
@@ -3897,20 +4257,20 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
! their DART location information from get_state_meta_data
! which is called by filter_assim during the assimilation
! process.
-! (3) state_handle is the relevant DART state vector for carrying out
+! (3) x is the relevant DART state vector for carrying out
! computations necessary for the vertical coordinate
! transformations. As the vertical coordinate is only used
! in distance computations, this is actually the "expected"
! vertical coordinate, so that computed distance is the
! "expected" distance. Thus, under normal circumstances,
-! the state that is supplied to convert_vert_distrib should be the
+! x that is supplied to vert_convert should be the
! ensemble mean. Nevertheless, the subroutine has the
! functionality to operate on any DART state vector that
! is supplied to it.
type(ensemble_type), intent(in) :: state_handle
integer, intent(in) :: ens_size
-type(location_type), intent(inout) :: location(ens_size) ! because the verticals may differ
+type(location_type), intent(inout) :: location(ens_size) ! an array so each ensemble member can carry its own vertical value
integer, intent(in) :: obs_kind
integer, intent(in) :: ztypeout
integer, intent(out) :: istatus(ens_size)
@@ -3923,7 +4283,6 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
integer, dimension(3,ens_size) :: k_low, k_up
real(r8), dimension(ens_size) :: zin, zout
real(r8), dimension(ens_size) :: tk, fullp, surfp
-logical :: at_surf, do_norm
type(location_type), dimension(ens_size) :: surfloc
real(r8) :: weights(3)
@@ -3939,10 +4298,12 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
weights = 0.0_r8
! first off, check if ob is identity ob. if so get_state_meta_data() will
-! return the location.
+! have returned location information already in the requested vertical type.
if (obs_kind < 0) then
- call get_state_meta_data(-int(obs_kind,i8),location(1)) ! will be the same across the ensemble
+ call get_state_meta_data(-int(obs_kind,i8),location(1)) ! identity obs index; will be the same across the ensemble
location(:) = location(1)
+ istatus(:) = 0
+ return
endif
! if the existing coord is already in the requested vertical units
@@ -4003,16 +4364,8 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
! and the fraction (fract) for vertical interpolation.
call find_triangle (location(1), n, c, weights, istatus(1))
- if(istatus(1) /= 0) then
- istatus(:) = istatus(1)
- location(:) = set_location(llv_loc(1, 1),llv_loc(2, 1),missing_r8,ztypeout)
- return
- endif
call find_vert_indices (state_handle, ens_size, location(1), n, c, k_low, k_up, fract, istatus)
- if( all(istatus /= 0) ) then
- location(:) = set_location(llv_loc(1, 1),llv_loc(2, 1),missing_r8,ztypeout)
- return
- endif
+ if( all(istatus /= 0) ) return
zk_mid = k_low + fract
do e = 1, ens_size
@@ -4044,10 +4397,7 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
! Get theta, rho, qv at the interpolated location
call compute_scalar_with_barycentric (state_handle, ens_size, location(1), 3, ivars, values, istatus)
- if( all(istatus /= 0) ) then
- location(:) = set_location(llv_loc(1, 1),llv_loc(2, 1),missing_r8,ztypeout)
- return
- endif
+ if( all(istatus /= 0) ) return
! Convert theta, rho, qv into pressure
call compute_full_pressure(ens_size, values(1, :), values(2, :), values(3, :), zout(:), tk(:), istatus(:))
@@ -4062,16 +4412,8 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
case (VERTISHEIGHT)
call find_triangle (location(1), n, c, weights, istatus(1))
- if(istatus(1) /= 0) then
- istatus(:) = istatus(1)
- location(:) = set_location(llv_loc(1, 1),llv_loc(2, 1),missing_r8,ztypeout)
- return
- endif
call find_vert_indices (state_handle, ens_size, location(1), n, c, k_low, k_up, fract, istatus)
- if( all(istatus /= 0) ) then
- location(:) = set_location(llv_loc(1, 1),llv_loc(2, 1),missing_r8,ztypeout)
- return
- endif
+ if( all(istatus /= 0) ) return
fdata = 0.0_r8
do i = 1, n
@@ -4093,7 +4435,7 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
if (debug > 9) then
write(string2,'("zout_in_height:",F10.2)') zout
- call error_handler(E_MSG, 'convert_vert_distrib',string2,source, revision, revdate)
+ call error_handler(E_MSG, 'vert_convert',string2,source, revision, revdate)
endif
@@ -4102,44 +4444,31 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
! ------------------------------------------------------------
case (VERTISSCALEHEIGHT)
- ! Scale Height is defined as: log(pressure)
- ! if namelist item: no_normalization_of_scale_heights = .true.
- ! otherwise it is defined as: -log(pressure / surface_pressure)
-
- ! set logicals here so we can do the minimum amount of work.
- ! finding gridcells and computing pressure is expensive in this model.
- ! logic table is:
- ! surf T, norm T: return 0.0 by definition
- ! surf T, norm F: need surfp only
- ! surf F, norm F: need fullp only
- ! surf F, norm T: need both surfp and fullp
-
- at_surf = (ztypein == VERTISSURFACE)
- do_norm = .not. no_normalization_of_scale_heights
-
- ! if normalizing pressure and we're on the surface, by definition scale height
- ! is log(1.0) so skip the rest of these computations.
- if (at_surf .and. do_norm) then
- zout = 0.0_r8
- istatus(:) = 0
- goto 101
- endif
+ if ( ztypein /= VERTISSURFACE ) then
+
+ ! Scale Height is defined here as: -log(pressure / surface_pressure)
- ! Base offsets for the potential temperature, density, and water
- ! vapor mixing fields in the state vector.
+ ! Need to get base offsets for the potential temperature, density, and water
+ ! vapor mixing fields in the state vector
ivars(1) = get_progvar_index_from_kind(QTY_POTENTIAL_TEMPERATURE)
ivars(2) = get_progvar_index_from_kind(QTY_DENSITY)
ivars(3) = get_progvar_index_from_kind(QTY_VAPOR_MIXING_RATIO)
- if (at_surf .or. do_norm) then ! we will need surface pressure
+ ! Get theta, rho, qv at the interpolated location
+ call compute_scalar_with_barycentric (state_handle, ens_size, location(1), 3, ivars, values, istatus)
+ !if (istatus /= 0) return
+
+ ! Convert theta, rho, qv into pressure
+ call compute_full_pressure(ens_size, values(1, :), values(2, :), values(3, :), fullp(:), tk(:), istatus(:))
+ if (debug > 9) then
+ write(string2,'("zout_full_pressure, theta, rho, qv:",3F10.2,F18.8)') fullp, values(1:3,1)
+ call error_handler(E_MSG, 'convert_vert_distrib',string2,source, revision, revdate)
+ endif
! Get theta, rho, qv at the surface corresponding to the interpolated location
surfloc(1) = set_location(llv_loc(1, 1), llv_loc(2, 1), 1.0_r8, VERTISLEVEL)
call compute_scalar_with_barycentric (state_handle, ens_size, surfloc(1), 3, ivars, values, istatus)
- if( all(istatus /= 0) ) then
- location(:) = set_location(llv_loc(1, 1),llv_loc(2, 1),missing_r8,ztypeout)
- return
- endif
+ if( all(istatus /= 0) ) return
! Convert surface theta, rho, qv into pressure
call compute_full_pressure(ens_size, values(1, :), values(2, :), values(3, :), surfp(:), tk(:), istatus(:))
@@ -4148,51 +4477,23 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
call error_handler(E_MSG, 'convert_vert_distrib',string2,source, revision, revdate)
endif
- endif
-
- if (.not. at_surf) then ! we will need full pressure
-
- ! Get theta, rho, qv at the interpolated location
- call compute_scalar_with_barycentric (state_handle, ens_size, location(1), 3, ivars, values, istatus)
-
- ! Convert theta, rho, qv into pressure
- call compute_full_pressure(ens_size, values(1, :), values(2, :), values(3, :), fullp(:), tk(:), istatus(:))
- if (debug > 9) then
- write(string2,'("zout_full_pressure, theta, rho, qv:",3F10.2,F18.8)') fullp, values(1:3,1)
- call error_handler(E_MSG, 'convert_vert_distrib',string2,source, revision, revdate)
- endif
- endif
-
- ! we have what we need now. figure out the case and set zout to the right values.
- ! we've already taken care of the (at_surf .and. do_norm) case, so that simplifies
- ! the tests here.
- if (at_surf) then
- where (surfp /= MISSING_R8)
- zout = log(surfp)
+ ! and finally, convert into scale height
+ where (surfp /= 0.0_r8 .and. fullp /= MISSING_R8)
+ zout = -log(fullp / surfp)
else where
zout = MISSING_R8
end where
- else if (.not. do_norm) then
- where (fullp /= MISSING_R8)
- zout = log(fullp)
- else where
- zout = MISSING_R8
- end where
-
- else ! not at surface, and normalizing by surface pressure
- where (surfp /= MISSING_R8 .and. surfp > 0.0_r8 .and. fullp /= MISSING_R8)
- zout = -log(fullp / surfp)
- else where
- zout = MISSING_R8
- end where
- endif
+ else
-101 continue
+ zout = -log(1.0_r8)
+ istatus(:) = 0
+
+ endif
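+ ! When the incoming vertical is not the surface, scale height here is
+ ! -log(p_full / p_surface), which is dimensionless and zero at the surface
+ ! (the else branch above returns -log(1) = 0).  Numerical example (values
+ ! assumed for illustration): p_full = 50000 Pa with p_surface = 100000 Pa
+ ! gives -log(0.5) ~= 0.693.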
if (debug > 9) then
write(string2,'("zout_in_scaleheight:",F10.2)') zout
- call error_handler(E_MSG, 'convert_vert_distrib',string2,source, revision, revdate)
+ call error_handler(E_MSG, 'vert_convert',string2,source, revision, revdate)
endif
! -------------------------------------------------------
@@ -4209,6 +4510,7 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
do e = 1, ens_size
if(istatus(e) == 0) then
location(e) = set_location(llv_loc(1, e),llv_loc(2, e),zout(e),ztypeout)
+ ! Set a successful return code only if zout has a good value (istatus should already have been set above)
else
location(e) = set_location(llv_loc(1, e),llv_loc(2, e),missing_r8,ztypeout)
endif
@@ -4217,330 +4519,107 @@ subroutine convert_vert_distrib(state_handle, ens_size, location, obs_kind, ztyp
end subroutine convert_vert_distrib
-!------------------------------------------------------------------
-!> code to convert an state location's vertical coordinate type.
+!-------------------------------------------------------------------
-subroutine convert_vert_distrib_state(state_handle, ens_size, location, quantity, state_indx, ztypeout, istatus)
+subroutine vert_convert(state_handle, location, obs_kind, ierr)
-! This subroutine converts a given state vertical coordinate to
+! This subroutine converts a given ob/state vertical coordinate to
! the vertical localization coordinate type requested through the
! model_mod namelist.
!
-! (3) state_handle is the relevant DART state vector for carrying out
+! Notes: (1) obs_kind is only necessary to check whether the ob
+! is an identity ob.
+! (2) This subroutine can convert both obs' and state points'
+! vertical coordinates. Remember that state points get
+! their DART location information from get_state_meta_data
+! which is called by filter_assim during the assimilation
+! process.
+! (3) x is the relevant DART state vector for carrying out
! computations necessary for the vertical coordinate
! transformations. As the vertical coordinate is only used
! in distance computations, this is actually the "expected"
! vertical coordinate, so that computed distance is the
! "expected" distance. Thus, under normal circumstances,
-! state_handle that is supplied to convert_vert_distrib should be the
+! x that is supplied to vert_convert should be the
! ensemble mean. Nevertheless, the subroutine has the
! functionality to operate on any DART state vector that
! is supplied to it.
type(ensemble_type), intent(in) :: state_handle
-integer, intent(in) :: ens_size
-type(location_type), intent(inout) :: location(ens_size) ! because the verticals may differ
-integer, intent(in) :: quantity
-integer(i8), intent(in) :: state_indx
-integer, intent(in) :: ztypeout
-integer, intent(out) :: istatus(ens_size)
-
-! zin and zout are the vert values coming in and going out.
-! ztype{in,out} are the vert types as defined by the 3d sphere
-! locations mod (location/threed_sphere/location_mod.f90)
-real(r8), dimension(3, ens_size) :: llv_loc
-real(r8), dimension(3,ens_size) :: zk_mid, values, fract, fdata
-integer, dimension(3,ens_size) :: k_low, k_up
-real(r8), dimension(ens_size) :: zin, zout
-real(r8), dimension(ens_size) :: tk, fullp, surfp
-logical :: at_surf, do_norm, on_bound
-type(location_type), dimension(ens_size) :: surfloc
-
-real(r8) :: weights(3)
-integer :: ztypein, i, e, n, ndim
-integer :: c(3), ivars(3), vert_level
-integer :: cellid !SYHA
-
-! assume failure.
-istatus = 1
-
-! initialization
-k_low = 0.0_r8
-k_up = 0.0_r8
-weights = 0.0_r8
-
-
-! if the existing coord is already in the requested vertical units
-! or if the vert is 'undef' which means no specifically defined
-! vertical coordinate, return now.
-ztypein = nint(query_location(location(1), 'which_vert'))
-if ((ztypein == ztypeout) .or. (ztypein == VERTISUNDEF)) then
- istatus = 0
- return
-else
- if (debug > 9) then
- write(string1,'(A,3X,2I3)') 'ztypein, ztypeout:',ztypein,ztypeout
- call error_handler(E_MSG, 'convert_vert_distrib_state',string1,source, revision, revdate)
- endif
-endif
-
-!> assume that all locations have the same incoming lat/lon and level.
-!> depending on the output vert type each member might have a different
-!> vertical value.
-
-! unpack the incoming location(s)
-do e = 1, ens_size
- llv_loc(:, e) = get_location(location(e))
-enddo
-
-!> state_indx is i8, intent(in).
-!> cellid and vert_level are integer, intent(out)
-call find_mpas_indices(state_indx, cellid, vert_level, ndim)
-
-! the routines below will use zin as the incoming vertical value
-! and zout as the new outgoing one. start out assuming failure
-! (zout = missing) and wait to be pleasantly surprised when it works.
-zin(:) = vert_level
-ztypein = VERTISLEVEL
-zout(:) = missing_r8
-
-! if the vertical is missing to start with, return it the same way
-! with the requested type as out.
-do e = 1, ens_size
- if (zin(e) == missing_r8) then
- location(e) = set_location(llv_loc(1, e),llv_loc(2, e),missing_r8,ztypeout)
- endif
-enddo
-! if the entire ensemble has missing vertical values we can return now.
-! otherwise we need to continue to convert the members with good vertical values.
-! boundary cells will be updated by the assimilation.
-! if all the vertical localization coord values are missing,
-! we don't call this routine again, and return.
-if (all(zin == missing_r8)) then ! .or. on_bound) then
- istatus(:) = 0
- return
-endif
-
-! Convert the incoming vertical type (ztypein) into the vertical
-! localization coordinate given in the namelist (ztypeout).
-! Because this is only for state vector locations, we have already
-! computed the vertical level, so all these conversions are from
-! model level to something.
-
-! convert into:
-select case (ztypeout)
-
- ! ------------------------------------------------------------
- ! outgoing vertical coordinate should be 'model level number'
- ! ------------------------------------------------------------
- case (VERTISLEVEL)
-
- ! we have the vert_level and cellid - no need to call find_triangle or find_vert_indices
-
- zout(:) = vert_level
-
- if (debug > 9) then
- write(string2,'("zout_in_level:",F10.2)') zout
- call error_handler(E_MSG, 'convert_vert_distrib_state',string2,source, revision, revdate)
- endif
-
- ! ------------------------------------------------------------
- ! outgoing vertical coordinate should be 'pressure' in Pa
- ! ------------------------------------------------------------
- case (VERTISPRESSURE)
-
- !>@todo FIXME - this is the original code from the
- !> observation version which does horizontal interpolation.
- !> in this code we know we are on a state vector location
- !> so no interp is needed. we should be able to make this
- !> more computationally efficient.
-
- ! Need to get base offsets for the potential temperature, density, and water
- ! vapor mixing fields in the state vector
- ivars(1) = get_progvar_index_from_kind(QTY_POTENTIAL_TEMPERATURE)
- ivars(2) = get_progvar_index_from_kind(QTY_DENSITY)
- ivars(3) = get_progvar_index_from_kind(QTY_VAPOR_MIXING_RATIO)
-
- if (any(ivars(1:3) < 0)) then
- write(string1,*) 'Internal error, cannot find one or more of: theta, rho, qv'
- call error_handler(E_ERR, 'convert_vert_distrib_state',string1,source, revision, revdate)
- endif
-
- ! Get theta, rho, qv at the interpolated location - pass in cellid we have already located
- ! to save the search time.
- call compute_scalar_with_barycentric (state_handle, ens_size, location(1), 3, ivars, values, istatus, cellid)
- if( all(istatus /= 0) ) then
- location(:) = set_location(llv_loc(1, 1),llv_loc(2, 1),missing_r8,ztypeout)
- return
- endif
-
- ! Convert theta, rho, qv into pressure
- call compute_full_pressure(ens_size, values(1, :), values(2, :), values(3, :), zout(:), tk(:), istatus(:))
- if (debug > 10) then
- write(string2,'("zout_in_p, theta, rho, qv, ier:",3F10.2,F18.8,I3)') zout(1), values(1:3,1),istatus(1)
- call error_handler(E_MSG, 'convert_vert_distrib_state',string2,source, revision, revdate)
- endif
-
- ! ------------------------------------------------------------
- ! outgoing vertical coordinate should be 'height' in meters
- ! ------------------------------------------------------------
- case (VERTISHEIGHT)
-
- ! surface obs should use the lower face of the first level. the rest
- ! of the quantities should use the level centers.
- if ( ndim == 1 ) then
- zout(:) = zGridFace(1, cellid)
- else
- zout(:) = zGridCenter(vert_level, cellid)
- if ( quantity == QTY_VERTICAL_VELOCITY ) zout(:) = zGridFace(vert_level, cellid)
- if ( quantity == QTY_EDGE_NORMAL_SPEED ) zout(:) = zGridEdge(vert_level, cellid)
- endif
-
- if (debug > 9) then
- write(string2,'("zout_in_height:",F10.2)') zout
- call error_handler(E_MSG, 'convert_vert_distrib_state',string2,source, revision, revdate)
- endif
-
-
- ! ------------------------------------------------------------
- ! outgoing vertical coordinate should be 'scale height' (a ratio)
- ! ------------------------------------------------------------
- case (VERTISSCALEHEIGHT)
-
- !>@todo FIXME
- !> whatever we do for pressure, something similar here
-
- ! Scale Height is defined as: log(pressure)
- ! if namelist item: no_normalization_of_scale_heights = .true.
- ! otherwise it is defined as: -log(pressure / surface_pressure)
-
- ! set logicals here so we can do the minimum amount of work.
- ! finding gridcells and computing pressure is expensive in this model.
- ! logic table is:
- ! surf T, norm T: return 0.0 by definition
- ! surf T, norm F: need surfp only
- ! surf F, norm F: need fullp only
- ! surf F, norm T: need both surfp and fullp
-
- at_surf = (ztypein == VERTISSURFACE)
- do_norm = .not. no_normalization_of_scale_heights
-
- ! if normalizing pressure and we're on the surface, by definition scale height
- ! is log(1.0) so skip the rest of these computations.
- if (at_surf .and. do_norm) then
- zout = 0.0_r8
- istatus(:) = 0
- goto 101
- endif
-
- ! Base offsets for the potential temperature, density, and water
- ! vapor mixing fields in the state vector.
- ivars(1) = get_progvar_index_from_kind(QTY_POTENTIAL_TEMPERATURE)
- ivars(2) = get_progvar_index_from_kind(QTY_DENSITY)
- ivars(3) = get_progvar_index_from_kind(QTY_VAPOR_MIXING_RATIO)
+type(location_type), intent(inout) :: location
+integer, intent(in) :: obs_kind
+integer, intent(out) :: ierr
- if (at_surf .or. do_norm) then ! we will need surface pressure
+integer :: istatus(1)
+integer :: ztype
+type(location_type) :: new_location(1)
+integer :: ens_size = 1 ! operate on a single copy (the ensemble mean)
- ! Get theta, rho, qv at the surface corresponding to the interpolated location
- surfloc(1) = set_location(llv_loc(1, 1), llv_loc(2, 1), 1.0_r8, VERTISLEVEL)
- call compute_scalar_with_barycentric (state_handle, ens_size, surfloc(1), 3, ivars, values, istatus, cellid)
- if( all(istatus /= 0) ) then
- location(:) = set_location(llv_loc(1, 1),llv_loc(2, 1),missing_r8,ztypeout)
- return
- endif
+ztype = query_vert_localization_coord()
+new_location(1) = location
- ! Convert surface theta, rho, qv into pressure
- call compute_full_pressure(ens_size, values(1, :), values(2, :), values(3, :), surfp(:), tk(:), istatus(:))
- if (debug > 9) then
- write(string2,'("zout_surf_pressure, theta, rho, qv:",3F10.2,F18.8)') surfp, values(1:3,1)
- call error_handler(E_MSG, 'convert_vert_distrib_state',string2,source, revision, revdate)
- endif
+call convert_vert_distrib(state_handle, ens_size, new_location, obs_kind, ztype, istatus)
- endif
+ierr = istatus(1)
+location = new_location(1)
- if (.not. at_surf) then ! we will need full pressure
+end subroutine vert_convert
- ! Get theta, rho, qv at the interpolated location
- call compute_scalar_with_barycentric (state_handle, ens_size, location(1), 3, ivars, values, istatus)
- ! Convert theta, rho, qv into pressure
- call compute_full_pressure(ens_size, values(1, :), values(2, :), values(3, :), fullp(:), tk(:), istatus(:))
- if (debug > 9) then
- write(string2,'("zout_full_pressure, theta, rho, qv:",3F10.2,F18.8)') fullp, values(1:3,1)
- call error_handler(E_MSG, 'convert_vert_distrib_state',string2,source, revision, revdate)
- endif
- endif
+!==================================================================
+! The following (private) interfaces are used for triangle interpolation
+!==================================================================
- ! we have what we need now. figure out the case and set zout to the right values.
- ! we've already taken care of the (at_surf .and. do_norm) case, so that simplifies
- ! the tests here.
- if (at_surf) then
- where (surfp /= MISSING_R8)
- zout = log(surfp)
- else where
- zout = MISSING_R8
- end where
-
- else if (.not. do_norm) then
- where (fullp /= MISSING_R8)
- zout = log(fullp)
- else where
- zout = MISSING_R8
- end where
-
- else ! not at surface, and normalizing by surface pressure
- where (surfp /= MISSING_R8 .and. surfp > 0.0_r8 .and. fullp /= MISSING_R8)
- zout = -log(fullp / surfp)
- else where
- zout = MISSING_R8
- end where
- endif
-101 continue
+!------------------------------------------------------------------
+!> HK where is this used?
+subroutine vert_interp(x, base_offset, cellid, nlevs, lower, fract, val, ier)
- if (debug > 9) then
- write(string2,'("zout_in_scaleheight:",F10.2)') zout
- call error_handler(E_MSG, 'convert_vert_distrib_state',string2,source, revision, revdate)
- endif
+! Interpolates in the vertical in the column indexed by cellid for a field
+! with base_offset. The vertical index varies fastest here. Returns ier=0
+! unless a missing value is encountered.
- ! -------------------------------------------------------
- ! outgoing vertical coordinate is unrecognized
- ! -------------------------------------------------------
- case default
- write(string1,*) 'Requested vertical coordinate not recognized: ', ztypeout
- call error_handler(E_ERR,'convert_vert_distrib_state', string1, &
- source, revision, revdate)
-
-end select ! outgoing vert type
+real(r8), intent(in) :: x(:)
+integer, intent(in) :: base_offset
+integer, intent(in) :: cellid
+integer, intent(in) :: nlevs
+integer, intent(in) :: lower
+real(r8), intent(in) :: fract
+real(r8), intent(out) :: val
+integer, intent(out) :: ier
-! Returned location
-do e = 1, ens_size
- if(istatus(e) == 0) then
- location(e) = set_location(llv_loc(1, e),llv_loc(2, e),zout(e),ztypeout)
- else
- location(e) = set_location(llv_loc(1, e),llv_loc(2, e),missing_r8,ztypeout)
- endif
-enddo
+integer :: offset
+real(r8) :: lx, ux
+! Default return is good
+ier = 0
-end subroutine convert_vert_distrib_state
+! Get the value at the lower and upper points
+offset = base_offset + (cellid - 1) * nlevs + lower - 1
+lx = x(offset)
+ux = x(offset + 1)
-!-------------------------------------------------------------------
+! Check for missing value
+if(lx == MISSING_R8 .or. ux == MISSING_R8) then
+ ier = 2
+ return
+endif
+! Interpolate
+val = (1.0_r8 - fract)*lx + fract*ux
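+! Example (values assumed for illustration): with lx = 280.0, ux = 284.0 and
+! fract = 0.25, val = 0.75*280.0 + 0.25*284.0 = 281.0.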
-!==================================================================
-! The following (private) interfaces are used for triangle interpolation
-!==================================================================
+end subroutine vert_interp
!------------------------------------------------------------------
-!> Finds position of a given height in an array of height grid points and returns
-!> the index of the lower and upper bounds and the fractional offset. ier
-!> returns 0 unless there is an error. Could be replaced with a more efficient
-!> search if there are many vertical levels.
-
+!> @todo is this correct for RMA?
subroutine find_height_bounds(height, nbounds, bounds, lower, upper, fract, ier)
+! Finds position of a given height in an array of height grid points and returns
+! the index of the lower and upper bounds and the fractional offset. ier
+! returns 0 unless there is an error. Could be replaced with a more efficient
+! search if there are many vertical levels.
+
real(r8), intent(in) :: height
integer, intent(in) :: nbounds
real(r8), intent(in) :: bounds(nbounds)
@@ -4634,8 +4713,8 @@ subroutine find_vert_level(state_handle, ens_size, loc, nc, ids, oncenters, lowe
! the two level numbers that enclose the given vertical
! value plus the fraction between them for each of the 3 cell centers.
-! note that this code handles data at cell centers, at edges, but not
-! data on faces. so far we don't have any on faces.
+! FIXME: this handles data at cell centers, at edges, but not
+! data on faces.
! loc is an intrinsic function
type(ensemble_type), intent(in) :: state_handle
@@ -4648,8 +4727,8 @@ subroutine find_vert_level(state_handle, ens_size, loc, nc, ids, oncenters, lowe
integer, intent(out) :: ier(:)
real(r8) :: lat, lon, vert, llv(3)
-real(r8) :: vert_array(ens_size)
-integer :: track_ier(ens_size)
+real(r8), allocatable :: vert_array(:)
+integer, allocatable :: track_ier(:)
integer(i8) :: pt_base_offset, density_base_offset, qv_base_offset
integer :: verttype, i
integer :: e
@@ -4683,14 +4762,14 @@ subroutine find_vert_level(state_handle, ens_size, loc, nc, ids, oncenters, lowe
verttype = nint(query_location(loc))
! these first 3 types need no cell/edge location information.
-if ((debug > 10) .and. do_output()) then
+if ((debug > 11) .and. do_output()) then
write(string2,'("vert, which_vert:",3F20.12,I5)') lon,lat,vert,verttype
call error_handler(E_MSG, 'find_vert_level',string2,source, revision, revdate)
endif
! no defined vertical location (e.g. vertically integrated vals)
if (verttype == VERTISUNDEF) then
- ier = 82
+ ier = 12
return
endif
@@ -4710,8 +4789,8 @@ subroutine find_vert_level(state_handle, ens_size, loc, nc, ids, oncenters, lowe
! model level numbers (supports fractional levels)
if(verttype == VERTISLEVEL) then
! FIXME: if this is W, the top is nVertLevels+1
- if (vert > nVertLevels) then
- ier(:) = 81
+ if (vert > nVertLevels) then !> @todo Is this the same across the ensemble?
+ ier(:) = 12
return
endif
@@ -4740,6 +4819,7 @@ subroutine find_vert_level(state_handle, ens_size, loc, nc, ids, oncenters, lowe
! Vertical interpolation for pressure coordinates
if(verttype == VERTISPRESSURE ) then
+ allocate(vert_array(ens_size), track_ier(ens_size))
track_ier = 0
vert_array = vert
@@ -4753,7 +4833,6 @@ subroutine find_vert_level(state_handle, ens_size, loc, nc, ids, oncenters, lowe
call find_pressure_bounds(state_handle, ens_size, vert_array, ids(i), nVertLevels, &
pt_base_offset, density_base_offset, qv_base_offset, &
lower(i, :), upper(i, :), fract(i, :), ier)
-
!if(debug > 9) print '(A,5I5,F10.4)', &
! ' after find_pressure_bounds: ier, i, cellid, lower, upper, fract = ', &
! ier, i, ids(i), lower(i, :), upper(i, :), fract(i, :)
@@ -4761,14 +4840,14 @@ subroutine find_vert_level(state_handle, ens_size, loc, nc, ids, oncenters, lowe
!'fail in find_pressure_bounds: ier, nc, i, id, vert, lower, fract: ', &
! ier, nc, i, ids(i), vert_array, lower(i, :), fract(i, :)
- ! we are inside a loop over each corner. consolidate error codes
- ! so that we return an error for that ensemble member if any
- ! of the corners fails the pressure bounds test.
- where (ier /= 0 .and. track_ier == 0) track_ier = ier
-
+ do e = 1, ens_size
+ if(ier(e) /= 0) track_ier(e) = ier(e)
+ enddo
+ !if (ier /= 0) return ! early return disabled; errors from each corner are consolidated in track_ier
enddo
ier = track_ier
+ deallocate(vert_array, track_ier)
return
endif
@@ -4780,6 +4859,7 @@ subroutine find_vert_level(state_handle, ens_size, loc, nc, ids, oncenters, lowe
if(verttype == VERTISHEIGHT) then
! For height, can do simple vertical search for interpolation for now
! Get the lower and upper bounds and fraction for each column
+ allocate(track_ier(ens_size))
track_ier = 0
do i=1, nc
@@ -4797,18 +4877,22 @@ subroutine find_vert_level(state_handle, ens_size, loc, nc, ids, oncenters, lowe
lower(i, :), upper(i, :), fract(i, :), ier)
endif
- ! we are inside a loop over each corner. consolidate error codes
- ! so that we return an error for that ensemble member if any
- ! of the corners fails the pressure bounds test.
- where (ier /= 0 .and. track_ier == 0) track_ier = ier
+ do e = 1, ens_size
+ if(ier(e) /= 0) track_ier(e) = ier(e)
+ enddo
+ !if (ier /= 0) return ! early return disabled; errors from each corner are consolidated in track_ier
enddo
-
ier = track_ier
+ deallocate(track_ier)
return
endif
+! If we get here, the vertical type is not understood. This used to be
+! treated as an error, but that is no longer the case, so the error code
+! is left commented out.
+!ier = 3
+
end subroutine find_vert_level
!------------------------------------------------------------------
@@ -4846,8 +4930,6 @@ subroutine find_pressure_bounds(state_handle, ens_size, p, cellid, nbounds, &
lower = -1
upper = -1
-! this must start out success (0) and then as ensemble members fail
-! we will record the first encountered error code.
ier = 0
! Find the lowest pressure
@@ -4855,79 +4937,105 @@ subroutine find_pressure_bounds(state_handle, ens_size, p, cellid, nbounds, &
qv_base_offset, cellid, 1, nbounds, pressure(1, :), temp_ier)
if(debug > 11) print *, 'find_pressure_bounds: find the lowest p, ier at k=1 ', pressure(1,:), ier
-where(ier(:) == 0) ier(:) = temp_ier(:)
+do e = 1, ens_size
+ if(temp_ier(e) /= 0) ier(e) = temp_ier(e)
+enddo
! Get the highest pressure level
call get_interp_pressure(state_handle, ens_size, pt_base_offset, density_base_offset, &
qv_base_offset, cellid, nbounds, nbounds, pressure(nbounds, :), temp_ier)
-if(debug > 11) print *, 'find_pressure_bounds: find the highest p, ier at k= ', nbounds, pressure(nbounds,:), ier
-where(ier(:) == 0) ier(:) = temp_ier(:)
+if(debug > 11) print *, 'find_pressure_bounds: find the highest p, ier at k= ', nbounds, pressure(nbounds,:), ier
+!if(ier /= 0) return
+do e = 1, ens_size
+ if(temp_ier(e) /= 0) ier(e) = temp_ier(e)
+enddo
! Check for out of the column range
-where(p(:) > pressure( 1, :)) ier(:) = 80
-where(p(:) < pressure(nbounds, :)) ier(:) = 81
-
-if(all(ier /= 0)) return
+do e = 1, ens_size
+ if(p(e) > pressure(1, e)) ier(e) = 88
+ if(p(e) < pressure(nbounds, e)) ier(e) = 888
+enddo
+if(any(ier /= 0)) return
! Loop through the rest of the column from the bottom up
found_level(:) = .false.
-do i = 2, nbounds
- ! we've already done this call for level == nbounds
- if (i /= nbounds) then
- call get_interp_pressure(state_handle, ens_size, pt_base_offset, density_base_offset, &
- qv_base_offset, cellid, i, nbounds, pressure(i, :), temp_ier)
- if(debug > 11) print *, 'find_pressure_bounds: find p, ier at k= ', i, pressure(i,:), ier
+do i = 2, nbounds ! note: the call for level nbounds was already made above
+ call get_interp_pressure(state_handle, ens_size, pt_base_offset, density_base_offset, &
+ qv_base_offset, cellid, i, nbounds, pressure(i, :), temp_ier)
+ do e = 1, ens_size
+ if(temp_ier(e) /= 0) ier(e) = temp_ier(e)
+ enddo
- where (ier(:) == 0) ier(:) = temp_ier(:)
- endif
-
- ! Check if pressure at lower level is larger than at upper level
- ! (shouldn't happen).
- if(any(pressure(i, :) > pressure(i-1, :))) then
- if (debug > 0) then
- write(*, *) 'lower pressure larger than upper pressure at cellid', cellid
- do e=1, ens_size
- write(*, *) 'ens#, level nums, pressures: ', e,i-1,i,pressure(i-1,e),pressure(i,e)
- enddo
+if(debug > 11) print *, 'find_pressure_bounds: find p, ier at k= ', i, pressure(i,:), ier
+ !if (ier /= 0) return
+ do e = 1, ens_size
+ ! Check if pressure at lower level is higher than at upper level.
+ !if (ier(e) /= 0) return
+ if(pressure(i, e) > pressure(i-1, e)) then
+ if ((debug > 0) .and. do_output()) then
+ write(string1, *) 'lower pressure larger than upper pressure at cellid', cellid
+ write(string2, *) 'level nums, pressures: ', i-1,i,pressure(i-1, e),pressure(i, e)
+ write(*,*) 'find_pressure_bounds: ', trim(string1), trim(string2)
+ endif
+ !if ((debug > 5) .and. do_output()) then
+ !do j = 1, nbounds
+ ! call get_interp_pressure(x, pt_base_offset, density_base_offset, &
+ ! qv_base_offset, cellid, j, nbounds, pr, ier2, .true.)
+ !enddo
+ !endif
+
+ ier(e) = 988
+ !return
+
+ !call error_handler(E_ERR, "find_pressure_bounds", string1, &
+ ! source, revision, revdate, text2=string2)
endif
- where(pressure(i, :) > pressure(i-1, :)) ier(:) = 988
- endif
- ! each ensemble member could have a vertical between different levels,
- ! and more likely a different fract across a level.
- do e = 1, ens_size
+ ! Is pressure between i-1 and i level?
+ if (found_level(e) .eqv. .false. .and. ier(e) == 0) then
+ if(p(e) > pressure(i, e)) then
+ found_level(e) = .true.
+ lower(e) = i - 1
+ upper(e) = i
+ if (pressure(i, e) == pressure(i-1, e)) then
+ fract(e) = 0.0_r8
+ else if (log_p_vert_interp) then
+ fract(e) = (log(p(e)) - log(pressure(i-1,e))) / &
+ (log(pressure(i, e)) - log(pressure(i-1, e)))
+ else
+ fract(e) = (p(e) - pressure(i-1,e)) / (pressure(i,e) - pressure(i-1,e))
+ endif
- ! if we've already run into an error, or we've already found the
- ! level for this ensemble member, skip the rest of this loop.
- if (ier(e) /= 0) cycle
- if (found_level(e)) cycle
-
- ! Is pressure between levels i-1 and i?
- ! if so, set the lower and upper level numbers and fraction across.
- ! fraction is 0 at level (i-1) and 1 at level(i).
- if(p(e) > pressure(i, e)) then
- found_level(e) = .true.
- lower(e) = i - 1
- upper(e) = i
- if (pressure(i, e) == pressure(i-1, e)) then
- fract(e) = 0.0_r8
- else if (log_p_vert_interp) then
- fract(e) = (log(p(e)) - log(pressure(i-1,e))) / &
- (log(pressure(i,e)) - log(pressure(i-1,e)))
- else
- fract(e) = (p(e) - pressure(i-1,e)) / (pressure(i,e) - pressure(i-1,e))
- endif
+ ! shouldn't happen, but with roundoff the fraction could be one
+ ! least-significant-bit out of range, so don't print unless some
+ ! level of debugging is enabled.
+ ! JH : Note fract needs to be a scalar not a vector
+ ! if ((fract < 0.0_r8 .or. fract > 1.0_r8) .and. debug > 0) then
+ if ((fract(e) < 0.0_r8 .or. fract(e) > 1.0_r8) .and. debug > 0) then
+ print '(A,3F26.18,2I4,F22.18)', &
+ "find_pressure_bounds: bad fract! p_in, pr(i-1), pr(i), lower, upper, fract = ", &
+ p(e), pressure(i-1, e), pressure(i, e), lower(e), upper(e), fract(e)
+ endif
- if ((debug > 9) .and. do_output()) print '(A,3F26.18,2I4,F22.18)', &
- "find_pressure_bounds: p_in, pr(i-1), pr(i), lower, upper, fract = ", &
- p(e), pressure(i-1,e), pressure(i,e), lower(e), upper(e), fract(e)
+ if ((debug > 9) .and. do_output()) print '(A,3F26.18,2I4,F22.18)', &
+ "find_pressure_bounds: p_in, pr(i-1), pr(i), lower, upper, fract = ", &
+ p(e), pressure(i-1,e), pressure(i,e), lower(e), upper(e), fract(e)
+ !return
+ endif
endif
enddo
enddo
+
+! The following is no longer true with the ensemble version:
+! should never get here because pressures above and below the column
+! were tested for at the start of this routine. if you get here
+! there is a coding error.
+!ier = 3
+
end subroutine find_pressure_bounds
!------------------------------------------------------------------
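! --- illustrative sketch (editor's addition, not part of the original patch) ---
! The per-member fraction computed in find_pressure_bounds is linear
! interpolation in log(p) when log_p_vert_interp is true, and linear in p
! otherwise.  A minimal standalone form of that formula is sketched below;
! the function name and interface are assumptions made for illustration only,
! and the r8 kind is taken from the enclosing module.

pure function interp_fraction_sketch(p, p_lo, p_hi, use_log) result(fract)
real(r8), intent(in) :: p, p_lo, p_hi   ! target pressure and bounding level values
logical,  intent(in) :: use_log         ! interpolate in log(p) or in p
real(r8)             :: fract

if (p_hi == p_lo) then
   fract = 0.0_r8                       ! degenerate layer, matches the code above
else if (use_log) then
   fract = (log(p) - log(p_lo)) / (log(p_hi) - log(p_lo))
else
   fract = (p - p_lo) / (p_hi - p_lo)
endif

end function interp_fraction_sketch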
@@ -4953,9 +5061,15 @@ subroutine get_interp_pressure(state_handle, ens_size, pt_offset, density_offset
! Get the values of potential temperature, density, and vapor
offset = (cellid - 1) * nlevs + lev - 1
-pt = get_state(pt_offset + offset, state_handle)
-density = get_state(density_offset + offset, state_handle)
-qv = get_state(qv_offset + offset, state_handle)
+if (ens_size == 1) then
+ pt(1) = scalar(get_state(pt_offset + offset, state_handle))!pt = x(pt_offset + offset)
+ density(1) = scalar(get_state(density_offset + offset, state_handle))!density = x(density_offset + offset)
+ qv(1) = scalar(get_state(qv_offset + offset, state_handle))!qv = x(qv_offset + offset)
+else
+ pt = get_state(pt_offset + offset, state_handle)!pt = x(pt_offset + offset)
+ density = get_state(density_offset + offset, state_handle)!density = x(density_offset + offset)
+ qv = get_state(qv_offset + offset, state_handle)!qv = x(qv_offset + offset)
+endif
! Initialization
ier = 0
@@ -5063,23 +5177,22 @@ subroutine get_barycentric_weights(x, y, cxs, cys, weights)
weights(3) = 1.0_r8 - weights(1) - weights(2)
+! FIXME: i want to remove this code. does it affect the answers?
if (any(abs(weights) < roundoff)) then
+ !print *, 'get_barycentric_weights due to roundoff errors: ', weights
where (abs(weights) < roundoff) weights = 0.0_r8
where (abs(1.0_r8 - abs(weights)) < roundoff) weights = 1.0_r8
endif
+!if(abs(sum(weights)-1.0_r8) > roundoff) &
+! print *, 'fail in get_barycentric_weights: sum(weights) = ',sum(weights)
+!end FIXME section
end subroutine get_barycentric_weights
!------------------------------------------------------------
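! --- illustrative sketch (editor's addition, not part of the original patch) ---
! Only the tail of get_barycentric_weights is visible in the hunk above.  The
! first two weights follow the standard 2-D barycentric formula; a minimal
! standalone version (the subroutine name is an assumption; the interface
! mirrors the x, y, cxs, cys, weights arguments shown above) looks like:

subroutine barycentric_weights_sketch(x, y, cxs, cys, weights)
real(r8), intent(in)  :: x, y            ! point to locate
real(r8), intent(in)  :: cxs(3), cys(3)  ! triangle corner coordinates
real(r8), intent(out) :: weights(3)
real(r8) :: denom

! twice the signed area of the triangle
denom = (cys(2)-cys(3))*(cxs(1)-cxs(3)) + (cxs(3)-cxs(2))*(cys(1)-cys(3))

weights(1) = ((cys(2)-cys(3))*(x-cxs(3)) + (cxs(3)-cxs(2))*(y-cys(3))) / denom
weights(2) = ((cys(3)-cys(1))*(x-cxs(3)) + (cxs(1)-cxs(3))*(y-cys(3))) / denom
weights(3) = 1.0_r8 - weights(1) - weights(2)

end subroutine barycentric_weights_sketch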
-!> this routine computes 1 or more values at a single location,
-!> for each ensemble member. "n" is the number of different
-!> quantities to compute, ival is an array of 'n' progval() indices
-!> to indicate which quantities to compute.
-!> dval(n, ens_size) are the output values, and ier(ens_size) are
-!> the success/error returns for each ensemble member.
-subroutine compute_scalar_with_barycentric(state_handle, ens_size, loc, n, ival, dval, ier, this_cellid)
+subroutine compute_scalar_with_barycentric(state_handle, ens_size, loc, n, ival, dval, ier)
type(ensemble_type), intent(in) :: state_handle
integer, intent(in) :: ens_size
@@ -5088,82 +5201,51 @@ subroutine compute_scalar_with_barycentric(state_handle, ens_size, loc, n, ival,
integer, intent(in) :: ival(n)
real(r8), intent(out) :: dval(:, :)
integer, intent(out) :: ier(ens_size)
-integer, optional, intent(in) :: this_cellid
real(r8), dimension(3, ens_size) :: fract, lowval, uppval, fdata
real(r8) :: weights(3)
integer :: c(3), nvert, k, i, nc
-integer(i8) :: index1, low_offset, upp_offset
+integer(i8) :: index1
integer :: lower(3, ens_size), upper(3,ens_size)
-integer :: e, e2, thislower, thisupper
-logical :: did_member(ens_size)
+integer :: e
! assume failure
dval = MISSING_R8
-ier = 88 ! field not in state vector
-! make sure we have all good field indices first
-if (any(ival < 0)) return
-
-call find_triangle (loc, nc, c, weights, ier(1), this_cellid)
-if(ier(1) /= 0) then
- ier(:) = ier(1)
- return
-endif
+call find_triangle (loc, nc, c, weights, ier(1))
+if(ier(1) /= 0) return
call find_vert_indices (state_handle, ens_size, loc, nc, c, lower, upper, fract, ier)
if(all(ier /= 0)) return
-! for each field to compute at this location:
do k=1, n
! get the starting index in the state vector
index1 = progvar(ival(k))%index1
nvert = progvar(ival(k))%numvertical
- ! for each corner: could be 1 if location is directly at a vertex, but
- ! normally is 3 for the enclosing triangle made up of cell centers.
- do i = 1, nc
- ! go around triangle and interpolate in the vertical
- ! c(3) are the cell ids
-
- low_offset = (c(i)-1) * nvert
- upp_offset = (c(i)-1) * nvert
-
- did_member(:) = .false.
-
- do e = 1, ens_size
-
- if (did_member(e)) cycle
-
- !> minimize the number of times we call get_state() by
- !> doing all the ensemble members which are between the same
- !> two vertical levels. this is true most of the time.
- !> in some cases it could be 2 or 3 different pairs of levels because
- !> of differences in vertical conversion that depends on per-member fields.
+ ! go around triangle and interpolate in the vertical
+ ! t1, t2, t3 are the xyz of the cell centers
+ ! c(3) are the cell ids
+ do e = 1, ens_size !> @todo Do you really need to loop over ens_size?
- lowval(i,:) = (get_state(index1 + low_offset + lower(i,e)-1, state_handle))
- uppval(i,:) = (get_state(index1 + upp_offset + upper(i,e)-1, state_handle))
+ do i = 1, nc
+! lowval(i) = x(index1 + (c(i)-1) * nvert + lower(i)-1)
+ lowval(i,:) = (get_state(index1 + (c(i)-1)*nvert + lower(i, e)-1, state_handle))
- thislower = lower(i, e)
- thisupper = upper(i, e)
+! uppval(i) = x(index1 + (c(i)-1) * nvert + upper(i)-1)
+ uppval(i,:) = (get_state(index1 + (c(i)-1) * nvert + upper(i, e)-1, state_handle))
- ! for all remaining ensemble members, use these values if the lower and
- ! upper level numbers are the same. fract() will vary with member.
- do e2=e, ens_size
- if (thislower == lower(i, e2) .and. thisupper == upper(i, e2)) then
- fdata(i, e2) = lowval(i, e2)*(1.0_r8 - fract(i, e2)) + uppval(i, e2)*fract(i, e2)
- did_member(e2) = .true.
- endif
- enddo
- enddo ! end ens_size
-
- enddo ! corners
+ fdata(i, e) = lowval(i, e)*(1.0_r8 - fract(i, e)) + uppval(i, e)*fract(i, e)
+! if((debug > 9) .and. do_output()) &
+! print '(A,I2,A,I2,5f12.5)','compute_scalar_with_barycentric: nv=',k,' ic =',i, &
+! lowval(i),uppval(i),fdata(i),fract(i),weights(i)
+ enddo
- ! now have vertically interpolated values at cell centers.
- ! use weights to compute value at interp point.
- do e = 1, ens_size
- if (ier(e) /= 0) cycle
+ ! now have vertically interpolated values at cell centers.
+ ! use weights to compute value at interp point.
dval(k, e) = sum(weights(1:nc) * fdata(1:nc, e))
+ !if(debug > 11) print *, 'compute_scalar_with_barycentric: k, e, dval(k,e) = ', k, e, dval(k,e)
+
enddo
enddo
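! --- illustrative sketch (editor's addition, not part of the original patch) ---
! For one field k, the loops above perform a vertical interpolation at each of
! the nc triangle corners followed by a horizontal barycentric average.  Using
! the arrays already declared in this routine, the result for member e is
! equivalent to the single weighted sum:
!
!   dval(k, e) = sum( weights(1:nc) * &
!                   ( lowval(1:nc, e) * (1.0_r8 - fract(1:nc, e)) &
!                   + uppval(1:nc, e) * fract(1:nc, e) ) )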
@@ -5172,12 +5254,11 @@ end subroutine compute_scalar_with_barycentric
!------------------------------------------------------------
-subroutine compute_elevation_with_barycentric(loc, dval, ier, this_cellid)
+subroutine compute_elevation_with_barycentric(loc, dval, ier)
type(location_type), intent(in) :: loc
real(r8), intent(out) :: dval
integer, intent(out) :: ier
-integer, optional, intent(in) :: this_cellid
real(r8) :: weights(3), fdata(3)
integer :: c(3), i, nc
@@ -5185,7 +5266,7 @@ subroutine compute_elevation_with_barycentric(loc, dval, ier, this_cellid)
! assume failure
dval = MISSING_R8
-call find_triangle (loc, nc, c, weights, ier, this_cellid)
+call find_triangle (loc, nc, c, weights, ier)
if(ier /= 0) return
do i = 1, nc
@@ -5202,14 +5283,13 @@ end subroutine compute_elevation_with_barycentric
!------------------------------------------------------------
-subroutine find_triangle(loc, nc, c, weights, ier, this_cellid)
+subroutine find_triangle(loc, nc, c, weights, ier)
type(location_type), intent(in) :: loc
integer, intent(out) :: nc
integer, intent(out) :: c(:) ! single value - cell id
real(r8), intent(out) :: weights(:)
integer, intent(out) :: ier
-integer, optional, intent(in) :: this_cellid
! compute the values at the correct vertical level for each
! of the 3 cell centers defining a triangle that encloses the
@@ -5226,41 +5306,45 @@ subroutine find_triangle(loc, nc, c, weights, ier, this_cellid)
logical :: inside, foundit
! initialization
- c = MISSING_I
+ c = MISSING_I
weights = 0.0_r8
ier = 0
nc = 1
-! unpack the location into local vars
+! unpack the location into local vars. presumably only the first ensemble
+! member is needed here, since all members share the same horizontal location.
+
llv = get_location(loc)
lon = llv(1)
lat = llv(2)
vert = llv(3)
verttype = nint(query_location(loc))
-! if we already know the closest cell center, pass it in instead
-! of searching for it again.
-if (present(this_cellid)) then
- cellid = this_cellid
-else
- cellid = find_closest_cell_center(lat, lon)
-endif
-
-if ((xyzdebug > 1) .and. do_output()) &
- print *, 'ft: closest cell center for lon/lat: ', lon, lat, cellid
-
+cellid = find_closest_cell_center(lat, lon)
+if ((xyzdebug > 5) .and. do_output()) &
+ print *, 'closest cell center for lon/lat: ', lon, lat, cellid
if (cellid < 1) then
- if(xyzdebug > 0) print *, 'ft: closest cell center for lon/lat: ', lon, lat, cellid
+ if(xyzdebug > 0) print *, 'closest cell center for lon/lat: ', lon, lat, cellid
ier = 11
return
endif
c(1) = cellid
+if (on_boundary(cellid)) then
+ ier = 12
+ return
+endif
+
+if (.not. inside_cell(cellid, lat, lon)) then
+ ier = 13
+ return
+endif
+
! closest vertex to given point.
closest_vert = closest_vertex_ll(cellid, lat, lon)
if ((xyzdebug > 5) .and. do_output()) &
- print *, 'ft: closest vertex for lon/lat: ', lon, lat, closest_vert
+ print *, 'closest vertex for lon/lat: ', lon, lat, closest_vert
! collect the neighboring cell ids and vertex numbers
! this 2-step process avoids us having to read in the
@@ -5272,21 +5356,13 @@ subroutine find_triangle(loc, nc, c, weights, ier, this_cellid)
vindex = 1
nedges = nEdgesOnCell(cellid)
do i=1, nedges
-!print *, 'ft: i: ', i
edgeid = edgesOnCell(i, cellid)
-!print *, 'ft: edgeid: ', edgeid
- if (.not. global_grid .and. &
- (cellsOnEdge(1, edgeid) <= 0 .or. cellsOnEdge(2, edgeid) <= 0)) then
- ier = 14
- return
- endif
if (cellsOnEdge(1, edgeid) /= cellid) then
neighborcells(i) = cellsOnEdge(1, edgeid)
else
neighborcells(i) = cellsOnEdge(2, edgeid)
endif
verts(i) = verticesOnCell(i, cellid)
-!print *, 'ft: verts: ', verts(i), closest_vert
if (verts(i) == closest_vert) vindex = i
call latlon_to_xyz(latCell(neighborcells(i)), lonCell(neighborcells(i)), &
xdata(i), ydata(i), zdata(i))
@@ -5298,7 +5374,6 @@ subroutine find_triangle(loc, nc, c, weights, ier, this_cellid)
! and the observation point
call latlon_to_xyz(lat, lon, r(1), r(2), r(3))
-!print *, 'ft: lat/lon: ', lat, lon
if (all(abs(t1-r) < roundoff)) then ! Located at a grid point (counting roundoff errors)
@@ -5310,40 +5385,48 @@ subroutine find_triangle(loc, nc, c, weights, ier, this_cellid)
else ! an arbitrary point
- ! find the cell-center-tri that encloses the obs point
- ! figure out which way vertices go around cell?
- foundit = .false.
- findtri: do i=vindex, vindex+nedges
- v = mod(i-1, nedges) + 1
- vp1 = mod(i, nedges) + 1
- t2(1) = xdata(v)
- t2(2) = ydata(v)
- t2(3) = zdata(v)
- t3(1) = xdata(vp1)
- t3(2) = ydata(vp1)
- t3(3) = zdata(vp1)
- call inside_triangle(t1, t2, t3, r, lat, lon, inside, weights)
- if (inside) then
- ! weights are the barycentric weights for the point r
- ! in the triangle formed by t1, t2, t3.
- ! v and vp1 are vert indices which are same indices
- ! for cell centers
+! find the cell-center-tri that encloses the obs point
+! figure out which way vertices go around cell?
+foundit = .false.
+findtri: do i=vindex, vindex+nedges
+ v = mod(i-1, nedges) + 1
+ vp1 = mod(i, nedges) + 1
+ t2(1) = xdata(v)
+ t2(2) = ydata(v)
+ t2(3) = zdata(v)
+ t3(1) = xdata(vp1)
+ t3(2) = ydata(vp1)
+ t3(3) = zdata(vp1)
+ call inside_triangle(t1, t2, t3, r, lat, lon, inside, weights)
+ if (inside) then
+ ! weights are the barycentric weights for the point r
+ ! in the triangle formed by t1, t2, t3.
+ ! v and vp1 are vert indices which are same indices
+ ! for cell centers
+! FIXME: i want to remove this code. does it affect the answers?
+ if(any(weights == 1.0_r8)) then
+ nc = 1
+ else
+!end FIXME section
nc = 3
c(2) = neighborcells(v)
c(3) = neighborcells(vp1)
- foundit = .true.
- exit findtri
+! FIXME: i want to remove this code. does it affect the answers?
endif
- enddo findtri
- if (.not. foundit) then
- ier = 14 ! 11?
- return
+!end FIXME section
+ foundit = .true.
+ exit findtri
endif
+enddo findtri
+if (.not. foundit) then
+ ier = 14 ! 11
+ return
+endif
endif ! horizontal index search is done now.
if (ier /= 0) return
-if (debug > 12) then
+if (debug > 11) then
write(string3,*) 'ier = ',ier, ' triangle = ',c(1:nc), ' weights = ',weights(1:nc)
call error_handler(E_MSG, 'find_triangle', string3, source, revision, revdate)
endif
@@ -5351,16 +5434,14 @@ subroutine find_triangle(loc, nc, c, weights, ier, this_cellid)
end subroutine find_triangle
!------------------------------------------------------------
-!> what does this layer do? this now seems identical to
-!> find_vert_level() plus some debug info.
subroutine find_vert_indices (state_handle, ens_size, loc, nc, c, lower, upper, fract, ier)
type(ensemble_type), intent(in) :: state_handle
type(location_type), intent(in) :: loc
integer, intent(in) :: ens_size
-integer, intent(in) :: nc
integer, intent(in) :: c(:)
+integer, intent(in) :: nc
integer, intent(out) :: lower(:, :), upper(:, :) ! ens_size
real(r8), intent(out) :: fract(:, :) ! ens_size
integer, intent(out) :: ier(:) ! ens_size
@@ -5368,20 +5449,20 @@ subroutine find_vert_indices (state_handle, ens_size, loc, nc, c, lower, upper,
integer :: e
! initialization
-lower = MISSING_R8
-upper = MISSING_R8
-fract = 0.0_r8
+ lower = MISSING_R8
+ upper = MISSING_R8
+ fract = 0.0_r8
! need vert index for the vertical level
call find_vert_level(state_handle, ens_size, loc, nc, c, .true., lower, upper, fract, ier)
-if (debug > 10) then
+if (debug > 11) then
write(string3,*) 'ier = ',ier (1), ' triangle = ',c(1:nc), ' vert_index = ',lower(1:nc, 1)+fract(1:nc, 1)
call error_handler(E_MSG, 'find_vert_indices', string3, source, revision, revdate)
endif
if (debug > 11) then
- do e = 1, ens_size
+ do e = 1, ens_size
if(ier(e) /= 0) then
print *, 'find_vert_indices: e = ', e, ' nc = ', nc, ' ier = ', ier(e)
print *, 'find_vert_indices: c = ', c
@@ -5389,9 +5470,11 @@ subroutine find_vert_indices (state_handle, ens_size, loc, nc, c, lower, upper,
print *, 'find_vert_indices: upper = ', upper(1:nc, e)
print *, 'find_vert_indices: fract = ', fract(1:nc, e)
endif
- enddo
+ enddo
endif
+if (all(ier /= 0)) return
+
end subroutine find_vert_indices
!------------------------------------------------------------
@@ -5428,12 +5511,9 @@ subroutine compute_u_with_rbf(state_handle, ens_size, loc, zonal, uval, ier)
ier = 0
uval = MISSING_R8
-! FIXME: it would be great to make this cache the last value and
-! if the location is the same as before and it's asking for V now
-! instead of U, skip the expensive computation. however, given
-! how we currently distribute observations the V wind obs will
-! almost certainly be given to a different task. if that changes
-! in some future version of dart, revisit this code as well.
+! FIXME: make this cache the last value and if the location is
+! the same as before and it's asking for V now instead of U,
+! skip the expensive computation.
progindex = get_index_from_varname('u')
if (progindex < 0 .or. .not. data_on_edges) then
@@ -5468,7 +5548,7 @@ subroutine compute_u_with_rbf(state_handle, ens_size, loc, zonal, uval, ier)
call find_vert_level(state_handle, ens_size, loc, ncells, celllist, .true., &
lower, upper, fract, ier)
- if (all(ier /= 0)) return
+ !if (ier /= 0) return
! now have pressure at all cell centers - need to interp to get pressure
! at edge centers.
@@ -5480,7 +5560,7 @@ subroutine compute_u_with_rbf(state_handle, ens_size, loc, zonal, uval, ier)
! need vert index for the vertical level
call find_vert_level(state_handle, ens_size, loc, nedges, edgelist, .false., &
lower, upper, fract, ier)
- if (all(ier /= 0)) return
+ !if (ier /= 0) return ! could be different ier across the ensemble
endif
! the rbf code needs (their names == our names):
@@ -5544,6 +5624,9 @@ subroutine compute_u_with_rbf(state_handle, ens_size, loc, zonal, uval, ier)
ureconstructx, ureconstructy, ureconstructz, &
ureconstructzonal, ureconstructmeridional)
+ !> @todo in the distributed version especially:
+ ! FIXME: it would be nice to return both and not have to call this
+ ! code twice. crap.
if (zonal) then
uval = ureconstructzonal
else
@@ -5578,6 +5661,12 @@ subroutine find_surrounding_edges(lat, lon, nedges, edge_list, cellid, vertexid)
return
endif
+if (.not. inside_cell(cellid, lat, lon)) then
+ nedges = 0
+ edge_list(:) = -1
+ return
+endif
+
! inside this cell, find the vertex id that the point
! is closest to. this is a return from this subroutine.
vertexid = closest_vertex_ll(cellid, lat, lon)
@@ -5659,16 +5748,6 @@ subroutine find_surrounding_edges(lat, lon, nedges, edge_list, cellid, vertexid)
source, revision, revdate)
end select
-! Ha: Check if any of edges are located in the boundary zone.
-! (We will skip the obs if any edges are located there.)
-if (on_boundary_edgelist(edge_list)) then
- nedges = -1
- edge_list(:) = -1
- call error_handler(E_MSG, 'find_surrounding_edges', 'edges in the boundary', &
- source, revision, revdate)
- return
-endif
-
end subroutine find_surrounding_edges
!------------------------------------------------------------
@@ -5688,22 +5767,20 @@ subroutine init_closest_center()
cell_locs(i) = xyz_set_location(lonCell(i), latCell(i), 0.0_r8, radius)
enddo
-! get 2nd arg from max dcEdge.
-call xyz_get_close_init(cc_gc, dxmax, nCells, cell_locs)
-
-if (.not. global_grid) &
- call xyz_use_great_circle_dist(radius)
+! the width really isn't used anymore, but it's part of the
+! interface so we have to pass some number in.
+call xyz_get_close_maxdist_init(cc_gc, 1.0_r8)
+call xyz_get_close_obs_init(cc_gc, nCells, cell_locs)
end subroutine init_closest_center
!------------------------------------------------------------
-!> Determine the cell index for the closest center to the given point
-!> 2D calculation only. If the closest cell is on the boundary for
-!> the regional case, the location is considered outside the region.
-!> for the global case this can't happen.
function find_closest_cell_center(lat, lon)
+! Determine the cell index for the closest center to the given point
+! 2D calculation only.
+
real(r8), intent(in) :: lat, lon
integer :: find_closest_cell_center
@@ -5719,25 +5796,16 @@ function find_closest_cell_center(lat, lon)
pointloc = xyz_set_location(lon, lat, 0.0_r8, radius)
call xyz_find_nearest(cc_gc, pointloc, cell_locs, closest_cell, rc)
-!if(debug > 0) print *, 'find_closest_cell_center: rc, closest_cell ', rc, closest_cell ! SYHA
-
-!> updated xyz_find_nearest to return -1 if outside the volume.
-!> this is a code change so allow -1 returns here. make sure
-!> calling code is ready to handle a -1 cellid. should only
-!> happen in the regional case.
+! decide what to do if we don't find anything.
if (rc /= 0 .or. closest_cell < 0) then
- if (debug > 8 .or. global_grid) then
+ if (debug > 8) &
print *, 'cannot find nearest cell to lon, lat: ', lon, lat
- endif
find_closest_cell_center = -1
return
endif
-if (debug > 10) print *, 'lat/lon closest to cellid, with lat/lon: ', &
- lat, lon, closest_cell, latCell(closest_cell), lonCell(closest_cell)
-
-! do allow boundary cells to be returned.
+! this is the cell index for the closest center
find_closest_cell_center = closest_cell
end function find_closest_cell_center
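! --- illustrative usage note (editor's addition, not part of the original patch) ---
! As used later in this patch (in mpas_dart_obs_preprocess), the caller passes
! latitude first, then longitude, and treats any return value below 1 as
! "no cell found":
!
!   llv_loc = get_location(obs_loc)                            ! (lon, lat, vert)
!   cellid  = find_closest_cell_center(llv_loc(2), llv_loc(1))
!   if (cellid < 1) then
!      ! the point could not be matched to a cell center; skip or flag the obs
!   endif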
@@ -5749,166 +5817,119 @@ subroutine finalize_closest_center()
! get rid of storage associated with GC for cell centers if
! they were used.
-if (search_initialized) call xyz_get_close_destroy(cc_gc)
+if (search_initialized) call xyz_get_close_obs_destroy(cc_gc)
end subroutine finalize_closest_center
!------------------------------------------------------------
-!> we determine whether this is a local or global grid by
-!> whether the 'bdyMaskXXX' arrays exist in the template file
-!> or not. global grids don't have them; regional ones do.
-subroutine set_global_grid(ncid)
-integer, intent(in) :: ncid
+function on_boundary(cellid)
-character(len=*), parameter :: routine = 'set_global_grid'
+! use the surface (level 1) to determine if any edges (or vertices?)
+! are on the boundary, and return true if so. if the global flag
+! is set, skip all code and return false immediately.
-global_grid = .true.
+integer, intent(in) :: cellid
+logical :: on_boundary
-if (nc_variable_exists(ncid, 'bdyMaskCell')) then
- allocate(bdyMaskCell(nCells))
- call nc_get_variable(ncid, 'bdyMaskCell', bdyMaskCell, routine)
- global_grid = .false.
-endif
+! do this completely with the topology of the grid. if any of
+! the cell edges are marked as boundary edges, return true;
+! otherwise return false.
-! the atmosphere doesn't use this array, but it might be used for
-! the ocean. test out the regional configuration and if the edges are
-! really never needed for the atmosphere don't allocate this
-! array and don't read it in to save space.
-if (nc_variable_exists(ncid, 'bdyMaskEdge')) then
- allocate(bdyMaskEdge(nEdges))
- call nc_get_variable(ncid, 'bdyMaskEdge', bdyMaskEdge, routine)
- global_grid = .false.
-endif
+integer :: nedges, i, edgeid, vertical
if (global_grid) then
- string1 = 'MPAS running in global mode'
-else
- string1 = 'MPAS running in regional (limited-area) mode'
-endif
-call error_handler(E_MSG,'set_global_grid',string1,source,revision,revdate)
-
-end subroutine set_global_grid
-
-!------------------------------------------------------------
-!> accessor function for global_grid module variable
-
-function is_global_grid()
-logical :: is_global_grid
-
-is_global_grid = global_grid
-
-end function is_global_grid
-
-!------------------------------------------------------------
-!> find the closest cell center. if global grid, return it.
-!> if regional grid, continue to be sure that all three corners
-!> of the enclosing triangle are also inside the main part of
-!> the regional grid (none are in the boundary layers)
-!> return -1 if not ok.
-
-function cell_ok_to_interpolate(location)
-
-type(location_type), intent(in) :: location
-integer :: cell_ok_to_interpolate
-
-integer :: cellid, nc, c(3), istatus, i
-real(r8) :: llv(3), lat, lon
-real(r8) :: weights(3)
-
-llv = get_location(location)
-lat = llv(2)
-lon = llv(1)
-
-! if outside a regional grid or the grid is
-! global, return here.
-cellid = find_closest_cell_center(lat, lon)
-if (cellid < 1 .or. global_grid) then
- cell_ok_to_interpolate = cellid
+ on_boundary = .false.
return
endif
-! quick check to see if the current cell is
-! a regional boundary cell
-if (on_boundary_cell(cellid)) then
- cell_ok_to_interpolate = -1
- return
-endif
+! how many edges (same # for verts) to check
+nedges = nEdgesOnCell(cellid)
-! for regional, continue on and find the other 2 cell centers
-! that create a triangle enclosing this location. verify the
-! other vertices are also fully inside the grid and not in the
-! boundary layers.
+! go around the edges and check the boundary array.
+! if any are boundaries, return true. else, false.
-call find_triangle(location, nc, c, weights, istatus, cellid)
-if (istatus /= 0) then
- cell_ok_to_interpolate = -1
- return
-endif
+do i=1, nedges
+ edgeid = edgesOnCell(i, cellid)
-do i=1, nc
- if (on_boundary_cell(c(i))) then
- cell_ok_to_interpolate = -1
+ vertical = 1
+
+ ! FIXME: this is an int array. is it 0=false,1=true?
+ if (boundaryEdge(edgeid, vertical) > 0) then
+ on_boundary = .true.
return
endif
+
enddo
-cell_ok_to_interpolate = cellid
+on_boundary = .false.
-end function cell_ok_to_interpolate
+end function on_boundary
!------------------------------------------------------------
-!> Determine if this cell is on the boundary, and return true if so.
-!> if the global flag is set, skip all code and return false immediately.
-!> Unlike the previous version of on_boundary, we do not return true
-!> if any surrounding edges belong to the boundary zone. We will take care
-!> of those edges either in uv_cell_to_edges or in find_surrounding_edges
-!> individually.
-
-function on_boundary_cell(cellid)
-
-integer, intent(in) :: cellid
-logical :: on_boundary_cell
-
-on_boundary_cell = .false.
-
-if (global_grid .or. .not. allocated(bdyMaskCell)) return
-
-if (bdyMaskCell(cellid) > 0) on_boundary_cell = .true.
-end function on_boundary_cell
+function inside_cell(cellid, lat, lon)
-!------------------------------------------------------------
-!> Determine if this edge is on the boundary
+! this function no longer really determines whether we are inside
+! the cell or not. what it does do is determine whether the nearest
+! cell touches the grid boundary in any way, and it returns false if
+! it does. if we have a flag saying this is a global grid, we can
+! avoid doing any work and immediately return true. for a global
+! atmosphere this is always the case; for a regional atmosphere and
+! for the ocean (which does not have cells on land) this is a
+! necessary test.
-function on_boundary_edge(edgeid)
+integer, intent(in) :: cellid
+real(r8), intent(in) :: lat, lon
+logical :: inside_cell
-integer, intent(in) :: edgeid
-logical :: on_boundary_edge
+! do this completely with topology of the grid. if any of
+! the cell edges are marked as boundary edges, return no.
+! otherwise return yes.
-on_boundary_edge = .false.
+integer :: nedges, i, edgeid, vert
-if (global_grid .or. .not. allocated(bdyMaskEdge)) return
+! if we're on a global grid, skip all this code
+if (global_grid) then
+ inside_cell = .true.
+ return
+endif
-if (bdyMaskEdge(edgeid) > 0) on_boundary_edge = .true.
+nedges = nEdgesOnCell(cellid)
-end function on_boundary_edge
+! go around the edges and check the boundary array.
+! if any are true, return false. even if we are inside
+! this cell, we aren't going to be able to interpolate it
+! so shorten the code path.
-!------------------------------------------------------------
-!> Determine if any of these edges are on the boundary
+! FIXME: at some point we can be more selective and try to
+! interpolate iff the edges of the three cells which are
+! going to contribute edges to the RBF exist, even if some
+! of the other cell edges are on the boundary. so this
+! decision means we won't be interpolating some obs that in
+! theory we have enough information to interpolate. but it
+! is conservative for now - we certainly won't try to interpolate
+! outside the existing grid.
-function on_boundary_edgelist(edgeids)
+do i=1, nedges
+ edgeid = edgesOnCell(i, cellid)
-integer, intent(in) :: edgeids(:)
-logical :: on_boundary_edgelist
+ ! FIXME: this is an int array. is it 0=false,1=true?
+ ! BOTHER - we need the vert for this and we don't have it
+ ! and in fact can't compute it if the interpolation point
+ ! has pressure or height as its vertical coordinate.
+ vert = 1
-on_boundary_edgelist = .false.
+ if (boundaryEdge(edgeid, vert) > 0) then
+ inside_cell = .false.
+ return
+ endif
-if (global_grid .or. .not. allocated(bdyMaskEdge)) return
+enddo
-if (any(bdyMaskEdge(edgeids) > 0)) on_boundary_edgelist = .true.
+inside_cell = .true.
-end function on_boundary_edgelist
+end function inside_cell
!------------------------------------------------------------
@@ -6011,11 +6032,27 @@ subroutine make_edge_list_from_verts(nverts, vertex_list, nedges, edge_list)
! new edge is added. check arrays for enough length before
! starting to work.
+
+! FIXME: the ocean files have:
+! integer boundaryEdge(nVertLevels, nEdges)
+! integer boundaryVertex(nVertLevels, nVertices)
+! as a first pass, if ANY of the edges or vertices are on
+! the boundary, punt and return 0 as the edge count. later
+! once this is working, decide if a single boundary vertex or
+! edge is ok if it's the exterior of the edges we are including
+! and if it has good data values.
+
listlen = 0
do c=1, ncells
edgecount = nEdgesOnCell(cellid_list(c))
do e=1, edgecount
nextedge = edgesOnCell(e, cellid_list(c))
+ ! FIXME:
+ ! if (boundaryEdge(nextedge, vert)) then
+ ! nedges = 0
+ ! edge_list(:) = -1
+ ! return
+ ! endif
found = .false.
addloop: do l=1, listlen
if (edge_list(l) == nextedge) then
@@ -6038,6 +6075,9 @@ end subroutine make_edge_list_from_verts
subroutine make_edge_list_from_cells(ncells, cellids, nedges, edge_list)
+! FIXME: will need a vertical level number input arg here to detect
+! the boundary edges/vertices correctly.
+
! given a list of cellids, return a unique list of edges
! the edge_list output should be at least ncells * maxedges long
@@ -6053,11 +6093,27 @@ subroutine make_edge_list_from_cells(ncells, cellids, nedges, edge_list)
! new edge is added. check arrays for enough length before
! starting to work.
+
+! FIXME: the ocean files have:
+! integer boundaryEdge(nVertLevels, nEdges)
+! integer boundaryVertex(nVertLevels, nVertices)
+! as a first pass, if ANY of the edges or vertices are on
+! the boundary, punt and return 0 as the edge count. later
+! once this is working, decide if a single boundary vertex or
+! edge is ok if it's the exterior of the edges we are including
+! and if it has good data values.
+
listlen = 0
do c=1, ncells
edgecount = nEdgesOnCell(cellids(c))
do e=1, edgecount
nextedge = edgesOnCell(e, cellids(c))
+ ! FIXME:
+ ! if (boundaryEdge(nextedge, vert)) then
+ ! nedges = 0
+ ! edge_list(:) = -1
+ ! return
+ ! endif
found = .false.
addloop: do l=1, listlen
if (edge_list(l) == nextedge) then
@@ -6150,11 +6206,27 @@ subroutine make_cell_list(vertexid, degree, ncells, cell_list)
ncells = 3
if (degree == 1) return
+
+! FIXME: the ocean files have:
+! integer boundaryEdge(nVertLevels, nEdges)
+! integer boundaryVertex(nVertLevels, nVertices)
+! as a first pass, if ANY of the edges or vertices are on
+! the boundary, punt and return 0 as the edge count. later
+! once this is working, decide if a single boundary vertex or
+! edge is ok if it's the exterior of the edges we are including
+! and if it has good data values.
+
listlen = ncells
do c=1, ncells
vertcount = nEdgesOnCell(cell_list(c))
do v=1, vertcount
nextvert = verticesOnCell(v, cell_list(c))
+ ! FIXME:
+ ! if (boundaryVertex(nextvert, vert)) then
+ ! ncells = 0
+ ! cell_list(:) = -1
+ ! return
+ ! endif
do c2=1, 3
nextcell = cellsOnVertex(c2, nextvert)
found = .false.
@@ -6181,6 +6253,12 @@ subroutine make_cell_list(vertexid, degree, ncells, cell_list)
vertcount = nEdgesOnCell(cell_list(c))
do v=1, vertcount
nextvert = verticesOnCell(v, cell_list(c))
+ ! FIXME:
+ ! if (boundaryVertex(nextvert, vert)) then
+ ! ncells = 0
+ ! cell_list(:) = -1
+ ! return
+ ! endif
do c2=1, 3
nextcell = cellsOnVertex(c2, nextvert)
found = .false.
@@ -6630,8 +6708,6 @@ subroutine uv_cell_to_edges(zonal_wind, meridional_wind, du)
+ edgeNormalVectors(2,iEdge) * north(2,iCell) &
+ edgeNormalVectors(3,iEdge) * north(3,iCell))
enddo
- ! Ha: We do not update edges in the boundary zone.
- if(on_boundary_edge(iEdge)) du(:,iEdge) = 0.0
enddo
enddo
@@ -6755,43 +6831,6 @@ function read_model_time(filename)
end function read_model_time
-!----------------------------------------------------------------------
-! Returns integers taken from tstring
-! It is assumed that the tstring char array is as YYYY-MM-DD_hh:mm:ss
-
-subroutine set_wrf_date (tstring, year, month, day, hour, minute, second)
-
-integer, intent(in) :: year, month, day, hour, minute, second
-character(len=TIMELEN), intent(out) :: tstring
-
-character(len=4) :: ch_year
-character(len=2) :: ch_month, ch_day, ch_hour, ch_minute, ch_second
-
-write(ch_year,'(i4)') year
-write(ch_month,'(i2)') month
-if (ch_month(1:1) == " ") ch_month(1:1) = "0"
-write(ch_day,'(i2)') day
-if (ch_day(1:1) == " ") ch_day(1:1) = "0"
-write(ch_hour,'(i2)') hour
-if (ch_hour(1:1) == " ") ch_hour(1:1) = "0"
-write(ch_minute,'(i2)') minute
-if (ch_minute(1:1) == " ") ch_minute(1:1) = "0"
-write(ch_second,'(i2)') second
-if (ch_second(1:1) == " ") ch_second(1:1) = "0"
-tstring(1:4) = ch_year
-tstring(5:5) = "-"
-tstring(6:7) = ch_month
-tstring(8:8) = "-"
-tstring(9:10) = ch_day
-tstring(11:11) = "_"
-tstring(12:13) = ch_hour
-tstring(14:14) = ":"
-tstring(15:16) = ch_minute
-tstring(17:17) = ":"
-tstring(18:19) = ch_second
-
-end subroutine set_wrf_date
-
!===================================================================
! End of model_mod
!===================================================================
diff --git a/models/mpas_atm/mpas_dart_obs_preprocess.f90 b/models/mpas_atm/mpas_dart_obs_preprocess.f90
index cdc4d58b3a..0b764c105b 100644
--- a/models/mpas_atm/mpas_dart_obs_preprocess.f90
+++ b/models/mpas_atm/mpas_dart_obs_preprocess.f90
@@ -34,21 +34,19 @@ program mpas_dart_obs_preprocess
use types_mod, only : r8, missing_r8, earth_radius, RAD2DEG, DEG2RAD, i8
use utilities_mod, only : error_handler, E_MSG, find_namelist_in_file, &
- check_namelist_read, nc_check, initialize_utilities, &
- finalize_utilities
+ check_namelist_read, nc_check
use time_manager_mod, only : time_type, operator(>=), operator(<), operator(>), operator(<=), &
increment_time, decrement_time, operator(-), operator(+), &
set_calendar_type, GREGORIAN, set_time, get_time
use location_mod, only : location_type, get_location, set_location, get_dist, &
VERTISUNDEF, VERTISSURFACE, VERTISPRESSURE, &
- is_vertical, operator(==), get_close_type, get_close_init, &
- get_close_obs, get_close_destroy, set_location_missing, write_location
+ is_vertical, operator(==)
use obs_sequence_mod, only : append_obs_to_seq, copy_obs, delete_obs_from_seq, &
destroy_obs_sequence, get_first_obs, get_last_obs, &
get_next_obs, get_next_obs_from_key, get_num_copies, &
get_num_obs, get_num_qc, get_obs_def, get_obs_key, &
get_obs_values, get_qc, get_qc_meta_data, init_obs, &
- insert_obs_in_seq, obs_sequence_type, obs_type, set_obs, &
+ insert_obs_in_seq, obs_sequence_type, obs_type, &
read_obs_seq, read_obs_seq_header, set_copy_meta_data, &
set_obs_def, set_obs_values, set_qc, set_qc_meta_data, &
static_init_obs_sequence, write_obs_seq, init_obs_sequence
@@ -70,11 +68,10 @@ program mpas_dart_obs_preprocess
METAR_V_10_METER_WIND, PROFILER_U_WIND_COMPONENT, PROFILER_V_WIND_COMPONENT, &
RADIOSONDE_DEWPOINT, RADIOSONDE_RELATIVE_HUMIDITY, RADIOSONDE_SPECIFIC_HUMIDITY, &
RADIOSONDE_SURFACE_ALTIMETER, RADIOSONDE_TEMPERATURE, RADIOSONDE_U_WIND_COMPONENT, &
- RADIOSONDE_V_WIND_COMPONENT, SAT_U_WIND_COMPONENT, SAT_V_WIND_COMPONENT
+ RADIOSONDE_V_WIND_COMPONENT, SAT_U_WIND_COMPONENT, SAT_V_WIND_COMPONENT, &
+ VORTEX_LAT, VORTEX_LON, VORTEX_PMIN, VORTEX_WMAX
use model_mod, only : static_init_model, get_grid_dims, get_xland, &
- model_interpolate, find_closest_cell_center, &
- cell_ok_to_interpolate, is_global_grid, &
- get_bdy_mask, get_cell_center_coords
+ model_interpolate, find_closest_cell_center
use ensemble_manager_mod, only : ensemble_type, init_ensemble_manager, end_ensemble_manager
use netcdf
@@ -100,15 +97,10 @@ program mpas_dart_obs_preprocess
trop_cyclone_extra = 'obs_seq.tc'
integer :: max_num_obs = 1000000 ! Largest number of obs in one sequence
-! parameters to deal with obs near boundary if regional grid
-logical :: increase_bdy_error = .false. ! true to increase obs error near boundary
-real(r8) :: maxobsfac = 2.5_r8 ! maximum increase in obs error near boundary
-real(r8) :: obsdistbdy = 150000.0_r8 ! within X meters of boundary will have err changed
-
! parameters used to reduce observations
-logical :: sfc_elevation_check = .false. ! remove obs where model-obs topography is large
-real(r8) :: sfc_elevation_tol = 300.0_r8 ! largest difference between model and obs. topo.
-real(r8) :: obs_pressure_top = 0.0_r8 ! remove all obs at lower pressure
+logical :: sfc_elevation_check = .false. ! remove obs where model-obs topography is large
+real(r8) :: sfc_elevation_tol = 300.0_r8 ! largest difference between model and obs. topo.
+real(r8) :: obs_pressure_top = 0.0_r8 ! remove all obs at lower pressure
real(r8) :: obs_height_top = 2.0e10_r8 ! remove all obs at higher height
! Rawinsonde-specific parameters
@@ -116,7 +108,7 @@ program mpas_dart_obs_preprocess
real(r8) :: tc_sonde_radii = -1.0_r8 ! remove sonde obs closer than this to TC
! aircraft-specific parameters
-logical :: superob_aircraft = .false. ! super-ob aircraft data
+logical :: superob_aircraft = .false. ! super-ob aircraft data
real(r8) :: aircraft_pres_int = 2500.0_r8 ! pressure interval for super-ob
integer :: superob_qc_threshold = 4 ! reject obs with qc > 4 (applied for both aircraft and satwnd)
@@ -129,17 +121,13 @@ program mpas_dart_obs_preprocess
logical :: overwrite_ncep_sfc_qc = .false. ! true to overwrite NCEP QC (see instructions)
! lowest height for GPS REFRACTIVITY (SYHA)
-real(r8) :: gpsro_lowest_meter = 3000.0 ! remove all obs at lower height
+real(r8) :: gpsro_lowest_meter = 3000.0 ! remove all obs at lower height
! overwrite or windowing obs time
logical :: overwrite_obs_time = .false. ! true to overwrite all observation times
logical :: windowing_obs_time = .false. ! true to remove obs beyond the time window
real(r8) :: windowing_int_hour = 1.5_r8 ! time window [hr] centered on the analysis time
-! debug
-integer :: print_every_nth_obs = -1 ! if positive, print a reassuring message as you loop
- ! over the list of obs
-
namelist /mpas_obs_preproc_nml/ file_name_input, file_name_output, max_num_obs, &
include_sig_data, superob_aircraft, superob_sat_winds, superob_qc_threshold, &
sfc_elevation_check, overwrite_ncep_sfc_qc, overwrite_ncep_satwnd_qc, &
@@ -147,8 +135,7 @@ program mpas_dart_obs_preprocess
obs_pressure_top, obs_height_top, gpsro_lowest_meter, sonde_extra, metar_extra, &
acars_extra, land_sfc_extra, marine_sfc_extra, sat_wind_extra, profiler_extra, &
trop_cyclone_extra, gpsro_extra, gpspw_extra, tc_sonde_radii, overwrite_obs_time, &
- increase_bdy_error, maxobsfac, obsdistbdy, windowing_obs_time, windowing_int_hour, &
- print_every_nth_obs
+ windowing_obs_time, windowing_int_hour
!----------------------------------------------------------------------
! Declare other variables
@@ -163,7 +150,7 @@ program mpas_dart_obs_preprocess
logical :: file_exist, pre_I_format
type(obs_sequence_type) :: seq_all, seq_rawin, seq_sfc, seq_acars, seq_satwnd, &
- seq_prof, seq_tc, seq_gpsro, seq_other, seq_gpspw, seq_air
+ seq_prof, seq_tc, seq_gpsro, seq_other, seq_gpspw
type(time_type) :: anal_time
@@ -178,9 +165,6 @@ program mpas_dart_obs_preprocess
integer :: dimid, ncid, VarID
real(r8), allocatable :: xland(:) ! indicator for land (1.0_r8) or ocean (> 1.0_r8)
-real(r8) :: radius_meters = earth_radius * 1000.0_r8 ! radius in meters, not km.
-
-call initialize_utilities('mpas_dart_obs_preprocess')
print*,'Enter target assimilation time (gregorian day, second): '
read*, gday,gsec
@@ -223,7 +207,6 @@ program mpas_dart_obs_preprocess
call create_new_obs_seq(num_copies, num_qc, max_obs_seq, seq_rawin)
call create_new_obs_seq(num_copies, num_qc, max_obs_seq, seq_sfc)
call create_new_obs_seq(num_copies, num_qc, max_obs_seq, seq_acars)
-call create_new_obs_seq(num_copies, num_qc, max_obs_seq, seq_air)
call create_new_obs_seq(num_copies, num_qc, max_obs_seq, seq_satwnd)
call create_new_obs_seq(num_copies, num_qc, max_obs_seq, seq_prof)
call create_new_obs_seq(num_copies, num_qc, max_obs_seq, seq_gpsro)
@@ -231,118 +214,90 @@ program mpas_dart_obs_preprocess
call create_new_obs_seq(num_copies, num_qc, max_obs_seq, seq_other)
call create_new_obs_seq(num_copies, num_qc, max_obs_seq, seq_gpspw)
-print *, 'calling read_and_parse_input_seq'
-
! read input obs_seq file, divide into platforms
-call read_and_parse_input_seq(file_name_input, xland, obsdistbdy, &
+call read_and_parse_input_seq(file_name_input, xland, &
include_sig_data, obs_pressure_top, obs_height_top, sfc_elevation_check, &
sfc_elevation_tol, overwrite_ncep_sfc_qc, overwrite_ncep_satwnd_qc, &
overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour, &
-seq_rawin, seq_sfc, seq_acars, seq_air, seq_satwnd, seq_tc, seq_gpsro, &
+seq_rawin, seq_sfc, seq_acars, seq_satwnd, seq_tc, seq_gpsro, &
seq_gpspw, seq_other)
-print *, 'calling add supplimental obs 1 of 10'
-
! add supplimental rawinsonde observations from file
call add_supplimental_obs(sonde_extra, seq_rawin, max_obs_seq, &
RADIOSONDE_U_WIND_COMPONENT, include_sig_data, &
obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
-print *, 'calling add supplimental obs 2 of 10'
-
! add supplimental ACARS observations from file
call add_supplimental_obs(acars_extra, seq_acars, max_obs_seq, &
ACARS_U_WIND_COMPONENT, include_sig_data, &
obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
-print *, 'calling add supplimental obs 3 of 10'
-
! add supplimental marine observations from file
call add_supplimental_obs(marine_sfc_extra, seq_sfc, max_obs_seq, &
MARINE_SFC_U_WIND_COMPONENT, include_sig_data, &
obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
-print *, 'calling add supplimental obs 4 of 10'
-
! add supplimental land surface observations from file
call add_supplimental_obs(land_sfc_extra, seq_sfc, max_obs_seq, &
LAND_SFC_U_WIND_COMPONENT, include_sig_data, &
obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
-print *, 'calling add supplimental obs 5 of 10'
-
! add supplimental metar observations from file
call add_supplimental_obs(metar_extra, seq_sfc, max_obs_seq, &
METAR_U_10_METER_WIND, include_sig_data, &
obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
-print *, 'calling add supplimental obs 6 of 10'
-
! add supplimental satellite wind observations from file
call add_supplimental_obs(sat_wind_extra, seq_satwnd, max_obs_seq, &
SAT_U_WIND_COMPONENT, include_sig_data, &
obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
-print *, 'calling add supplimental obs 7 of 10'
-
! add supplimental profiler observations from file
call add_supplimental_obs(profiler_extra, seq_prof, max_obs_seq, &
PROFILER_U_WIND_COMPONENT, include_sig_data, &
obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
-print *, 'calling add supplimental obs 8 of 10'
-
! add supplimental GPSRO observations from file
call add_supplimental_obs(gpsro_extra, seq_gpsro, max_obs_seq, &
GPSRO_REFRACTIVITY, include_sig_data, &
obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
-print *, 'calling add supplimental obs 9 of 10'
-
! add supplimental GPSPW observations from file
call add_supplimental_obs(gpspw_extra, seq_gpspw, max_obs_seq, &
GPS_PRECIPITABLE_WATER, include_sig_data, &
obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
-print *, 'calling add supplimental obs 10 of 10'
-
! add supplimental tropical cyclone vortex observations from file
-!call add_supplimental_obs(trop_cyclone_extra, seq_tc, max_obs_seq, &
-!VORTEX_LAT, include_sig_data, &
-!obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
-!overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
+call add_supplimental_obs(trop_cyclone_extra, seq_tc, max_obs_seq, &
+VORTEX_LAT, include_sig_data, &
+obs_pressure_top, obs_height_top, gpsro_lowest_meter, sfc_elevation_check, sfc_elevation_tol, &
+overwrite_obs_time, anal_time, windowing_obs_time, windowing_int_hour)
! remove all sonde observations within radius of TC if desired
if ( tc_sonde_radii > 0.0_r8 ) call remove_sondes_near_tc(seq_tc, &
seq_rawin, tc_sonde_radii)
-print *, 'ready to superob'
! super-ob ACARS data
-if ( superob_aircraft ) then
-call superob_aircraft_data(seq_acars, nCells, anal_time, &
- aircraft_pres_int, superob_qc_threshold, obs_pressure_top, 'ACAR')
-call superob_aircraft_data(seq_air, nCells, anal_time, &
- aircraft_pres_int, superob_qc_threshold, obs_pressure_top, 'AIRS')
-endif
+if ( superob_aircraft ) call superob_aircraft_data(seq_acars, nCells, anal_time, &
+ aircraft_pres_int, superob_qc_threshold, obs_pressure_top)
! super-ob satellite wind data
if ( superob_sat_winds ) call superob_sat_wind_data(seq_satwnd, nCells, anal_time, &
sat_wind_pres_int, superob_qc_threshold, obs_pressure_top)
-print*, 'Number of obs processed:'
+print*, 'Number of obs read:'
print*, 'num_rawin: ', get_num_obs(seq_rawin)
print*, 'num_sfc: ', get_num_obs(seq_sfc)
print*, 'num_acars: ', get_num_obs(seq_acars)
-print*, 'num_airs: ', get_num_obs(seq_air)
print*, 'num_satwnd: ', get_num_obs(seq_satwnd)
print*, 'num_prof: ', get_num_obs(seq_prof)
print*, 'num_gpsro: ', get_num_obs(seq_gpsro)
@@ -354,7 +309,7 @@ program mpas_dart_obs_preprocess
get_num_obs(seq_sfc) + get_num_obs(seq_acars) + &
get_num_obs(seq_satwnd) + get_num_obs(seq_prof) + &
get_num_obs(seq_gpsro) + get_num_obs(seq_gpspw) + &
- get_num_obs(seq_other) + get_num_obs(seq_air)
+ get_num_obs(seq_other)
print*, 'num_total: ', max_obs_seq
call create_new_obs_seq(num_copies, num_qc, max_obs_seq, seq_all)
@@ -371,9 +326,6 @@ program mpas_dart_obs_preprocess
call build_master_sequence(seq_acars, seq_all)
call destroy_obs_sequence(seq_acars)
-call build_master_sequence(seq_air, seq_all)
-call destroy_obs_sequence(seq_air)
-
call build_master_sequence(seq_gpsro, seq_all)
call destroy_obs_sequence(seq_gpsro)
@@ -392,20 +344,10 @@ program mpas_dart_obs_preprocess
write(6,*) 'Total number of observations after superobing:', get_num_obs(seq_all)
write(6,*) ''
-print *, 'ready to call increase_obs_err_bdy'
-
-! increase the observation error along the regional boundary
-if ( increase_bdy_error ) call increase_obs_err_bdy(seq_all, &
- obsdistbdy, maxobsfac)
-
! write the observation sequence to file
call write_obs_seq(seq_all, file_name_output)
call destroy_obs_sequence(seq_all)
-! release any other allocated space and close down cleanly
-deallocate(xland)
-call finalize_utilities()
-
contains
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -423,21 +365,6 @@ function aircraft_obs_check()
end function aircraft_obs_check
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-!
-! acars_obs_check - function that determines whether to include an
-! acars observation in the sequence. For now,
-! this function is a placeholder and returns true.
-!
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-function acars_obs_check()
-
-logical :: acars_obs_check
-
-acars_obs_check = .true.
-
-end function acars_obs_check
-
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! add_supplimental_obs - subroutine that reads observation data from
@@ -473,7 +400,7 @@ subroutine add_supplimental_obs(filename, obs_seq, max_obs_seq, plat_kind, &
logical, intent(in) :: obs_window
real(r8), intent(in) :: window_hours
-integer :: nloc, okind
+integer :: nloc, okind, dom_id
integer :: gsec, gday, dsec, bday, bsec, eday, esec, num_excluded_bytime
logical :: file_exist, last_obs, pass_checks, first_obs
real(r8) :: llv_loc(3)
@@ -503,8 +430,8 @@ subroutine add_supplimental_obs(filename, obs_seq, max_obs_seq, plat_kind, &
write(6,*) 'Adding Supplimental METAR Data'
case (SAT_U_WIND_COMPONENT)
write(6,*) 'Adding Supplimental Satellite Wind Data'
-! case (VORTEX_LAT)
-! write(6,*) 'Adding Supplimental Tropical Cyclone Data'
+ case (VORTEX_LAT)
+ write(6,*) 'Adding Supplimental Tropical Cyclone Data'
case (GPSRO_REFRACTIVITY)
write(6,*) 'Adding Supplimental GPS RO Data'
case (GPS_PRECIPITABLE_WATER)
@@ -591,8 +518,6 @@ subroutine add_supplimental_obs(filename, obs_seq, max_obs_seq, plat_kind, &
pass_checks = rawinsonde_obs_check(obs_loc, okind, siglevel, &
sfcelev, elev_max)
case (ACARS_U_WIND_COMPONENT)
- pass_checks = acars_obs_check()
- case (AIRCRAFT_U_WIND_COMPONENT)
pass_checks = aircraft_obs_check()
case (MARINE_SFC_U_WIND_COMPONENT)
pass_checks = surface_obs_check(sfcelev, elev_max, llv_loc)
@@ -937,7 +862,6 @@ end function rawinsonde_obs_check
!
! filename - name of input obs sequence
! landmask - land/ocean mask, dimensioned (nCells), 1=land,2=water
-! obs_bdy_dist - remove obs closer than this to boundary
! siglevel - true to include sonde significant level data
! ptop - lowest pressure to include in sequence
! htop - highest height level to include in sequence
@@ -958,16 +882,15 @@ end function rawinsonde_obs_check
! other_seq - remaining observation sequence
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-subroutine read_and_parse_input_seq(filename, landmask, obs_bdy_dist, siglevel,&
- ptop, htop, sfcelev, elev_max, new_sfc_qc, &
+subroutine read_and_parse_input_seq(filename, landmask, siglevel, ptop, &
+ htop, sfcelev, elev_max, new_sfc_qc, &
new_satwnd_qc, overwrite_time, atime, &
obs_window, window_hours, &
- rawin_seq, sfc_seq, acars_seq, air_seq, satwnd_seq, &
+ rawin_seq, sfc_seq, acars_seq, satwnd_seq, &
tc_seq, gpsro_seq, gpspw_seq, other_seq)
character(len=129), intent(in) :: filename
real(r8), intent(in) :: landmask(:)
-real(r8), intent(in) :: obs_bdy_dist
real(r8), intent(in) :: ptop, htop, elev_max
logical, intent(in) :: siglevel, sfcelev, new_sfc_qc, &
new_satwnd_qc, overwrite_time
@@ -975,8 +898,7 @@ subroutine read_and_parse_input_seq(filename, landmask, obs_bdy_dist, siglevel,&
real(r8), intent(in) :: window_hours
type(time_type), intent(in) :: atime
type(obs_sequence_type), intent(inout) :: rawin_seq, sfc_seq, acars_seq, gpspw_seq, &
- satwnd_seq, tc_seq, gpsro_seq, other_seq, &
- air_seq
+ satwnd_seq, tc_seq, gpsro_seq, other_seq
real(r8), parameter :: satwnd_qc_ok = 15.0_r8
real(r8), parameter :: sfc_qc_ok1 = 9.0_r8
@@ -984,9 +906,9 @@ subroutine read_and_parse_input_seq(filename, landmask, obs_bdy_dist, siglevel,&
real(r8), parameter :: new_qc_value = 2.0_r8
character(len=129) :: qcmeta
-integer :: fid, var_id, okind, cellid, dsec, nobs, nth_obs
+integer :: fid, var_id, okind, dom_id, cellid, dsec
integer :: bsec, bday, esec, eday, num_excluded_bytime
-logical :: file_exist, last_obs, input_ncep_qc, global
+logical :: file_exist, last_obs, input_ncep_qc
real(r8), allocatable :: qc(:)
real(r8) :: llv_loc(3)
@@ -1008,8 +930,6 @@ subroutine read_and_parse_input_seq(filename, landmask, obs_bdy_dist, siglevel,&
call init_obs(prev_obs, get_num_copies(seq), get_num_qc(seq))
allocate(qc(get_num_qc(seq)))
-global = is_global_grid()
-
input_ncep_qc = .false.
qcmeta = get_qc_meta_data(seq, 1)
if ( trim(adjustl(qcmeta)) == 'NCEP QC index' ) input_ncep_qc = .true.
@@ -1029,36 +949,24 @@ subroutine read_and_parse_input_seq(filename, landmask, obs_bdy_dist, siglevel,&
num_excluded_bytime = 0 ! total number of obs beyond the time window
end if
-nobs = 0
InputObsLoop: do while ( .not. last_obs ) ! loop over all observations in a sequence
- nobs = nobs + 1
-
- ! Some compilers do not like mod by 0, so test first.
- if (print_every_nth_obs > 0) nth_obs = mod(nobs, print_every_nth_obs)
-
- ! If requested, print out a message every Nth observation
- ! to indicate progress is being made and to allow estimates
- ! of how long the assim will take.
- if (nth_obs == 0) write(*, '(2(A,I8))') 'Processing observation ', nobs
-
- ! Get the observation information; if regional grid
- ! check if it is in the domain but not in the boundary
+ ! Get the observation information, check if it is in the domain
call get_obs_def(obs_in, obs_def)
okind = get_obs_def_type_of_obs(obs_def)
obs_loc = get_obs_def_location(obs_def)
- cellid = cell_ok_to_interpolate(obs_loc)
- if (cellid < 1) goto 100 ! exclude obs outside region and bdy cells
llv_loc = get_location(obs_loc)
+ cellid = find_closest_cell_center(llv_loc(2), llv_loc(1))
obs_time = get_obs_def_time(obs_def)
-
! check vertical location
if ( (is_vertical(obs_loc, "PRESSURE") .and. llv_loc(3) < ptop) .or. &
(is_vertical(obs_loc, "HEIGHT") .and. llv_loc(3) > htop) ) then
- goto 100
+ prev_obs = obs_in
+ call get_next_obs(seq, prev_obs, obs_in, last_obs)
+ cycle InputObsLoop
end if
@@ -1127,20 +1035,12 @@ subroutine read_and_parse_input_seq(filename, landmask, obs_bdy_dist, siglevel,&
endif
case ( AIRCRAFT_U_WIND_COMPONENT, AIRCRAFT_V_WIND_COMPONENT, &
- AIRCRAFT_TEMPERATURE, AIRCRAFT_SPECIFIC_HUMIDITY )
-
- if ( aircraft_obs_check() ) then
-
- call copy_obs(obs, obs_in)
- call append_obs_to_seq(air_seq, obs)
-
- end if
-
- case ( ACARS_RELATIVE_HUMIDITY, ACARS_DEWPOINT, &
+ AIRCRAFT_TEMPERATURE, AIRCRAFT_SPECIFIC_HUMIDITY, &
+ ACARS_RELATIVE_HUMIDITY, ACARS_DEWPOINT, &
ACARS_U_WIND_COMPONENT, ACARS_V_WIND_COMPONENT, &
ACARS_TEMPERATURE, ACARS_SPECIFIC_HUMIDITY )
- if ( acars_obs_check() ) then
+ if ( aircraft_obs_check() ) then
call copy_obs(obs, obs_in)
call append_obs_to_seq(acars_seq, obs)
@@ -1166,10 +1066,10 @@ subroutine read_and_parse_input_seq(filename, landmask, obs_bdy_dist, siglevel,&
endif
-! case ( VORTEX_LAT, VORTEX_LON, VORTEX_PMIN, VORTEX_WMAX )
-!
-! call copy_obs(obs, obs_in)
-! call append_obs_to_seq(tc_seq, obs)
+ case ( VORTEX_LAT, VORTEX_LON, VORTEX_PMIN, VORTEX_WMAX )
+
+ call copy_obs(obs, obs_in)
+ call append_obs_to_seq(tc_seq, obs)
case ( GPSRO_REFRACTIVITY )
@@ -1188,9 +1088,6 @@ subroutine read_and_parse_input_seq(filename, landmask, obs_bdy_dist, siglevel,&
end select
-! anything that wants to cycle, come here
-100 continue
-
prev_obs = obs_in
call get_next_obs(seq, prev_obs, obs_in, last_obs)
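With the shared "100 continue" exit removed, each rejection site now advances to the next observation and cycles the loop in place. A minimal sketch of that idiom, reusing the surrounding variable names (the rejection test itself is hypothetical):

    ! reject this observation and move on, without a goto
    if ( reject_this_obs ) then
       prev_obs = obs_in
       call get_next_obs(seq, prev_obs, obs_in, last_obs)
       cycle InputObsLoop
    end if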
@@ -1262,7 +1159,7 @@ subroutine remove_sondes_near_tc(seq_tc, seq_rawin, sonde_radii)
use_obs = .true.
do n = 1, numtc
- if ( (get_dist(obs_loc,loctc(n),2,2,.true.) * radius_meters) <= sonde_radii ) use_obs = .false.
+ if ( (get_dist(obs_loc,loctc(n),2,2,.true.) * earth_radius) <= sonde_radii ) use_obs = .false.
end do
if ( use_obs ) then
@@ -1313,14 +1210,13 @@ end function sat_wind_obs_check
! ptop - lowest pressure to include in sequence
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obskind)
+subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop)
type(obs_sequence_type), intent(inout) :: seq
type(time_type), intent(in) :: atime
-integer, intent(in) :: ncell
-integer, intent(in) :: iqc_thres ! Superob obs w/ qc < iqc_thres only.
-real(r8), intent(in) :: vdist, ptop
-character(len=4), intent(in) :: obskind
+integer, intent(in) :: ncell
+integer, intent(in) :: iqc_thres ! Superob obs w/ qc < iqc_thres only.
+real(r8), intent(in) :: vdist, ptop
character(len=256) :: string
integer :: icell
@@ -1335,7 +1231,6 @@ subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obsk
ndwpt(:,:),latd(:,:),lond(:,:),pred(:,:),dwpt(:,:),errd(:,:),qcd(:,:),&
nrelh(:,:),latr(:,:),lonr(:,:),prer(:,:),relh(:,:),errr(:,:),qcr(:,:)
-logical :: if_aircraft
integer :: nlev, ik
real(r8) :: ps, pt, dp
real(r8),allocatable:: plevs(:)
@@ -1361,15 +1256,9 @@ subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obsk
character(len=32 ), parameter :: revision = "$Revision$"
character(len=128), parameter :: revdate = "$Date$"
!-----------------------------------------------------------------------
-write(6,*)
-if_aircraft = .false.
-if ( obskind(1:4) == 'AIRC' ) if_aircraft = .true.
-if ( if_aircraft ) then
- print *, obskind, AIRCRAFT_U_WIND_COMPONENT, AIRCRAFT_V_WIND_COMPONENT, AIRCRAFT_TEMPERATURE
-endif
-
-write(6,*) 'Super-Obing ', obskind, ' data over ',ncell,' cells.'
+write(6,*)
+write(6,*) 'Super-Obing Aircraft Data over ',ncell,' cells.'
! Vertical layers for superobing up to obs_pressure_top.
! plevs is defined at the midpoint between two adjacent levels.
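A hedged sketch of one way such midpoint levels can be built (the actual construction in this routine lies outside this hunk and may differ): given the bottom pressure ps, the top pressure pt (obs_pressure_top) and a constant layer thickness dp, with nlev, ik and plevs declared as above,

    nlev = nint( (ps - pt) / dp )                     ! number of superob layers
    allocate(plevs(nlev))
    do ik = 1, nlev
       plevs(ik) = ps - (real(ik, r8) - 0.5_r8) * dp  ! midpoint of layer ik
    end do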
@@ -1391,7 +1280,7 @@ subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obsk
num_qc = get_num_qc(seq)
num_obs = get_num_obs(seq)
-write(6,*) 'Super-Obing',num_obs,' ',obskind,' data'
+write(6,*) 'Super-Obing',num_obs,' Aircraft Data'
allocate(airobs(num_obs))
call init_obs(obs, num_copies, num_qc)
@@ -1599,7 +1488,7 @@ subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obsk
qcq(icell,ik) = max(qcq(icell,ik),airobs(k)%qvap_qc)
end if
- if ( airobs(k)%dwpt /= missing_r8 ) then ! not used for now
+ if ( airobs(k)%dwpt /= missing_r8 ) then
ndwpt(icell,ik) = ndwpt(icell,ik) + 1.0_r8
latd(icell,ik) = latd(icell,ik) + airobs(k)%lat
lond(icell,ik) = lond(icell,ik) + airobs(k)%lon
@@ -1610,7 +1499,7 @@ subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obsk
qcd(icell,ik) = max(qcd(icell,ik),airobs(k)%dwpt_qc)
end if
- if ( airobs(k)%relh /= missing_r8 ) then ! not used for now
+ if ( airobs(k)%relh /= missing_r8 ) then
nrelh(icell,ik) = nrelh(icell,ik) + 1.0_r8
latr(icell,ik) = latr(icell,ik) + airobs(k)%lat
lonr(icell,ik) = lonr(icell,ik) + airobs(k)%lon
@@ -1637,13 +1526,8 @@ subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obsk
uwnd(n,k) = uwnd(n,k) / nuwnd(n,k)
!erru(n,k) = erru(n,k) / nuwnd(n,k)
- if ( if_aircraft ) then
- call create_obs_type(latu(n,k), lonu(n,k), preu(n,k), VERTISPRESSURE, uwnd(n,k), &
- AIRCRAFT_U_WIND_COMPONENT, erru(n,k), qcu(n,k), atime, obs)
- else
call create_obs_type(latu(n,k), lonu(n,k), preu(n,k), VERTISPRESSURE, uwnd(n,k), &
ACARS_U_WIND_COMPONENT, erru(n,k), qcu(n,k), atime, obs)
- endif
call append_obs_to_seq(seq, obs)
end if
@@ -1657,13 +1541,8 @@ subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obsk
vwnd(n,k) = vwnd(n,k) / nvwnd(n,k)
!errv(n,k) = errv(n,k) / nvwnd(n,k)
- if ( if_aircraft ) then
- call create_obs_type(latv(n,k), lonv(n,k), prev(n,k), VERTISPRESSURE, vwnd(n,k), &
- AIRCRAFT_V_WIND_COMPONENT, errv(n,k), qcv(n,k), atime, obs)
- else
call create_obs_type(latv(n,k), lonv(n,k), prev(n,k), VERTISPRESSURE, vwnd(n,k), &
ACARS_V_WIND_COMPONENT, errv(n,k), qcv(n,k), atime, obs)
- endif
call append_obs_to_seq(seq, obs)
end if
@@ -1677,13 +1556,8 @@ subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obsk
tmpk(n,k) = tmpk(n,k) / ntmpk(n,k)
!errt(n,k) = errt(n,k) / ntmpk(n,k)
- if ( if_aircraft ) then
- call create_obs_type(latt(n,k), lont(n,k), pret(n,k), VERTISPRESSURE, tmpk(n,k), &
- AIRCRAFT_TEMPERATURE, errt(n,k), qct(n,k), atime, obs)
- else
call create_obs_type(latt(n,k), lont(n,k), pret(n,k), VERTISPRESSURE, tmpk(n,k), &
ACARS_TEMPERATURE, errt(n,k), qct(n,k), atime, obs)
- endif
call append_obs_to_seq(seq, obs)
end if
@@ -1697,13 +1571,38 @@ subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obsk
qvap(n,k) = qvap(n,k) / nqvap(n,k)
!errq(n,k) = errq(n,k) / nqvap(n,k)
- if ( if_aircraft ) then
- call create_obs_type(latq(n,k), lonq(n,k), preq(n,k), VERTISPRESSURE, qvap(n,k), &
- AIRCRAFT_SPECIFIC_HUMIDITY, errq(n,k), qcq(n,k), atime, obs)
- else
call create_obs_type(latq(n,k), lonq(n,k), preq(n,k), VERTISPRESSURE, qvap(n,k), &
ACARS_SPECIFIC_HUMIDITY, errq(n,k), qcq(n,k), atime, obs)
- endif
+ call append_obs_to_seq(seq, obs)
+
+ end if
+
+ if ( ndwpt(n,k) > 0.0_r8 ) then ! write dewpoint temperature superob
+
+ latd(n,k) = latd(n,k) / ndwpt(n,k)
+ lond(n,k) = lond(n,k) / ndwpt(n,k)
+ if ( lond(n,k) >= 360.0_r8 ) lond(n,k) = lond(n,k) - 360.0_r8
+ pred(n,k) = pred(n,k) / ndwpt(n,k)
+ dwpt(n,k) = dwpt(n,k) / ndwpt(n,k)
+ !errd(n,k) = errd(n,k) / ndwpt(n,k)
+
+ call create_obs_type(latd(n,k), lond(n,k), pred(n,k), VERTISPRESSURE, dwpt(n,k), &
+ ACARS_DEWPOINT, errd(n,k), qcd(n,k), atime, obs)
+ call append_obs_to_seq(seq, obs)
+
+ end if
+
+ if ( nrelh(n,k) > 0.0_r8 ) then ! write relative humidity superob
+
+ latr(n,k) = latr(n,k) / nrelh(n,k)
+ lonr(n,k) = lonr(n,k) / nrelh(n,k)
+ if ( lonr(n,k) >= 360.0_r8 ) lonr(n,k) = lonr(n,k) - 360.0_r8
+ prer(n,k) = prer(n,k) / nrelh(n,k)
+ relh(n,k) = relh(n,k) / nrelh(n,k)
+ !errr(n,k) = errr(n,k) / nrelh(n,k)
+
+ call create_obs_type(latr(n,k), lonr(n,k), prer(n,k), VERTISPRESSURE, relh(n,k), &
+ ACARS_RELATIVE_HUMIDITY, errr(n,k), qcr(n,k), atime, obs)
call append_obs_to_seq(seq, obs)
end if
@@ -1711,7 +1610,6 @@ subroutine superob_aircraft_data(seq, ncell, atime, vdist, iqc_thres, ptop, obsk
enddo
enddo !k = 1, nlev ! loop over all vertical levels
-write(6,*)
deallocate(plevs)
deallocate(nuwnd); deallocate(nvwnd); deallocate(ntmpk);
@@ -2057,15 +1955,11 @@ end function surface_obs_check
!
! lon - longitude in degrees
! lat - latitude in degrees
-! this_cellid - if we already have searched for the cellid, pass it
-! in and save repeating the search
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-function pole_check(lon, lat, this_cellid)
+function pole_check(lon, lat)
real(r8), intent(in) :: lon, lat
-integer, intent(in), optional :: this_cellid
-
logical :: pole_check
logical, save :: first = .true.
integer, save :: north_pole, south_pole
@@ -2079,11 +1973,7 @@ function pole_check(lon, lat, this_cellid)
endif
! create a point at this lon/lat, and at the nearest pole
-if (present(this_cellid)) then
- cellid = this_cellid
-else
- cellid = find_closest_cell_center(lat, lon)
-endif
+cellid = find_closest_cell_center(lat, lon)
! are we within the cell at that pole?
! FIXME: For now, we check if the obs is located within the cell at the pole.
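A hedged usage sketch for the simplified interface; the argument order is lon, lat as documented above, and the caller shown here is hypothetical (llv_loc from get_location() is ordered lon/lat/vert):

    ! flag observations that fall inside a cell touching a pole
    if ( pole_check(llv_loc(1), llv_loc(2)) ) then
       ! ... handle polar observations specially ...
    endif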
@@ -2180,175 +2070,6 @@ subroutine wrap_lon(lon, westlon, eastlon)
end subroutine wrap_lon
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-!
-! increase_obs_err_bdy - subroutine that increases the observation
-! error based on proximity to the regional
-! boundary.
-!
-! seq - observation sequence
-! obsbdydist - max distance to boundary beyond which no errors changed
-! maxfac - factor to increase observation error at boundary
-!
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-subroutine increase_obs_err_bdy(seq, obsbdydist, maxfac)
-type(obs_sequence_type), intent(inout) :: seq
-real(r8), intent(in) :: obsbdydist, maxfac
-
-integer :: nbdy, i
-logical :: last_obs
-real(r8) :: slope, intercept, minbdydist, obsfac
-
-character(len=140) :: bob
-type(obs_def_type) :: obs_def
-type(obs_type) :: obs, prev_obs
-type(get_close_type) :: gc
-type(location_type) :: thisobsloc
-type(location_type), allocatable :: bdyloclist(:)
-
-write(6,*) 'Increasing the Observation Error Near the Regional Boundaries'
-
-call init_obs(obs, get_num_copies(seq), get_num_qc(seq))
-call init_obs(prev_obs, get_num_copies(seq), get_num_qc(seq))
-
-! count, allocate, and fill a location list
-call gather_bdy_cells(nbdy, bdyloclist)
-if (nbdy <= 0) return
-!do i=1, nbdy
-! call write_location(0, bdyloclist(i), charstring=bob)
-! write(*, *) bob
-!enddo
-call get_close_init(gc, nbdy, obsbdydist/radius_meters, bdyloclist)
-
-! compute slope and intercept for error increase factor
-slope = (1.0_r8 - maxfac) / obsbdydist
-intercept = maxfac
-
-last_obs = .false.
-if ( .not. get_first_obs(seq, obs) ) last_obs = .true.
-
-do while ( .not. last_obs )
-
- ! get location information relative to domain 1 (skip nests)
- call get_obs_def(obs, obs_def)
- thisobsloc = get_obs_def_location(obs_def)
- ! compute min dist to any boundary cell in meters
- call find_min_dist(gc, thisobsloc, nbdy, bdyloclist, minbdydist)
-
- ! increase error based on this distance
- if ( minbdydist <= obsbdydist ) then
-
- obsfac = slope * minbdydist + intercept
- call set_obs_def_error_variance(obs_def, &
- get_obs_def_error_variance(obs_def) * obsfac * obsfac)
- call set_obs_def(obs, obs_def)
- call set_obs(seq, obs, get_obs_key(obs))
-
- end if
- prev_obs = obs
- call get_next_obs(seq, prev_obs, obs, last_obs)
-
-end do
-
-deallocate(bdyloclist)
-call get_close_destroy(gc)
-
-end subroutine increase_obs_err_bdy
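For reference, the scaling removed here is a linear ramp: the factor equals maxfac at the boundary and decays to 1.0 at obsbdydist, and the observation error variance is multiplied by the square of that factor. A standalone sketch with the same variable names as the deleted code (new_var is introduced only for illustration):

    slope     = (1.0_r8 - maxfac) / obsbdydist
    intercept = maxfac
    obsfac    = slope * minbdydist + intercept    ! maxfac at distance 0, 1.0 at obsbdydist
    new_var   = get_obs_def_error_variance(obs_def) * obsfac * obsfac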
-
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-!
-! gather_bdy_cells - subroutine that finds the boundary cells in a
-! regional grid and adds their locations to an
-! array that is allocated here
-!
-! nbdy - number of cells in the boundary
-! bdyloclist - allocated array of location types for those cells
-!
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-subroutine gather_bdy_cells(nbdy, bdyloclist)
-integer, intent(out) :: nbdy
-type(location_type), allocatable, intent(out) :: bdyloclist(:)
-
-integer :: i
-integer, allocatable :: bdy_flag(:), b_cellids(:)
-real(r8), allocatable :: bdy_lat(:), bdy_lon(:)
-
-! just type 1 cells? or all bdy cells?
-! need accessor routine for the bdy info, and the
-! lon/lat arrays from model_mod.
-
-call get_bdy_mask(nCells, bdy_flag)
-call get_cell_center_coords(nCells, bdy_lat, bdy_lon)
-allocate(b_cellids(nCells))
-
-! start out trying this - only record cell ids which
-! have a flag of 1, not all boundary cells
-nbdy = 0
-do i = 1, nCells
- if (bdy_flag(i) == 1) then
- nbdy = nbdy+1
- b_cellids(nbdy) = i
- endif
-enddo
-
-allocate(bdyloclist(nbdy))
-
-do i = 1, nbdy
- bdyloclist(i)= set_location(bdy_lon(b_cellids(i)), bdy_lat(b_cellids(i)), 1.0_r8, 1)
-enddo
-
-
-deallocate(bdy_flag, bdy_lat, bdy_lon, b_cellids)
-
-end subroutine gather_bdy_cells
-
-
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-!
-! find_min_dist - subroutine that returns the distance to the closest
-! boundary cell from the given observation location
-!
-! gc - get close derived type
-! obsloc - observation currently being processed
-! nlocs - number of items in the loclist
-! loclist - list of locations
-! mindist - distance to the closest boundary cell
-! returns HUGE if no boundary cells are within
-! obsbdydist. converted from radians to meters.
-!
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-subroutine find_min_dist(gc, obsloc, nlocs, loclist, mindist)
-type(get_close_type), intent(in) :: gc
-type(location_type), intent(inout) :: obsloc
-integer, intent(in) :: nlocs
-type(location_type), intent(inout) :: loclist(:)
-real(r8), intent(out) :: mindist
-
-integer :: num_close
-integer, allocatable :: close_ind(:), dummy(:)
-real(r8), allocatable :: dist(:)
-
-allocate(close_ind(nlocs), dummy(nlocs), dist(nlocs))
-dummy(:) = 1
-
-! FIXME: could call get_close() w/o replicating dummy in call
-call get_close_obs(gc, obsloc, 1, loclist, dummy, dummy, &
- num_close, close_ind, dist)
-
-if (num_close <= 0) then
- mindist = HUGE(1.0_r8)
- return
-endif
-
-mindist = minval(dist(1:num_close)) ! radians here
-mindist = mindist * radius_meters ! back to meters
-
-deallocate(close_ind, dummy, dist)
-
-end subroutine find_min_dist
-
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
end program
!
diff --git a/models/mpas_atm/mpas_dart_obs_preprocess.nml b/models/mpas_atm/mpas_dart_obs_preprocess.nml
index f0f0073a25..b5dc5b238a 100644
--- a/models/mpas_atm/mpas_dart_obs_preprocess.nml
+++ b/models/mpas_atm/mpas_dart_obs_preprocess.nml
@@ -18,6 +18,11 @@
windowing_obs_time = .false.
windowing_int_hour = 1.5
+ obs_boundary = 0.0
+ increase_bdy_error = .false.
+ maxobsfac = 2.5
+ obsdistbdy = 15.0
+
sfc_elevation_check = .false.
sfc_elevation_tol = 300.0
obs_pressure_top = 0.0
@@ -28,19 +33,15 @@
superob_qc_threshold = 4
superob_aircraft = .false.
+ aircraft_horiz_int = 36.0
aircraft_pres_int = 2500.0
superob_sat_winds = .false.
+ sat_wind_horiz_int = 100.0
sat_wind_pres_int = 2500.0
overwrite_ncep_satwnd_qc = .false.
overwrite_ncep_sfc_qc = .false.
- gpsro_lowest_meter = 3000.0
-
- increase_bdy_error = .false.
- maxobsfac = 2.5
- obsdistbdy = 150000.0
-
max_num_obs = 1000000
/
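A minimal sketch of how the restored boundary-related entries could be declared and read on the Fortran side. The namelist group name and the defaults are assumptions inferred from this .nml file, not copied from the program source; the utility calls are the standard DART ones:

    real(r8) :: obs_boundary       = 0.0_r8
    logical  :: increase_bdy_error = .false.
    real(r8) :: maxobsfac          = 2.5_r8
    real(r8) :: obsdistbdy         = 15.0_r8

    namelist /mpas_obs_preproc_nml/ obs_boundary, increase_bdy_error, &
                                    maxobsfac, obsdistbdy

    call find_namelist_in_file("input.nml", "mpas_obs_preproc_nml", iunit)
    read(iunit, nml = mpas_obs_preproc_nml, iostat = io)
    call check_namelist_read(iunit, io, "mpas_obs_preproc_nml")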
diff --git a/models/mpas_atm/resample.f90 b/models/mpas_atm/resample.f90
index 711f37e282..ed65ba1534 100644
--- a/models/mpas_atm/resample.f90
+++ b/models/mpas_atm/resample.f90
@@ -34,7 +34,7 @@ program resample
operator(-)
use model_mod, only : static_init_model, get_model_size, get_state_meta_data, &
model_interpolate, get_analysis_time, &
- get_init_template_filename, analysis_file_to_statevector, &
+ get_model_analysis_filename, analysis_file_to_statevector, &
statevector_to_analysis_file, get_analysis_time, &
write_model_time, get_grid_dims
@@ -87,7 +87,7 @@ program resample
real(r8), allocatable :: statevector(:)
character(len=metadatalength) :: state_meta(1)
-character(len=129) :: mpas_input_file ! set with get_init_template_filename() if needed
+character(len=129) :: mpas_input_file ! set with get_model_analysis_filename() if needed
type(netcdf_file_type) :: ncFileID
type(location_type) :: loc
diff --git a/models/mpas_atm/shell_scripts/advance_model.csh b/models/mpas_atm/shell_scripts/advance_model.csh
index 1ef75badcd..878ba97104 100755
--- a/models/mpas_atm/shell_scripts/advance_model.csh
+++ b/models/mpas_atm/shell_scripts/advance_model.csh
@@ -42,28 +42,20 @@
set ensemble_member = $1
set ensemble_max = $2
-# Do you want to create lbc files?
-#-------------------------------------------------------------------
-set make_lbc = true
-
# Do you want to save horizontal winds in the analysis file?
#-------------------------------------------------------------------
-set save_wind = false
-
-# Do you want to save prior states before being overwritten at the next cycle?
-#-------------------------------------------------------------------
-set save_prior = true
+set save_wind = true
# mpi command
#-------------------------------------------------------------------
#set mpicmd = "mpi -n 4" # Mac OS
#set mpicmd = "mpirun.lsf" # Yellowstone
-set mpicmd = "mpiexec_mpt" # dplace -s 1" # Cheyenne
+set mpicmd = "mpiexec_mpt dplace -s 1" # Cheyenne
# Other commands
#-------------------------------------------------------------------
set REMOVE = 'rm -rf'
-set COPY = 'cp -pf'
+set COPY = 'cp -p'
set MOVE = 'mv -f'
set LINK = 'ln -sf'
unalias cd
@@ -156,15 +148,14 @@ while( $ensemble_member <= $ensemble_max )
${LINK} ${CENTRALDIR}/advance_time . || exit 1
# Get the files specific for this experiment
- ${COPY} ${CENTRALDIR}/input.nml . || exit 1
${LINK} ${CENTRALDIR}/streams.atmosphere . || exit 1
+ ${COPY} ${CENTRALDIR}/input.nml . || exit 1
${COPY} ${CENTRALDIR}/namelist.atmosphere . || exit 1
- ${LINK} ${CENTRALDIR}/${fs_grid}* . || exit 1
+ ${LINK} ${CENTRALDIR}/MPAS_RUN/${fs_grid}* . || exit 1
if( $if_sfc_update == .true. || $if_sfc_update == true ) then
${LINK} ${CENTRALDIR}/${fsfc} .
ls -lL $fsfc || exit 1
endif
- if($ensemble_member == 1) ${LINK} ${CENTRALDIR}/streams.atmosphere.tend streams.atmosphere || exit
# Input analysis file
set input_file = `head -n $ensemble_member ${CENTRALDIR}/${inlist} | tail -1`
@@ -229,7 +220,7 @@ while( $ensemble_member <= $ensemble_max )
@ assim_hour += ${thrs}
set intv_utc = `echo $assim_days + 100 | bc | cut -b2-3`_`echo $assim_hour + 100 | bc | cut -b2-3`:`echo $assim_min + 100 | bc | cut -b2-3`:`echo $assim_secs + 100 | bc | cut -b2-3`
set input_file = ${fhead}`echo ${prev_utc} | sed -e 's/:/\./g'`
- echo "With IAU, we run atmosphere_model for ${assim_hour} hrs from ${prev_utc}."
+ echo "With IAU, we run atmospher_model for ${assim_hour} hrs from ${prev_utc}."
cat >! script.sed << EOF
/config_start_time/c\
@@ -249,37 +240,10 @@ EOF
endif # if($is_iau_there == 1 && $is_iau_on == "'on'" ) then
- echo ${input_file}
ls -l ${input_file} || exit
if ( -e namelist.atmosphere ) ${REMOVE} namelist.atmosphere
sed -f script.sed ${CENTRALDIR}/namelist.atmosphere >! namelist.atmosphere
- # Prefix of a lateral boundary condition file if mpas is run in a limited-area mode
- set fgrd = `grep init_template_filename input.nml | awk '{print $3}' | cut -d ',' -f1 | sed -e "s/'//g" | sed -e 's/"//g'`
- set is_it_regional = `ncdump -h $fgrd | grep bdyMask | wc -l`
- if ( $is_it_regional > 0 ) then
- set fbdy = `sed -n '//{/Scree/{p;n};/##/{q};p}' ${STREAM_ATM} | \
- grep filename_template | awk -F= '{print $2}' | awk -F$ '{print $1}' | sed -e 's/"//g'`
- set flbc = ${fbdy}.`echo ${anal_utc} | sed -e 's/:/\./g'`.nc
- ls -l ${flbc} || exit
-
- # update_bc with the analysis - even for edge winds
- cat >! bc.sed << EOF
- s/flbc/${flbc}/g
- s/fanl/${input_file}/g
- s/fini/${fgrd}/g
-EOF
- sed -f bc.sed ${CENTRALDIR}/update_bc.ncl >! update_bc.ncl || exit
- ncl update_bc.ncl
- #else
- #FIXME: S. Ha - We may want to use the same streams.atmosphere in both global and regional modes.
- # This means that we would always have an lbc_in input stream.
- # In case of the global mode, all we need might be just to set input_interval as none.
- # Then should we update streams.atmosphere for that? Not sure yet if the global MPAS will
- # fail with this additional input stream or simply ignore it (as of Nov-29-2017).
- # For now, I am just testing the regional MPAS with this script.
- endif
-
# clean out any old log files
if ( -e log.0000.out ) ${REMOVE} log.*
@@ -291,7 +255,6 @@ EOF
# Model output at the target time
set output_file = ${fhead}`echo ${targ_utc} | sed -e 's/:/\./g'`.nc
- set diag_file = ${fdiag}`echo ${targ_utc} | sed -e 's/:/\./g'`.nc
set date_utc = `ncdump -v xtime ${output_file} | tail -2 | head -1 | cut -d";" -f1 | sed -e 's/"//g'`
# Check if the model was successfully completed.
@@ -301,35 +264,8 @@ EOF
exit 1
endif
- # Create lbc file at the target time.
- #-------------------------------------------------------------------
- if($make_lbc == true) then
- set fini = `sed -n '//{/Scree/{p;n};/##/{q};p}' streams.atmosphere | \
- grep filename_template | awk -F= '{print $2}' | awk -F$ '{print $1}' | sed -e 's/"//g'`
- set g_lbc = `sed -n '//{/Scree/{p;n};/##/{q};p}' streams.atmosphere | \
- grep filename_template | awk -F= '{print $2}' | awk -F$ '{print $1}' | sed -e 's/"//g'`
- set tlbc = `echo $date_utc | cut -d : -f1`
- set glbc = ${g_lbc}${tlbc}.nc
- set flbc = lbc.${tlbc}.nc
- ls -lL ${glbc} || exit
- ../create_lbc.csh $fini $glbc >&! create_lbc.${tlbc}.log
- ls -lL ${flbc} || exit
- ${MOVE} ${glbc} ${g_lbc}${tlbc}.fcst${assim_hour}h.nc
- endif
- set ttnd = `echo ${date_utc} | sed -e 's/:/\./g'`
- set tnd0 = tendencies.`echo ${anal_utc} | sed -e 's/:/\./g'`.nc
- if( -e tendencies.${ttnd}.nc ) ${MOVE} tendencies.${ttnd}.nc tendencies.${tlbc}.fcst${assim_hour}h.nc
- ${MOVE} $diag_file ${fdiag}${tlbc}.fcst${assim_hour}h.nc
- ${REMOVE} ${tnd0}
-
# Back up some fields and clean up.
#-------------------------------------------------------------------
- if($save_prior == true) then
- set t_pr = `echo $date_utc | cut -d : -f1`
- ncks -O -v xtime,theta,rho,u,w,qv,qc,qr ${output_file} prior.${t_pr}.nc
- ls -l prior.${t_pr}.nc
- endif
-
if($save_wind == true) then
set if_u_used = `grep use_u_for_wind input.nml | awk '{print $3}' | cut -d ',' -f1`
@@ -342,7 +278,8 @@ EOF
endif
endif #($save_wind == true) then
- #${REMOVE} ${fdiag}*.nc
+
+ ${REMOVE} ${fdiag}*.nc
# Change back to the top directory.
#-------------------------------------------------------------------
diff --git a/models/mpas_atm/shell_scripts/advance_model.template b/models/mpas_atm/shell_scripts/advance_model.template
index 22dc03a03d..cce00b28b4 100755
--- a/models/mpas_atm/shell_scripts/advance_model.template
+++ b/models/mpas_atm/shell_scripts/advance_model.template
@@ -11,7 +11,7 @@
#PBS -o logs/JOB_NAME.log
#PBS -j oe
#PBS -q QUEUE
-#PBS -l select=NODES:mpiprocs=NPROC:ncpus=36
+#PBS -l select=NODES:ncpus=NCPUS:mpiprocs=NPROC
#PBS -l walltime=JOB_TIME
#==================================================================
diff --git a/models/mpas_atm/shell_scripts/driver_initial_ens.csh b/models/mpas_atm/shell_scripts/driver_initial_ens.csh
index 97fe9e1720..a68ee4604f 100755
--- a/models/mpas_atm/shell_scripts/driver_initial_ens.csh
+++ b/models/mpas_atm/shell_scripts/driver_initial_ens.csh
@@ -106,14 +106,6 @@ if ( ! -e ${fgraph} ) then
exit
endif
endif
-mv ${NML_MPAS} ${NML_MPAS}.tmp
-mv ${NML_INIT} ${NML_INIT}.tmp
-sed >! block.sed << EOF
- /config_block_decomp_file_prefix /c\
- config_block_decomp_file_prefix = '${MPAS_GRID}.graph.info.part.'
-EOF
-sed -f block.sed ${NML_MPAS}.tmp >! ${NML_MPAS}
-sed -f block.sed ${NML_INIT}.tmp >! ${NML_INIT}
set n = 1
while ( $n <= $ENS_SIZE )
diff --git a/models/mpas_atm/shell_scripts/driver_mpas_dart.csh b/models/mpas_atm/shell_scripts/driver_mpas_dart.csh
index 81e7ea2acf..4402af78d4 100755
--- a/models/mpas_atm/shell_scripts/driver_mpas_dart.csh
+++ b/models/mpas_atm/shell_scripts/driver_mpas_dart.csh
@@ -86,13 +86,13 @@ echo Experiment name: $EXPERIMENT_NAME
if( ! -e $RUN_DIR ) mkdir -p $RUN_DIR
cd $RUN_DIR
echo Running at $RUN_DIR
-\cp -pf ${fn_param} .
+${COPY} ${fn_param} .
#------------------------------------------
# Check if we have all the necessary files.
#------------------------------------------
-foreach fn ( advance_model.csh update_bc.ncl )
+foreach fn ( advance_model.csh )
if ( ! -r $fn || -z $fn ) then
echo ${COPY} ${DART_DIR}/../shell_scripts/${fn} .
${COPY} ${DART_DIR}/../shell_scripts/${fn} .
@@ -105,8 +105,8 @@ end
foreach fn ( filter advance_time update_mpas_states )
if ( ! -x $fn ) then
- echo ${LINK} $DART_DIR/${fn} .
- ${LINK} $DART_DIR/${fn} .
+ echo ${COPY} $DART_DIR/${fn} .
+ ${COPY} $DART_DIR/${fn} .
if ( ! $status == 0 ) then
echo ABORT\: We cannot find required executable dependency $fn.
exit
@@ -138,30 +138,23 @@ if ( ! -d MPAS_RUN ) then
endif
# Check to see if MPAS and DART namelists exist. If not, copy them from model source
-${COPY} ${NML_DIR}/${NML_MPAS} .
-if(${NML_MPAS} != namelist.atmosphere ) then
- ${MOVE} ${NML_DIR}/${NML_MPAS} namelist.atmosphere
- set NML_MPAS = namelist.atmosphere
-endif
+foreach fn ( ${NML_MPAS} ${NML_INIT} )
+ if ( ! -r $fn ) then
+ ${COPY} ${MPAS_DIR}/${fn} .
+ endif
+end
-foreach fn ( ${STREAM_ATM} ${STREAM_TEND} ${STREAM_ENS1} ) #${STREAM_INIT} )
+foreach fn ( ${STREAM_ATM} ${STREAM_INIT} )
if ( ! -r $fn || -z $fn ) then
- ${COPY} ${NML_DIR}/${fn} .
+ ${COPY} ${MPAS_DIR}/${fn} .
endif
end
-if( ${STREAM_ATM} != streams.atmosphere ) then
- ${MOVE} ${STREAM_ATM} streams.atmosphere
- set STREAM_ATM = streams.atmosphere
-endif
if ( ! -r ${NML_DART} ) then
- ${COPY} ${NML_DIR}/${NML_DART} .
-endif
-if ( ${NML_DART} != input.nml ) then
- ${MOVE} ${NML_DART} input.nml
- set NML_DART = input.nml
+ ${COPY} ${DART_DIR}/${NML_DART} .
endif
+
#--------------------------------------------------------------------------
# Take file names from input.nml, check to make sure there is consistency in variables.
#--------------------------------------------------------------------------
@@ -188,44 +181,46 @@ if ( $input_var != $INTV_SEC ) then
exit
endif
-set fn_grid_def = `grep init_template_filename ${NML_DART} | awk '{print $3}' | cut -d ',' -f1 | sed -e "s/'//g" | sed -e 's/"//g'`
+set fn_grid_def = `grep grid_definition_filename ${NML_DART} | awk '{print $3}' | cut -d ',' -f1 | sed -e "s/'//g" | sed -e 's/"//g'`
if ( ! -r $fn_grid_def ) then
- ln -s ${ENS_DIR}1/$fn_grid_def .
- #echo "ERROR! $fn_grid_def does not exist in ${RUN_DIR}, but is used for init_template_filename. Exiting"
- #exit
+ echo "ERROR! $fn_grid_def does not exist in ${RUN_DIR}, but is used for grid_definition_filename. Exiting"
+ exit
endif
-set is_it_regional = `ncdump -h $fn_grid_def | grep bdyMask | wc -l`
-if ( $is_it_regional > 0 ) then
- mv advance_model.csh advance_model.template.csh
- cat >! lbc.sed << EOF
- /set make_lbc /c\
- set make_lbc = false
-EOF
- sed -f lbc.sed advance_model.template.csh >! advance_model.csh
+set fn_model_anal = `grep model_analysis_filename ${NML_DART} | awk '{print $3}' | cut -d ',' -f1 | sed -e "s/'//g" | sed -e 's/"//g'`
+if ( ! -r $fn_model_anal ) then
+ echo "ERROR! $fn_model_anal does not exist in ${RUN_DIR}, but is used for model_analysis_filename. Exiting"
+ exit
endif
-
#--------------------------------------------------------------------------
# Check for MPAS-related files and namelist entries
#--------------------------------------------------------------------------
-#echo "MPAS is running with $dx m horizontal spacing and a timestep of $dt s"
-echo
-# Graph.info
-set file_decomp = `grep config_block_decomp_file_prefix $NML_MPAS | awk '{print $3}' | cut -d ',' -f1 | sed -e "s/'//g" | sed -e 's/"//g'`
+# Read dt and grid spacing from file. Echo them so user is aware.
+set dt = `grep config_dt $NML_MPAS | awk '{print $3}' | cut -d ',' -f1 | sed -e "s/'//g" | sed -e 's/"//g'`
+set dx = `grep config_len_disp $NML_MPAS | awk '{print $3}' | cut -d ',' -f1 | sed -e "s/'//g" | sed -e 's/"//g'`
-@ ndecomp = $MODEL_NODES * $N_PROCS_MPAS
-set fgraph = ${file_decomp}${ndecomp}
+echo "MPAS is running with $dx m horizontal spacing and a timestep of $dt s"
+echo
+
+@ ndecomp = $MODEL_NODES * $N_PROCS
+set fgraph = ${MPAS_GRID}.graph.info.part.${ndecomp}
if ( ! -e ${fgraph} ) then
- #${LINK} ${GRID_DIR}/${MPAS_GRID}.${fgraph} ${fgraph}
${LINK} ${GRID_DIR}/${fgraph} ${fgraph}
if(! -e ${fgraph}) then
- echo "Cannot find ${fgraph} for MODEL_NODES * N_PROCS_MPAS (= $MODEL_NODES * $N_PROCS_MPAS)"
+ echo "Cannot find ${fgraph} for MODEL_NODES * N_PROCS (= $MODEL_NODES * $N_PROCS)"
exit
endif
endif
+# Sanity checks for input files
+set file_decomp = `grep config_block_decomp_file_prefix $NML_MPAS | awk '{print $3}' | cut -d ',' -f1 | sed -e "s/'//g" | sed -e 's/"//g'`
+if ( $file_decomp != ${MPAS_GRID}.graph.info.part. ) then
+ echo "config_block_decomp_file_prefix in $NML_MPAS does not match grid information provided. Exiting"
+ exit
+endif
+
set file_sst_update = `grep config_sst_update $NML_MPAS | awk '{print $3}' | cut -d ',' -f1 | sed -e "s/'//g" | sed -e 's/"//g'`
if ( $SST_UPDATE == true && $file_sst_update != "true" ) then
echo "Configuration is to update SST, but config_sst_update is not true in ${NML_MPAS}. Exiting"
@@ -233,14 +228,12 @@ if ( $SST_UPDATE == true && $file_sst_update != "true" ) then
endif
if ( $SST_UPDATE == true ) then
- set fsst = `sed -n '//{/Scree/{p;n};/##/{q};p}' ${STREAM_ATM} | \
- grep filename_template | awk -F= '{print $2}' | awk -F$ '{print $1}' | sed -e 's/"//g'`
- ${LINK} ${SST_DIR}/${SST_FNAME} $fsst || exit
+ ${LINK} ${SST_DIR}/${SST_FNAME} . || exit 1
else
echo NO SST_UPDATE...
endif
-ls -l ${NML_MPAS} ${STREAM_ATM} || exit
+ls -l ${NML_MPAS} ${STREAM_ATM}
if ( ! -d logs ) mkdir logs # to print out log files
@@ -280,7 +273,7 @@ echo
# Initial ensemble for $DATE_INI
#------------------------------------------
-set fini = `sed -n '//{/Scree/{p;n};/##/{q};p}' ${STREAM_ATM} | \
+set fini = `sed -n '//{/Scree/{p;n};/##/{q};p}' ${STREAM_INIT} | \
grep filename_template | awk -F= '{print $2}' | sed -e 's/"//g'`
set frst = `sed -n '//{/Scree/{p;n};/##/{q};p}' ${STREAM_ATM} | \
grep filename_template | awk -F= '{print $2}' | awk -F$ '{print $1}' | sed -e 's/"//g'`
@@ -311,19 +304,15 @@ while ( $icyc <= $ncyc )
# 1. Namelist setup
#------------------------------------------------------
if($icyc == 1) then
- set cycling = false
- set do_restart = false
+ set cycling = .false.
+ set do_restart = .false.
else
- set cycling = true
- set do_restart = true
+ set cycling = .true.
+ set do_restart = .true.
endif
${REMOVE} init.sed script*.sed
cat >! init.sed << EOF3
- /config_dt /c\
- config_dt = ${TIMESTEP}
- /config_len_disp /c\
- config_len_disp = ${LEN_DISP}
/config_do_DAcycling /c\
config_do_DAcycling = ${cycling}
/config_do_restart /c\
@@ -333,13 +322,6 @@ EOF3
sed -f init.sed namelist.temp >! ${NML_MPAS}
${REMOVE} init.sed namelist.temp
-# Edit &ensemble_manager_nml in input.nml
- cat >! script1.sed << EOF
- /tasks_per_node /c\
- tasks_per_node = ${N_PROCS_ANAL}
-EOF
- cat script1.sed >> script.sed
-
if ( $ADAPTIVE_INF == true ) then # For a spatially-varying prior inflation.
if ($icyc == 1) then
@@ -377,24 +359,16 @@ EOF
if( -e ${output_list}) ${REMOVE} ${output_list}
echo ${input_list} ${output_list}
echo
-
set i = 1
while ( $i <= ${ENS_SIZE} )
- if($icyc == 1) then
- set finput = ${ENS_DIR}${i}/${fini}
- else
- set finput = ${ENS_DIR}${i}/${f_rst}
- endif
- if (! -e ${finput}) then
- echo "Cannot find ${finput}."
- exit
+ if (! -e ${ENS_DIR}${i}/${f_rst}) then
+ echo "Cannot find ${ENS_DIR}${i}/${f_rst}".
else
- echo ${finput} >> ${input_list}
+ echo ${ENS_DIR}${i}/${f_rst} >> ${input_list}
endif
echo ${ENS_DIR}${i}/${f_anl} >> ${output_list}
@ i++
end
-
tail -1 ${input_list}
tail -1 ${output_list}
@@ -430,19 +404,15 @@ EOF
set job_name = ${EXPERIMENT_NAME}.${icyc}
echo Running filter: $job_name
- if ( -e filter_done && -e obs_seq.final ) then
- echo filter for $job_name is already run.
- else
-
if ( $RUN_IN_PBS == yes ) then
cat >! filter.sed << EOF
s#JOB_NAME#${job_name}#g
s#PROJ_NUMBER#${PROJ_NUMBER}#g
s#NODES#${FILTER_NODES}#g
- s#NPROC#${N_PROCS_ANAL}#g
+ s#NCPUS#${N_CPUS}#g
s#JOB_TIME#${TIME_FILTER}#g
- s#QUEUE#${QUEUE}#g
+ #s#QUEUE#${QUEUE}#g
EOF
sed -f filter.sed filter.template.pbs >! filter.pbs
@@ -464,7 +434,6 @@ EOF
if ( -e ${obs_seq_out}) touch filter_done
endif
- endif #( -e filter_done && -e obs_seq.final ) then
# Check errors in filter.
if ( -e filter_started && ! -e filter_done ) then
@@ -475,14 +444,6 @@ EOF
${REMOVE} filter_started filter_done
echo Filter is done for Cycle ${icyc}\: ${time_anl}
- echo Checking NaN in the filter input and output files...
- #foreach f ( preassim_*nc output*nc )
- # set nnan = `ncdump $f | grep -i NaN | wc -l`
- # if($nnan > 0) then
- # echo $nnan NaNs in $f.
- # exit
- # endif
- #end
#------------------------------------------------------
# 5. Target time for model advance
@@ -496,13 +457,7 @@ EOF
#------------------------------------------------------
# 6. Run update_mpas_states for all ensemble members
#------------------------------------------------------
- ${DART_DIR}/update_mpas_states >! logs/update_mpas_states.${icyc}.log
- set fanal = `grep update_output_file_list input.nml | awk '{print $3}' | cut -d ',' -f1 | sed -e "s/'//g" | sed -e 's/"//g'`
- set nanal = `cat $fanal | wc -l`
- if($nanal != $ENS_SIZE) then
- echo Not enough ensemble input files in $fanal. $nanal
- exit
- endif
+ ${DART_DIR}/update_mpas_states >! logs/update_mpas_states.ic_${icyc}.log
#------------------------------------------------------
# 7. Advance model for each member
@@ -515,15 +470,16 @@ EOF
if ( $RUN_IN_PBS == yes ) then
- set job_ensemble = ${EXPERIMENT_NAME}.${icyc}.e${n}
+ set job_ensemble = ${EXPERIMENT_NAME}_${icyc}_ens${n}
cat >! advance.sed << EOF
s#JOB_NAME#${job_ensemble}#g
s#PROJ_NUMBER#${PROJ_NUMBER}#g
s#ENS_MEM#${n}#g
s#QUEUE#${QUEUE}#g
+ s#NCPUS#${N_CPUS}#g
s#NODES#${MODEL_NODES}#g
- s#NPROC#${N_PROCS_MPAS}#g
+ s#NPROC#${N_PROCS}#g
s#JOB_TIME#${TIME_MPAS}#g
EOF
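As a worked example of this substitution: with the setup_params.csh values shown later in this patch (MODEL_NODES = 4, N_CPUS = 36, N_PROCS = 32), the advance_model.template line "#PBS -l select=NODES:ncpus=NCPUS:mpiprocs=NPROC" renders as

    #PBS -l select=4:ncpus=36:mpiprocs=32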
@@ -545,10 +501,10 @@ EOF
if ( $RUN_IN_PBS == yes ) then
# Check if all members are done advancing model.
- set is_all_done = `qstat | grep ${EXPERIMENT_NAME}.${icyc}.e | wc -l`
+ set is_all_done = `qstat | grep $job_ensemble | wc -l`
while ( $is_all_done > 0 )
sleep 60
- set is_all_done = `qstat | grep ${EXPERIMENT_NAME}.${icyc}.e | wc -l`
+ set is_all_done = `qstat | grep $job_ensemble | wc -l`
end
sleep 60
@@ -581,66 +537,17 @@ EOF
if ( $HPSS_SAVE == yes ) then
cd ${sav_dir}
foreach f ( * )
- set fgz = `echo $f | grep .gz | wc -l`
- set finf = `echo $f | grep output_priorinf | wc -l`
- if($fgz == 0 && $finf == 0) then
- gzip -f $f
- ${HSICMD} ${f}.gz : ${hdir}/${f}.gz
- else
- ${HSICMD} ${f} : ${hdir}/${f}
- endif
+ gzip -f $f
+ ${HSICMD} ${f}.gz : ${hdir}/${f}.gz
end
endif
#------------------------------------------------------
- # 9. Compute tendencies for one member
+ # 9. Get ready to run filter for next cycle.
#------------------------------------------------------
cd $RUN_DIR
- set f_init = `head -1 ${input_list}`
- set f0 = `basename ${f_init}`
- set ft = `echo ${f0} | cut -d . -f2-`
- set ftend = `sed -n '//{/Scree/{p;n};/##/{q};p}' ${STREAM_ATM} | \
- grep filename_template | awk -F= '{print $2}' | awk -F$ '{print $1}' | sed -e 's/"//g'`
- set fgarb = ${ftend}${ft}
- if( -e ${f_init} ) then
- echo Run member1 once again to get tendencies in 10-min forecast.
- if(! -d ens1 ) mkdir ens1
- echo cd ens1
- cd ens1
- ln -sf ../${f_init} .
- if(! -e ${STREAM_ATM} ) ln -sf ${RUN_DIR}/${STREAM_ENS1} ${STREAM_ATM} || exit
- cat >! runtime.sed << EOF
- /config_run_duration /c\
- config_run_duration = '00_00:10:00'
-EOF
- sed -f runtime.sed ../member1/namelist.atmosphere >! namelist.atmosphere
- #if(! -e ${fgraph}) ln -s ../${fgraph} .
- cat >! run.pbs << EOF
-#!/bin/csh
-#==================================================================
-#PBS -N mpas_tend_e1
-#PBS -A ${PROJ_NUMBER}
-#PBS -j oe
-#PBS -q economy
-#PBS -l select=${MODEL_NODES}:mpiprocs=${N_PROCS_MPAS}:ncpus=36
-#PBS -l walltime=00:03:00
-#==================================================================
-mpiexec_mpt ../atmosphere_model
-EOF
- qsub run.pbs
- cd ../
- endif
- #echo ${REMOVE} ${fgarb} at the initial time.
- #${REMOVE} ${fgarb}
- ${COPY} ${input_list} ${sav_dir}/.
- ls -l list.${time_nxt}.txt || exit
- set ndone = `cat list.${time_nxt}.txt | wc -l`
- echo $ndone members are completed for ${time_nxt}
- echo
+ mv ${input_list} ${sav_dir}/.
- #------------------------------------------------------
- # 10. Get ready to run filter for next cycle.
- #------------------------------------------------------
set fcst_utc = `echo $time_nxt 0 -w | ./advance_time`
set f_fcst = `head -1 list.${time_nxt}.txt`
set fout = `basename ${f_fcst}`
@@ -661,6 +568,8 @@ EOF
set time_anl = $time_nxt
@ icyc++
+ exit
+
end
echo Cycling is done for $n_cycles cycles in ${EXPERIMENT_NAME}.
diff --git a/models/mpas_atm/shell_scripts/filter.template.pbs b/models/mpas_atm/shell_scripts/filter.template.pbs
index daa3e9b4b0..721dcc0b5c 100755
--- a/models/mpas_atm/shell_scripts/filter.template.pbs
+++ b/models/mpas_atm/shell_scripts/filter.template.pbs
@@ -11,7 +11,7 @@
#PBS -A PROJ_NUMBER
#PBS -j oe
#PBS -q QUEUE
-#PBS -l select=NODES:mpiprocs=NPROC:ncpus=36
+#PBS -l select=NODES:ncpus=NCPUS:mpiprocs=NPROC
#PBS -l walltime=JOB_TIME
############################################
if(! -e logs) mkdir logs
@@ -25,10 +25,7 @@ echo "host is " `hostname`
echo $start_time >&! filter_started
-module load peak_memusage
-setenv MPI_SHEPHERD true
-
-mpiexec_mpt peak_memusage.exe ./filter
+mpiexec_mpt dplace -s 1 ./filter
wait
if ( -e obs_seq.final ) touch filter_done
diff --git a/models/mpas_atm/shell_scripts/init_mpas_grib.csh b/models/mpas_atm/shell_scripts/init_mpas_grib.csh
index 164c060dbf..69f20ac658 100755
--- a/models/mpas_atm/shell_scripts/init_mpas_grib.csh
+++ b/models/mpas_atm/shell_scripts/init_mpas_grib.csh
@@ -31,16 +31,16 @@
cd ${RUN_DIR}/${temp_dir}
# Get the program and necessary files for the model
- ${LINK} ${WPS_DIR}/ungrib.exe . || exit 1
- ${LINK} ${WPS_DIR}/Vtable . || exit 1
${COPY} ${RUN_DIR}/input.nml . || exit 1
${LINK} ${RUN_DIR}/MPAS_RUN/atmosphere_model . || exit 1
${LINK} ${RUN_DIR}/MPAS_RUN/init_atmosphere_model . || exit 1
+ ${LINK} ${RUN_DIR}/MPAS_RUN/ungrib.exe . || exit 1
${LINK} ${RUN_DIR}/MPAS_RUN/stream* . || exit 1
${LINK} ${RUN_DIR}/MPAS_RUN/*BL . || exit 1
${LINK} ${RUN_DIR}/MPAS_RUN/*DATA . || exit 1
${LINK} ${RUN_DIR}/advance_time . || exit 1
- ${LINK} ${RUN_DIR}/${MPAS_GRID}.graph* . || exit 1
+ ${LINK} ${RUN_DIR}/*graph* . || exit 1
+ ${LINK} ${RUN_DIR}/Vtable . || exit 1
# Determine the initial, final and run times for the MPAS integration
set curr_utc = `echo $DATE_INI -${INIT_FORECAST_LENGTH}h -w | ./advance_time`
@@ -63,8 +63,7 @@
/end_date/c\
end_date = '${curr_utc}',
EOF
- sed -f script.sed ${RUN_DIR}/${NML_WPS} >! ${NML_WPS}
- set fhead = `grep prefix ${NML_WPS} | awk '{print $3}' | cut -d , -f1 | sed -e "s/'//g"`
+ sed -f script.sed ${RUN_DIR}/namelist.wps >! namelist.wps
./ungrib.exe >& ungrib.out
@@ -74,8 +73,6 @@ EOF
config_start_time = '$curr_utc'
/config_stop_time/c\
config_stop_time = '$curr_utc'
- /config_met_prefix/c\
- config_met_prefix = '${fhead}'
EOF
sed -f script.sed ${RUN_DIR}/MPAS_RUN/${NML_INIT} >! ${NML_INIT}
@@ -88,12 +85,12 @@ EOF
set fs_grid = `grep config_block_decomp_file_prefix ${RUN_DIR}/${NML_MPAS} | awk '{print $3}' | sed -e "s/'//g"`
${LINK} ${RUN_DIR}/MPAS_RUN/${fs_grid}* .
- ${LINK} ${RUN_DIR}/MPAS_RUN/${statfile} . || exit
+ ${LINK} ${RUN_DIR}/MPAS_RUN/${statfile} .
# Run init version of MPAS to create initial condition file
# mpirun.lsf ./init_atmosphere_model || exit 2
mpiexec_mpt dplace -s 1 ./init_atmosphere_model || exit 2
- ${REMOVE} ${fhead}:*
+ ${REMOVE} FILE:*
# Generate MPAS namelist file
cat >! script.sed << EOF
diff --git a/models/mpas_atm/shell_scripts/setup_params.csh b/models/mpas_atm/shell_scripts/setup_params.csh
index 3a2d121726..608ab787e2 100755
--- a/models/mpas_atm/shell_scripts/setup_params.csh
+++ b/models/mpas_atm/shell_scripts/setup_params.csh
@@ -13,36 +13,34 @@
# 2. Namelist options should be specified for all Namelist files separately.
########################################################################################
# General configuration
-set EXPERIMENT_NAME = x4.30k # What do you call this experiment?
-set DATE_INI = 2017-05-15_00:00:00 # initial cycle of the entire period
-set DATE_BEG = 2017-06-02_00:00:00 # start date to run this script
-set DATE_END = 2017-06-15_18:00:00 # end date to run this script
+set EXPERIMENT_NAME = TEST # What do you call this experiment?
+set DATE_INI = 2016-08-19_00:00:00 # initial cycle of the entire period
+set DATE_BEG = 2016-08-19_00:00:00 # start date to run this script
+set DATE_END = 2016-08-19_06:00:00 # end date to run this script
set INTV_DAY = 0 # cycling frequency - assimilation_period_days in input.nml
set INTV_SEC = 21600 # cycling frequency - assimilation_period_seconds in input.nml
# Settings specific to initializing the ensemble from grib files
-set INIT_GRIB_FILE_LIST = /glade/p/mmm/syha/FNL/UNGRIB/griblist # Name of variable with grib files (full path needed)
-set TIME_INIT = 00:10:00
-set INIT_FORECAST_LENGTH = 240 # Number of hours to integrate initial ensemble
+set INIT_GRIB_FILE_LIST = /glade/p/work/USER/MPAS-DART/rundir/griblist # Name of variable with grib files (full path needed)
+set TIME_INIT = 00:50:00
+set INIT_FORECAST_LENGTH = 12 # Number of hours to integrate initial ensemble
# PBS setup
set RUN_IN_PBS = yes # Run on cheyenne? yes or no.
-set PROJ_NUMBER = NMMM0040 # Your account key for cheyenne
-set FILTER_NODES = 18 # Total no. of nodes for DART/filter (at least bigger than ensemble size)
+set PROJ_NUMBER = xxxxxxxx # Your account key for cheyenne
+set FILTER_NODES = 35 # Total no. of nodes for DART/filter (should be at least as large as the ensemble size)
set MODEL_NODES = 4 # Total no. of nodes for MPAS/atmosphere_model
-set N_PROCS_ANAL = 36 # Number of mpi processors or tasks for filter; Reduce this for a larger memory
-set N_PROCS_MPAS = 36 # Number of mpi processors for model (=> MODEL_NODES * N_PROCS for graph.info)
+set N_CPUS = 36 # Number of cpus per node (default = 36)
+set N_PROCS = 32 # Number of mpi processors (=> MODEL_NODES * N_PROCS for graph.info)
set QUEUE = economy # queue for filter and model runs
-set TIME_FILTER = 00:40:00 # wall clock time for mpi filter runs
+set TIME_FILTER = 00:30:00 # wall clock time for mpi filter runs
set TIME_MPAS = 00:10:00 # wall clock time for mpi model runs
-set HPSS_SAVE = yes # Backup in HPSS? yes or no. If yes, edit below.
+set HPSS_SAVE = no # Backup in HPSS? yes or no. If yes, edit below.
# Ensemble configuration
-set MPAS_GRID = x4.133890 # This global grid can provide LBCs for a limited-area MPAS.
-set TIMESTEP = 180. # config_dt
-set LEN_DISP = 30000. # Finest scale
-set ENS_SIZE = 100 # Ensemble size
+set MPAS_GRID = x1.40962 # All grid parameters will be changed based on this.
+set ENS_SIZE = 30 # Ensemble size
set ADAPTIVE_INF = true # adaptive_inflation - If true, this script only supports
# spatially-varying state space prior inflation.
# And you also need to edit inf_sd_initial, inf_damping,
@@ -50,38 +48,36 @@ set ADAPTIVE_INF = true # adaptive_inflation - If true, this script only
set INFL_OUT = output_priorinf
set INFL_IN = input_priorinf
-set SST_UPDATE = true
-set SST_FNAME = x4.133890.sfc_update.spring2017.nc
+set SST_UPDATE = false
+set SST_FNAME = ${MPAS_GRID}.sfc_update.nc
# First Guess (for cold-start runs and initial ensemble)
set VTABLE = Vtable.GFS
# Directories
-set ROOT_DIR = /glade2/scratch2/syha
-set OUTPUT_DIR = ${ROOT_DIR}/MPAS_DART/Cycle # output directory
-set RUN_DIR = ${ROOT_DIR}/MPAS_DART/Cycle # Run MPAS/DART cycling
-set OBS_DIR = ${ROOT_DIR}/MPAS_DART/OBS/OBS_SEQ # obs_seq.out
-set INIT_DIR = ${ROOT_DIR}/MPAS_DART/INIT_ENS # initial ensemble
-set GRID_DIR = /glade/p/work/syha/MPAS_INIT/GRID/x4.133890.gwd # Grid info
-set SST_DIR = /glade/p/mmm/syha/MPAS_DART/SST # sfc_update.nc
-set HPSS_DIR = MPAS_DART/${EXPERIMENT_NAME} # hpss archives
set ENS_DIR = member
+set ROOT_DIR = /glade/scratch/USER
+set OUTPUT_DIR = ${ROOT_DIR}/MPAS-DART/output # output directory
+set CSH_DIR = ${ROOT_DIR}/MPAS-DART/TEST/Scripts # shell scripts
+set RUN_DIR = ${ROOT_DIR}/MPAS-DART/rundir # Run MPAS/DART cycling
+set SST_DIR = ${ROOT_DIR}/MPAS-DART/SST # sfc_update.nc
+set OBS_DIR = ${ROOT_DIR}/MPAS-DART/obs # obs_seq.out
+set INIT_DIR = ${ROOT_DIR}/MPAS-DART/INIT # initial ensemble
+set FG_DIR = ${ROOT_DIR}/FG # First Guess
+set GRID_DIR = ${ROOT_DIR} # Grid info
+set HPSS_DIR = MPAS_DART/${EXPERIMENT_NAME} # hpss archives
-set CSH_DIR = /glade/p/mmm/syha/Manhattan/models/mpas_atm/shell_scripts
-set DART_DIR = /glade/p/mmm/syha/Manhattan/models/mpas_atm/work # DART executables
-set NML_DIR = /glade/p/mmm/syha/Manhattan/models/mpas_atm/data # DART executables
-set MPAS_DIR = /glade/p/mmm/syha/MPASV52_tend_vnTiedtke # MPAS executables
-set WPS_DIR = /glade/p/mmm/syha/WPSV39 # WPS executables
+set MPAS_DIR = /glade/u/home/USER/MPAS_v5.0 # MPAS executables
+set DART_DIR = /glade/u/home/USER/DART-Man/models/mpas_atm/work # DART executables
+set WPS_DIR = /glade/u/home/wrfhelp/PRE_COMPILED/WPSV3.9_intel_serial # WPS executables
# Namelist files
set NML_INIT = namelist.init_atmosphere # Namelist for init_atmosphere_model
set NML_MPAS = namelist.atmosphere # Namelist for atmosphere_model
set NML_WPS = namelist.wps # Namelist for WPS
set NML_DART = input.nml # Namelist for DART
-set STREAM_INIT = streams.init_atmosphere # I/O list for init_atmosphere_model
set STREAM_ATM = streams.atmosphere # I/O list for atmosphere_model
-set STREAM_TEND = streams.atmosphere.tend # I/O list for atmosphere_model
-set STREAM_ENS1 = streams.atmosphere.ens1 # I/O list for atmosphere_model
+set STREAM_INIT = streams.init_atmosphere # I/O list for init_atmosphere_model
# Commands (do not need modification unless moving to new system)
set HSICMD = 'hsi put -P'
@@ -92,12 +88,6 @@ set LINK = 'ln -sf'
unalias cd
unalias ls
-# Create LBCs for a limited-area version of MPAS => create_lbc.csh
-set fcmd = /glade/p/mmm/syha/MPAS-Tools/limited_area/mpas_to_mpas/a.out.bdy_only.uncoupled.cheyenne
-set lopt = "--use-reconstruct-winds" # Wind option for LBCs
-set rgrd = ${RUN_DIR}/CONUS.x20.835586.init.nc # Regional IC
-# Ex. ${fcmd} ${lopt} init.nc ${rgrd} global_lbc.2017-05-15_06.nc # init.nc for MPAS_GRID
-
#
# $URL$
# $Revision$
diff --git a/models/mpas_atm/update_bc.f90 b/models/mpas_atm/update_bc.f90
deleted file mode 100644
index a50af5a57b..0000000000
--- a/models/mpas_atm/update_bc.f90
+++ /dev/null
@@ -1,427 +0,0 @@
-! DART software - Copyright UCAR. This open source software is provided
-! by UCAR, "as is", without charge, subject to all terms of use at
-! http://www.image.ucar.edu/DAReS/DART/DART_download
-!
-! $Id$
-
-!> @mainpage
-!> @{
-!> @brief this program updates the boundary cells in a regional mpas domain
-!>
-!>
-!> this program takes a global mpas file (or an initial boundary array?)
-!> and blends the values in the boundary cells
-!> @}
-!>
-!>
-
-program update_bc
-
-use types_mod, only : r8, i8, obstypelength, MAX_NUM_DOMS, MAX_FILES
-
-use time_manager_mod, only : time_type, set_time_missing, &
- operator(/=), print_time
-
-use utilities_mod, only : register_module, find_namelist_in_file, &
- error_handler, nmlfileunit, E_MSG, E_ERR, &
- check_namelist_read, do_nml_file, do_nml_term, &
- open_file, close_file, set_multiple_filename_lists, &
- get_next_filename
-
-use location_mod, only : location_type
-
-use obs_kind_mod, only : get_num_quantities, get_index_for_quantity, &
- get_name_for_quantity
-
-use sort_mod, only : index_sort
-
-use assim_model_mod, only : static_init_assim_model, get_model_size, &
- get_state_meta_data
-
-use state_vector_io_mod, only : read_state
-
-use io_filenames_mod, only : file_info_type, io_filenames_init, &
- set_io_copy_flag, set_file_metadata, &
- set_member_file_metadata, file_info_dump, &
- stage_metadata_type, get_stage_metadata, &
- get_restart_filename, READ_COPY
-
-use state_structure_mod, only : get_num_domains
-
-use mpi_utilities_mod, only : initialize_mpi_utilities, task_count, &
- finalize_mpi_utilities, my_task_id, &
- send_sum_to, sum_across_tasks
-
-use ensemble_manager_mod, only : ensemble_type, init_ensemble_manager, compute_copy_mean, &
- get_my_vars, get_my_num_vars, end_ensemble_manager
-
-implicit none
-
-! version controlled file description for error handling, do not edit
-character(len=256), parameter :: source = &
- "$URL$"
-character(len=32 ), parameter :: revision = "$Revision$"
-character(len=128), parameter :: revdate = "$Date$"
-
-integer :: iunit, io, ens, i, j, total_j, qtyindex
-integer :: num_qtys, stype
-integer(i8) :: ii, model_size
-integer, allocatable :: index_list(:)
-integer, parameter :: max_list_len = 500
-character(len=512) :: msgstring, msgstring1
-logical :: allqtys, done
-logical, allocatable :: useqty(:), useindex(:)
-type(location_type) :: loc
-type(time_type) :: mean_time, member_time
-type(file_info_type) :: ens_file_info
-
-
-character(len=64) :: method_name(1) = (/ &
- "Original " /)
-
-!----------------------------------------------------------------
-! These variables are namelist-controllable.
-!
-integer :: ens_size = 1
-integer :: blending_method = 4
-logical :: single_restart_file_in = .false.
-character(len=256) :: input_restart_file_list(MAX_NUM_DOMS) = ''
-character(len=256) :: input_restart_files(MAX_FILES) = ''
-character(len=256) :: output_restart_file_list(MAX_NUM_DOMS) = ''
-character(len=256) :: output_restart_files(MAX_FILES) = ''
-character(len=obstypelength) :: use_only_qtys(max_list_len) = ''
-
-!----------------------------------------------------------------
-! blending method - currently only 1
-! 1 = see notes
-!
-! (suggest more...)
-!----------------------------------------------------------------
-
-namelist /update_bc_nml/ &
- input_restart_files, &
- input_restart_file_list, &
- output_restart_files, &
- output_restart_file_list, &
- ens_size, &
- blending_method, &
- use_only_qtys, &
- single_restart_file_in
-
-type(ensemble_type) :: regional_ens_handle
-character(len=256), allocatable :: file_array_input(:,:)
-character(len=256) :: my_base, my_desc
-integer(i8), allocatable :: vars_array(:)
-integer(i8) :: owners_index
-integer :: num_domains, idom, imem
-integer :: ENS_MEAN_COPY
-integer :: copies, my_num_vars, num_copies
-real(r8), allocatable :: total_diff(:)
-
-!----------------------------------------------------------------
-! program start
-!----------------------------------------------------------------
-
-call initialize_mpi_utilities('update_bc')
-
-call register_module(source,revision,revdate)
-
-! Read the namelist entry and print it
-call find_namelist_in_file("input.nml", "update_bc_nml", iunit)
-read(iunit, nml = update_bc_nml, iostat = io)
-call check_namelist_read(iunit, io, "update_bc_nml")
-
-if (do_nml_file()) write(nmlfileunit, nml=update_bc_nml)
-if (do_nml_term()) write( * , nml=update_bc_nml)
-
-if (single_restart_file_in) then
- write(msgstring, *) 'single_restart_file_in is not supported.'
- write(msgstring1, *) 'Please contact DART if you would like to use this capability.'
- call error_handler(E_ERR,msgstring,msgstring1)
-endif
-
-! Initialize the model so we can get the size.
-
-call static_init_assim_model()
-model_size = get_model_size()
-
-write(msgstring, *) 'Model size/restart data length =', model_size
-call error_handler(E_MSG,'',msgstring)
-write(msgstring, *) 'Ensemble member count = ', ens_size
-call error_handler(E_MSG,'',msgstring)
-write(msgstring, *) 'Computing blending using method: '//trim(method_name(blending_method))
-call error_handler(E_MSG,'',msgstring)
-
-! Make space that is ensemble size and an extra copy for the mean
-call init_ensemble_manager(regional_ens_handle, ens_size+1, model_size)
-
-num_domains = get_num_domains()
-
-! Given either a vector of input_state_files or a text file containing
-! a list of files, return a vector of files containing the filenames.
-call set_multiple_filename_lists(input_restart_files(:), &
- input_restart_file_list(:), &
- num_domains, &
- ens_size, &
- 'update_bc', &
- 'input_restart_files', &
- 'input_state_file_list')
-
-! be ens_size but rather a single file (or multiple files if more than one domain)
-allocate(file_array_input(ens_size, num_domains))
-
-file_array_input = RESHAPE(input_restart_files, (/ens_size, num_domains/))
-
-! read in the ensemble and the mean - always in a separate file
-call io_filenames_init(ens_file_info, &
- ncopies = ens_size, &
- cycling = single_restart_file_in, &
- single_file = single_restart_file_in, &
- restart_files = file_array_input)
-
-do imem = 1, ens_size
- write(my_base,'(A,I0.2)') 'inens_', imem
- write(my_desc,'(A,I0.2)') 'input ensemble member ', imem
- call set_file_metadata(ens_file_info, &
- cnum = imem, &
- fnames = file_array_input(imem,:), &
- basename = my_base, &
- desc = my_desc)
-
- call set_io_copy_flag(ens_file_info, &
- cnum = imem, &
- io_flag = READ_COPY)
-enddo
-
-! Read the ensemble from files
-member_time = set_time_missing()
-call read_state(regional_ens_handle, ens_file_info, read_time_from_file=.false., time=member_time)
-
-! Compute mean
-ENS_MEAN_COPY = ens_size + 1
-call compute_copy_mean(regional_ens_handle, 1, ens_size, ENS_MEAN_COPY)
-
-allocate(index_list(ens_size))
-
-! are we adding up only differences from particular quantities, or the entire
-! vector?
-if (use_only_qtys(1) /= '') then
- allqtys = .false.
-
- num_qtys = get_num_quantities()
- allocate(useqty(0:num_qtys))
- useqty = .false.
-
- done = .false.
- QtyList:do i=1, max_list_len
- if (use_only_qtys(i) == '') then
- done = .true.
- exit QtyList
- endif
- qtyindex = get_index_for_quantity(use_only_qtys(i))
- if (qtyindex < 0) then
- write(msgstring, *) 'unrecognized QTY string: '//trim(use_only_qtys(i))
- call error_handler(E_ERR,'update_bc', msgstring, &
- source,revision,revdate)
- endif
- useqty(qtyindex) = .true.
-
- enddo QtyList
-
- if (.not. done) then
- write(msgstring, *) 'cannot have more than ', max_list_len, ' qtys'
- call error_handler(E_ERR,'update_bc', msgstring, &
- source,revision,revdate)
- endif
-
- write(msgstring, *) 'Computing blending based only on items in state vector items of quantity:'
- call error_handler(E_MSG,'',msgstring)
- do i=0, num_qtys
- if (useqty(i)) then
- write(msgstring, *) ' ', trim(get_name_for_quantity(i))
- call error_handler(E_MSG,'',msgstring)
- endif
- enddo
-
-else
- allqtys = .true.
-endif
-
-! if we are not processing all qtys of state vector items, set up a mask
-! for the qtys we are. do this once at the start so we don't replicate
-! work. the useqty(number_of_different_qtys) array says whether we are
-! going to add differences for this type. the useindex(state_vector_len)
-! array is being precomputed here so it's fast to loop over the ensemble
-! members and only add up differences for the qtys of interest.
-
-my_num_vars = get_my_num_vars(regional_ens_handle)
-num_copies = regional_ens_handle%num_copies
-allocate(useindex(my_num_vars), vars_array(my_num_vars))
-call get_my_vars(regional_ens_handle, vars_array)
-
-if (.not. allqtys) then
- useindex(:) = .false.
-
- j = 0
- do ii=1, my_num_vars
- owners_index = vars_array(ii)
- call get_state_meta_data(owners_index, loc, stype)
- if (stype < 0 .or. stype > num_qtys) then
- write(msgstring, *) 'bad QTY from get_state_meta_data, ', stype, ' for index ', owners_index
- write(msgstring1, *) 'must be between 0 and ', num_qtys
- call error_handler(E_ERR,'update_bc', msgstring, &
- source,revision,revdate, text2=msgstring1)
-
- endif
-
- if (useqty(stype)) then
- useindex(ii) = .true.
- j = j + 1
- endif
- enddo
-
- ! compute the total across all members
- call sum_across_tasks(j, total_j)
- write(msgstring, *) 'using ', total_j, ' of ', model_size, ' items in the state vector'
- call error_handler(E_MSG,'update_bc', msgstring)
-else
- ! use everything.
- useindex(:) = .true.
-endif
-
-total_diff = compute_diff(regional_ens_handle%copies(:,:), regional_ens_handle%copies(ENS_MEAN_COPY,:))
-
-!------------------- Print out results -----------------------
-
-if (my_task_id() == 0) then
- call index_sort(total_diff, index_list, ens_size)
- call error_handler(E_MSG, '', ' ')
- write(msgstring, "(A,I5)") 'Member with the minimum difference from the mean is ', index_list(1)
- call error_handler(E_MSG, '', msgstring)
- call error_handler(E_MSG, '', ' ')
-
- do ens=1, ens_size
- write(msgstring, "(A,I5,A,G18.6)") "Member ", index_list(ens), " difference ", total_diff(index_list(ens))
- call error_handler(E_MSG, '', msgstring)
- enddo
-
- !------------------- Write results to file -----------------------
-
- ! if the input is a single file, write the ensemble member number to a file.
- ! if the input is separate files, write the full filename to a file.
-
- iunit = open_file(output_file_name, 'formatted', 'write')
-
- if (single_restart_file_in) then
- write(iunit, "(I6)") index_list(1)
- else
- !> @todo FIXME is this domain by domain? if so, need to loop over domains?
- msgstring = get_next_filename(input_restart_file_list(1), index_list(1))
- write(iunit, "(A)") trim(msgstring)
- endif
-
- call close_file(iunit)
-
- call error_handler(E_MSG, '', ' ')
- write(msgstring, *) 'Writing closest member information to file: ', trim(output_file_name)
- call error_handler(E_MSG, '', msgstring)
-
-endif
-
-!------------------- Clean up and finalize -----------------------
-
-deallocate(index_list, useindex, vars_array)
-deallocate(total_diff)
-if (.not. allqtys) deallocate(useqty)
-
-call end_ensemble_manager(regional_ens_handle)
-call finalize_mpi_utilities()
-
-!----------------------------------------------------------------
-!----------------------------------------------------------------
-
-contains
-
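-! returns, for each ensemble member, its total difference from the ensemble
-! mean accumulated over the state items flagged in useindex; the metric is
-! selected by the difference_method namelist value.  the per-task sums are
-! combined onto task 0 by send_sum_to, so the result is complete only there.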
-function compute_diff(ens_mems, ens_mean)
-real(r8), intent(in) :: ens_mems(num_copies,my_num_vars)
-real(r8), intent(in) :: ens_mean(my_num_vars)
-real(r8) :: compute_diff(ens_size)
-
-real(r8), allocatable :: local_diffs(:), adiff(:), global_diff(:)
-
-allocate(adiff(my_num_vars))
-allocate(local_diffs(ens_size), global_diff(ens_size))
-
-do copies = 1, ens_size
-   ! initialize so state items excluded by useindex contribute zero to the sums
-   adiff(:) = 0.0_r8
- select case (difference_method)
-
- ! simple absolute difference
- case (1)
- where(useindex)
- adiff(:) = abs(ens_mean(:) - ens_mems(copies,:))
- endwhere
-
- local_diffs(copies) = sum(adiff(:))
-
- ! normalized absolute difference
- case (2)
-
- where (useindex)
- where (ens_mean(:) /= 0.0_r8)
- adiff(:) = abs((ens_mean(:) - ens_mems(copies,:))/ens_mean(:))
- elsewhere
- adiff(:) = abs(ens_mems(copies,:))
- endwhere
- endwhere
-
- local_diffs(copies) = sum(adiff(:))
-
- ! simple rms difference
- case (3)
-
- where (useindex) adiff(:) = ens_mean(:) - ens_mems(copies, :)
-
- local_diffs(copies) = sum(adiff * adiff)
-
- ! normalized rms difference
- case (4)
-
- where (useindex)
- where (ens_mean(:) /= 0.0_r8)
- adiff = (ens_mean(:) - ens_mems(copies,:)) / ens_mean(:)
- elsewhere
- adiff = ens_mems(copies,:)
- endwhere
- endwhere
-
- local_diffs(copies) = sum(adiff * adiff)
-
- case default
- write(msgstring, *) 'Valid values for difference_method are 1-4, value is', difference_method
- call error_handler(E_ERR,'update_bc','Bad value for difference_method', &
- source,revision,revdate, text2=msgstring)
- end select
-enddo
-
-call send_sum_to(local_diffs, 0, global_diff)
-
-if(my_task_id() == 0) then
- ! need to square total difference after local sums are computed
- if (difference_method == 3 .or. difference_method == 4) then
- where(global_diff > 0) global_diff = sqrt(global_diff)
- endif
-endif
-
-compute_diff = global_diff
-
-deallocate(adiff, global_diff, local_diffs)
-
-end function compute_diff
-
-end program update_bc
-
-!
-! $URL$
-! $Id$
-! $Revision$
-! $Date$
diff --git a/models/mpas_atm/update_bc.html b/models/mpas_atm/update_bc.html
deleted file mode 100644
index 647432503c..0000000000
--- a/models/mpas_atm/update_bc.html
+++ /dev/null
@@ -1,346 +0,0 @@
-
-
-
-program update_bc
-
-
-
-
-
-
-
-Utility program to blend the boundary cells at the edge of a regional
-MPAS grid between the values updated by an assimilation and the original
-values. (Open questions: should the blend be toward the original regional
-values or toward the global grid values, and what do the boundary files
-look like?)
-
- FIXME BELOW HERE:
-
-Utility program, runnable in parallel, that compares each member of an
-ensemble of restart files to the ensemble mean.
-The program prints the members sorted by how 'close' each one is to the
-mean, where the method used to measure closeness is selectable by a
-namelist option. It also creates a file containing a single number or
-character string identifying the closest member, for ease of scripting.
-
-The ensemble mean is computed from the input ensemble.
-The difference is computed point by point across the ensemble members.
-There is an option to restrict the computation to just a subset of the
-entire state vector by listing one or more generic quantities.
-In this case, only state vector items matching one of these quantities
-will contribute to the total difference value.
-
-Available methods are listed below; they are also summarized as formulas
-after the list.
-
-1 - simple absolute difference:
-
-The absolute value of the difference between each item in the
-mean vector and the corresponding item in each ensemble member,
-accumulated over the entire state vector.
-
-2 - normalized absolute difference:
-
-The absolute value of the difference between each item in the
-mean vector and the corresponding item in each ensemble member,
-normalized by the mean value and
-accumulated over the entire state vector.
-
-3 - simple RMS difference:
-
-The square root of the accumulated sum of the
-square of the difference between each item in the mean vector
-and the corresponding item in each ensemble member.
-
-4 - normalized RMS difference:
-
-The square root of the accumulated sum of the
-square of the normalized difference between each item in the mean
-vector and the corresponding item in each ensemble member.
-
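-Written as formulas, the four metrics are (the notation here is editorial:
-x_{k,i} is state item i of ensemble member k, \bar{x}_i is the ensemble mean
-of item i, and each sum runs over the state items selected by
-use_only_quantities, or over the whole state vector if none are listed):
-
-  d_k^{(1)} = \sum_i \left| \bar{x}_i - x_{k,i} \right|
-  d_k^{(2)} = \sum_i \left| \frac{\bar{x}_i - x_{k,i}}{\bar{x}_i} \right|
-              (items with \bar{x}_i = 0 contribute |x_{k,i}|)
-  d_k^{(3)} = \sqrt{ \sum_i \left( \bar{x}_i - x_{k,i} \right)^2 }
-  d_k^{(4)} = \sqrt{ \sum_i \left( \frac{\bar{x}_i - x_{k,i}}{\bar{x}_i} \right)^2 }
-              (items with \bar{x}_i = 0 contribute x_{k,i}^2)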
-This program could be used to select one or more ensemble
-members to run a free model forecast forward in time after
-the assimilation is finished. Each member is an equally likely
-representation of the model state. Using the ensemble mean
-may not be the best choice since the mean may not have
-self-consistent fine-scale structures in the data.
-
-In addition to printing out data about all members to both
-the console and to the dart log file, this program creates
-a single output file containing information about the closest member.
-If the input restart data is in a single file, the output
-file 'closest_restart' contains a single number which is
-the ensemble member number.
-If the input restart data is in separate files, the output file
-contains the full filename of the closest member, e.g.
-'filter_restart.0004' if member 4 is closest.  For scripting, the
-contents of this file can be used to copy the corresponding member's
-data and convert it to the model input format for a free forecast,
-for example.
-
-This namelist is read from the file input.nml.
-Namelists start with an ampersand
-'&' and terminate with a slash '/'.
-Character strings that contain a '/' must be
-enclosed in quotes to prevent them from
-prematurely terminating the namelist.
-
-character(len=256), dimension(ens_size x num_domains)
-
-An array of filenames, each containing a list of DART restart data.
-
-
-input_restart_file_list
-
-character(len=256), dimension(num_domains)
-
-A file containing a list of filenames for DART restart data, one per domain.
-
-
-output_file_name
-
-character(len=256)
-
-The name of the output file, which will contain the member number (or
-filename) of the member closest to the ensemble mean.
-
-
-ens_size
-
-integer
-
-Total number of ensemble members.
-
-
-difference_method
-
-integer
-
-Selects the method used to compute the 'distance' from the mean:
-
-1 = simple absolute difference
-2 = absolute difference normalized by the mean
-3 = simple RMS difference
-4 = RMS of the normalized difference
-
-
-use_only_quantities
-
-character(len=32)
-
-If unspecified, all items in the state vector contribute to the total
-difference.  If one or more quantities are listed here, only state vector
-items of these quantities contribute to the total difference.  These are
-generic quantities, such as QTY_TEMPERATURE, QTY_U_WIND_COMPONENT,
-QTY_DENSITY, etc., not specific types like RADIOSONDE_TEMPERATURE.
-Consult the model interface code to determine which quantities are
-returned by the get_state_meta_data() routine.
-
-
-
-
-
-
-
-
-
-Below is an example of a typical namelist for the update_bc program.
-
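-The sketch below is illustrative only: the namelist group name
-update_bc_nml and every value shown are assumptions made for this example,
-not taken from the code or documentation above ('closest_restart' simply
-echoes the default output filename mentioned earlier).
-
-&update_bc_nml
-   input_restart_file_list = 'filter_in_d01.txt'
-   output_file_name        = 'closest_restart'
-   ens_size                = 3
-   difference_method       = 4
-   use_only_quantities     = 'QTY_TEMPERATURE', 'QTY_U_WIND_COMPONENT'
-   /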
Revision 2801 implements a more robust method
(based on Bolton's Approximation) for calculating dew point.
-This has been further revised to avoid a numerical instability that could lead
-to failed forward operators for dewpoints almost exactly 0 C.
@@ -139,7 +137,7 @@