FV3DYCORE  Version 2.0.0
fv_nesting.F90
1 !***********************************************************************
2 !* GNU Lesser General Public License
3 !*
4 !* This file is part of the FV3 dynamical core.
5 !*
6 !* The FV3 dynamical core is free software: you can redistribute it
7 !* and/or modify it under the terms of the
8 !* GNU Lesser General Public License as published by the
9 !* Free Software Foundation, either version 3 of the License, or
10 !* (at your option) any later version.
11 !*
12 !* The FV3 dynamical core is distributed in the hope that it will be
13 !* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
14 !* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 !* See the GNU General Public License for more details.
16 !*
17 !* You should have received a copy of the GNU Lesser General Public
18 !* License along with the FV3 dynamical core.
19 !* If not, see <http://www.gnu.org/licenses/>.
20 !***********************************************************************
21 
24 
25 #undef MULTI_GASES
26 
28 
29 ! Modules Included:
30 ! <table>
31 ! <tr>
32 ! <th>Module Name</th>
33 ! <th>Functions Included</th>
34 ! </tr>
35 ! <tr>
36 ! <td>boundary_mod</td>
37 ! <td>update_coarse_grid, nested_grid_BC_send, nested_grid_BC_recv, nested_grid_BC_save_proc,
38 ! nested_grid_BC, nested_grid_BC_apply_intT</td>
39 ! </tr>
40 ! <tr>
41 ! <td>constants_mod</td>
42 ! <td>grav, pi=>pi_8, radius, hlv, rdgas, cp_air, rvgas, cp_vapor, kappa</td>
43 ! </tr>
44 ! <tr>
45 ! <td>field_manager_mod</td>
46 ! <td>MODEL_ATMOS</td>
47 ! </tr>
48 ! <tr>
49 ! <td>fv_arrays_mod</td>
50 ! <td>fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type, fv_diag_type,
51 ! fv_nest_BC_type_3D, allocate_fv_nest_BC_type, fv_grid_bounds_type</td>
52 ! </tr>
53 ! <tr>
54 ! <td>fv_diagnostics_mod</td>
55 ! <td>sphum_ll_fix, range_check</td>
56 ! </tr>
57 ! <tr>
58 ! <td>fv_grid_utils_mod</td>
59 ! <td>ptop_min, g_sum, cubed_to_latlon, f_p</td>
60 ! </tr>
61 ! <tr>
62 ! <td>fv_mapz_mod</td>
63 ! <td>mappm, remap_2d</td>
64 ! </tr>
65 ! <tr>
66 ! <td>fv_mp_mod</td>
67 ! <td>is, ie, js, je, isd, ied, jsd, jed, isc, iec, jsc, jec, is_master, mp_reduce_sum</td>
68 ! </tr>
69 ! <tr>
70 ! <td>fv_restart_mod</td>
71 ! <td>d2a_setup, d2c_setup</td>
72 ! </tr>
73 ! <tr>
74 ! <td>fv_sg_mod</td>
75 ! <td>neg_adj3</td>
76 ! </tr>
77 ! <tr>
78 ! <td>fv_timing_mod</td>
79 ! <td>timing_on, timing_off</td>
80 ! </tr>
81 ! <tr>
82 ! <td>init_hydro_mod</td>
83 ! <td>p_var</td>
84 ! </tr>
85 ! <tr>
86 ! <td>mpp_mod</td>
87 ! <td>mpp_sync_self, mpp_sync, mpp_send, mpp_recv, mpp_error, FATAL</td>
88 ! </tr>
89 ! <tr>
90 ! <td>mpp_domains_mod</td>
91 ! <td>mpp_update_domains, mpp_global_field, mpp_get_data_domain, mpp_get_compute_domain,
92 ! mpp_get_global_domain, DGRID_NE, mpp_update_domains, domain2D, mpp_global_sum,
93 ! BITWISE_EFP_SUM, BITWISE_EXACT_SUM</td>
94 ! </tr>
95 ! <tr>
96 ! <td>sw_core_mod</td>
97 ! <td>divergence_corner, divergence_corner_nest</td>
98 ! </tr>
99 ! <tr>
100 ! <td>tracer_manager_mod</td>
101 ! <td>get_tracer_index</td>
102 ! </tr>
103 ! </table>
104 
105  use mpp_domains_mod, only: mpp_update_domains
106  use mpp_domains_mod, only: mpp_global_field
107  use field_manager_mod, only: model_atmos
108  use tracer_manager_mod, only: get_tracer_index
109  use fv_sg_mod, only: neg_adj3
110  use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain, mpp_get_global_domain
111  use mpp_domains_mod, only: agrid, cgrid_ne, dgrid_ne, mpp_update_domains, domain2d
112  use mpp_mod, only: mpp_sync_self, mpp_sync, mpp_send, mpp_recv, mpp_error, fatal, mpp_pe, warning, note
113  use mpp_domains_mod, only: mpp_global_sum, bitwise_efp_sum, bitwise_exact_sum
120  use init_hydro_mod, only: p_var
121  use constants_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, cp_air, rvgas, cp_vapor, kappa
122  use fv_mapz_mod, only: mappm, remap_2d
124  use fv_mp_mod, only: is_master
125  use fv_mp_mod, only: mp_reduce_sum, global_nest_domain
128  use time_manager_mod, only: time_type
129 
130 implicit none
131  logical :: rf_initialized = .false.
132  logical :: bad_range
133  real, allocatable :: rf(:), rw(:)
134  integer :: kmax=1
135  !Arrays for global grid total energy, used for grid nesting
136  real, allocatable :: te_2d_coarse(:,:)
137  real, allocatable :: dp1_coarse(:,:,:)
138 
139  !For nested grid buffers
140  !Individual structures are allocated by nested_grid_BC_recv
142  type(fv_nest_bc_type_3d), allocatable:: q_buf(:)
143 !#ifdef USE_COND
144  real, dimension(:,:,:), allocatable, target :: dum_west, dum_east, dum_north, dum_south
145 !#endif
146 
147 private
149 
150 contains
153  subroutine setup_nested_grid_bcs(npx, npy, npz, zvir, ncnst, &
154  u, v, w, pt, delp, delz,q, uc, vc, &
155 #ifdef USE_COND
156  q_con, &
157 #ifdef MOIST_CAPPA
158  cappa, &
159 #endif
160 #endif
161  nested, inline_q, make_nh, ng, &
162  gridstruct, flagstruct, neststruct, &
163  nest_timestep, tracer_nest_timestep, &
164  domain, parent_grid, bd, nwat, ak, bk)
166 
167  type(fv_grid_bounds_type), intent(IN) :: bd
168  real, intent(IN) :: zvir
169 
170  integer, intent(IN) :: npx, npy, npz
171  integer, intent(IN) :: ncnst, ng, nwat
172  logical, intent(IN) :: inline_q, make_nh,nested
173  real, intent(IN), dimension(npz+1) :: ak, bk
174 
175  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u
176  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v
177  real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1:)
178  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
179  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
180  real, intent(inout) :: delz(bd%is: ,bd%js: ,1:)
181  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst)
182  real, intent(inout) :: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
183  real, intent(inout) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
184 #ifdef USE_COND
185  real, intent(inout) :: q_con( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
186 #ifdef MOIST_CAPPA
187  real, intent(inout) :: cappa( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
188 #endif
189 #endif
190  integer, intent(INOUT) :: nest_timestep, tracer_nest_timestep
191  type(fv_atmos_type), pointer, intent(IN) :: parent_grid
192 
193  type(fv_grid_type), intent(INOUT) :: gridstruct
194  type(fv_flags_type), intent(INOUT) :: flagstruct
195  type(fv_nest_type), intent(INOUT), target :: neststruct
196  type(domain2d), intent(INOUT) :: domain
197  real :: divg(bd%isd:bd%ied+1,bd%jsd:bd%jed+1, npz)
198  real :: ua(bd%isd:bd%ied,bd%jsd:bd%jed)
199  real :: va(bd%isd:bd%ied,bd%jsd:bd%jed)
200  real :: pe_ustag(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz+1)
201  real :: pe_vstag(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz+1)
202  real :: pe_bstag(bd%isd:bd%ied+1,bd%jsd:bd%jed+1,npz+1)
203  real, parameter :: a13 = 1./3.
204 
205  integer :: i,j,k,n,p, sphum, npz_coarse, nnest
206  logical :: do_pd
207 
208  type(fv_nest_bc_type_3d) :: delp_lag_BC, lag_BC, pe_lag_BC, pe_eul_BC
209  type(fv_nest_bc_type_3d) :: lag_u_BC, pe_u_lag_BC, pe_u_eul_BC
210  type(fv_nest_bc_type_3d) :: lag_v_BC, pe_v_lag_BC, pe_v_eul_BC
211  type(fv_nest_bc_type_3d) :: lag_b_BC, pe_b_lag_BC, pe_b_eul_BC
212 
213  !local pointers
214  logical, pointer :: child_grids(:)
215 
216  integer :: is, ie, js, je
217  integer :: isd, ied, jsd, jed
218 
219  is = bd%is
220  ie = bd%ie
221  js = bd%js
222  je = bd%je
223  isd = bd%isd
224  ied = bd%ied
225  jsd = bd%jsd
226  jed = bd%jed
227 
228  child_grids => neststruct%child_grids
229 
230  !IF nested, set up nested grid BCs for time-interpolation
231  !(actually applying the BCs is done in dyn_core)
232 
233  !For multiple grids: Each grid has ONE parent but potentially MULTIPLE nests
234 
235  nest_timestep = 0
236  if (.not. inline_q) tracer_nest_timestep = 0
237 
238 
239  if (neststruct%nested .and. (.not. (neststruct%first_step) .or. make_nh) ) then
240  do_pd = .true.
241  call set_bcs_t0(ncnst, flagstruct%hydrostatic, neststruct)
242  else
243  !On first timestep the t0 BCs are not initialized and may contain garbage
244  do_pd = .false.
245  end if
246 
247  !compute uc/vc for nested-grid BCs
248  !!! CLEANUP: if we compute uc/vc here we don't need to do on the first call of c_sw, right?
249  if (any(neststruct%child_grids)) then
250  call timing_on('COMM_TOTAL')
251  !!! CLEANUP: could we make this a non-blocking operation?
252  !!! Is this needed? It is on the initialization step.
253  call mpp_update_domains(delp, domain) !This is needed to make sure delp is updated for pe calculations
254  call mpp_update_domains(u, v, &
255  domain, gridtype=dgrid_ne, complete=.true.)
256  call timing_off('COMM_TOTAL')
257 !$OMP parallel do default(none) shared(isd,jsd,ied,jed,is,ie,js,je,npx,npy,npz, &
258 !$OMP gridstruct,flagstruct,bd,u,v,uc,vc,nested,divg) &
259 !$OMP private(ua,va)
260  do k=1,npz
261  call d2c_setup(u(isd,jsd,k), v(isd,jsd,k), &
262  ua, va, &
263  uc(isd,jsd,k), vc(isd,jsd,k), flagstruct%nord>0, &
264  isd,ied,jsd,jed, is,ie,js,je, npx,npy, &
265  gridstruct%grid_type, gridstruct%bounded_domain, &
266  gridstruct%se_corner, gridstruct%sw_corner, &
267  gridstruct%ne_corner, gridstruct%nw_corner, &
268  gridstruct%rsin_u, gridstruct%rsin_v, &
269  gridstruct%cosa_s, gridstruct%rsin2 )
270  if (nested) then
271  call divergence_corner_nest(u(isd,jsd,k), v(isd,jsd,k), ua, va, divg(isd,jsd,k), gridstruct, flagstruct, bd)
272  else
273  call divergence_corner(u(isd,jsd,k), v(isd,jsd,k), ua, va, divg(isd,jsd,k), gridstruct, flagstruct, bd)
274  endif
275  end do
276  endif
277 
278  nnest = flagstruct%grid_number - 1
279 
280 !! Nested grid: receive from parent grid (Lagrangian coordinate, npz_coarse)
281  if (neststruct%nested) then
282 
283  npz_coarse = neststruct%parent_grid%npz
284 
285  if (.not. allocated(q_buf)) then
286  allocate(q_buf(ncnst))
287  endif
288 
289  call nested_grid_bc_recv(global_nest_domain, 0, 0, npz_coarse, bd, &
290  delp_buf, nnest)
291  do n=1,ncnst
292  call nested_grid_bc_recv(global_nest_domain, 0, 0, npz_coarse, bd, &
293  q_buf(n), nnest)
294  enddo
295 #ifndef SW_DYNAMICS
296  call nested_grid_bc_recv(global_nest_domain, 0, 0, npz_coarse, bd, &
297  pt_buf, nnest)
298 
299  if (.not. flagstruct%hydrostatic) then
300  call nested_grid_bc_recv(global_nest_domain, 0, 0, npz_coarse, bd, &
301  w_buf, nnest)
302  call nested_grid_bc_recv(global_nest_domain, 0, 0, npz_coarse, bd, &
303  delz_buf, nnest)
304  endif
305 #endif
306  if (neststruct%do_remap_BC(flagstruct%grid_number)) then
307  call nested_grid_bc_recv(global_nest_domain, npz_coarse+1, bd, &
308  pe_u_buf, pe_v_buf, nnest, gridtype=dgrid_ne)
309  call nested_grid_bc_recv(global_nest_domain, 1, 1, npz_coarse+1, bd, &
310  pe_b_buf, nnest)
311  endif
312 
313  call nested_grid_bc_recv(global_nest_domain, npz_coarse, bd, &
314  u_buf, v_buf, nnest, gridtype=dgrid_ne)
315  call nested_grid_bc_recv(global_nest_domain, npz_coarse, bd, &
316  uc_buf, vc_buf, nnest, gridtype=cgrid_ne)
317  call nested_grid_bc_recv(global_nest_domain, 1, 1, npz_coarse, bd, &
318  divg_buf, nnest)
319  endif
320 
321 
322 !! Coarse grid: send to child grids (Lagrangian coordinate, npz_coarse)
323 
324  do p=1,size(child_grids)
325  if (child_grids(p)) then
326  call nested_grid_bc_send(delp, global_nest_domain, 0, 0, p-1)
327  do n=1,ncnst
328  call nested_grid_bc_send(q(:,:,:,n), global_nest_domain, 0, 0, p-1)
329  enddo
330 #ifndef SW_DYNAMICS
331  call nested_grid_bc_send(pt, global_nest_domain, 0, 0, p-1)
332 
333  if (.not. flagstruct%hydrostatic) then
334  call nested_grid_bc_send(w, global_nest_domain, 0, 0, p-1)
335  call nested_grid_bc_send(delz, global_nest_domain, 0, 0, p-1)
336  endif
337 #endif
338 
339  if (neststruct%do_remap_BC(p)) then
340 
341  !Compute and send staggered pressure
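 !Interface pressures at the staggered points are built by integrating delp (averaged to the
 !u, v, and b locations) downward from the model top ak(1); the nest uses these to remap its BCs.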
342  !u points
343 !$OMP parallel do default(none) shared(ak,pe_ustag,delp, &
344 !$OMP is,ie,js,je,npz)
345  do j=js,je+1
346  do i=is,ie
347  pe_ustag(i,j,1) = ak(1)
348  enddo
349  do k=1,npz
350  do i=is,ie
351  pe_ustag(i,j,k+1) = pe_ustag(i,j,k) + 0.5*(delp(i,j,k)+delp(i,j-1,k))
352  enddo
353  enddo
354  enddo
355 
356  !v points
357 !$OMP parallel do default(none) shared(ak,pe_vstag,delp, &
358 !$OMP is,ie,js,je,npz)
359  do j=js,je
360  do i=is,ie+1
361  pe_vstag(i,j,1) = ak(1)
362  enddo
363  do k=1,npz
364  do i=is,ie+1
365  pe_vstag(i,j,k+1) = pe_vstag(i,j,k) + 0.5*(delp(i,j,k)+delp(i-1,j,k))
366  enddo
367  enddo
368  enddo
369  call nested_grid_bc_send(pe_ustag, pe_vstag, global_nest_domain, p-1, gridtype=dgrid_ne)
370 
371  !b points
372 !$OMP parallel do default(none) shared(ak,pe_bstag,delp, &
373 !$OMP is,ie,js,je,npz)
374  do j=js,je+1
375  do i=is,ie+1
376  pe_bstag(i,j,1) = ak(1)
377  enddo
378  enddo
379  !Fill the halo-corner delp so the 4-point average below effectively becomes a 3-point average at the corner
380  if (is == 1 .and. js == 1) then
381  do k=1,npz
382  delp(0,0,k) = a13*(delp(1,1,k) + delp(0,1,k) + delp(1,0,k))
383  enddo
384  endif
385  if (ie == npx-1 .and. js == 1) then
386  do k=1,npz
387  delp(npx,0,k) = a13*(delp(npx-1,1,k) + delp(npx,1,k) + delp(npx-1,0,k))
388  enddo
389  endif
390  if (is == 1 .and. je == npy-1) then
391  do k=1,npz
392  delp(0,npy,k) = a13*(delp(1,npy-1,k) + delp(0,npy-1,k) + delp(1,npy,k))
393  enddo
394  endif
395  if (ie == npx-1 .and. je == npy-1) then
396  do k=1,npz
397  delp(npx,npy,k) = a13*(delp(npx-1,npy-1,k) + delp(npx,npy-1,k) + delp(npx-1,npy,k))
398  enddo
399  endif
400 
401 !$OMP parallel do default(none) shared(ak,pe_bstag,delp, &
402 !$OMP is,ie,js,je,npz)
403  do j=js,je+1
404  do k=1,npz
405  do i=is,ie+1
406  pe_bstag(i,j,k+1) = pe_bstag(i,j,k) + &
407  0.25*(delp(i,j,k)+delp(i-1,j,k)+delp(i,j-1,k)+delp(i-1,j-1,k))
408  enddo
409  enddo
410  enddo
411  call nested_grid_bc_send(pe_bstag, global_nest_domain, 1, 1, p-1)
412 
413  endif
414 
415  call nested_grid_bc_send(u, v, global_nest_domain, p-1, gridtype=dgrid_ne)
416  call nested_grid_bc_send(uc, vc, global_nest_domain, p-1, gridtype=cgrid_ne)
417  call nested_grid_bc_send(divg, global_nest_domain, 1, 1, p-1)
418  endif
419  enddo
420 
421  !Nested grid: do computations
422  ! Lag: coarse grid, npz_coarse, lagrangian coordinate---receive and use save_proc to copy into lag_BCs
423  ! Eul: nested grid, npz, Eulerian (reference) coordinate
424  ! Remapping from Lag to Eul
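 ! For each field below: nested_grid_BC_save_proc interpolates the received coarse-grid buffer
 ! horizontally into a Lagrangian-coordinate BC array, setup_eul_delp_BC/setup_eul_pe_BC build
 ! the corresponding interface pressures, and remap_BC remaps vertically onto the nest's
 ! Eulerian levels via mappm.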
425  if (nested) then
426 
427  if (neststruct%do_remap_BC(flagstruct%grid_number)) then
428 
429  call allocate_fv_nest_bc_type(delp_lag_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse,ng,0,0,0,.false.)
430  call allocate_fv_nest_bc_type(lag_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse,ng,0,0,0,.false.)
431  call allocate_fv_nest_bc_type(pe_lag_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,0,0,.false.)
432  call allocate_fv_nest_bc_type(pe_eul_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1,ng,0,0,0,.false.)
433 
434  call nested_grid_bc_save_proc(global_nest_domain, &
435  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
436  delp_lag_bc, delp_buf, pd_in=do_pd)
437  !The incoming delp is on the coarse grid's Lagrangian coordinate. Re-create the reference (Eulerian) coordinate
438  call setup_eul_delp_bc(delp_lag_bc, neststruct%delp_BC, pe_lag_bc, pe_eul_bc, ak, bk, npx, npy, npz, npz_coarse, parent_grid%ptop, bd)
439 
440  else
441  call nested_grid_bc_save_proc(global_nest_domain, &
442  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
443  neststruct%delp_BC, delp_buf, pd_in=do_pd)
444  endif
445 
446 !!$ do n=1,ncnst
447 !!$ call nested_grid_BC_save_proc(global_nest_domain, &
448 !!$ neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
449 !!$ lag_BC, q_buf(n), pd_in=do_pd)
450 !!$ !This remapping appears to have some trouble with rounding error random noise
451 !!$ call remap_BC(pe_lag_BC, pe_eul_BC, lag_BC, neststruct%q_BC(n), npx, npy, npz, npz_coarse, bd, 0, 0, 0, flagstruct%kord_tr, 'q')
452 !!$ enddo
453 #ifndef SW_DYNAMICS
454  if (neststruct%do_remap_BC(flagstruct%grid_number)) then
455 
456  call nested_grid_bc_save_proc(global_nest_domain, &
457  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
458  lag_bc, pt_buf)
459  !NOTE: need to remap using peln, not pe
460  call remap_bc(pe_lag_bc, pe_eul_bc, lag_bc, neststruct%pt_BC, npx, npy, npz, npz_coarse, bd, 0, 0, 1, abs(flagstruct%kord_tm), 'pt', do_log_pe=.true.)
461 
462  else
463  call nested_grid_bc_save_proc(global_nest_domain, &
464  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
465  neststruct%pt_BC, pt_buf)
466  endif
467 
468 
469  !For whatever reason moving the calls for q BC remapping here avoids problems with cross-restart reproducibility.
470  if (neststruct%do_remap_BC(flagstruct%grid_number)) then
471  do n=1,ncnst
472  call nested_grid_bc_save_proc(global_nest_domain, &
473  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
474  lag_bc, q_buf(n), pd_in=do_pd)
475  call remap_bc(pe_lag_bc, pe_eul_bc, lag_bc, neststruct%q_BC(n), npx, npy, npz, npz_coarse, bd, 0, 0, 0, flagstruct%kord_tr, 'q2')
476  enddo
477  else
478  do n=1,ncnst
479  call nested_grid_bc_save_proc(global_nest_domain, &
480  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
481  neststruct%q_BC(n), q_buf(n), pd_in=do_pd)
482  enddo
483  endif
484 
485  sphum = get_tracer_index(model_atmos, 'sphum')
486  if (flagstruct%hydrostatic) then
487  call setup_pt_bc(neststruct%pt_BC, pe_eul_bc, neststruct%q_BC(sphum), npx, npy, npz, zvir, bd)
488  else
489  if (neststruct%do_remap_BC(flagstruct%grid_number)) then
490 
491  call nested_grid_bc_save_proc(global_nest_domain, &
492  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
493  lag_bc, w_buf)
494  call remap_bc(pe_lag_bc, pe_eul_bc, lag_bc, neststruct%w_BC, npx, npy, npz, npz_coarse, bd, 0, 0, -1, flagstruct%kord_wz, 'w')
495  call nested_grid_bc_save_proc(global_nest_domain, &
496  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
497  lag_bc, delz_buf) !Need a negative-definite method?
498  call remap_delz_bc(pe_lag_bc, pe_eul_bc, delp_lag_bc, lag_bc, neststruct%delp_BC, neststruct%delz_BC, npx, npy, npz, npz_coarse, bd, 0, 0, 1, flagstruct%kord_wz)
499 
500  else
501  call nested_grid_bc_save_proc(global_nest_domain, &
502  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
503  neststruct%w_BC, w_buf)
504  call nested_grid_bc_save_proc(global_nest_domain, &
505  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, &
506  neststruct%delz_BC, delz_buf) !Need a negative-definite method?
507  endif
508 
509  call setup_pt_nh_bc(neststruct%pt_BC, neststruct%delp_BC, neststruct%delz_BC, &
510  neststruct%q_BC(sphum), neststruct%q_BC, ncnst, &
511 #ifdef USE_COND
512  neststruct%q_con_BC, &
513 #ifdef MOIST_CAPPA
514  neststruct%cappa_BC, &
515 #endif
516 #endif
517  npx, npy, npz, zvir, bd)
518  endif
519 
520 #endif
521 
522  !!!NOTE: The following require remapping on STAGGERED grids, which requires additional pressure data
523 
524  if (neststruct%do_remap_BC(flagstruct%grid_number)) then
525 
526 
527  call allocate_fv_nest_bc_type(pe_u_lag_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,0,1,.false.)
528  call allocate_fv_nest_bc_type(pe_u_eul_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1 ,ng,0,0,1,.false.)
529  call allocate_fv_nest_bc_type(lag_u_bc, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse ,ng,0,0,1,.false.)
530  call allocate_fv_nest_bc_type(pe_v_lag_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,1,0,.false.)
531  call allocate_fv_nest_bc_type(pe_v_eul_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1 ,ng,0,1,0,.false.)
532  call allocate_fv_nest_bc_type(lag_v_bc, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse ,ng,0,1,0,.false.)
533  call allocate_fv_nest_bc_type(pe_b_lag_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,1,1,.false.)
534  call allocate_fv_nest_bc_type(pe_b_eul_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1 ,ng,0,1,1,.false.)
535  call allocate_fv_nest_bc_type(lag_b_bc, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse ,ng,0,1,1,.false.)
536 
537  call nested_grid_bc_save_proc(global_nest_domain, &
538  neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse+1, bd, &
539  pe_u_lag_bc, pe_u_buf)
540  call setup_eul_pe_bc(pe_u_lag_bc, pe_u_eul_bc, ak, bk, npx, npy, npz, npz_coarse, 0, 1, bd)
541  call nested_grid_bc_save_proc(global_nest_domain, &
542  neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse+1, bd, &
543  pe_v_lag_bc, pe_v_buf)
544  call setup_eul_pe_bc(pe_v_lag_bc, pe_v_eul_bc, ak, bk, npx, npy, npz, npz_coarse, 1, 0, bd)
545  call nested_grid_bc_save_proc(global_nest_domain, &
546  neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz_coarse+1, bd, &
547  pe_b_lag_bc, pe_b_buf)
548  call setup_eul_pe_bc(pe_b_lag_bc, pe_b_eul_bc, ak, bk, npx, npy, npz, npz_coarse, 1, 1, bd)
549 
550  call nested_grid_bc_save_proc(global_nest_domain, &
551  neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, &
552  lag_u_bc, u_buf)
553  call remap_bc(pe_u_lag_bc, pe_u_eul_bc, lag_u_bc, neststruct%u_BC, npx, npy, npz, npz_coarse, bd, 0, 1, -1, flagstruct%kord_mt, 'u')
554  call nested_grid_bc_save_proc(global_nest_domain, &
555  neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, &
556  lag_u_bc, vc_buf)
557  call remap_bc(pe_u_lag_bc, pe_u_eul_bc, lag_u_bc, neststruct%vc_BC, npx, npy, npz, npz_coarse, bd, 0, 1, -1, flagstruct%kord_mt, 'vc')
558  call nested_grid_bc_save_proc(global_nest_domain, &
559  neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, &
560  lag_v_bc, v_buf)
561  call remap_bc(pe_v_lag_bc, pe_v_eul_bc, lag_v_bc, neststruct%v_BC, npx, npy, npz, npz_coarse, bd, 1, 0, -1, flagstruct%kord_mt, 'v')
562  call nested_grid_bc_save_proc(global_nest_domain, &
563  neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, &
564  lag_v_bc, uc_buf)
565  call remap_bc(pe_v_lag_bc, pe_v_eul_bc, lag_v_bc, neststruct%uc_BC, npx, npy, npz, npz_coarse, bd, 1, 0, -1, flagstruct%kord_mt, 'uc')
566  call nested_grid_bc_save_proc(global_nest_domain, &
567  neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz_coarse, bd, &
568  lag_b_bc, divg_buf)
569  call remap_bc(pe_b_lag_bc, pe_b_eul_bc, lag_b_bc, neststruct%divg_BC, npx, npy, npz, npz_coarse, bd, 1, 1, -1, flagstruct%kord_mt, 'divg')
570 
571  call deallocate_fv_nest_bc_type(delp_lag_bc)
572  call deallocate_fv_nest_bc_type(lag_bc)
573  call deallocate_fv_nest_bc_type(pe_lag_bc)
574  call deallocate_fv_nest_bc_type(pe_eul_bc)
575 
576  call deallocate_fv_nest_bc_type(pe_u_lag_bc)
577  call deallocate_fv_nest_bc_type(pe_u_eul_bc)
578  call deallocate_fv_nest_bc_type(lag_u_bc)
579  call deallocate_fv_nest_bc_type(pe_v_lag_bc)
580  call deallocate_fv_nest_bc_type(pe_v_eul_bc)
581  call deallocate_fv_nest_bc_type(lag_v_bc)
582  call deallocate_fv_nest_bc_type(pe_b_lag_bc)
583  call deallocate_fv_nest_bc_type(pe_b_eul_bc)
584  call deallocate_fv_nest_bc_type(lag_b_bc)
585 
586  else
587 
588  call nested_grid_bc_save_proc(global_nest_domain, &
589  neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, &
590  neststruct%u_BC, u_buf)
591  call nested_grid_bc_save_proc(global_nest_domain, &
592  neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, &
593  neststruct%vc_BC, vc_buf)
594  call nested_grid_bc_save_proc(global_nest_domain, &
595  neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, &
596  neststruct%v_BC, v_buf)
597  call nested_grid_bc_save_proc(global_nest_domain, &
598  neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, &
599  neststruct%uc_BC, uc_buf)
600  call nested_grid_bc_save_proc(global_nest_domain, &
601  neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz_coarse, bd, &
602  neststruct%divg_BC, divg_buf)
603 
604  endif
605 
606  !Correct halo values have now been set up for BCs; we can go ahead and apply them too
607  call nested_grid_bc_apply_intt(delp, &
608  0, 0, npx, npy, npz, bd, 1., 1., &
609  neststruct%delp_BC, bctype=neststruct%nestbctype )
610  do n=1,ncnst
611  call nested_grid_bc_apply_intt(q(:,:,:,n), &
612  0, 0, npx, npy, npz, bd, 1., 1., &
613  neststruct%q_BC(n), bctype=neststruct%nestbctype )
614  enddo
615 #ifndef SW_DYNAMICS
616  call nested_grid_bc_apply_intt(pt, &
617  0, 0, npx, npy, npz, bd, 1., 1., &
618  neststruct%pt_BC, bctype=neststruct%nestbctype )
619  if (.not. flagstruct%hydrostatic) then
620  call nested_grid_bc_apply_intt(w, &
621  0, 0, npx, npy, npz, bd, 1., 1., &
622  neststruct%w_BC, bctype=neststruct%nestbctype )
623  !Removed halo from delz --- BCs now directly applied in nh_BC --- lmh june 2018
624 !!$ call nested_grid_BC_apply_intT(delz, &
625 !!$ 0, 0, npx, npy, npz, bd, 1., 1., &
626 !!$ neststruct%delz_BC, bctype=neststruct%nestbctype )
627  endif
628 #ifdef USE_COND
629  call nested_grid_bc_apply_intt(q_con, &
630  0, 0, npx, npy, npz, bd, 1., 1., &
631  neststruct%q_con_BC, bctype=neststruct%nestbctype )
632 #ifdef MOIST_CAPPA
633  call nested_grid_bc_apply_intt(cappa, &
634  0, 0, npx, npy, npz, bd, 1., 1., &
635  neststruct%cappa_BC, bctype=neststruct%nestbctype )
636 #endif
637 #endif
638 #endif
639  call nested_grid_bc_apply_intt(u, &
640  0, 1, npx, npy, npz, bd, 1., 1., &
641  neststruct%u_BC, bctype=neststruct%nestbctype )
642  call nested_grid_bc_apply_intt(vc, &
643  0, 1, npx, npy, npz, bd, 1., 1., &
644  neststruct%vc_BC, bctype=neststruct%nestbctype )
645  call nested_grid_bc_apply_intt(v, &
646  1, 0, npx, npy, npz, bd, 1., 1., &
647  neststruct%v_BC, bctype=neststruct%nestbctype )
648  call nested_grid_bc_apply_intt(uc, &
649  1, 0, npx, npy, npz, bd, 1., 1., &
650  neststruct%uc_BC, bctype=neststruct%nestbctype )
651  !!!NOTE: Divg not available here but not needed
652  !!! until dyn_core anyway.
653 !!$ call nested_grid_BC_apply_intT(divg, &
654 !!$ 1, 1, npx, npy, npz, bd, 1., 1., &
655 !!$ neststruct%divg_BC, bctype=neststruct%nestbctype )
656 
657  !Update domains needed for Rayleigh damping
658  if (.not. flagstruct%hydrostatic) call mpp_update_domains(w, domain)
659  call mpp_update_domains(u, v, domain, gridtype=dgrid_ne, complete=.true.)
660 
661  endif
662 
663  if (neststruct%first_step) then
664  if (neststruct%nested) call set_bcs_t0(ncnst, flagstruct%hydrostatic, neststruct)
665  neststruct%first_step = .false.
666  if (.not. flagstruct%hydrostatic) flagstruct%make_nh= .false.
667  else if (flagstruct%make_nh) then
668  if (neststruct%nested) call set_nh_bcs_t0(neststruct)
669  flagstruct%make_nh= .false.
670  endif
671 
672  !Unnecessary?
673 !!$ if ( neststruct%nested .and. .not. neststruct%divg_BC%initialized) then
674 !!$ neststruct%divg_BC%east_t0 = neststruct%divg_BC%east_t1
675 !!$ neststruct%divg_BC%west_t0 = neststruct%divg_BC%west_t1
676 !!$ neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1
677 !!$ neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1
678 !!$ neststruct%divg_BC%initialized = .true.
679 !!$ endif
680 
681 
682  call mpp_sync_self
683 
684  end subroutine setup_nested_grid_bcs
685 
686  subroutine set_physics_bcs(ps, u_dt, v_dt, flagstruct, gridstruct, neststruct, npx, npy, npz, ng, ak, bk, bd)
688  type(fv_grid_bounds_type), intent(IN) :: bd
689  type(fv_flags_type), intent(IN) :: flagstruct
690  type(fv_nest_type), intent(INOUT), target :: neststruct
691  type(fv_grid_type) :: gridstruct
692  integer, intent(IN) :: npx, npy, npz, ng
693  real, intent(IN), dimension(npz+1) :: ak, bk
694  real, intent(INOUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed) :: ps
695  real, intent(INOUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz) :: u_dt, v_dt
696  real, dimension(1,1) :: parent_ps ! dummy variable for nesting
697  type(fv_nest_bc_type_3d) :: u_dt_buf, v_dt_buf, pe_src_BC, pe_dst_BC!, var_BC
698 
699  integer :: n, npz_coarse, nnest
700  integer :: is, ie, js, je
701  integer :: isd, ied, jsd, jed
702  real :: dum(1,1,1)
703 
704  is = bd%is
705  ie = bd%ie
706  js = bd%js
707  je = bd%je
708  isd = bd%isd
709  ied = bd%ied
710  jsd = bd%jsd
711  jed = bd%jed
712 
713  nnest = flagstruct%grid_number - 1
714 
715  if (gridstruct%nested) then
716 
717  if (neststruct%do_remap_BC(flagstruct%grid_number)) then
718 
719  npz_coarse = neststruct%parent_grid%npz
720 
721  !Both nested and coarse grids assumed on Eulerian coordinates at this point
722  !Only need to fetch ps to form pressure levels
723  !Note also u_dt and v_dt are unstaggered
724  call nested_grid_bc(ps, parent_ps, global_nest_domain, neststruct%ind_h, neststruct%wt_h, 0, 0, &
725  npx, npy, bd, 1, npx-1, 1, npy-1)
726  call nested_grid_bc_recv(global_nest_domain, npz_coarse, bd, u_dt_buf, v_dt_buf, nnest, gridtype=agrid)
727 
728  call allocate_fv_nest_bc_type(pe_src_bc, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,0,0,.false.)
729  call allocate_fv_nest_bc_type(pe_dst_bc, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1,ng,0,0,0,.false.)
730 
731  call copy_ps_bc(ps, pe_src_bc, npx, npy, npz_coarse, 0, 0, bd)
732  call setup_eul_pe_bc(pe_src_bc, pe_dst_bc, ak, bk, npx, npy, npz, npz_coarse, 0, 0, bd, &
733  make_src_in=.true., ak_src=neststruct%parent_grid%ak, bk_src=neststruct%parent_grid%bk)
734 
735  !Note that iv=-1 is used for remapping winds, which sets the lower reconstructed values to 0 if
736  ! there is a 2dx signal. Is this the best for **tendencies** though?? Probably not---so iv=1 here
737  call set_bc_direct( pe_src_bc, pe_dst_bc, u_dt_buf, u_dt, neststruct, npx, npy, npz, npz_coarse, ng, bd, 0, 0, 1, flagstruct%kord_mt)
738  call set_bc_direct( pe_src_bc, pe_dst_bc, v_dt_buf, v_dt, neststruct, npx, npy, npz, npz_coarse, ng, bd, 0, 0, 1, flagstruct%kord_mt)
739 
740  call deallocate_fv_nest_bc_type(pe_src_bc)
741  call deallocate_fv_nest_bc_type(pe_dst_bc)
742 
743  else
744  call nested_grid_bc(u_dt, v_dt, dum, dum, global_nest_domain, neststruct%ind_h, neststruct%ind_h, &
745  neststruct%wt_h, neststruct%wt_h, 0, 0, 0, 0, npx, npy, npz, bd, 1, npx-1, 1, npy-1, nnest, gridtype=agrid)
746  endif
747 
748  endif
749  do n=1,size(neststruct%child_grids)
750  if (neststruct%child_grids(n)) then
751  if (neststruct%do_remap_BC(n)) &
752  call nested_grid_bc(ps, global_nest_domain, 0, 0, n-1)
753  call nested_grid_bc_send(u_dt, v_dt, global_nest_domain, n-1, gridtype=agrid)
754  endif
755  enddo
756 
757 
758  end subroutine set_physics_bcs
759 
760  subroutine set_bc_direct( pe_src_BC, pe_dst_BC, buf, var, neststruct, npx, npy, npz, npz_coarse, ng, bd, istag, jstag, iv, kord)
762  type(fv_grid_bounds_type), intent(IN) :: bd
763  type(fv_nest_type), intent(INOUT) :: neststruct
764  integer, intent(IN) :: npx, npy, npz, npz_coarse, ng, istag, jstag, iv, kord
765  real, intent(INOUT), dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz) :: var
766  type(fv_nest_bc_type_3d), intent(INOUT) :: buf, pe_src_BC, pe_dst_BC
767  type(fv_nest_bc_type_3d) :: var_BC
768 
769 
770  call allocate_fv_nest_bc_type(var_bc,bd%is,bd%ie,bd%js,bd%je,bd%isd,bd%ied,bd%jsd,bd%jed,npx,npy,npz_coarse,ng,0,istag,jstag,.false.)
771 
772  call nested_grid_bc_save_proc(global_nest_domain, neststruct%ind_h, neststruct%wt_h, istag, jstag, &
773  npx, npy, npz_coarse, bd, var_bc, buf)
774  call remap_bc_direct(pe_src_bc, pe_dst_bc, var_bc, var, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord)
775 
776  call deallocate_fv_nest_bc_type(var_bc)
777 
778 
779  end subroutine set_bc_direct
780 
781  subroutine setup_pt_bc(pt_BC, pe_eul_BC, sphum_BC, npx, npy, npz, zvir, bd)
783  type(fv_grid_bounds_type), intent(IN) :: bd
784  type(fv_nest_bc_type_3d), intent(IN) :: pe_eul_BC, sphum_BC
785  type(fv_nest_bc_type_3d), intent(INOUT) :: pt_BC
786  integer, intent(IN) :: npx, npy, npz
787  real, intent(IN) :: zvir
788 
789  integer :: istart, iend
790 
791  integer :: is, ie, js, je
792  integer :: isd, ied, jsd, jed
793 
794  is = bd%is
795  ie = bd%ie
796  js = bd%js
797  je = bd%je
798  isd = bd%isd
799  ied = bd%ied
800  jsd = bd%jsd
801  jed = bd%jed
802 
803  if (is == 1) then
804  call setup_pt_bc_k(pt_bc%west_t1, sphum_bc%west_t1, pe_eul_bc%west_t1, zvir, isd, ied, isd, 0, jsd, jed, npz)
805  end if
806 
807  if (js == 1) then
808  if (is == 1) then
809  istart = is
810  else
811  istart = isd
812  end if
813  if (ie == npx-1) then
814  iend = ie
815  else
816  iend = ied
817  end if
818 
819  call setup_pt_bc_k(pt_bc%south_t1, sphum_bc%south_t1, pe_eul_bc%south_t1, zvir, isd, ied, istart, iend, jsd, 0, npz)
820  end if
821 
822 
823  if (ie == npx-1) then
824  call setup_pt_bc_k(pt_bc%east_t1, sphum_bc%east_t1, pe_eul_bc%east_t1, zvir, isd, ied, npx, ied, jsd, jed, npz)
825  end if
826 
827  if (je == npy-1) then
828  if (is == 1) then
829  istart = is
830  else
831  istart = isd
832  end if
833  if (ie == npx-1) then
834  iend = ie
835  else
836  iend = ied
837  end if
838 
839  call setup_pt_bc_k(pt_bc%north_t1, sphum_bc%north_t1, pe_eul_bc%north_t1, zvir, isd, ied, istart, iend, npy, jed, npz)
840  end if
841 
842  end subroutine setup_pt_bc
843 
844 
845 !!!! A NOTE ON NOMENCLATURE
846 !!!! Originally the BC arrays were bounded by isd and ied in the i-direction.
847 !!!! However, these were NOT intended to delineate the dimensions of the data domain,
848 !!!! but rather those of the BC arrays themselves. This is confusing, especially in other locations
849 !!!! where BCs and data arrays are both present.
850  subroutine setup_pt_bc_k(ptBC, sphumBC, peBC, zvir, isd_BC, ied_BC, istart, iend, jstart, jend, npz)
852  integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz
853  real, intent(IN) :: zvir
854  real, intent(INOUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: ptBC
855  real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz) :: sphumBC
856  real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz+1) :: peBC
857 
858  integer :: i,j,k
859  real :: pealn, pebln, rpkz
860 
861 !Assumes dry kappa
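 !rpkz is the reciprocal of the layer-mean p**kappa computed from the interface pressures;
 !multiplying by rpkz*(1+zvir*sphum) converts temperature to the virtual-potential-temperature
 !form of pt used by the hydrostatic solver.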
862 !$OMP parallel do default(none) shared(peBC,ptBC,zvir,sphumBC, &
863 !$OMP istart,iend,jstart,jend,npz) &
864 !$OMP private(pealn,pebln,rpkz)
865  do k=1,npz
866  do j=jstart,jend
867  do i=istart,iend
868  pealn = log(pebc(i,j,k))
869  pebln = log(pebc(i,j,k+1))
870 
871  rpkz = kappa*(pebln - pealn)/(exp(kappa*pebln)-exp(kappa*pealn) )
872 
873  ptbc(i,j,k) = ptbc(i,j,k)*rpkz * &
874  (1.+zvir*sphumbc(i,j,k))
875  enddo
876  enddo
877  enddo
878 
879  end subroutine setup_pt_bc_k
880 
881  subroutine setup_eul_delp_bc(delp_lag_BC, delp_eul_BC, pe_lag_BC, pe_eul_BC, ak_dst, bk_dst, npx, npy, npz, npz_coarse, ptop_src, bd)
883  type(fv_grid_bounds_type), intent(IN) :: bd
884  type(fv_nest_bc_type_3d), intent(INOUT), target :: delp_lag_BC
885  type(fv_nest_bc_type_3d), intent(INOUT), target :: delp_eul_BC, pe_lag_BC, pe_eul_BC
886  integer, intent(IN) :: npx, npy, npz, npz_coarse
887  real, intent(IN), dimension(npz+1) :: ak_dst, bk_dst
888  real, intent(IN) :: ptop_src
889 
890  integer :: i,j,k, istart, iend
891 
892  integer :: is, ie, js, je
893  integer :: isd, ied, jsd, jed
894 
895  is = bd%is
896  ie = bd%ie
897  js = bd%js
898  je = bd%je
899  isd = bd%isd
900  ied = bd%ied
901  jsd = bd%jsd
902  jed = bd%jed
903 
904  if (is == 1) then
905  call setup_eul_delp_bc_k(delp_lag_bc%west_t1, delp_eul_bc%west_t1, pe_lag_bc%west_t1, pe_eul_bc%west_t1, &
906  ptop_src, ak_dst, bk_dst, isd, 0, isd, 0, jsd, jed, npz, npz_coarse)
907  end if
908 
909  if (ie == npx-1) then
910  call setup_eul_delp_bc_k(delp_lag_bc%east_t1, delp_eul_bc%east_t1, pe_lag_bc%east_t1, pe_eul_bc%east_t1, &
911  ptop_src, ak_dst, bk_dst, npx, ied, npx, ied, jsd, jed, npz, npz_coarse)
912  end if
913 
914  if (is == 1) then
915  istart = is
916  else
917  istart = isd
918  end if
919  if (ie == npx-1) then
920  iend = ie
921  else
922  iend = ied
923  end if
924 
925  if (js == 1) then
926  call setup_eul_delp_bc_k(delp_lag_bc%south_t1, delp_eul_bc%south_t1, pe_lag_bc%south_t1, pe_eul_bc%south_t1, &
927  ptop_src, ak_dst, bk_dst, isd, ied, istart, iend, jsd, 0, npz, npz_coarse)
928  end if
929 
930  if (je == npy-1) then
931  call setup_eul_delp_bc_k(delp_lag_bc%north_t1, delp_eul_bc%north_t1, pe_lag_bc%north_t1, pe_eul_bc%north_t1, &
932  ptop_src, ak_dst, bk_dst, isd, ied, istart, iend, npy, jed, npz, npz_coarse)
933  end if
934 
935  end subroutine setup_eul_delp_bc
936 
937  subroutine setup_eul_delp_bc_k(delplagBC, delpeulBC, pelagBC, peeulBC, ptop_src, ak_dst, bk_dst, isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse)
939  integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse
940  real, intent(INOUT) :: delplagBC(isd_bc:ied_bc,jstart:jend,npz_coarse), pelagBC(isd_bc:ied_bc,jstart:jend,npz_coarse+1)
941  real, intent(INOUT) :: delpeulBC(isd_bc:ied_bc,jstart:jend,npz), peeulBC(isd_bc:ied_bc,jstart:jend,npz+1)
942  real, intent(IN) :: ptop_src, ak_dst(npz+1), bk_dst(npz+1)
943 
944  integer :: i,j,k
945 
946  character(len=120) :: errstring
947 
948 
949 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,pelagBC,ptop_src)
950  do j=jstart,jend
951  do i=istart,iend
952  pelagbc(i,j,1) = ptop_src
953  enddo
954  enddo
955 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_coarse,pelagBC,delplagBC)
956  do j=jstart,jend
957  do k=1,npz_coarse
958  do i=istart,iend
959  pelagbc(i,j,k+1) = pelagbc(i,j,k) + delplagbc(i,j,k)
960  end do
961  end do
962  end do
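 !Eulerian interface pressures are rebuilt hybrid-sigma style from the Lagrangian surface
 !pressure: pe_eul(k) = ak_dst(k) + pe_lag(npz_coarse+1)*bk_dst(k).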
963 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_coarse,peeulBC,pelagBC,ak_dst,bk_dst)
964  do k=1,npz+1
965  do j=jstart,jend
966  do i=istart,iend
967  peeulbc(i,j,k) = ak_dst(k) + pelagbc(i,j,npz_coarse+1)*bk_dst(k)
968  enddo
969  enddo
970  enddo
971 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,peeulBC,delpeulBC)
972  do k=1,npz
973  do j=jstart,jend
974  do i=istart,iend
975  delpeulbc(i,j,k) = peeulbc(i,j,k+1) - peeulbc(i,j,k)
976  enddo
977  enddo
978  enddo
979 
980 !!$!!! DEBUG CODE
981 !!$ !If more than a few percent difference then log the error
982 !!$ do k=1,npz
983 !!$ do j=jstart,jend
984 !!$ do i=istart,iend
985 !!$ if (delpeulBC(i,j,k) <= 0.) then
986 !!$ write(errstring,'(3I5, 3(2x, G))'), i, j, k, pelagBC(i,j,k), peeulBC(i,j,k)
987 !!$ call mpp_error(WARNING, ' Invalid pressure BC at '//errstring)
988 !!$ else if (abs( peeulBC(i,j,k) - pelagBC(i,j,k)) > 100.0 ) then
989 !!$ write(errstring,'(3I5, 3(2x, G))'), i, j, k, pelagBC(i,j,k), peeulBC(i,j,k)
990 !!$ call mpp_error(WARNING, ' Remap BC: pressure deviation at '//errstring)
991 !!$ endif
992 !!$ enddo
993 !!$ enddo
994 !!$ enddo
995 !!$!!! END DEBUG CODE
996 
997  end subroutine setup_eul_delp_bc_k
998 
999  subroutine copy_ps_bc(ps, pe_BC, npx, npy, npz, istag, jstag, bd)
1001  integer, intent(IN) :: npx, npy, npz, istag, jstag
1002  type(fv_grid_bounds_type), intent(IN) :: bd
1003  real, intent(IN) :: ps(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag)
1004  type(fv_nest_bc_type_3d), intent(INOUT) :: pe_BC
1005 
1006  integer :: i,j,k, istart, iend
1007 
1008  integer :: is, ie, js, je
1009  integer :: isd, ied, jsd, jed
1010 
1011  is = bd%is
1012  ie = bd%ie
1013  js = bd%js
1014  je = bd%je
1015  isd = bd%isd
1016  ied = bd%ied
1017  jsd = bd%jsd
1018  jed = bd%jed
1019 
1020  if (is == 1) then
1021 !$OMP parallel do default(none) shared(isd,jsd,jed,jstag,npz,pe_BC,ps)
1022  do j=jsd,jed+jstag
1023  do i=isd,0
1024  pe_bc%west_t1(i,j,npz+1) = ps(i,j)
1025  enddo
1026  enddo
1027  end if
1028 
1029  if (ie == npx-1) then
1030 !$OMP parallel do default(none) shared(npx,ied,istag,jsd,jed,jstag,npz,pe_BC,ps)
1031  do j=jsd,jed+jstag
1032  do i=npx+istag,ied+istag
1033  pe_bc%east_t1(i,j,npz+1) = ps(i,j)
1034  enddo
1035  enddo
1036  end if
1037 
1038  if (is == 1) then
1039  istart = is
1040  else
1041  istart = isd
1042  end if
1043  if (ie == npx-1) then
1044  iend = ie
1045  else
1046  iend = ied
1047  end if
1048 
1049  if (js == 1) then
1050 !$OMP parallel do default(none) shared(isd,ied,istag,jsd,npz,pe_BC,ps)
1051  do j=jsd,0
1052  do i=isd,ied+istag
1053  pe_bc%south_t1(i,j,npz+1) = ps(i,j)
1054  enddo
1055  enddo
1056  end if
1057 
1058  if (je == npy-1) then
1059 !$OMP parallel do default(none) shared(isd,ied,istag,npy,jed,jstag,npz,pe_BC,ps)
1060  do j=npy+jstag,jed+jstag
1061  do i=isd,ied+istag
1062  pe_bc%north_t1(i,j,npz+1) = ps(i,j)
1063  enddo
1064  enddo
1065  end if
1066 
1067  end subroutine copy_ps_bc
1068 
1069 !In this routine, the pe_*_BC arrays should already have PS filled in on the npz+1 level
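!The routine below rebuilds full interface-pressure columns from that surface pressure using the
!destination (and, when make_src_in is set, the source) ak/bk coefficients: pe(k) = ak(k) + ps*bk(k).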
1070  subroutine setup_eul_pe_bc(pe_src_BC, pe_eul_BC, ak_dst, bk_dst, npx, npy, npz, npz_src, istag, jstag, bd, make_src_in, ak_src, bk_src)
1072  type(fv_grid_bounds_type), intent(IN) :: bd
1073  type(fv_nest_bc_type_3d), intent(INOUT), target :: pe_src_BC, pe_eul_BC
1074  integer, intent(IN) :: npx, npy, npz, npz_src, istag, jstag
1075  real, intent(IN), dimension(npz+1) :: ak_dst, bk_dst
1076  logical, intent(IN), OPTIONAL :: make_src_in
1077  real, intent(IN), OPTIONAL :: ak_src(npz_src+1), bk_src(npz_src+1)
1078 
1079  logical :: make_src
1080 
1081  integer :: i,j,k, istart, iend
1082 
1083  integer :: is, ie, js, je
1084  integer :: isd, ied, jsd, jed
1085 
1086  is = bd%is
1087  ie = bd%ie
1088  js = bd%js
1089  je = bd%je
1090  isd = bd%isd
1091  ied = bd%ied
1092  jsd = bd%jsd
1093  jed = bd%jed
1094 
1095  make_src = .false.
1096  if (present(make_src_in)) make_src = make_src_in
1097 
1098  if (is == 1) then
1099  call setup_eul_pe_bc_k(pe_src_bc%west_t1, pe_eul_bc%west_t1, ak_dst, bk_dst, isd, 0, isd, 0, jsd, jed+jstag, npz, npz_src, &
1100  make_src, ak_src, bk_src)
1101  end if
1102 
1103  if (ie == npx-1) then
1104  call setup_eul_pe_bc_k(pe_src_bc%east_t1, pe_eul_bc%east_t1, ak_dst, bk_dst, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz, npz_src, &
1105  make_src, ak_src, bk_src)
1106  end if
1107 
1108  if (is == 1) then
1109  istart = is
1110  else
1111  istart = isd
1112  end if
1113  if (ie == npx-1) then
1114  iend = ie
1115  else
1116  iend = ied
1117  end if
1118 
1119  if (js == 1) then
1120  call setup_eul_pe_bc_k(pe_src_bc%south_t1, pe_eul_bc%south_t1, ak_dst, bk_dst, isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_src, &
1121  make_src, ak_src, bk_src)
1122  end if
1123 
1124  if (je == npy-1) then
1125  call setup_eul_pe_bc_k(pe_src_bc%north_t1, pe_eul_bc%north_t1, ak_dst, bk_dst, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_src, &
1126  make_src, ak_src, bk_src)
1127  end if
1128 
1129  end subroutine setup_eul_pe_bc
1130 
1131  subroutine setup_eul_pe_bc_k(pesrcBC, peeulBC, ak_dst, bk_dst, isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_src, make_src, ak_src, bk_src)
1133  integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_src
1134  real, intent(INOUT) :: pesrcBC(isd_bc:ied_bc,jstart:jend,npz_src+1)
1135  real, intent(INOUT) :: peeulBC(isd_bc:ied_bc,jstart:jend,npz+1)
1136  real, intent(IN) :: ak_dst(npz+1), bk_dst(npz+1)
1137  logical, intent(IN) :: make_src
1138  real, intent(IN) :: ak_src(npz_src+1), bk_src(npz_src+1)
1139 
1140  integer :: i,j,k
1141 
1142  character(len=120) :: errstring
1143 
1144 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_src,peeulBC,ak_dst,pesrcBC,bk_dst)
1145  do k=1,npz+1
1146  do j=jstart,jend
1147  do i=istart,iend
1148  peeulbc(i,j,k) = ak_dst(k) + pesrcbc(i,j,npz_src+1)*bk_dst(k)
1149  enddo
1150  enddo
1151  enddo
1152 
1153  if (make_src) then
1154 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,pesrcBC,ak_src,bk_src)
1155  do k=1,npz_src+1
1156  do j=jstart,jend
1157  do i=istart,iend
1158  pesrcbc(i,j,k) = ak_src(k) + pesrcbc(i,j,npz_src+1)*bk_src(k)
1159  enddo
1160  enddo
1161  enddo
1162  endif
1163 
1164 
1165  end subroutine setup_eul_pe_bc_k
1166 
1167  subroutine remap_bc(pe_lag_BC, pe_eul_BC, var_lag_BC, var_eul_BC, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord, varname, do_log_pe)
1169  type(fv_grid_bounds_type), intent(IN) :: bd
1170  type(fv_nest_bc_type_3d), intent(INOUT), target :: pe_lag_BC, var_lag_BC
1171  type(fv_nest_bc_type_3d), intent(INOUT), target :: pe_eul_BC, var_eul_BC
1172  integer, intent(IN) :: npx, npy, npz, npz_coarse, istag, jstag, iv, kord
1173  character(len=*), intent(IN) :: varname
1174  logical, intent(IN), OPTIONAL :: do_log_pe
1175 
1176  logical :: log_pe = .false.
1177 
1178  integer :: i,j,k, istart, iend
1179 
1180  integer :: is, ie, js, je
1181  integer :: isd, ied, jsd, jed
1182 
1183  is = bd%is
1184  ie = bd%ie
1185  js = bd%js
1186  je = bd%je
1187  isd = bd%isd
1188  ied = bd%ied
1189  jsd = bd%jsd
1190  jed = bd%jed
1191 
1192  if (present(do_log_pe)) log_pe = do_log_pe
1193 
1194  if (is == 1) then
1195  call remap_bc_k(pe_lag_bc%west_t1, pe_eul_bc%west_t1, var_lag_bc%west_t1, var_eul_bc%west_t1, isd, 0, isd, 0, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
1196  end if
1197 
1198  if (ie == npx-1) then
1199  call remap_bc_k(pe_lag_bc%east_t1, pe_eul_bc%east_t1, var_lag_bc%east_t1, var_eul_bc%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
1200  end if
1201 
1202  if (is == 1) then
1203  istart = is
1204  else
1205  istart = isd
1206  end if
1207  if (ie == npx-1) then
1208  iend = ie
1209  else
1210  iend = ied
1211  end if
1212 
1213  if (js == 1) then
1214  call remap_bc_k(pe_lag_bc%south_t1, pe_eul_bc%south_t1, var_lag_bc%south_t1, var_eul_bc%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_coarse, iv, kord, log_pe)
1215  end if
1216 
1217  if (je == npy-1) then
1218  call remap_bc_k(pe_lag_bc%north_t1, pe_eul_bc%north_t1, var_lag_bc%north_t1, var_eul_bc%north_t1, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
1219  end if
1220 
1221  end subroutine remap_bc
1222 
1223  subroutine remap_bc_direct(pe_lag_BC, pe_eul_BC, var_lag_BC, var, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord, do_log_pe)
1225  type(fv_grid_bounds_type), intent(IN) :: bd
1226  integer, intent(IN) :: npx, npy, npz, npz_coarse, istag, jstag, iv, kord
1227  type(fv_nest_bc_type_3d), intent(INOUT), target :: pe_lag_BC, var_lag_BC
1228  type(fv_nest_bc_type_3d), intent(INOUT), target :: pe_eul_BC
1229  real, intent(INOUT) :: var(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz)
1230  logical, intent(IN), OPTIONAL :: do_log_pe
1231 
1232  logical :: log_pe = .false.
1233 
1234  integer :: i,j,k, istart, iend
1235 
1236  integer :: is, ie, js, je
1237  integer :: isd, ied, jsd, jed
1238 
1239  is = bd%is
1240  ie = bd%ie
1241  js = bd%js
1242  je = bd%je
1243  isd = bd%isd
1244  ied = bd%ied
1245  jsd = bd%jsd
1246  jed = bd%jed
1247 
1248  if (present(do_log_pe)) log_pe = do_log_pe
1249 
1250  if (is == 1) then
1251  !I was unable to figure out how to do pass-by-memory referencing on parts of the 3D var array,
1252  ! so instead I am doing an inefficient copy and copy-back. --- lmh 14jun17
1253  call remap_bc_k(pe_lag_bc%west_t1, pe_eul_bc%west_t1, var_lag_bc%west_t1, var(isd:0,jsd:jed+jstag,:), isd, 0, isd, 0, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
1254  end if
1255 
1256  if (ie == npx-1) then
1257  call remap_bc_k(pe_lag_bc%east_t1, pe_eul_bc%east_t1, var_lag_bc%east_t1, var(npx+istag:ied+istag,jsd:jed+jstag,:), npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
1258  end if
1259 
1260  if (is == 1) then
1261  istart = is
1262  else
1263  istart = isd
1264  end if
1265  if (ie == npx-1) then
1266  iend = ie
1267  else
1268  iend = ied
1269  end if
1270 
1271  if (js == 1) then
1272  call remap_bc_k(pe_lag_bc%south_t1, pe_eul_bc%south_t1, var_lag_bc%south_t1, var(isd:ied+istag,jsd:0,:), isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_coarse, iv, kord, log_pe)
1273  end if
1274 
1275  if (je == npy-1) then
1276  call remap_bc_k(pe_lag_bc%north_t1, pe_eul_bc%north_t1, var_lag_bc%north_t1, var(isd:ied+istag,npy+jstag:jed+jstag,:), isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
1277  end if
1278 
1279  end subroutine remap_bc_direct
1280 
1281  subroutine remap_bc_k(pe_lagBC, pe_eulBC, var_lagBC, var_eulBC, isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse, iv, kord, log_pe)
1283  integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse, iv, kord
1284  logical, intent(IN) :: log_pe
1285  real, intent(INOUT) :: pe_lagBC(isd_bc:ied_bc,jstart:jend,npz_coarse+1), var_lagBC(isd_bc:ied_bc,jstart:jend,npz_coarse)
1286  real, intent(INOUT) :: pe_eulBC(isd_bc:ied_bc,jstart:jend,npz+1), var_eulBC(isd_bc:ied_bc,jstart:jend,npz)
1287 
1288  integer :: i, j, k
1289  real peln_lag(istart:iend,npz_coarse+1)
1290  real peln_eul(istart:iend,npz+1)
1291  character(120) :: errstring
1292 
1293  if (log_pe) then
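 !Remap in log(pe); used when remapping temperature (see the do_log_pe=.true. call in
 !setup_nested_grid_bcs).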
1294 
1295 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_coarse,pe_lagBC,pe_eulBC,var_lagBC,var_eulBC,iv,kord) &
1296 !$OMP private(peln_lag,peln_eul)
1297  do j=jstart,jend
1298 
1299  do k=1,npz_coarse+1
1300  do i=istart,iend
1301 !!$!!! DEBUG CODE
1302 !!$ if (pe_lagBC(i,j,k) <= 0.) then
1303 !!$ write(errstring,'(3I5, 2x, G)'), i, j, k, pe_lagBC(i,j,k)
1304 !!$ call mpp_error(WARNING, ' Remap BC: invalid pressure at at '//errstring)
1305 !!$ endif
1306 !!$!!! END DEBUG CODE
1307  peln_lag(i,k) = log(pe_lagbc(i,j,k))
1308  enddo
1309  enddo
1310 
1311  do k=1,npz+1
1312  do i=istart,iend
1313 !!$!!! DEBUG CODE
1314 !!$ if (pe_lagBC(i,j,k) <= 0.) then
1315 !!$ write(errstring,'(3I5, 2x, G)'), i, j, k, pe_lagBC(i,j,k)
1316 !!$ call mpp_error(WARNING, ' Remap BC: invalid pressure at at '//errstring)
1317 !!$ endif
1318 !!$!!! END DEBUG CODE
1319  peln_eul(i,k) = log(pe_eulbc(i,j,k))
1320  enddo
1321  enddo
1322 
1323  call mappm(npz_coarse, peln_lag, var_lagbc(istart:iend,j:j,:), &
1324  npz, peln_eul, var_eulbc(istart:iend,j:j,:), &
1325  istart, iend, iv, kord, pe_eulbc(istart,j,1))
1326 
1327  enddo
1328 
1329  else
1330 
1331 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_coarse,pe_lagBC,pe_eulBC,var_lagBC,var_eulBC,iv,kord)
1332  do j=jstart,jend
1333 
1334  call mappm(npz_coarse, pe_lagbc(istart:iend,j:j,:), var_lagbc(istart:iend,j:j,:), &
1335  npz, pe_eulbc(istart:iend,j:j,:), var_eulbc(istart:iend,j:j,:), &
1336  istart, iend, iv, kord, pe_eulbc(istart,j,1))
1337  !!! NEED A FILLQ/FILLZ CALL HERE??
1338 
1339  enddo
1340  endif
1341 
1342  end subroutine remap_bc_k
1343 
1344  subroutine remap_delz_bc(pe_lag_BC, pe_eul_BC, delp_lag_BC, delz_lag_BC, delp_eul_BC, delz_eul_BC, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord)
1346  type(fv_grid_bounds_type), intent(IN) :: bd
1347  type(fv_nest_bc_type_3d), intent(INOUT), target :: pe_lag_BC, delp_lag_BC, delz_lag_BC
1348  type(fv_nest_bc_type_3d), intent(INOUT), target :: pe_eul_BC, delp_eul_BC, delz_eul_BC
1349  integer, intent(IN) :: npx, npy, npz, npz_coarse, istag, jstag, iv, kord
1350 
1351  integer :: i,j,k, istart, iend
1352 
1353  integer :: is, ie, js, je
1354  integer :: isd, ied, jsd, jed
1355 
1356  is = bd%is
1357  ie = bd%ie
1358  js = bd%js
1359  je = bd%je
1360  isd = bd%isd
1361  ied = bd%ied
1362  jsd = bd%jsd
1363  jed = bd%jed
1364 
1365  if (is == 1) then
1366  call compute_specific_volume_bc_k(delp_lag_bc%west_t1, delz_lag_bc%west_t1, isd, 0, isd, 0, jsd, jed, npz_coarse)
1367  call remap_bc_k(pe_lag_bc%west_t1, pe_eul_bc%west_t1, delz_lag_bc%west_t1, delz_eul_bc%west_t1, isd, 0, isd, 0, jsd, jed+jstag, &
1368  npz, npz_coarse, iv, kord, log_pe=.false.)
1369  call compute_delz_bc_k(delp_eul_bc%west_t1, delz_eul_bc%west_t1, isd, 0, isd, 0, jsd, jed, npz)
1370  end if
1371 
1372  if (ie == npx-1) then
1373  call compute_specific_volume_bc_k(delp_lag_bc%east_t1, delz_lag_bc%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz_coarse)
1374  call remap_bc_k(pe_lag_bc%east_t1, pe_eul_bc%east_t1, delz_lag_bc%east_t1, delz_eul_bc%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, &
1375  npz, npz_coarse, iv, kord, log_pe=.false.)
1376  call compute_delz_bc_k(delp_eul_bc%east_t1, delz_eul_bc%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz)
1377  end if
1378 
1379  if (is == 1) then
1380  istart = is
1381  else
1382  istart = isd
1383  end if
1384  if (ie == npx-1) then
1385  iend = ie
1386  else
1387  iend = ied
1388  end if
1389 
1390  if (js == 1) then
1391  call compute_specific_volume_bc_k(delp_lag_bc%south_t1, delz_lag_bc%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz_coarse)
1392  call remap_bc_k(pe_lag_bc%south_t1, pe_eul_bc%south_t1, delz_lag_bc%south_t1, delz_eul_bc%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_coarse, &
1393  iv, kord, log_pe=.false.)
1394  call compute_delz_bc_k(delp_eul_bc%south_t1, delz_eul_bc%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz)
1395  end if
1396 
1397  if (je == npy-1) then
1398  call compute_specific_volume_bc_k(delp_lag_bc%north_t1, delz_lag_bc%north_t1, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz_coarse)
1399  call remap_bc_k(pe_lag_bc%north_t1, pe_eul_bc%north_t1, delz_lag_bc%north_t1, delz_eul_bc%north_t1, &
1400  isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_coarse, iv, kord, log_pe=.false.)
1401  call compute_delz_bc_k(delp_eul_bc%north_t1, delz_eul_bc%north_t1, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz)
1402  end if
1403 
1404  end subroutine remap_delz_bc
1405 
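 !delz is remapped indirectly: compute_specific_volume_BC_k converts it to -delz/delp (a positive,
 !mass-weighted quantity suitable for mappm), and compute_delz_BC_k converts the remapped field
 !back to a (negative) layer thickness using the Eulerian delp.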
1406  subroutine compute_specific_volume_bc_k(delpBC, delzBC, isd_BC, ied_BC, istart, iend, jstart, jend, npz)
1408  integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz
1409  real, intent(IN) :: delpBC(isd_bc:ied_bc,jstart:jend,npz)
1410  real, intent(INOUT) :: delzBC(isd_bc:ied_bc,jstart:jend,npz)
1411 
1412  character(len=120) :: errstring
1413  integer :: i,j,k
1414 
1415 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,delzBC,delpBC)
1416  do k=1,npz
1417  do j=jstart,jend
1418  do i=istart,iend
1419  delzbc(i,j,k) = -delzbc(i,j,k)/delpbc(i,j,k)
1420 !!$!!! DEBUG CODE
1421 !!$ if (delzBC(i,j,k) <= 0. ) then
1422 !!$ write(errstring,'(3I5, 2(2x, G))'), i, j, k, delzBC(i,j,k), delpBC(i,j,k)
1423 !!$ call mpp_error(WARNING, ' Remap BC (sfc volume): invalid delz at '//errstring)
1424 !!$ endif
1425 !!$!!! END DEBUG CODE
1426  end do
1427  end do
1428  end do
1429 
1430  end subroutine compute_specific_volume_bc_k
1431 
1432  subroutine compute_delz_bc_k(delpBC, delzBC, isd_BC, ied_BC, istart, iend, jstart, jend, npz)
1434  integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz
1435  real, intent(IN) :: delpBC(isd_bc:ied_bc,jstart:jend,npz)
1436  real, intent(INOUT) :: delzBC(isd_bc:ied_bc,jstart:jend,npz)
1437 
1438  character(len=120) :: errstring
1439  integer :: i,j,k
1440 
1441 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,delzBC,delpBC)
1442  do k=1,npz
1443  do j=jstart,jend
1444  do i=istart,iend
1445  delzbc(i,j,k) = -delzbc(i,j,k)*delpbc(i,j,k)
1446 !!$!!! DEBUG CODE
1447 !!$ if (delzBC(i,j,k) >=0. ) then
1448 !!$ write(errstring,'(3I5, 2(2x, G))'), i, j, k, delzBC(i,j,k), delpBC(i,j,k)
1449 !!$ call mpp_error(WARNING, ' Remap BC (compute delz): invalid delz at '//errstring)
1450 !!$ endif
1451 !!$!!! END DEBUG CODE
1452  end do
1453  end do
1454  end do
1455 
1456  end subroutine compute_delz_bc_k
1457 
1458 
1459  subroutine setup_pt_nh_bc(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, &
1460 #ifdef USE_COND
1461  q_con_BC, &
1462 #ifdef MOIST_CAPPA
1463  cappa_BC, &
1464 #endif
1465 #endif
1466  npx, npy, npz, zvir, bd)
1468  type(fv_grid_bounds_type), intent(IN) :: bd
1469  type(fv_nest_bc_type_3d), intent(IN), target :: delp_BC, delz_BC, sphum_BC
1470  type(fv_nest_bc_type_3d), intent(INOUT), target :: pt_BC
1471  integer, intent(IN) :: nq
1472  type(fv_nest_bc_type_3d), intent(IN), target :: q_BC(nq)
1473 #ifdef USE_COND
1474  type(fv_nest_bc_type_3d), intent(INOUT), target :: q_con_BC
1475 #ifdef MOIST_CAPPA
1476  type(fv_nest_bc_type_3d), intent(INOUT), target :: cappa_BC
1477 #endif
1478 #endif
1479  integer, intent(IN) :: npx, npy, npz
1480  real, intent(IN) :: zvir
1481 
1482  real, parameter:: c_liq = 4185.5
1483  real, parameter:: c_ice = 1972.
1484  real, parameter:: cv_vap = cp_vapor - rvgas
1485 
1486  real, dimension(:,:,:), pointer :: liq_watBC_west, ice_watBC_west, rainwatBC_west, snowwatBC_west, graupelBC_west
1487  real, dimension(:,:,:), pointer :: liq_watBC_east, ice_watBC_east, rainwatBC_east, snowwatBC_east, graupelBC_east
1488  real, dimension(:,:,:), pointer :: liq_watBC_north, ice_watBC_north, rainwatBC_north, snowwatBC_north, graupelBC_north
1489  real, dimension(:,:,:), pointer :: liq_watBC_south, ice_watBC_south, rainwatBC_south, snowwatBC_south, graupelBC_south
1490 
1491  real :: dp1, q_liq, q_sol, q_con = 0., cvm, pkz, rdg, cv_air
1492 
1493  integer :: i,j,k, istart, iend
1494  integer :: liq_wat, ice_wat, rainwat, snowwat, graupel
1495  real, parameter:: tice = 273.16
1496  real, parameter:: t_i0 = 15.
1497 
1498  integer :: is, ie, js, je
1499  integer :: isd, ied, jsd, jed
1500 
1501  is = bd%is
1502  ie = bd%ie
1503  js = bd%js
1504  je = bd%je
1505  isd = bd%isd
1506  ied = bd%ied
1507  jsd = bd%jsd
1508  jed = bd%jed
1509 
1510  rdg = -rdgas / grav
1511  cv_air = cp_air - rdgas
1512 
1513  liq_wat = get_tracer_index(model_atmos, 'liq_wat')
1514  ice_wat = get_tracer_index(model_atmos, 'ice_wat')
1515  rainwat = get_tracer_index(model_atmos, 'rainwat')
1516  snowwat = get_tracer_index(model_atmos, 'snowwat')
1517  graupel = get_tracer_index(model_atmos, 'graupel')
1518 
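 ! Microphysics species that are absent (tracer index < 1) are pointed at zero-filled
 ! dummy arrays (allocated below only on PEs adjacent to the nested-grid edges), so the
 ! conversion in setup_pt_nh_bc_k sees zero condensate for them.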
1519  if (is == 1) then
1520  if (.not. allocated(dum_west)) then
1521  allocate(dum_west(isd:0,jsd:jed,npz))
1522 !$OMP parallel do default(none) shared(npz,isd,jsd,jed,dum_West)
1523  do k=1,npz
1524  do j=jsd,jed
1525  do i=isd,0
1526  dum_west(i,j,k) = 0.
1527  enddo
1528  enddo
1529  enddo
1530  endif
1531  endif
1532  if (js == 1) then
1533  if (.not. allocated(dum_south)) then
1534  allocate(dum_south(isd:ied,jsd:0,npz))
1535 !$OMP parallel do default(none) shared(npz,isd,ied,jsd,dum_South)
1536  do k=1,npz
1537  do j=jsd,0
1538  do i=isd,ied
1539  dum_south(i,j,k) = 0.
1540  enddo
1541  enddo
1542  enddo
1543  endif
1544  endif
1545  if (ie == npx-1) then
1546  if (.not. allocated(dum_east)) then
1547  allocate(dum_east(npx:ied,jsd:jed,npz))
1548 !$OMP parallel do default(none) shared(npx,npz,ied,jsd,jed,dum_East)
1549  do k=1,npz
1550  do j=jsd,jed
1551  do i=npx,ied
1552  dum_east(i,j,k) = 0.
1553  enddo
1554  enddo
1555  enddo
1556  endif
1557  endif
1558  if (je == npy-1) then
1559  if (.not. allocated(dum_north)) then
1560  allocate(dum_north(isd:ied,npy:jed,npz))
1561 !$OMP parallel do default(none) shared(npy,npz,isd,ied,jed,dum_North)
1562  do k=1,npz
1563  do j=npy,jed
1564  do i=isd,ied
1565  dum_north(i,j,k) = 0.
1566  enddo
1567  enddo
1568  enddo
1569  endif
1570  endif
1571 
1572  if (liq_wat > 0) then
1573  liq_watbc_west => q_bc(liq_wat)%west_t1
1574  liq_watbc_east => q_bc(liq_wat)%east_t1
1575  liq_watbc_north => q_bc(liq_wat)%north_t1
1576  liq_watbc_south => q_bc(liq_wat)%south_t1
1577  else
1578  liq_watbc_west => dum_west
1579  liq_watbc_east => dum_east
1580  liq_watbc_north => dum_north
1581  liq_watbc_south => dum_south
1582  endif
1583  if (ice_wat > 0) then
1584  ice_watbc_west => q_bc(ice_wat)%west_t1
1585  ice_watbc_east => q_bc(ice_wat)%east_t1
1586  ice_watbc_north => q_bc(ice_wat)%north_t1
1587  ice_watbc_south => q_bc(ice_wat)%south_t1
1588  else
1589  ice_watbc_west => dum_west
1590  ice_watbc_east => dum_east
1591  ice_watbc_north => dum_north
1592  ice_watbc_south => dum_south
1593  endif
1594  if (rainwat > 0) then
1595  rainwatbc_west => q_bc(rainwat)%west_t1
1596  rainwatbc_east => q_bc(rainwat)%east_t1
1597  rainwatbc_north => q_bc(rainwat)%north_t1
1598  rainwatbc_south => q_bc(rainwat)%south_t1
1599  else
1600  rainwatbc_west => dum_west
1601  rainwatbc_east => dum_east
1602  rainwatbc_north => dum_north
1603  rainwatbc_south => dum_south
1604  endif
1605  if (snowwat > 0) then
1606  snowwatbc_west => q_bc(snowwat)%west_t1
1607  snowwatbc_east => q_bc(snowwat)%east_t1
1608  snowwatbc_north => q_bc(snowwat)%north_t1
1609  snowwatbc_south => q_bc(snowwat)%south_t1
1610  else
1611  snowwatbc_west => dum_west
1612  snowwatbc_east => dum_east
1613  snowwatbc_north => dum_north
1614  snowwatbc_south => dum_south
1615  endif
1616  if (graupel > 0) then
1617  graupelbc_west => q_bc(graupel)%west_t1
1618  graupelbc_east => q_bc(graupel)%east_t1
1619  graupelbc_north => q_bc(graupel)%north_t1
1620  graupelbc_south => q_bc(graupel)%south_t1
1621  else
1622  graupelbc_west => dum_west
1623  graupelbc_east => dum_east
1624  graupelbc_north => dum_north
1625  graupelbc_south => dum_south
1626  endif
1627 
1628  if (is == 1) then
1629 
1630  call setup_pt_nh_bc_k(pt_bc%west_t1, sphum_bc%west_t1, delp_bc%west_t1, delz_bc%west_t1, &
1631  liq_watbc_west, rainwatbc_west, ice_watbc_west, snowwatbc_west, graupelbc_west, &
1632 #ifdef USE_COND
1633  q_con_bc%west_t1, &
1634 #ifdef MOIST_CAPPA
1635  cappa_bc%west_t1, &
1636 #endif
1637 #endif
1638  zvir, isd, 0, isd, 0, jsd, jed, npz)
1639  end if
1640 
1641 
1642  if (js == 1) then
1643  if (is == 1) then
1644  istart = is
1645  else
1646  istart = isd
1647  end if
1648  if (ie == npx-1) then
1649  iend = ie
1650  else
1651  iend = ied
1652  end if
1653 
1654  call setup_pt_nh_bc_k(pt_bc%south_t1, sphum_bc%south_t1, delp_bc%south_t1, delz_bc%south_t1, &
1655  liq_watbc_south, rainwatbc_south, ice_watbc_south, snowwatbc_south, graupelbc_south, &
1656 #ifdef USE_COND
1657  q_con_bc%south_t1, &
1658 #ifdef MOIST_CAPPA
1659  cappa_bc%south_t1, &
1660 #endif
1661 #endif
1662  zvir, isd, ied, istart, iend, jsd, 0, npz)
1663  end if
1664 
1665 
1666  if (ie == npx-1) then
1667 
1668  call setup_pt_nh_bc_k(pt_bc%east_t1, sphum_bc%east_t1, delp_bc%east_t1, delz_bc%east_t1, &
1669  liq_watbc_east, rainwatbc_east, ice_watbc_east, snowwatbc_east, graupelbc_east, &
1670 #ifdef USE_COND
1671  q_con_bc%east_t1, &
1672 #ifdef MOIST_CAPPA
1673  cappa_bc%east_t1, &
1674 #endif
1675 #endif
1676  zvir, npx, ied, npx, ied, jsd, jed, npz)
1677  end if
1678 
1679  if (je == npy-1) then
1680  if (is == 1) then
1681  istart = is
1682  else
1683  istart = isd
1684  end if
1685  if (ie == npx-1) then
1686  iend = ie
1687  else
1688  iend = ied
1689  end if
1690 
1691  call setup_pt_nh_bc_k(pt_bc%north_t1, sphum_bc%north_t1, delp_bc%north_t1, delz_bc%north_t1, &
1692  liq_watbc_north, rainwatbc_north, ice_watbc_north, snowwatbc_north, graupelbc_north, &
1693 #ifdef USE_COND
1694  q_con_bc%north_t1, &
1695 #ifdef MOIST_CAPPA
1696  cappa_bc%north_t1, &
1697 #endif
1698 #endif
1699  zvir, isd, ied, istart, iend, npy, jed, npz)
1700  end if
1701 
1702  end subroutine setup_pt_nh_bc
1703 
1704 
1705  subroutine setup_pt_nh_bc_k(ptBC,sphumBC,delpBC,delzBC, &
1706  liq_watBC,rainwatBC,ice_watBC,snowwatBC,graupelBC, &
1707 #ifdef USE_COND
1708  q_conBC, &
1709 #ifdef MOIST_CAPPA
1710  cappaBC, &
1711 #endif
1712 #endif
1713  zvir, isd_BC, ied_BC, istart, iend, jstart, jend, npz)
1715  integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz
1716  real, intent(OUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: ptBC
1717  real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz) :: sphumBC, delpBC, delzBC
1718  real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz) :: liq_watBC,rainwatBC,ice_watBC,snowwatBC,graupelBC
1719 #ifdef USE_COND
1720  real, intent(OUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: q_conBC
1721 #ifdef MOIST_CAPPA
1722  real, intent(OUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: cappaBC
1723 #endif
1724 #endif
1725  real, intent(IN) :: zvir
1726 
1727  integer :: i,j,k
1728  real :: dp1, q_con, q_sol, q_liq, cvm, pkz, rdg, cv_air
1729 
1730  real, parameter:: c_liq = 4185.5 ! heat capacity of water at 0C
1731  real, parameter:: c_ice = 1972. ! heat capacity of ice at 0C: c=c_ice+7.3*(T-Tice)
1732  real, parameter:: cv_vap = cp_vapor - rvgas ! 1384.5
1733  real, parameter:: tice = 273.16 ! For GFS Partitioning
1734  real, parameter:: t_i0 = 15.
1735 
1736  rdg = -rdgas / grav
1737  cv_air = cp_air - rdgas
1738 
1739 !!$!!! DEBUG CODE
1740 !!$ write(*, '(A, 7I5)') 'setup_pt_NH_BC_k', mpp_pe(), isd, ied, istart, iend, lbound(ptBC,1), ubound(ptBC,1)
1741 !!$!!! END DEBUG CODE
1742 
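 ! Convert the BC temperature to the moist/virtual potential temperature carried by the
 ! solver: pkz is diagnosed from the gas law using delpBC and delzBC, and ptBC is scaled
 ! by (1+zvir*sphum)*(1-q_con)/pkz; the condensate loading and moisture-dependent kappa
 ! enter only when USE_COND / MOIST_CAPPA are defined.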
1743 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,zvir,ptBC,sphumBC,delpBC,delzBC,liq_watBC,rainwatBC,ice_watBC,snowwatBC,graupelBC, &
1744 #ifdef USE_COND
1745 !$OMP q_conBC, &
1746 #ifdef MOIST_CAPPA
1747 !$OMP cappaBC, &
1748 #endif
1749 #endif
1750 !$OMP rdg, cv_air) &
1751 !$OMP private(dp1,q_liq,q_sol,q_con,cvm,pkz)
1752  do k=1,npz
1753  do j=jstart,jend
1754  do i=istart,iend
1755  dp1 = zvir*sphumbc(i,j,k)
1756 #ifdef USE_COND
1757  q_liq = liq_watbc(i,j,k) + rainwatbc(i,j,k)
1758  q_sol = ice_watbc(i,j,k) + snowwatbc(i,j,k) + graupelbc(i,j,k)
1759  q_con = q_liq + q_sol
1760  q_conbc(i,j,k) = q_con
1761 #ifdef MOIST_CAPPA
1762  cvm = (1.-(sphumbc(i,j,k)+q_con))*cv_air+sphumbc(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice
1763  cappabc(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1))
1764  pkz = exp( cappabc(i,j,k)*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
1765  (1.+dp1)*(1.-q_con)/delzbc(i,j,k)))
1766 #else
1767  pkz = exp( kappa*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
1768  (1.+dp1)*(1.-q_con)/delzbc(i,j,k)))
1769 #endif
1770  ptbc(i,j,k) = ptbc(i,j,k)*(1.+dp1)*(1.-q_con)/pkz
1771 #else
1772  pkz = exp( kappa*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
1773  (1.+dp1)/delzbc(i,j,k)))
1774  ptbc(i,j,k) = ptbc(i,j,k)*(1.+dp1)/pkz
1775 #endif
1776  end do
1777  end do
1778  end do
1779 
1780  end subroutine setup_pt_nh_bc_k
1781 
1782  subroutine set_nh_bcs_t0(neststruct)
1784  type(fv_nest_type), intent(INOUT) :: neststruct
1785 
1786 #ifndef SW_DYNAMICS
1787  neststruct%delz_BC%east_t0 = neststruct%delz_BC%east_t1
1788  neststruct%delz_BC%west_t0 = neststruct%delz_BC%west_t1
1789  neststruct%delz_BC%north_t0 = neststruct%delz_BC%north_t1
1790  neststruct%delz_BC%south_t0 = neststruct%delz_BC%south_t1
1791 
1792  neststruct%w_BC%east_t0 = neststruct%w_BC%east_t1
1793  neststruct%w_BC%west_t0 = neststruct%w_BC%west_t1
1794  neststruct%w_BC%north_t0 = neststruct%w_BC%north_t1
1795  neststruct%w_BC%south_t0 = neststruct%w_BC%south_t1
1796 #endif
1797 
1798  end subroutine set_nh_bcs_t0
1799 
1800  subroutine set_bcs_t0(ncnst, hydrostatic, neststruct)
1802  integer, intent(IN) :: ncnst
1803  logical, intent(IN) :: hydrostatic
1804  type(fv_nest_type), intent(INOUT) :: neststruct
1805 
1806  integer :: n
1807 
1808  neststruct%delp_BC%east_t0 = neststruct%delp_BC%east_t1
1809  neststruct%delp_BC%west_t0 = neststruct%delp_BC%west_t1
1810  neststruct%delp_BC%north_t0 = neststruct%delp_BC%north_t1
1811  neststruct%delp_BC%south_t0 = neststruct%delp_BC%south_t1
1812  do n=1,ncnst
1813  neststruct%q_BC(n)%east_t0 = neststruct%q_BC(n)%east_t1
1814  neststruct%q_BC(n)%west_t0 = neststruct%q_BC(n)%west_t1
1815  neststruct%q_BC(n)%north_t0 = neststruct%q_BC(n)%north_t1
1816  neststruct%q_BC(n)%south_t0 = neststruct%q_BC(n)%south_t1
1817  enddo
1818 #ifndef SW_DYNAMICS
1819  neststruct%pt_BC%east_t0 = neststruct%pt_BC%east_t1
1820  neststruct%pt_BC%west_t0 = neststruct%pt_BC%west_t1
1821  neststruct%pt_BC%north_t0 = neststruct%pt_BC%north_t1
1822  neststruct%pt_BC%south_t0 = neststruct%pt_BC%south_t1
1823  neststruct%pt_BC%east_t0 = neststruct%pt_BC%east_t1
1824  neststruct%pt_BC%west_t0 = neststruct%pt_BC%west_t1
1825  neststruct%pt_BC%north_t0 = neststruct%pt_BC%north_t1
1826  neststruct%pt_BC%south_t0 = neststruct%pt_BC%south_t1
1827 
1828 #ifdef USE_COND
1829  neststruct%q_con_BC%east_t0 = neststruct%q_con_BC%east_t1
1830  neststruct%q_con_BC%west_t0 = neststruct%q_con_BC%west_t1
1831  neststruct%q_con_BC%north_t0 = neststruct%q_con_BC%north_t1
1832  neststruct%q_con_BC%south_t0 = neststruct%q_con_BC%south_t1
1833 #ifdef MOIST_CAPPA
1834  neststruct%cappa_BC%east_t0 = neststruct%cappa_BC%east_t1
1835  neststruct%cappa_BC%west_t0 = neststruct%cappa_BC%west_t1
1836  neststruct%cappa_BC%north_t0 = neststruct%cappa_BC%north_t1
1837  neststruct%cappa_BC%south_t0 = neststruct%cappa_BC%south_t1
1838 #endif
1839 #endif
1840 
1841  if (.not. hydrostatic) then
1842  call set_nh_bcs_t0(neststruct)
1843  endif
1844 #endif
1845  neststruct%u_BC%east_t0 = neststruct%u_BC%east_t1
1846  neststruct%u_BC%west_t0 = neststruct%u_BC%west_t1
1847  neststruct%u_BC%north_t0 = neststruct%u_BC%north_t1
1848  neststruct%u_BC%south_t0 = neststruct%u_BC%south_t1
1849  neststruct%v_BC%east_t0 = neststruct%v_BC%east_t1
1850  neststruct%v_BC%west_t0 = neststruct%v_BC%west_t1
1851  neststruct%v_BC%north_t0 = neststruct%v_BC%north_t1
1852  neststruct%v_BC%south_t0 = neststruct%v_BC%south_t1
1853 
1854 
1855  neststruct%vc_BC%east_t0 = neststruct%vc_BC%east_t1
1856  neststruct%vc_BC%west_t0 = neststruct%vc_BC%west_t1
1857  neststruct%vc_BC%north_t0 = neststruct%vc_BC%north_t1
1858  neststruct%vc_BC%south_t0 = neststruct%vc_BC%south_t1
1859  neststruct%uc_BC%east_t0 = neststruct%uc_BC%east_t1
1860  neststruct%uc_BC%west_t0 = neststruct%uc_BC%west_t1
1861  neststruct%uc_BC%north_t0 = neststruct%uc_BC%north_t1
1862  neststruct%uc_BC%south_t0 = neststruct%uc_BC%south_t1
1863 
1864  neststruct%divg_BC%east_t0 = neststruct%divg_BC%east_t1
1865  neststruct%divg_BC%west_t0 = neststruct%divg_BC%west_t1
1866  neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1
1867  neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1
1868 
1869  end subroutine set_bcs_t0
1870 
1871  subroutine d2c_setup(u, v, &
1872  ua, va, &
1873  uc, vc, dord4, &
1874  isd,ied,jsd,jed, is,ie,js,je, npx,npy, &
1875  grid_type, bounded_domain, &
1876  se_corner, sw_corner, ne_corner, nw_corner, &
1877  rsin_u,rsin_v,cosa_s,rsin2 )
1879  logical, intent(in):: dord4
1880  real, intent(in) :: u(isd:ied,jsd:jed+1)
1881  real, intent(in) :: v(isd:ied+1,jsd:jed)
1882  real, intent(out), dimension(isd:ied ,jsd:jed ):: ua
1883  real, intent(out), dimension(isd:ied ,jsd:jed ):: va
1884  real, intent(out), dimension(isd:ied+1,jsd:jed ):: uc
1885  real, intent(out), dimension(isd:ied ,jsd:jed+1):: vc
1886  integer, intent(in) :: isd,ied,jsd,jed, is,ie,js,je, npx,npy,grid_type
1887  logical, intent(in) :: bounded_domain, se_corner, sw_corner, ne_corner, nw_corner
1888  real, intent(in) :: rsin_u(isd:ied+1,jsd:jed)
1889  real, intent(in) :: rsin_v(isd:ied,jsd:jed+1)
1890  real, intent(in) :: cosa_s(isd:ied,jsd:jed)
1891  real, intent(in) :: rsin2(isd:ied,jsd:jed)
1892 
1893 ! Local
1894  real, dimension(isd:ied,jsd:jed):: utmp, vtmp
1895  real, parameter:: t11=27./28., t12=-13./28., t13=3./7., t14=6./7., t15=3./28.
1896  real, parameter:: a1 = 0.5625
1897  real, parameter:: a2 = -0.0625
1898  real, parameter:: c1 = -2./14.
1899  real, parameter:: c2 = 11./14.
1900  real, parameter:: c3 = 5./14.
1901  integer npt, i, j, ifirst, ilast, id
1902 
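 ! a1/a2 (9/16, -1/16) are the interior 4th-order interpolation weights; c1..c3 and
 ! t12/t14/t15 are the one-sided stencils used in the rows adjacent to cube edges
 ! (grid_type < 3, non-bounded domains), where the centered stencil cannot be applied.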
1903  if ( dord4) then
1904  id = 1
1905  else
1906  id = 0
1907  endif
1908 
1909 
1910  if (grid_type < 3 .and. .not. bounded_domain) then
1911  npt = 4
1912  else
1913  npt = -2
1914  endif
1915 
1916  if ( bounded_domain) then
1917 
1918  do j=jsd+1,jed-1
1919  do i=isd,ied
1920  utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
1921  enddo
1922  enddo
1923  do i=isd,ied
1924  j = jsd
1925  utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
1926  j = jed
1927  utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
1928  end do
1929 
1930  do j=jsd,jed
1931  do i=isd+1,ied-1
1932  vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
1933  enddo
1934  i = isd
1935  vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
1936  i = ied
1937  vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
1938  enddo
1939 
1940  do j=jsd,jed
1941  do i=isd,ied
1942  ua(i,j) = (utmp(i,j)-vtmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
1943  va(i,j) = (vtmp(i,j)-utmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
1944  enddo
1945  enddo
1946 
1947  else
1948 
1949  !----------
1950  ! Interior:
1951  !----------
1952  utmp = 0.
1953  vtmp = 0.
1954 
1955 
1956  do j=max(npt,js-1),min(npy-npt,je+1)
1957  do i=max(npt,isd),min(npx-npt,ied)
1958  utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
1959  enddo
1960  enddo
1961  do j=max(npt,jsd),min(npy-npt,jed)
1962  do i=max(npt,is-1),min(npx-npt,ie+1)
1963  vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
1964  enddo
1965  enddo
1966 
1967  !----------
1968  ! edges:
1969  !----------
1970  if (grid_type < 3) then
1971 
1972  if ( js==1 .or. jsd<npt) then
1973  do j=jsd,npt-1
1974  do i=isd,ied
1975  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
1976  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
1977  enddo
1978  enddo
1979  endif
1980 
1981  if ( (je+1)==npy .or. jed>=(npy-npt)) then
1982  do j=npy-npt+1,jed
1983  do i=isd,ied
1984  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
1985  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
1986  enddo
1987  enddo
1988  endif
1989 
1990  if ( is==1 .or. isd<npt ) then
1991  do j=max(npt,jsd),min(npy-npt,jed)
1992  do i=isd,npt-1
1993  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
1994  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
1995  enddo
1996  enddo
1997  endif
1998 
1999  if ( (ie+1)==npx .or. ied>=(npx-npt)) then
2000  do j=max(npt,jsd),min(npy-npt,jed)
2001  do i=npx-npt+1,ied
2002  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
2003  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
2004  enddo
2005  enddo
2006  endif
2007 
2008  endif
2009  do j=js-1-id,je+1+id
2010  do i=is-1-id,ie+1+id
2011  ua(i,j) = (utmp(i,j)-vtmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
2012  va(i,j) = (vtmp(i,j)-utmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
2013  enddo
2014  enddo
2015 
2016  end if
2017 
2018 ! A -> C
2019 !--------------
2020 ! Fix the edges
2021 !--------------
2022 ! Xdir:
2023  if( sw_corner ) then
2024  do i=-2,0
2025  utmp(i,0) = -vtmp(0,1-i)
2026  enddo
2027  endif
2028  if( se_corner ) then
2029  do i=0,2
2030  utmp(npx+i,0) = vtmp(npx,i+1)
2031  enddo
2032  endif
2033  if( ne_corner ) then
2034  do i=0,2
2035  utmp(npx+i,npy) = -vtmp(npx,je-i)
2036  enddo
2037  endif
2038  if( nw_corner ) then
2039  do i=-2,0
2040  utmp(i,npy) = vtmp(0,je+i)
2041  enddo
2042  endif
2043 
2044  if (grid_type < 3 .and. .not. bounded_domain) then
2045  ifirst = max(3, is-1)
2046  ilast = min(npx-2,ie+2)
2047  else
2048  ifirst = is-1
2049  ilast = ie+2
2050  endif
2051 !---------------------------------------------
2052 ! 4th order interpolation for interior points:
2053 !---------------------------------------------
2054  do j=js-1,je+1
2055  do i=ifirst,ilast
2056  uc(i,j) = a1*(utmp(i-1,j)+utmp(i,j))+a2*(utmp(i-2,j)+utmp(i+1,j))
2057  enddo
2058  enddo
2059 
2060  if (grid_type < 3) then
2061 ! Xdir:
2062  if( is==1 .and. .not. bounded_domain ) then
2063  do j=js-1,je+1
2064  uc(0,j) = c1*utmp(-2,j) + c2*utmp(-1,j) + c3*utmp(0,j)
2065  uc(1,j) = ( t14*(utmp( 0,j)+utmp(1,j)) &
2066  + t12*(utmp(-1,j)+utmp(2,j)) &
2067  + t15*(utmp(-2,j)+utmp(3,j)) )*rsin_u(1,j)
2068  uc(2,j) = c1*utmp(3,j) + c2*utmp(2,j) + c3*utmp(1,j)
2069  enddo
2070  endif
2071 
2072  if( (ie+1)==npx .and. .not. bounded_domain ) then
2073  do j=js-1,je+1
2074  uc(npx-1,j) = c1*utmp(npx-3,j)+c2*utmp(npx-2,j)+c3*utmp(npx-1,j)
2075  uc(npx,j) = (t14*(utmp(npx-1,j)+utmp(npx,j))+ &
2076  t12*(utmp(npx-2,j)+utmp(npx+1,j)) &
2077  + t15*(utmp(npx-3,j)+utmp(npx+2,j)))*rsin_u(npx,j)
2078  uc(npx+1,j) = c3*utmp(npx,j)+c2*utmp(npx+1,j)+c1*utmp(npx+2,j)
2079  enddo
2080  endif
2081 
2082  endif
2083 
2084 !------
2085 ! Ydir:
2086 !------
2087  if( sw_corner ) then
2088  do j=-2,0
2089  vtmp(0,j) = -utmp(1-j,0)
2090  enddo
2091  endif
2092  if( nw_corner ) then
2093  do j=0,2
2094  vtmp(0,npy+j) = utmp(j+1,npy)
2095  enddo
2096  endif
2097  if( se_corner ) then
2098  do j=-2,0
2099  vtmp(npx,j) = utmp(ie+j,0)
2100  enddo
2101  endif
2102  if( ne_corner ) then
2103  do j=0,2
2104  vtmp(npx,npy+j) = -utmp(ie-j,npy)
2105  enddo
2106  endif
2107 
2108  if (grid_type < 3) then
2109 
2110  do j=js-1,je+2
2111  if ( j==1 .and. .not. bounded_domain) then
2112  do i=is-1,ie+1
2113  vc(i,1) = (t14*(vtmp(i, 0)+vtmp(i,1)) &
2114  + t12*(vtmp(i,-1)+vtmp(i,2)) &
2115  + t15*(vtmp(i,-2)+vtmp(i,3)))*rsin_v(i,1)
2116  enddo
2117  elseif ( (j==0 .or. j==(npy-1)) .and. .not. bounded_domain) then
2118  do i=is-1,ie+1
2119  vc(i,j) = c1*vtmp(i,j-2) + c2*vtmp(i,j-1) + c3*vtmp(i,j)
2120  enddo
2121  elseif ( (j==2 .or. j==(npy+1)) .and. .not. bounded_domain) then
2122  do i=is-1,ie+1
2123  vc(i,j) = c1*vtmp(i,j+1) + c2*vtmp(i,j) + c3*vtmp(i,j-1)
2124  enddo
2125  elseif ( j==npy .and. .not. bounded_domain) then
2126  do i=is-1,ie+1
2127  vc(i,npy) = (t14*(vtmp(i,npy-1)+vtmp(i,npy)) &
2128  + t12*(vtmp(i,npy-2)+vtmp(i,npy+1)) &
2129  + t15*(vtmp(i,npy-3)+vtmp(i,npy+2)))*rsin_v(i,npy)
2130  enddo
2131  else
2132 ! 4th order interpolation for interior points:
2133  do i=is-1,ie+1
2134  vc(i,j) = a2*(vtmp(i,j-2)+vtmp(i,j+1))+a1*(vtmp(i,j-1)+vtmp(i,j))
2135  enddo
2136  endif
2137  enddo
2138  else
2139 ! 4th order interpolation:
2140  do j=js-1,je+2
2141  do i=is-1,ie+1
2142  vc(i,j) = a2*(vtmp(i,j-2)+vtmp(i,j+1))+a1*(vtmp(i,j-1)+vtmp(i,j))
2143  enddo
2144  enddo
2145  endif
2146 
2147  end subroutine d2c_setup
2148 
2149  subroutine d2a_setup(u, v, ua, va, dord4, &
2150  isd,ied,jsd,jed, is,ie,js,je, npx,npy, &
2151  grid_type, bounded_domain, &
2152  cosa_s,rsin2 )
2154  logical, intent(in):: dord4
2155  real, intent(in) :: u(isd:ied,jsd:jed+1)
2156  real, intent(in) :: v(isd:ied+1,jsd:jed)
2157  real, intent(out), dimension(isd:ied ,jsd:jed ):: ua
2158  real, intent(out), dimension(isd:ied ,jsd:jed ):: va
2159  integer, intent(in) :: isd,ied,jsd,jed, is,ie,js,je, npx,npy,grid_type
2160  real, intent(in) :: cosa_s(isd:ied,jsd:jed)
2161  real, intent(in) :: rsin2(isd:ied,jsd:jed)
2162  logical, intent(in) :: bounded_domain
2163 
2164 ! Local
2165  real, dimension(isd:ied,jsd:jed):: utmp, vtmp
2166  real, parameter:: t11=27./28., t12=-13./28., t13=3./7., t14=6./7., t15=3./28.
2167  real, parameter:: a1 = 0.5625
2168  real, parameter:: a2 = -0.0625
2169  real, parameter:: c1 = -2./14.
2170  real, parameter:: c2 = 11./14.
2171  real, parameter:: c3 = 5./14.
2172  integer npt, i, j, ifirst, ilast, id
2173 
2174  if ( dord4) then
2175  id = 1
2176  else
2177  id = 0
2178  endif
2179 
2180 
2181  if (grid_type < 3 .and. .not. bounded_domain) then
2182  npt = 4
2183  else
2184  npt = -2
2185  endif
2186 
2187  if ( bounded_domain) then
2188 
2189  do j=jsd+1,jed-1
2190  do i=isd,ied
2191  utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
2192  enddo
2193  enddo
2194  do i=isd,ied
2195  j = jsd
2196  utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
2197  j = jed
2198  utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
2199  end do
2200 
2201  do j=jsd,jed
2202  do i=isd+1,ied-1
2203  vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
2204  enddo
2205  i = isd
2206  vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
2207  i = ied
2208  vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
2209  enddo
2210 
2211  else
2212 
2213  !----------
2214  ! Interior:
2215  !----------
2216 
2217  do j=max(npt,js-1),min(npy-npt,je+1)
2218  do i=max(npt,isd),min(npx-npt,ied)
2219  utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
2220  enddo
2221  enddo
2222  do j=max(npt,jsd),min(npy-npt,jed)
2223  do i=max(npt,is-1),min(npx-npt,ie+1)
2224  vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
2225  enddo
2226  enddo
2227 
2228  !----------
2229  ! edges:
2230  !----------
2231  if (grid_type < 3) then
2232 
2233  if ( js==1 .or. jsd<npt) then
2234  do j=jsd,npt-1
2235  do i=isd,ied
2236  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
2237  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
2238  enddo
2239  enddo
2240  endif
2241 
2242  if ( (je+1)==npy .or. jed>=(npy-npt)) then
2243  do j=npy-npt+1,jed
2244  do i=isd,ied
2245  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
2246  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
2247  enddo
2248  enddo
2249  endif
2250 
2251  if ( is==1 .or. isd<npt ) then
2252  do j=max(npt,jsd),min(npy-npt,jed)
2253  do i=isd,npt-1
2254  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
2255  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
2256  enddo
2257  enddo
2258  endif
2259 
2260  if ( (ie+1)==npx .or. ied>=(npx-npt)) then
2261  do j=max(npt,jsd),min(npy-npt,jed)
2262  do i=npx-npt+1,ied
2263  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
2264  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
2265  enddo
2266  enddo
2267  endif
2268 
2269  endif
2270 
2271  end if
2272 
2273 
2274 
2275  do j=js-1-id,je+1+id
2276  do i=is-1-id,ie+1+id
2277  ua(i,j) = (utmp(i,j)-vtmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
2278  va(i,j) = (vtmp(i,j)-utmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
2279  enddo
2280  enddo
2281 
2282 end subroutine d2a_setup
2283 
2284 
2285 
2286 
2287 !! nestupdate types
2288 !! 1 - Interpolation update on all variables
2289 !! 2 - Conserving update (over areas on cell-
2290 !! centered variables, over faces on winds) on all variables
2291 !! 3 - Interpolation update on winds only
2292 !! 4 - Interpolation update on all variables except delp (mass conserving)
2293 !! 5 - Remap interpolating update, delp not updated
2294 !! 6 - Remap conserving update, delp not updated
2295 !! 7 - Remap conserving update, delp and q not updated
2296 !! 8 - Remap conserving update, only winds updated
2297 
2298 !! Note that nestupdate > 3 will not update delp.
2299 
2300 !! "Remap update" remaps updated variables from the nested grid's
2301 !! vertical coordinate to that of the coarse grid. When delp is not
2302 !! updated (nestupdate > 3) the vertical coordinates differ on
2303 !! the two grids, because the surface pressure will be different
2304 !! on the two grids.
2305 !! Note: "conserving updates" do not guarantee global conservation
2306 !! unless flux nested-grid BCs are specified (a capability that has not
2307 !! been implemented), or unless a quantity is not updated at all.
2308 
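! Illustrative sketch (placeholder names, not original code) of the per-level blending
! applied by the remap updates in update_remap_tqw and update_remap_uv below, with
! blend_wt(k) computed in twoway_nest_update:
!
!   wt1 = blend_wt(k)        ! 0 above the nest top and in the sponge, update_blend below
!   wt2 = 1. - wt1
!   var_parent(i,j,k) = wt1*var_remapped_from_nest(i,k) + wt2*var_parent(i,j,k)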
2311 subroutine twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir, Time, this_grid)
2313  type(fv_atmos_type), intent(INOUT) :: Atm(ngrids)
2314  integer, intent(IN) :: ngrids, this_grid
2315  logical, intent(IN) :: grids_on_this_pe(ngrids)
2316  real, intent(IN) :: zvir
2317  type(time_type), intent(IN) :: Time
2318 
2319  integer :: n, p, sphum
2320 
2321 
2322  if (ngrids > 1) then
2323 
2324 ! Re-compute pressures on each grid
2325 
2326  call p_var(atm(this_grid)%npz, atm(this_grid)%bd%is, atm(this_grid)%bd%ie, atm(this_grid)%bd%js, atm(this_grid)%bd%je, &
2327  atm(this_grid)%ptop, ptop_min, atm(this_grid)%delp, atm(this_grid)%delz, atm(this_grid)%pt, &
2328  atm(this_grid)%ps, atm(this_grid)%pe, atm(this_grid)%peln, atm(this_grid)%pk, atm(this_grid)%pkz, kappa, &
2329  atm(this_grid)%q, atm(this_grid)%ng, atm(this_grid)%flagstruct%ncnst, atm(this_grid)%gridstruct%area_64, 0., &
2330  .false., .false., &
2331  atm(this_grid)%flagstruct%moist_phys, atm(this_grid)%flagstruct%hydrostatic, &
2332  atm(this_grid)%flagstruct%nwat, atm(this_grid)%domain, atm(this_grid)%flagstruct%adiabatic, .false.)
2333 
2334  do n=ngrids,2,-1 !loop backwards to allow information to propagate from finest to coarsest grids
2335 
2336  !two-way updating
2337  if (atm(n)%neststruct%twowaynest ) then
2338  !if (grids_on_this_pe(n) .or. grids_on_this_pe(Atm(n)%parent_grid%grid_number)) then
2339  if (n==this_grid .or. atm(n)%parent_grid%grid_number==this_grid) then
2340  sphum = get_tracer_index(model_atmos, 'sphum')
2341  call twoway_nest_update(atm(n)%npx, atm(n)%npy, atm(n)%npz, zvir, &
2342  atm(n)%ncnst, sphum, atm(n)%u, atm(n)%v, atm(n)%w, &
2343  atm(n)%pt, atm(n)%delp, atm(n)%q, &
2344  atm(n)%pe, atm(n)%pkz, atm(n)%delz, atm(n)%ps, atm(n)%ptop, atm(n)%ak, atm(n)%bk, &
2345  atm(n)%gridstruct, atm(n)%flagstruct, atm(n)%neststruct, atm(n)%domain, &
2346  atm(n)%parent_grid, atm(n)%bd, n, .false.)
2347  endif
2348  endif
2349 
2350  end do
2351 
2352  !NOTE: these routines need to be called on any grid that has received an update, not just the coarsest grid.
2353  if (atm(this_grid)%neststruct%parent_of_twoway .and. grids_on_this_pe(n)) then
2354  call after_twoway_nest_update( atm(this_grid)%npx, atm(this_grid)%npy, atm(this_grid)%npz, &
2355  atm(this_grid)%ng, atm(this_grid)%ncnst, &
2356  atm(this_grid)%u, atm(this_grid)%v, atm(this_grid)%w, atm(this_grid)%delz, &
2357  atm(this_grid)%pt, atm(this_grid)%delp, atm(this_grid)%q, &
2358  atm(this_grid)%ps, atm(this_grid)%pe, atm(this_grid)%pk, atm(this_grid)%peln, atm(this_grid)%pkz, &
2359  atm(this_grid)%phis, atm(this_grid)%ua, atm(this_grid)%va, &
2360  atm(this_grid)%ptop, atm(this_grid)%gridstruct, atm(this_grid)%flagstruct, &
2361  atm(this_grid)%domain, atm(this_grid)%bd, time)
2362  endif
2363 
2364  endif ! ngrids > 1
2365 
2366  end subroutine twoway_nesting
2367 
2368 !!!CLEANUP: this routine assumes that the PARENT GRID has pt = (regular) temperature,
2369 !!!not potential temperature, which may cause problems when updating if this is not the case.
2370 
2371 !!! NOTE ALSO: parent_grid%flagstruct is NOT SET UP by default and may be missing much information
2372 !!! Either make sure that parent_grid%flagstruct is filled in fv_control or that proper steps
2373 !!! are taken to make sure null flags are not used
2374  subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, &
2375  u, v, w, pt, delp, q, &
2376  pe, pkz, delz, ps, ptop, ak, bk, &
2377  gridstruct, flagstruct, neststruct, &
2378  domain, parent_grid, bd, grid_number, conv_theta_in)
2380  real, intent(IN) :: zvir, ptop, ak(npz+1), bk(npz+1)
2381 
2382  integer, intent(IN) :: npx, npy, npz
2383  integer, intent(IN) :: ncnst, sphum, grid_number
2384  logical, intent(IN), OPTIONAL :: conv_theta_in
2385 
2386  type(fv_grid_bounds_type), intent(IN) :: bd
2387  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u
2388  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v
2389  real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1: )
2390 
2391  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
2392  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
2393  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst)
2394 
2395  real, intent(inout) :: pe (bd%is-1:bd%ie+1,npz+1,bd%js-1:bd%je+1)
2396  real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz)
2397  real, intent(inout) :: delz(bd%isd: ,bd%jsd: ,1: )
2398  real, intent(inout) :: ps (bd%isd:bd%ied ,bd%jsd:bd%jed)
2399 
2400  type(fv_grid_type), intent(INOUT) :: gridstruct
2401  type(fv_flags_type), intent(INOUT) :: flagstruct
2402  type(fv_nest_type), intent(INOUT) :: neststruct
2403  type(domain2d), intent(INOUT) :: domain
2404 
2405  type(fv_atmos_type), pointer, intent(IN) :: parent_grid
2406 
2407  real, allocatable :: t_nest(:,:,:), ps0(:,:)
2408  integer :: i,j,k,n
2409  integer :: isd_p, ied_p, jsd_p, jed_p, isc_p, iec_p, jsc_p, jec_p
2410  integer :: isg, ieg, jsg,jeg, npx_p, npy_p
2411  integer :: istart, iend
2412  real :: qmass_b, qmass_a, fix = 1.
2413  logical :: used, conv_theta=.true.
2414 
2415  real :: qdp( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
2416  real, allocatable, dimension(:,:,:) :: qdp_coarse
2417  real, allocatable, dimension(:,:,:) :: var_src
2418  real, allocatable, dimension(:,:,:) :: pt_src, w_src, u_src, v_src
2419  real(kind=f_p), allocatable :: q_diff(:,:,:)
2420  real :: L_sum_b(npz), L_sum_a(npz), blend_wt(parent_grid%npz)
2421  real :: pfull, ph1, ph2, rfcut, sgcut
2422 
2423  integer :: upoff
2424  integer :: is, ie, js, je
2425  integer :: isd, ied, jsd, jed
2426  integer :: isu, ieu, jsu, jeu
2427  logical, SAVE :: first_timestep = .true.
2428 
2429  is = bd%is
2430  ie = bd%ie
2431  js = bd%js
2432  je = bd%je
2433  isd = bd%isd
2434  ied = bd%ied
2435  jsd = bd%jsd
2436  jed = bd%jed
2437  isu = neststruct%isu
2438  ieu = neststruct%ieu
2439  jsu = neststruct%jsu
2440  jeu = neststruct%jeu
2441 
2442  upoff = neststruct%upoff
2443 
2444  !We update actual temperature, not theta.
2445  !If pt is actual temperature, set conv_theta to .false.
2446  if (present(conv_theta_in)) conv_theta = conv_theta_in
2447 
2448  if ((.not. parent_grid%neststruct%parent_proc) .and. (.not. neststruct%child_proc)) return
2449 
2450  call mpp_get_data_domain( parent_grid%domain, &
2451  isd_p, ied_p, jsd_p, jed_p )
2452  call mpp_get_compute_domain( parent_grid%domain, &
2453  isc_p, iec_p, jsc_p, jec_p )
2454 
2455  ph2 = parent_grid%ak(1)
2456  rfcut = max(flagstruct%rf_cutoff, parent_grid%flagstruct%rf_cutoff)
2457  sgcut = ak(flagstruct%n_sponge+1) + bk(flagstruct%n_sponge+1)*flagstruct%p_ref
2458  sgcut = max(sgcut, parent_grid%ak(parent_grid%flagstruct%n_sponge+1) + parent_grid%bk(parent_grid%flagstruct%n_sponge+1)*parent_grid%flagstruct%p_ref)
2459  rfcut = max(rfcut, sgcut)
2460  do k=1,parent_grid%npz
2461  ph1 = ph2
2462  ph2 = parent_grid%ak(k+1) + parent_grid%bk(k+1)*parent_grid%flagstruct%p_ref
2463  pfull = (ph2 - ph1) / log(ph2/ph1)
2464  !Do not remap if above the nested-grid ptop or within the top two nested-grid levels
2465  if ( pfull <= ak(3) .or. k <= 2 ) then
2466  blend_wt(k) = 0.
2467  !Partial blend of nested-grid's Rayleigh damping region
2468  !ALSO do not blend n_sponge areas??
2469  elseif (pfull <= rfcut) then
2470  blend_wt(k) = 0.
2471  !blend_wt(k) = neststruct%update_blend*cos(0.5*pi*log(rfcut/pfull)/log(rfcut/ptop))**2
2472  else
2473  blend_wt(k) = neststruct%update_blend
2474  endif
2475  enddo
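 ! blend_wt(k) is zero above the nested-grid top and through the Rayleigh-damping /
 ! sponge layers, and equals neststruct%update_blend elsewhere; it is the weight given
 ! to the remapped nested-grid data when blending into the parent fields below.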
2476 
2477  if (parent_grid%neststruct%parent_proc .and. is_master() .and. first_timestep) then
2478  print*, ' TWO-WAY BLENDING WEIGHTS'
2479  ph2 = parent_grid%ak(1)
2480  do k=1,parent_grid%npz
2481  ph1 = ph2
2482  ph2 = parent_grid%ak(k+1) + parent_grid%bk(k+1)*parent_grid%flagstruct%p_ref
2483  pfull = (ph2 - ph1) / log(ph2/ph1)
2484  print*, k, pfull, blend_wt(k)
2485  enddo
2486  first_timestep = .false.
2487  endif
2488 
2489 #ifndef SW_DYNAMICS
2490  if (neststruct%nestupdate /= 3 .and. neststruct%nestupdate /= 8) then
2491 
2492  if (neststruct%child_proc) then
2493  call mpp_update_domains(ps, domain, complete=.true.)
2494  if (.not. flagstruct%hydrostatic) call mpp_update_domains(w, domain)
2495  ! if (neststruct%child_proc) call mpp_update_domains(delz, domain)
2496  call mpp_update_domains(u, v, domain, gridtype=dgrid_ne)
2497  endif
2498  allocate(pt_src(isd_p:ied_p,jsd_p:jed_p,npz))
2499  pt_src = -999.
2500 
2501  if (conv_theta) then
2502 
2503  if (neststruct%child_proc) then
2504  !pt is potential temperature on the nested grid, but actual
2505  !temperature on the coarse grid. Compute actual temperature
2506  !on the nested grid, then gather.
2507  allocate(t_nest(isd:ied,jsd:jed,1:npz))
2508 !$OMP parallel do default(none) shared(npz,js,je,is,ie,t_nest,pt,pkz,zvir,q,sphum)
2509  do k=1,npz
2510  do j=js,je
2511  do i=is,ie
2512  t_nest(i,j,k) = pt(i,j,k)*pkz(i,j,k)/(1.+zvir*q(i,j,k,sphum))
2513  enddo
2514  enddo
2515  enddo
2516  call mpp_update_domains(t_nest, domain, complete=.true.)
2517  endif
2518 
2519  call update_coarse_grid(pt_src, &
2520  t_nest, global_nest_domain, &
2521  gridstruct%dx, gridstruct%dy, gridstruct%area, &
2522  bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
2523  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
2524  npx, npy, npz, 0, 0, &
2525  neststruct%refinement, neststruct%nestupdate, upoff, 0, &
2526  parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1)
2527  if (neststruct%child_proc) deallocate(t_nest)
2528  else
2529  if (neststruct%child_proc) call mpp_update_domains(pt, domain, complete=.true.)
2530 
2531  call update_coarse_grid(pt_src, &
2532  pt, global_nest_domain, &
2533  gridstruct%dx, gridstruct%dy, gridstruct%area, &
2534  bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
2535  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
2536  npx, npy, npz, 0, 0, &
2537  neststruct%refinement, neststruct%nestupdate, upoff, 0, &
2538  parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1)
2539 
2540  endif !conv_theta
2541 
2542  call mpp_sync!self
2543 
2544 
2545  !We don't currently have a good way to communicate all namelist items between
2546  ! grids (since we cannot assume that we have internal namelists available), so
2547  ! we get the klutzy structure here.
2548  if ( (neststruct%child_proc .and. .not. flagstruct%hydrostatic) .or. &
2549  (parent_grid%neststruct%parent_proc .and. .not. parent_grid%flagstruct%hydrostatic) ) then
2550 
2551  allocate(w_src(isd_p:ied_p,jsd_p:jed_p,npz))
2552  w_src = -999.
2553  call update_coarse_grid(w_src, w, global_nest_domain, &
2554  gridstruct%dx, gridstruct%dy, gridstruct%area, &
2555  bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
2556  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
2557  npx, npy, npz, 0, 0, &
2558  neststruct%refinement, neststruct%nestupdate, upoff, 0, &
2559  parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1)
2560  call mpp_sync!self
2561 
2562  !Updating for delz not yet implemented;
2563  ! may need to think very carefully how one would do this!!!
2564  ! consider updating specific volume instead?
2565 !!$ call update_coarse_grid(parent_grid%delz, delz, global_nest_domain, &
2566 !!$ bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, npz, 0, 0, &
2567 !!$ neststruct%refinement, neststruct%nestupdate, upoff, 0, parent_grid%neststruct%parent_proc, neststruct%child_proc)
2568 
2569  end if
2570 
2571  end if !Neststruct%nestupdate /= 3
2572 
2573 #endif
2574 
2575  allocate(u_src(isd_p:ied_p, jsd_p:jed_p+1,npz))
2576  allocate(v_src(isd_p:ied_p+1,jsd_p:jed_p,npz))
2577  u_src = -999.
2578  v_src = -999.
2579  call update_coarse_grid(u_src, v_src, u, v, global_nest_domain, &
2580  gridstruct%dx, gridstruct%dy, gridstruct%area, &
2581  bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
2582  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
2583  npx, npy, npz, 0, 1, 1, 0, &
2584  neststruct%refinement, neststruct%nestupdate, upoff, 0, &
2585  parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1, gridtype=dgrid_ne)
2586 
2587  call mpp_sync()
2588 
2589 #ifndef SW_DYNAMICS
2590  if (neststruct%nestupdate >= 5 .and. npz > 4) then
2591 
2592  !Use PS0 from nested grid, NOT the full delp. Also we assume the same number of levels on both grids.
2593  !PS0 should be initially set to be ps so that this routine does NOTHING outside of the update region
2594 
2595  !Re-compute nested (AND COARSE) grid ps
2596 
2597  allocate(ps0(isd_p:ied_p,jsd_p:jed_p))
2598  if (parent_grid%neststruct%parent_proc) then
2599 
2600  parent_grid%ps = parent_grid%ptop
2601 !$OMP parallel do default(none) shared(jsd_p,jed_p,isd_p,ied_p,parent_grid)
2602  do j=jsd_p,jed_p
2603  do k=1,parent_grid%npz
2604  do i=isd_p,ied_p
2605  parent_grid%ps(i,j) = parent_grid%ps(i,j) + &
2606  parent_grid%delp(i,j,k)
2607  end do
2608  end do
2609  end do
2610 
2611  ps0 = parent_grid%ps
2612  endif
2613 
2614  if (neststruct%child_proc) then
2615 
2616  ps = ptop
2617 !$OMP parallel do default(none) shared(npz,jsd,jed,isd,ied,ps,delp)
2618  do j=jsd,jed
2619  do k=1,npz
2620  do i=isd,ied
2621  ps(i,j) = ps(i,j) + delp(i,j,k)
2622  end do
2623  end do
2624  end do
2625  endif
2626 
2627  call update_coarse_grid(ps0, ps, global_nest_domain, &
2628  gridstruct%dx, gridstruct%dy, gridstruct%area, &
2629  bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
2630  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
2631  npx, npy, 0, 0, &
2632  neststruct%refinement, neststruct%nestupdate, upoff, 0, parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1)
2633 
2634  !!! The mpp version of update_coarse_grid does not return a consistent value of ps
2635  !!! across PEs, as it does not go into the haloes of a given coarse-grid PE. This
2636  !!! update_domains call takes care of the problem.
2637 
2638  if (parent_grid%neststruct%parent_proc) then
2639  call mpp_update_domains(parent_grid%ps, parent_grid%domain, complete=.false.)
2640  call mpp_update_domains(ps0, parent_grid%domain, complete=.true.)
2641  endif
2642 
2643  call mpp_sync!self
2644 
2645  if (parent_grid%global_tile == neststruct%parent_tile) then
2646 
2647  if (parent_grid%neststruct%parent_proc) then
2648 
2649  !comment out if statement to always remap theta instead of t in the remap-update.
2650  !(In LtE typically we use remap_t = .true.: remapping t is better (except in
2651  !idealized simulations with a background uniform theta) since near the top
2652  !boundary theta is exponential, which is hard to accurately interpolate with a spline.)
2653  if (.not. parent_grid%flagstruct%remap_t) then
2654 !$OMP parallel do default(none) shared(jsc_p,jec_p,isc_p,iec_p,parent_grid,zvir,sphum)
2655  do k=1,parent_grid%npz
2656  do j=jsc_p,jec_p
2657  do i=isc_p,iec_p
2658  parent_grid%pt(i,j,k) = &
2659  parent_grid%pt(i,j,k)/parent_grid%pkz(i,j,k)*&
2660  (1.+zvir*parent_grid%q(i,j,k,sphum))
2661  end do
2662  end do
2663  end do
2664  end if
2665 !!$!!!! DEBUG CODE
2666 !!$ do k=1,parent_grid%npz
2667 !!$ write(mpp_pe()+3000,*) 'k = ', k, parent_grid%ak(k), parent_grid%bk(k)
2668 !!$ enddo
2669 !!$ write(mpp_pe()+3000,*)
2670 !!$ do k=1,npz
2671 !!$ write(mpp_pe()+3000,*) 'k = ', k, ak(k), bk(k)
2672 !!$ enddo
2673 !!$!!!! END DEBUG CODE
2674 
2675  call update_remap_tqw(parent_grid%npz, parent_grid%ak, parent_grid%bk, &
2676  parent_grid%ps, &
2677  parent_grid%pt, parent_grid%q, parent_grid%w, &
2678  parent_grid%flagstruct%hydrostatic, &
2679  npz, ps0, ak, bk, pt_src, w_src, &
2680  zvir, parent_grid%ptop, ncnst, &
2681  parent_grid%flagstruct%kord_tm, parent_grid%flagstruct%kord_tr, &
2682  parent_grid%flagstruct%kord_wz, &
2683  isc_p, iec_p, jsc_p, jec_p, isd_p, ied_p, jsd_p, jed_p, .false., &
2684  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, blend_wt) !neststruct%nestupdate < 7)
2685  if (.not. parent_grid%flagstruct%remap_t) then
2686 !$OMP parallel do default(none) shared(jsc_p,jec_p,isc_p,iec_p,parent_grid,zvir,sphum)
2687  do k=1,parent_grid%npz
2688  do j=jsc_p,jec_p
2689  do i=isc_p,iec_p
2690  parent_grid%pt(i,j,k) = &
2691  parent_grid%pt(i,j,k)*parent_grid%pkz(i,j,k) / &
2692  (1.+zvir*parent_grid%q(i,j,k,sphum))
2693  end do
2694  end do
2695  end do
2696  end if
2697 
2698  call update_remap_uv(parent_grid%npz, parent_grid%ak, parent_grid%bk, &
2699  parent_grid%ps, parent_grid%u, parent_grid%v, &
2700  npz, ak, bk, ps0, u_src, v_src, &
2701  parent_grid%flagstruct%kord_mt, &
2702  isc_p, iec_p, jsc_p, jec_p, isd_p, ied_p, jsd_p, jed_p, parent_grid%ptop, &
2703  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, blend_wt)
2704 
2705  endif !parent_grid%neststruct%parent_proc
2706 
2707  end if
2708 
2709  if (allocated(ps0)) deallocate(ps0)
2710 
2711  end if
2712 
2713 #endif
2714 
2715 
2716 
2717  if (allocated(pt_src)) deallocate(pt_src)  !pt_src and w_src are not allocated in all configurations
2718  if (allocated(w_src))  deallocate(w_src)   !(e.g. nestupdate = 3 or 8; w_src also needs nonhydrostatic)
2719  deallocate(u_src)
2720  deallocate(v_src)
2721 
2722 
2723  end subroutine twoway_nest_update
2724 
2725  subroutine level_sum(q, area, domain, bd, npz, L_sum)
2727  integer, intent(IN) :: npz
2728  type(fv_grid_bounds_type), intent(IN) :: bd
2729  real, intent(in) :: area( bd%isd:bd%ied ,bd%jsd:bd%jed)
2730  real, intent(in) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
2731  real, intent(OUT) :: L_sum( npz )
2732  type(domain2d), intent(IN) :: domain
2733 
2734  integer :: i, j, k, n
2735  real :: qA!(bd%is:bd%ie, bd%js:bd%je)
2736 
2737  do k=1,npz
2738  qa = 0.
2739  do j=bd%js,bd%je
2740  do i=bd%is,bd%ie
2741  !qA(i,j) = q(i,j,k)*area(i,j)
2742  qa = qa + q(i,j,k)*area(i,j)
2743  enddo
2744  enddo
2745  call mp_reduce_sum(qa)
2746  l_sum(k) = qa
2747 ! L_sum(k) = mpp_global_sum(domain, qA, flags=BITWISE_EXACT_SUM)
2748 ! L_sum(k) = mpp_global_sum(domain, qA, flags=BITWISE_EFP_SUM) ! doesn't work??
2749  enddo
2750 
2751  end subroutine level_sum
2752 
2753 ![ij]start and [ij]end should already take staggering into account
2754 !!! CHECK ARRAY BOUNDS!!
2755 !! Make sure data is in the correct place.
2756  subroutine remap_up_k(ps_src, ps_dst, ak_src, bk_src, ak_dst, bk_dst, var_src, var_dst, &
2757  bd, istart, iend, jstart, jend, istag, jstag, npz_src, npz_dst, iv, kord, blend_wt, log_pe)
2759  !Note here that pe is TRANSPOSED to make loops faster
2760  type(fv_grid_bounds_type), intent(IN) :: bd
2761  integer, intent(IN) :: istart, iend, jstart, jend, npz_dst, npz_src, iv, kord, istag, jstag
2762  logical, intent(IN) :: log_pe
2763  real, intent(INOUT) :: ps_src(bd%isd:bd%ied,bd%jsd:bd%jed), var_src(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz_src)
2764  real, intent(INOUT) :: ps_dst(bd%isd:bd%ied,bd%jsd:bd%jed), var_dst(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz_dst)
2765  real, intent(IN) :: blend_wt(npz_dst), ak_src(npz_src+1), bk_src(npz_src+1), ak_dst(npz_dst+1), bk_dst(npz_dst+1)
2766 
2767  integer :: i, j, k
2768  real pe_src(istart:iend,npz_src+1)
2769  real pe_dst(istart:iend,npz_dst+1)
2770  real peln_src(istart:iend,npz_src+1)
2771  real peln_dst(istart:iend,npz_dst+1)
2772  character(120) :: errstring
2773  real var_dst_unblend(istart:iend,npz_dst)
2774  real bw1, bw2
2775 
2776  if (iend < istart) return
2777  if (jend < jstart) return
2778 
2779 !!$!!!! DEBUG CODE
2780 !!$ write(debug_unit,*) bd%isd,bd%ied,bd%jsd,bd%jed
2781 !!$ write(debug_unit,*) istart,iend,jstart,jend,istag,jstag
2782 !!$ write(debug_unit,*)
2783 !!$!!! END DEBUG CODE
2784 
2785 
2786  !Compute Eulerian pressures
2787  !NOTE: assumes that istag + jstag <= 1
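 ! Interface pressures follow the hybrid-coordinate definition pe(k) = ak(k) + bk(k)*ps;
 ! for a staggered variable, ps is averaged between the two cells adjacent to the face
 ! (i and i-1 when istag > 0, j and j-1 when jstag > 0).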
2788  if (istag > 0) then
2789 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,npz_dst,pe_src,ak_src,ps_src,bk_src,pe_dst,ak_dst,ps_dst,bk_dst)
2790  do j=jstart,jend
2791  do k=1,npz_src+1
2792  do i=istart,iend
2793  pe_src(i,k) = ak_src(k) + 0.5*(ps_src(i,j)+ps_src(i-1,j))*bk_src(k)
2794  enddo
2795  enddo
2796  do k=1,npz_dst+1
2797  do i=istart,iend
2798  pe_dst(i,k) = ak_dst(k) + 0.5*(ps_dst(i,j)+ps_dst(i-1,j))*bk_dst(k)
2799  enddo
2800  enddo
2801  enddo
2802  elseif (jstag > 0) then
2803 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,npz_dst,pe_src,ak_src,ps_src,bk_src,pe_dst,ak_dst,ps_dst,bk_dst)
2804  do j=jstart,jend
2805  do k=1,npz_src+1
2806  do i=istart,iend
2807  pe_src(i,k) = ak_src(k) + 0.5*(ps_src(i,j)+ps_src(i,j-1))*bk_src(k)
2808  enddo
2809  enddo
2810  do k=1,npz_dst+1
2811  do i=istart,iend
2812  pe_dst(i,k) = ak_dst(k) + 0.5*(ps_dst(i,j)+ps_dst(i,j-1))*bk_dst(k)
2813  enddo
2814  enddo
2815  enddo
2816  else
2817 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,npz_dst,pe_src,ak_src,ps_src,bk_src,pe_dst,ak_dst,ps_dst,bk_dst)
2818  do j=jstart,jend
2819  do k=1,npz_src+1
2820  do i=istart,iend
2821  pe_src(i,k) = ak_src(k) + ps_src(i,j)*bk_src(k)
2822  enddo
2823  enddo
2824  do k=1,npz_dst+1
2825  do i=istart,iend
2826  pe_dst(i,k) = ak_dst(k) + ps_dst(i,j)*bk_dst(k)
2827  enddo
2828  enddo
2829  enddo
2830  endif
2831 
2832  if (log_pe) then
2833 
2834 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,npz_dst,pe_src,pe_dst,var_src,var_dst,iv,kord,blend_wt) &
2835 !$OMP private(peln_src,peln_dst,bw1,bw2,var_dst_unblend)
2836  do j=jstart,jend
2837 
2838  do k=1,npz_src+1
2839  do i=istart,iend
2840  peln_src(i,k) = log(pe_src(i,k))
2841  enddo
2842  enddo
2843 
2844  do k=1,npz_dst+1
2845  do i=istart,iend
2846  peln_dst(i,k) = log(pe_dst(i,k))
2847  enddo
2848  enddo
2849 
2850  !remap_2d seems to have some bugs when doing logp remapping
2851  call mappm(npz_src, peln_src, var_src(istart:iend,j:j,:), &
2852  npz_dst, peln_dst, var_dst_unblend, &
2853  istart, iend, iv, kord, peln_dst(istart,1))
2854 
2855  do k=1,npz_dst
2856  bw1 = blend_wt(k)
2857  bw2 = 1. - bw1
2858  do i=istart,iend
2859  var_dst(i,j,k) = var_dst(i,j,k)*bw2 + var_dst_unblend(i,k)*bw1
2860  enddo
2861  enddo
2862  enddo
2863 
2864  else
2865 
2866 !$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,npz_dst,pe_src,pe_dst,var_src,var_dst,iv,kord,blend_wt) &
2867 !$OMP private(bw1,bw2,var_dst_unblend)
2868  do j=jstart,jend
2869 
2870  call mappm(npz_src, pe_src, var_src(istart:iend,j:j,:), &
2871  npz_dst, pe_dst, var_dst_unblend, &
2872  istart, iend, iv, kord, pe_dst(istart,1))
2873 
2874  do k=1,npz_dst
2875  bw1 = blend_wt(k)
2876  bw2 = 1. - bw1
2877  do i=istart,iend
2878  var_dst(i,j,k) = var_dst(i,j,k)*bw2 + var_dst_unblend(i,k)*bw1
2879  enddo
2880  enddo
2881  enddo
2882 
2883  endif
2884 
2885  end subroutine remap_up_k
2886 
2887  subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, &
2888  u, v, w, delz, pt, delp, q, &
2889  ps, pe, pk, peln, pkz, phis, ua, va, &
2890  ptop, gridstruct, flagstruct, &
2891  domain, bd, Time)
2893  type(fv_grid_bounds_type), intent(IN) :: bd
2894  real, intent(IN) :: ptop
2895 
2896  integer, intent(IN) :: ng, npx, npy, npz
2897  integer, intent(IN) :: ncnst
2898 
2899  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u
2900  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v
2901  real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1: )
2902  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
2903  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
2904  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst)
2905  real, intent(inout) :: delz(bd%is: ,bd%js: ,1: )
2906 
2907 !-----------------------------------------------------------------------
2908 ! Auxiliary pressure arrays:
2909 ! The 5 vars below can be re-computed from delp and ptop.
2910 !-----------------------------------------------------------------------
2911 ! dyn_aux:
2912  real, intent(inout) :: ps (bd%isd:bd%ied ,bd%jsd:bd%jed)
2913  real, intent(inout) :: pe (bd%is-1:bd%ie+1, npz+1,bd%js-1:bd%je+1)
2914  real, intent(inout) :: pk (bd%is:bd%ie,bd%js:bd%je, npz+1)
2915  real, intent(inout) :: peln(bd%is:bd%ie,npz+1,bd%js:bd%je)
2916  real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz)
2917 
2918 !-----------------------------------------------------------------------
2919 ! Others:
2920 !-----------------------------------------------------------------------
2921  real, intent(inout) :: phis(bd%isd:bd%ied,bd%jsd:bd%jed)
2922 
2923  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz):: ua, va
2924  type(fv_grid_type), intent(IN) :: gridstruct
2925  type(fv_flags_type), intent(IN) :: flagstruct
2926  type(domain2d), intent(INOUT) :: domain
2927  type(time_type), intent(IN) :: Time
2928 
2929  logical :: bad_range
2930 
2931  integer :: is, ie, js, je
2932  integer :: isd, ied, jsd, jed
2933 
2934  is = bd%is
2935  ie = bd%ie
2936  js = bd%js
2937  je = bd%je
2938  isd = bd%isd
2939  ied = bd%ied
2940  jsd = bd%jsd
2941  jed = bd%jed
2942 
2943  call cubed_to_latlon(u, v, ua, va, &
2944  gridstruct, npx, npy, npz, &
2945  1, gridstruct%grid_type, domain, &
2946  gridstruct%bounded_domain, flagstruct%c2l_ord, bd)
2947 
2948 #ifndef SW_DYNAMICS
2949 
2950  !To get coarse grid pkz, etc right after a two-way update so
2951  !that it is consistent across a restart:
2952  !(should only be called after doing such an update)
2953 
2954  !! CLEANUP: move to twoway_nest_update??
2955  call p_var(npz, is, ie, js, je, ptop, ptop_min, &
2956  delp, delz, &
2957  pt, ps, &
2958  pe, peln, &
2959  pk, pkz, kappa, &
2960  q, ng, flagstruct%ncnst, gridstruct%area_64, 0., &
2961  .false., .false., & !mountain argument not used
2962  flagstruct%moist_phys, flagstruct%hydrostatic, &
2963  flagstruct%nwat, domain, flagstruct%adiabatic, .false.)
2964 
2965 #endif
2966 
2967  if (flagstruct%range_warn) then
2968  call range_check('TA update', pt, is, ie, js, je, ng, npz, gridstruct%agrid, 130., 350., bad_range, time)
2969  call range_check('UA update', ua, is, ie, js, je, ng, npz, gridstruct%agrid, -220., 250., bad_range, time)
2970  call range_check('VA update', va, is, ie, js, je, ng, npz, gridstruct%agrid, -220., 220., bad_range, time)
2971  if (.not. flagstruct%hydrostatic) then
2972  call range_check('W update', w, is, ie, js, je, ng, npz, gridstruct%agrid, -50., 100., bad_range, time)
2973  endif
2974  endif
2975 
2976 
2977 
2978  end subroutine after_twoway_nest_update
2979 
2982  !This does not yet do anything for the tracers
2983 
2984  subroutine update_remap_tqw( npz, ak_dst, bk_dst, ps_dst, t_dst, q_dst, w_dst, &
2985  hydrostatic, &
2986  kmd, ps_src, ak_src, bk_src, t_src, w_src, &
2987  zvir, ptop, nq, kord_tm, kord_tr, kord_wz, &
2988  is, ie, js, je, isd, ied, jsd, jed, do_q, &
2989  istart, iend, jstart, jend, blend_wt)
2990  integer, intent(in):: npz, kmd, nq, kord_tm, kord_tr, kord_wz
2991  real, intent(in):: zvir, ptop
2992  real, intent(in):: ak_src(kmd+1), bk_src(kmd+1)
2993  real, intent(in):: ak_dst(npz+1), bk_dst(npz+1), blend_wt(npz)
2994  real, intent(in), dimension(isd:ied,jsd:jed):: ps_src
2995  real, intent(in), dimension(isd:ied,jsd:jed):: ps_dst
2996  real, intent(inout), dimension(isd:ied,jsd:jed,npz):: t_dst, w_dst
2997  real, intent(inout), dimension(isd:ied,jsd:jed,npz,nq):: q_dst
2998  real, intent(in), dimension(isd:ied,jsd:jed,kmd):: t_src, w_src
2999  integer, intent(in) :: is, ie, js, je, isd, ied, jsd, jed, istart, iend, jstart, jend
3000  logical, intent(in) :: hydrostatic, do_q
3001 ! local:
3002  real, dimension(is:ie,kmd):: tp, qp
3003  real, dimension(is:ie,kmd+1):: pe0, pn0
3004  real, dimension(is:ie,npz):: qn1
3005  real, dimension(is:ie,npz+1):: pe1, pn1
3006  integer i,j,k,iq
3007  real :: wt1, wt2
3008 
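 ! Remap t_src (and w_src, if nonhydrostatic) from the source hybrid coordinate
 ! (ak_src, bk_src, ps_src) onto the destination coordinate (ak_dst, bk_dst, ps_dst)
 ! over the update region, then blend the result into t_dst/w_dst using blend_wt.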
3009  if (do_q) call mpp_error(fatal, ' update_remap_tqw: q remapping not yet supported')
3010 
3011  !This line to check if the update region is correctly defined or not is
3012  ! IMPORTANT. Sometimes one or the other pair of limits will give a
3013  ! non-empty loop, even though no data was transferred! This is why
3014  ! I was having so much trouble getting the remap-update to work --- lmh 11jul17
3015  if (istart > iend .or. jstart > jend) return
3016 
3017 !$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak_dst,bk_dst,ps_dst,q_dst,npz,ptop,do_q,&
3018 !$OMP t_dst,w_dst,t_src,w_src,ak_src,bk_src,ps_src,nq,hydrostatic,kord_tm,kord_tr,kord_wz,istart,iend,jstart,jend,blend_wt) &
3019 !$OMP private(pe0,pn0,pe1,pn1,qp,tp,qn1,wt1,wt2)
3020  do 5000 j=jstart,jend
3021 
3022  do k=1,kmd+1
3023  do i=istart,iend
3024  pe0(i,k) = ak_src(k) + bk_src(k)*ps_src(i,j)
3025  pn0(i,k) = log(pe0(i,k))
3026  enddo
3027  enddo
3028  do k=1,npz+1
3029  do i=istart,iend
3030  pe1(i,k) = ak_dst(k) + bk_dst(k)*ps_dst(i,j)
3031  pn1(i,k) = log(pe1(i,k))
3032  enddo
3033  enddo
3034  if (do_q) then
3035  do iq=1,nq
3036  do k=1,kmd
3037  do i=istart,iend
3038  qp(i,k) = q_dst(i,j,k,iq)
3039  enddo
3040  enddo
3041  call mappm(kmd, pe0, qp, npz, pe1, qn1, is,ie, 0, kord_tr, ptop) !not sure about indices
3042  do k=1,npz
3043  do i=istart,iend
3044  q_dst(i,j,k,iq) = qn1(i,k)
3045  enddo
3046  enddo
3047  enddo
3048  endif
3049 
3050  do k=1,kmd
3051  do i=istart,iend
3052  tp(i,k) = t_src(i,j,k)
3053  enddo
3054  enddo
3055  !Remap T using logp
3056  call mappm(kmd, pn0(istart:iend,:), tp(istart:iend,:), npz, pn1(istart:iend,:), qn1(istart:iend,:), istart,iend, 1, abs(kord_tm), ptop)
3057 
3058  do k=1,npz
3059  wt1 = blend_wt(k)
3060  wt2 = 1. - wt1
3061  do i=istart,iend
3062  t_dst(i,j,k) = qn1(i,k)*wt1 + t_dst(i,j,k)*wt2
3063  enddo
3064  enddo
3065 
3066  if (.not. hydrostatic) then
3067  do k=1,kmd
3068  do i=istart,iend
3069  tp(i,k) = w_src(i,j,k)
3070  enddo
3071  enddo
3072  !Remap w using p
3073  !Using iv == -1 instead of -2
3074  call mappm(kmd, pe0(istart:iend,:), tp(istart:iend,:), npz, pe1(istart:iend,:), qn1(istart:iend,:), istart,iend, -1, kord_wz, ptop)
3075 
3076  do k=1,npz
3077  wt1 = blend_wt(k)
3078  wt2 = 1. - wt1
3079  do i=istart,iend
3080  w_dst(i,j,k) = qn1(i,k)*wt1 + w_dst(i,j,k)*wt2
3081  enddo
3082  enddo
3083  endif
3084 
3085 5000 continue
3086 
3087  end subroutine update_remap_tqw
3088 
3089  !remap_uv as-is remaps only a-grid velocities. A new routine has been written to handle staggered grids.
3090  subroutine update_remap_uv(npz, ak_dst, bk_dst, ps_dst, u_dst, v_dst, &
3091  kmd, ak_src, bk_src, ps_src, u_src, v_src, &
3092  kord_mt, &
3093  is, ie, js, je, isd, ied, jsd, jed, ptop, &
3094  istart, iend, jstart, jend, blend_wt)
3095  integer, intent(in):: npz
3096  real, intent(in):: ak_dst(npz+1), bk_dst(npz+1), blend_wt(npz)
3097  real, intent(in):: ps_dst(isd:ied,jsd:jed)
3098  real, intent(inout), dimension(isd:ied,jsd:jed+1,npz):: u_dst
3099  real, intent(inout), dimension(isd:ied+1,jsd:jed,npz):: v_dst
3100  integer, intent(in):: kmd
3101  real, intent(in):: ak_src(kmd+1), bk_src(kmd+1)
3102  real, intent(in):: ps_src(isd:ied,jsd:jed)
3103  real, intent(inout), dimension(isd:ied,jsd:jed+1,kmd):: u_src
3104  real, intent(inout), dimension(isd:ied+1,jsd:jed,kmd):: v_src
3105 !
3106  integer, intent(in):: kord_mt
3107  real, intent(IN) :: ptop
3108  integer, intent(in) :: is, ie, js, je, isd, ied, jsd, jed
3109  integer, intent(IN) :: istart, iend, jstart, jend
3110 !
3111 ! local:
3112  real, dimension(is:ie+1,kmd+1):: pe0
3113  real, dimension(is:ie+1,npz+1):: pe1
3114  real, dimension(is:ie+1,kmd):: qt
3115  real, dimension(is:ie+1,npz):: qn1
3116  integer i,j,k
3117  real :: wt1, wt2
3118 
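 ! Same remap-and-blend procedure as update_remap_tqw, but for the staggered D-grid
 ! winds: u is remapped on interface pressures averaged in j (ps at j and j-1) and v on
 ! interface pressures averaged in i (ps at i and i-1), then blended with blend_wt.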
3119  !This line to check if the update region is correctly defined or not is
3120  ! IMPORTANT. Sometimes one or the other pair of limits will give a
3121  ! non-empty loop, even though no data was transferred!
3122  if (istart > iend .or. jstart > jend) return
3123 
3124 !------
3125 ! map u
3126 !------
3127 !$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak_dst,bk_dst,ps_dst,u_dst,v_dst,npz,ak_src,bk_src,ps_src,u_src,v_src,ptop,kord_mt,istart,iend,jstart,jend,blend_wt) &
3128 !$OMP private(pe0,pe1,qt,qn1,wt1,wt2)
3129  do j=jstart,jend+1
3130 !------
3131 ! Data
3132 !------
3133  do k=1,kmd+1
3134  do i=istart,iend
3135  pe0(i,k) = ak_src(k) + bk_src(k)*0.5*(ps_src(i,j)+ps_src(i,j-1))
3136  enddo
3137  enddo
3138 !------
3139 ! Model
3140 !------
3141  do k=1,npz+1
3142  do i=istart,iend
3143  pe1(i,k) = ak_dst(k) + bk_dst(k)*0.5*(ps_dst(i,j)+ps_dst(i,j-1))
3144  enddo
3145  enddo
3146 !------
3147 !Do map
3148 !------
3149  qt = 0.
3150  do k=1,kmd
3151  do i=istart,iend
3152  qt(i,k) = u_src(i,j,k)
3153  enddo
3154  enddo
3155  qn1 = 0.
3156  call mappm(kmd, pe0(istart:iend,:), qt(istart:iend,:), npz, pe1(istart:iend,:), qn1(istart:iend,:), istart,iend, -1, kord_mt, ptop)
3157  do k=1,npz
3158  wt1 = blend_wt(k)
3159  wt2 = 1. - wt1
3160  do i=istart,iend
3161  u_dst(i,j,k) = qn1(i,k)*wt1 + u_dst(i,j,k)*wt2
3162  enddo
3163  enddo
3164 
3165  end do
3166 
3167 !------
3168 ! map v
3169 !------
3170 !$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak_dst,bk_dst,ps_dst,u_dst,v_dst,ak_src,bk_src,ps_src,npz,u_src,v_src,ptop,istart,iend,jstart,jend,blend_wt) &
3171 !$OMP private(pe0,pe1,qt,qn1,wt1,wt2)
3172  do j=jstart,jend
3173 !------
3174 ! Data
3175 !------
3176  do k=1,kmd+1
3177  do i=istart,iend+1
3178  pe0(i,k) = ak_src(k) + bk_src(k)*0.5*(ps_src(i,j)+ps_src(i-1,j))
3179  enddo
3180  enddo
3181 !------
3182 ! Model
3183 !------
3184  do k=1,npz+1
3185  do i=istart,iend+1
3186  pe1(i,k) = ak_dst(k) + bk_dst(k)*0.5*(ps_dst(i,j)+ps_dst(i-1,j))
3187  enddo
3188  enddo
3189 !------
3190 !Do map
3191 !------
3192  qt = 0.
3193  do k=1,kmd
3194  do i=istart,iend+1
3195  qt(i,k) = v_src(i,j,k)
3196  enddo
3197  enddo
3198  qn1 = 0.
3199  call mappm(kmd, pe0(istart:iend+1,:), qt(istart:iend+1,:), npz, pe1(istart:iend+1,:), qn1(istart:iend+1,:), istart,iend+1, -1, 8, ptop)
3200  do k=1,npz
3201  wt1 = blend_wt(k)
3202  wt2 = 1. - wt1
3203  do i=istart,iend+1
3204  v_dst(i,j,k) = qn1(i,k)*wt1 + v_dst(i,j,k)*wt2 !Does this kill OMP???
3205  enddo
3206  enddo
3207  end do
3208 
3209  end subroutine update_remap_uv
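For the staggered D-grid winds the structure is identical to the scalar update; the only difference is where the interface pressures are evaluated. u(i,j) sits on the face shared by cells (i,j-1) and (i,j), and v(i,j) on the face shared by cells (i-1,j) and (i,j), so the surface pressure entering ak + bk*ps is the average of the two adjacent cell-centre values. (Note also that the v remap above hard-codes kord = 8, whereas the u remap uses the kord_mt argument.) A minimal sketch of the face-pressure construction, with illustrative names only, is:

  subroutine face_interface_pressures(nk1, ak, bk, ps_a, ps_b, pe_face)
    implicit none
    integer, intent(in) :: nk1             ! number of interfaces (= levels + 1)
    real, intent(in)    :: ak(nk1), bk(nk1)
    real, intent(in)    :: ps_a, ps_b      ! surface pressure in the two cells sharing the face
    real, intent(out)   :: pe_face(nk1)
    integer :: k
    do k=1,nk1
       ! same formula as in update_remap_uv: hybrid-coordinate pressure
       ! evaluated with the face-averaged surface pressure
       pe_face(k) = ak(k) + bk(k)*0.5*(ps_a + ps_b)
    enddo
  end subroutine face_interface_pressures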
3210 
3211 
3212 
3213 end module fv_nesting_mod
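In both routines 'blend_wt' acts as a simple convex combination: a weight of 1 at a given level makes the updated field equal to the remapped value, a weight of 0 leaves the existing value unchanged, and intermediate weights relax partway. A tiny self-contained illustration (the numbers are arbitrary):

  program blend_weight_demo
    implicit none
    real :: dst, remapped, wt
    dst = 280.0        ! existing value on the destination grid
    remapped = 282.0   ! value remapped from the source grid
    wt = 1.0  ; print *, wt*remapped + (1.-wt)*dst   ! 282.0 : remapped value adopted
    wt = 0.0  ; print *, wt*remapped + (1.-wt)*dst   ! 280.0 : existing value kept
    wt = 0.25 ; print *, wt*remapped + (1.-wt)*dst   ! 280.5 : partial relaxation
  end program blend_weight_demo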