FV3DYCORE Version 1.0.0
fv_nesting.F90
1 !***********************************************************************
2 !* GNU Lesser General Public License
3 !*
4 !* This file is part of the FV3 dynamical core.
5 !*
6 !* The FV3 dynamical core is free software: you can redistribute it
7 !* and/or modify it under the terms of the
8 !* GNU Lesser General Public License as published by the
9 !* Free Software Foundation, either version 3 of the License, or
10 !* (at your option) any later version.
11 !*
12 !* The FV3 dynamical core is distributed in the hope that it will be
13 !* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
14 !* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 !* See the GNU Lesser General Public License for more details.
16 !*
17 !* You should have received a copy of the GNU Lesser General Public
18 !* License along with the FV3 dynamical core.
19 !* If not, see <http://www.gnu.org/licenses/>.
20 !***********************************************************************
21 
22 module fv_nesting_mod
23 
24 
25 #undef MULTI_GASES
26 
28 
29 ! Modules Included:
30 ! <table>
31 ! <tr>
32 ! <th>Module Name</th>
33 ! <th>Functions Included</th>
34 ! </tr>
35 ! <tr>
36 ! <td>boundary_mod</td>
37 ! <td>update_coarse_grid, nested_grid_BC_send, nested_grid_BC_recv, nested_grid_BC_save_proc,
38 ! nested_grid_BC, nested_grid_BC_apply_intT</td>
39 ! </tr>
40 ! <tr>
41 ! <td>constants_mod</td>
42 ! <td>grav, pi=>pi_8, radius, hlv, rdgas, cp_air, rvgas, cp_vapor, kappa</td>
43 ! </tr>
44 ! <tr>
45 ! <td>field_manager_mod</td>
46 ! <td>MODEL_ATMOS</td>
47 ! </tr>
48 ! <tr>
49 ! <td>fv_arrays_mod</td>
50 ! <td>fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type, fv_diag_type,
51 ! fv_nest_BC_type_3D, allocate_fv_nest_BC_type, fv_grid_bounds_type</td>
52 ! </tr>
53 ! <tr>
54 ! <td>fv_diagnostics_mod</td>
55 ! <td>sphum_ll_fix, range_check</td>
56 ! </tr>
57 ! <tr>
58 ! <td>fv_grid_utils_mod</td>
59 ! <td>ptop_min, g_sum, cubed_to_latlon, f_p</td>
60 ! </tr>
61 ! <tr>
62 ! <td>fv_mapz_mod</td>
63 ! <td>mappm, remap_2d</td>
64 ! </tr>
65 ! <tr>
66 ! <td>fv_mp_mod</td>
67 ! <td>is, ie, js, je, isd, ied, jsd, jed, isc, iec, jsc, jec, is_master, mp_reduce_sum</td>
68 ! </tr>
69 ! <tr>
70 ! <td>fv_restart_mod</td>
71 ! <td>d2a_setup, d2c_setup</td>
72 ! </tr>
73 ! <tr>
74 ! <td>fv_sg_mod</td>
75 ! <td>neg_adj3</td>
76 ! </tr>
77 ! <tr>
78 ! <td>fv_timing_mod</td>
79 ! <td>timing_on, timing_off</td>
80 ! </tr>
81 ! <tr>
82 ! <td>init_hydro_mod</td>
83 ! <td>p_var</td>
84 ! </tr>
85 ! <tr>
86 ! <td>mpp_mod</td>
87 ! <td>mpp_sync_self, mpp_sync, mpp_send, mpp_recv, mpp_error, FATAL</td>
88 ! </tr>
89 ! <tr>
90 ! <td>mpp_domains_mod</td>
91 ! <td>mpp_update_domains, mpp_global_field, mpp_get_data_domain, mpp_get_compute_domain,
92 ! mpp_get_global_domain, DGRID_NE, mpp_update_domains, domain2D, mpp_global_sum,
93 ! BITWISE_EFP_SUM, BITWISE_EXACT_SUM</td>
94 ! </tr>
95 ! <tr>
96 ! <td>sw_core_mod</td>
97 ! <td>divergence_corner, divergence_corner_nest</td>
98 ! </tr>
99 ! <tr>
100 ! <td>tracer_manager_mod</td>
101 ! <td>get_tracer_index</td>
102 ! </tr>
103 ! </table>
104 
105  use mpp_domains_mod, only: mpp_update_domains
106  use mpp_domains_mod, only: mpp_global_field
107  use field_manager_mod, only: model_atmos
108  use tracer_manager_mod, only: get_tracer_index
109  use fv_sg_mod, only: neg_adj3
110  use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain, mpp_get_global_domain
111  use mpp_domains_mod, only: dgrid_ne, mpp_update_domains, domain2d
112  use fv_restart_mod, only: d2a_setup, d2c_setup
113  use mpp_mod, only: mpp_sync_self, mpp_sync, mpp_send, mpp_recv, mpp_error, fatal
114  use mpp_domains_mod, only: mpp_global_sum, bitwise_efp_sum, bitwise_exact_sum
115  use boundary_mod, only: update_coarse_grid, nested_grid_bc_send, nested_grid_bc_recv
116  use boundary_mod, only: nested_grid_bc_save_proc, nested_grid_bc, nested_grid_bc_apply_intt
117  use fv_mp_mod, only: is, ie, js, je, isd, ied, jsd, jed, isc, iec, jsc, jec
118  use fv_arrays_mod, only: fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type, fv_diag_type
119  use fv_arrays_mod, only: fv_nest_bc_type_3d, allocate_fv_nest_bc_type, fv_grid_bounds_type
120  use fv_grid_utils_mod, only: ptop_min, g_sum, cubed_to_latlon, f_p
121  use init_hydro_mod, only: p_var
122  use constants_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, cp_air, rvgas, cp_vapor, kappa
123  use fv_mapz_mod, only: mappm, remap_2d
124  use fv_timing_mod, only: timing_on, timing_off
125  use fv_mp_mod, only: is_master
126  use fv_mp_mod, only: mp_reduce_sum
127  use fv_diagnostics_mod, only: sphum_ll_fix, range_check
128  use sw_core_mod, only: divergence_corner, divergence_corner_nest
129 
130 implicit none
131  logical :: rf_initialized = .false.
132  logical :: bad_range
133  real, allocatable :: rf(:), rw(:)
134  integer :: kmax=1
135  !Arrays for global grid total energy, used for grid nesting
136  real, allocatable :: te_2d_coarse(:,:)
137  real, allocatable :: dp1_coarse(:,:,:)
138 
139  !For nested grid buffers
140  !Individual structures are allocated by nested_grid_BC_recv
141  type(fv_nest_bc_type_3d) :: u_buf, v_buf, uc_buf, vc_buf, delp_buf, delz_buf, pt_buf, pkz_buf, w_buf, divg_buf
142  type(fv_nest_bc_type_3d), allocatable:: q_buf(:)
143 !#ifdef USE_COND
144  real, dimension(:,:,:), allocatable, target :: dum_west, dum_east, dum_north, dum_south
145 !#endif
146 
147 private
148 public :: setup_nested_grid_BCs, twoway_nesting
149 
150 contains
151 
152 !!!! NOTE: Many of the routines here and in boundary.F90 have a lot of
153 !!!! redundant code, which could be cleaned up and simplified.
154 
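155 !>@brief The subroutine 'setup_nested_grid_BCs' fetches data from the coarse
156 !! grid and interpolates it to the nested-grid boundary, saving the t1 values
!! that dyn_core later time-interpolates and applies.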
157  subroutine setup_nested_grid_bcs(npx, npy, npz, zvir, ncnst, &
158  u, v, w, pt, delp, delz,q, uc, vc, pkz, &
159  nested, inline_q, make_nh, ng, &
160  gridstruct, flagstruct, neststruct, &
161  nest_timestep, tracer_nest_timestep, &
162  domain, bd, nwat)
164 
165  type(fv_grid_bounds_type), intent(IN) :: bd
166  real, intent(IN) :: zvir
167 
168  integer, intent(IN) :: npx, npy, npz
169  integer, intent(IN) :: ncnst, ng, nwat
170  logical, intent(IN) :: inline_q, make_nh,nested
171 
172  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u
173  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v
174  real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1:)
175  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
176  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
177  real, intent(inout) :: delz(bd%isd: ,bd%jsd: ,1:)
178  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst)
179  real, intent(inout) :: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
180  real, intent(inout) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
181  real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz)
182  integer, intent(INOUT) :: nest_timestep, tracer_nest_timestep
183 
184  type(fv_grid_type), intent(INOUT) :: gridstruct
185  type(fv_flags_type), intent(INOUT) :: flagstruct
186  type(fv_nest_type), intent(INOUT), target :: neststruct
187  type(domain2d), intent(INOUT) :: domain
188  real :: divg(bd%isd:bd%ied+1,bd%jsd:bd%jed+1, npz)
189  real :: ua(bd%isd:bd%ied,bd%jsd:bd%jed)
190  real :: va(bd%isd:bd%ied,bd%jsd:bd%jed)
191 
192  real :: pkz_coarse( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
193  integer :: i,j,k,n,p, sphum
194  logical :: do_pd
195 
196  type(fv_nest_bc_type_3d) :: pkz_bc
197 
198  !local pointers
199  logical, pointer :: child_grids(:)
200 
201  integer :: is, ie, js, je
202  integer :: isd, ied, jsd, jed
203 
204  is = bd%is
205  ie = bd%ie
206  js = bd%js
207  je = bd%je
208  isd = bd%isd
209  ied = bd%ied
210  jsd = bd%jsd
211  jed = bd%jed
212 
213  child_grids => neststruct%child_grids
214 
215 
216  !IF nested, set up nested grid BCs for time-interpolation
217  !(actually applying the BCs is done in dyn_core)
218 
219  nest_timestep = 0
220  if (.not. inline_q) tracer_nest_timestep = 0
221 
222 
223  if (neststruct%nested .and. (.not. (neststruct%first_step) .or. make_nh) ) then
224  do_pd = .true.
225  call set_bcs_t0(ncnst, flagstruct%hydrostatic, neststruct)
226  else
227  !On first timestep the t0 BCs are not initialized and may contain garbage
228  do_pd = .false.
229  end if
230 
231  !compute uc/vc for nested-grid BCs
232  !!! CLEANUP: if we compute uc/vc here we don't need to do so on the first call of c_sw, right?
233  if (any(neststruct%child_grids)) then
234  call timing_on('COMM_TOTAL')
235  !!! CLEANUP: could we make this a non-blocking operation?
236  !!! Is this needed? It is on the initialization step.
237  call mpp_update_domains(u, v, &
238  domain, gridtype=dgrid_ne, complete=.true.)
239  call timing_off('COMM_TOTAL')
240 !$OMP parallel do default(none) shared(isd,jsd,ied,jed,is,ie,js,je,npx,npy,npz, &
241 !$OMP gridstruct,flagstruct,bd,u,v,uc,vc,nested,divg) &
242 !$OMP private(ua,va)
243  do k=1,npz
244  call d2c_setup(u(isd,jsd,k), v(isd,jsd,k), &
245  ua, va, &
246  uc(isd,jsd,k), vc(isd,jsd,k), flagstruct%nord>0, &
247  isd,ied,jsd,jed, is,ie,js,je, npx,npy, &
248  gridstruct%grid_type, gridstruct%nested, &
249  gridstruct%se_corner, gridstruct%sw_corner, &
250  gridstruct%ne_corner, gridstruct%nw_corner, &
251  gridstruct%rsin_u, gridstruct%rsin_v, &
252  gridstruct%cosa_s, gridstruct%rsin2, flagstruct%regional )
253  if (nested) then
254  call divergence_corner_nest(u(isd,jsd,k), v(isd,jsd,k), ua, va, divg(isd,jsd,k), gridstruct, flagstruct, bd)
255  else
256  call divergence_corner(u(isd,jsd,k), v(isd,jsd,k), ua, va, divg(isd,jsd,k), gridstruct, flagstruct, bd)
257  endif
258  end do
259  endif
260 
261 #ifndef SW_DYNAMICS
262  if (flagstruct%hydrostatic) then
263 !$OMP parallel do default(none) shared(npz,is,ie,js,je,pkz,pkz_coarse)
264  do k=1,npz
265  do j=js,je
266  do i=is,ie
267  pkz_coarse(i,j,k) = pkz(i,j,k)
268  enddo
269  enddo
270  enddo
271  endif
272 #endif
273 !! Nested grid: receive from parent grid
274  if (neststruct%nested) then
275  if (.not. allocated(q_buf)) then
276  allocate(q_buf(ncnst))
277  endif
278 
279  call nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
280  delp_buf)
281  do n=1,ncnst
282  call nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
283  q_buf(n))
284  enddo
285 #ifndef SW_DYNAMICS
286  call nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
287  pt_buf)
288 
289  if (flagstruct%hydrostatic) then
290  call allocate_fv_nest_bc_type(pkz_bc,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz,ng,0,0,0,.false.)
291  call nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
292  pkz_buf)
293  else
294  call nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
295  w_buf)
296  call nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
297  delz_buf)
298  endif
299 #endif
300  call nested_grid_bc_recv(neststruct%nest_domain, 0, 1, npz, bd, &
301  u_buf)
302  call nested_grid_bc_recv(neststruct%nest_domain, 0, 1, npz, bd, &
303  vc_buf)
304  call nested_grid_bc_recv(neststruct%nest_domain, 1, 0, npz, bd, &
305  v_buf)
306  call nested_grid_bc_recv(neststruct%nest_domain, 1, 0, npz, bd, &
307  uc_buf)
308  call nested_grid_bc_recv(neststruct%nest_domain, 1, 1, npz, bd, &
309  divg_buf)
310  endif
311 
312 
313 !! Coarse grid: send to child grids
314 
315  do p=1,size(child_grids)
316  if (child_grids(p)) then
317  call nested_grid_bc_send(delp, neststruct%nest_domain_all(p), 0, 0)
318  do n=1,ncnst
319  call nested_grid_bc_send(q(:,:,:,n), neststruct%nest_domain_all(p), 0, 0)
320  enddo
321 #ifndef SW_DYNAMICS
322  call nested_grid_bc_send(pt, neststruct%nest_domain_all(p), 0, 0)
323 
324  if (flagstruct%hydrostatic) then
325  !Working with PKZ is more complicated since it is only defined on the interior of the grid.
326  call nested_grid_bc_send(pkz_coarse, neststruct%nest_domain_all(p), 0, 0)
327  else
328  call nested_grid_bc_send(w, neststruct%nest_domain_all(p), 0, 0)
329  call nested_grid_bc_send(delz, neststruct%nest_domain_all(p), 0, 0)
330  endif
331 #endif
332  call nested_grid_bc_send(u, neststruct%nest_domain_all(p), 0, 1)
333  call nested_grid_bc_send(vc, neststruct%nest_domain_all(p), 0, 1)
334  call nested_grid_bc_send(v, neststruct%nest_domain_all(p), 1, 0)
335  call nested_grid_bc_send(uc, neststruct%nest_domain_all(p), 1, 0)
336  call nested_grid_bc_send(divg, neststruct%nest_domain_all(p), 1, 1)
337  endif
338  enddo
339 
340  !Nested grid: do computations
341  if (nested) then
342  call nested_grid_bc_save_proc(neststruct%nest_domain, &
343  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
344  neststruct%delp_BC, delp_buf, pd_in=do_pd)
345  do n=1,ncnst
346  call nested_grid_bc_save_proc(neststruct%nest_domain, &
347  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
348  neststruct%q_BC(n), q_buf(n), pd_in=do_pd)
349  enddo
350 #ifndef SW_DYNAMICS
351  call nested_grid_bc_save_proc(neststruct%nest_domain, &
352  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
353  neststruct%pt_BC, pt_buf)
354 
355  sphum = get_tracer_index(model_atmos, 'sphum')
356  if (flagstruct%hydrostatic) then
357  call nested_grid_bc_save_proc(neststruct%nest_domain, &
358  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
359  pkz_bc, pkz_buf)
360  call setup_pt_bc(neststruct%pt_BC, pkz_bc, neststruct%q_BC(sphum), npx, npy, npz, zvir, bd)
361  else
362  call nested_grid_bc_save_proc(neststruct%nest_domain, &
363  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
364  neststruct%w_BC, w_buf)
365  call nested_grid_bc_save_proc(neststruct%nest_domain, &
366  neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
367  neststruct%delz_BC, delz_buf) !Need a negative-definite method?
368 
369  call setup_pt_nh_bc(neststruct%pt_BC, neststruct%delp_BC, neststruct%delz_BC, &
370  neststruct%q_BC(sphum), neststruct%q_BC, ncnst, &
371 #ifdef USE_COND
372  neststruct%q_con_BC, &
373 #ifdef MOIST_CAPPA
374  neststruct%cappa_BC, &
375 #endif
376 #endif
377  npx, npy, npz, zvir, bd)
378  endif
379 #endif
380  call nested_grid_bc_save_proc(neststruct%nest_domain, &
381  neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz, bd, &
382  neststruct%u_BC, u_buf)
383  call nested_grid_bc_save_proc(neststruct%nest_domain, &
384  neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz, bd, &
385  neststruct%vc_BC, vc_buf)
386  call nested_grid_bc_save_proc(neststruct%nest_domain, &
387  neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz, bd, &
388  neststruct%v_BC, v_buf)
389  call nested_grid_bc_save_proc(neststruct%nest_domain, &
390  neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz, bd, &
391  neststruct%uc_BC, uc_buf)
392 
393  call nested_grid_bc_save_proc(neststruct%nest_domain, &
394  neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz, bd, &
395  neststruct%divg_BC, divg_buf)
396  endif
397 
398  if (neststruct%first_step) then
399  if (neststruct%nested) call set_bcs_t0(ncnst, flagstruct%hydrostatic, neststruct)
400  neststruct%first_step = .false.
401  if (.not. flagstruct%hydrostatic) flagstruct%make_nh= .false.
402  else if (flagstruct%make_nh) then
403  if (neststruct%nested) call set_nh_bcs_t0(neststruct)
404  flagstruct%make_nh= .false.
405  endif
406 
407  !Unnecessary?
408 !!$ if ( neststruct%nested .and. .not. neststruct%divg_BC%initialized) then
409 !!$ neststruct%divg_BC%east_t0 = neststruct%divg_BC%east_t1
410 !!$ neststruct%divg_BC%west_t0 = neststruct%divg_BC%west_t1
411 !!$ neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1
412 !!$ neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1
413 !!$ neststruct%divg_BC%initialized = .true.
414 !!$ endif
415 
416 
417  call mpp_sync_self
418 
419  end subroutine setup_nested_grid_bcs
420 
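!'setup_pt_bc' converts the received temperature BCs into virtual potential
!temperature for the hydrostatic case, pt = T/pkz*(1.+zvir*sphum), on each
!outer boundary strip.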
421  subroutine setup_pt_bc(pt_BC, pkz_BC, sphum_BC, npx, npy, npz, zvir, bd)
423  type(fv_grid_bounds_type), intent(IN) :: bd
424  type(fv_nest_BC_type_3d), intent(IN), target :: pkz_BC, sphum_BC
425  type(fv_nest_BC_type_3d), intent(INOUT), target :: pt_BC
426  integer, intent(IN) :: npx, npy, npz
427  real, intent(IN) :: zvir
428 
429  real, dimension(:,:,:), pointer :: ptBC, pkzBC, sphumBC
430 
431  integer :: i,j,k, istart, iend
432 
433  integer :: is, ie, js, je
434  integer :: isd, ied, jsd, jed
435 
436  is = bd%is
437  ie = bd%ie
438  js = bd%js
439  je = bd%je
440  isd = bd%isd
441  ied = bd%ied
442  jsd = bd%jsd
443  jed = bd%jed
444 
445  if (is == 1) then
446  ptbc => pt_bc%west_t1
447  pkzbc => pkz_bc%west_t1
448  sphumbc => sphum_bc%west_t1
449 !$OMP parallel do default(none) shared(npz,jsd,jed,isd,ptBC,pkzBC,zvir,sphumBC)
450  do k=1,npz
451  do j=jsd,jed
452  do i=isd,0
453  ptbc(i,j,k) = ptbc(i,j,k)/pkzbc(i,j,k)*(1.+zvir*sphumbc(i,j,k))
454  end do
455  end do
456  end do
457  end if
458 
459  if (js == 1) then
460  ptbc => pt_bc%south_t1
461  pkzbc => pkz_bc%south_t1
462  sphumbc => sphum_bc%south_t1
463  if (is == 1) then
464  istart = is
465  else
466  istart = isd
467  end if
468  if (ie == npx-1) then
469  iend = ie
470  else
471  iend = ied
472  end if
473 
474 !$OMP parallel do default(none) shared(npz,jsd,istart,iend,ptBC,pkzBC,zvir,sphumBC)
475  do k=1,npz
476  do j=jsd,0
477  do i=istart,iend
478  ptbc(i,j,k) = ptbc(i,j,k)/pkzbc(i,j,k) * &
479  (1.+zvir*sphumbc(i,j,k))
480  end do
481  end do
482  end do
483  end if
484 
485 
486  if (ie == npx-1) then
487  ptbc => pt_bc%east_t1
488  pkzbc => pkz_bc%east_t1
489  sphumbc => sphum_bc%east_t1
490 !$OMP parallel do default(none) shared(npz,jsd,jed,npx,ied,ptBC,pkzBC,zvir,sphumBC)
491  do k=1,npz
492  do j=jsd,jed
493  do i=npx,ied
494  ptbc(i,j,k) = ptbc(i,j,k)/pkzbc(i,j,k) * &
495  (1.+zvir*sphumbc(i,j,k))
496  end do
497  end do
498  end do
499  end if
500 
501  if (je == npy-1) then
502  ptbc => pt_bc%north_t1
503  pkzbc => pkz_bc%north_t1
504  sphumbc => sphum_bc%north_t1
505  if (is == 1) then
506  istart = is
507  else
508  istart = isd
509  end if
510  if (ie == npx-1) then
511  iend = ie
512  else
513  iend = ied
514  end if
515 
516 !$OMP parallel do default(none) shared(npz,npy,jed,npx,istart,iend,ptBC,pkzBC,zvir,sphumBC)
517  do k=1,npz
518  do j=npy,jed
519  do i=istart,iend
520  ptbc(i,j,k) = ptbc(i,j,k)/pkzbc(i,j,k) * &
521  (1.+zvir*sphumbc(i,j,k))
522  end do
523  end do
524  end do
525  end if
526 
527  end subroutine setup_pt_bc
528 
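!'setup_pt_nh_bc' is the nonhydrostatic analogue of 'setup_pt_bc': pkz is
!diagnosed from the equation of state,
!  pkz = exp( kappa*log( -(rdgas/grav)*delp*T*(1.+zvir*sphum)*(1.-q_con)/delz ) ),
!and the temperature BCs are then converted to moist virtual potential
!temperature; with MOIST_CAPPA a condensate-dependent cappa replaces kappa.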
529  subroutine setup_pt_nh_bc(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, &
530 #ifdef USE_COND
531  q_con_BC, &
532 #ifdef MOIST_CAPPA
533  cappa_BC, &
534 #endif
535 #endif
536  npx, npy, npz, zvir, bd)
537 
538  type(fv_grid_bounds_type), intent(IN) :: bd
539  type(fv_nest_BC_type_3d), intent(IN), target :: delp_BC, delz_BC, sphum_BC
540  type(fv_nest_BC_type_3d), intent(INOUT), target :: pt_BC
541  integer, intent(IN) :: nq
542  type(fv_nest_BC_type_3d), intent(IN), target :: q_BC(nq)
543 #ifdef USE_COND
544  type(fv_nest_BC_type_3d), intent(INOUT), target :: q_con_BC
545 #ifdef MOIST_CAPPA
546  type(fv_nest_BC_type_3d), intent(INOUT), target :: cappa_BC
547 #endif
548 #endif
549  integer, intent(IN) :: npx, npy, npz
550  real, intent(IN) :: zvir
551 
552  real, parameter:: c_liq = 4185.5
553  real, parameter:: c_ice = 1972.
554  real, parameter:: cv_vap = cp_vapor - rvgas
555 
556  real, dimension(:,:,:), pointer :: ptBC, sphumBC, qconBC, delpBC, delzBC, cappaBC
557  real, dimension(:,:,:), pointer :: liq_watBC_west, ice_watBC_west, rainwatBC_west, snowwatBC_west, graupelBC_west
558  real, dimension(:,:,:), pointer :: liq_watBC_east, ice_watBC_east, rainwatBC_east, snowwatBC_east, graupelBC_east
559  real, dimension(:,:,:), pointer :: liq_watBC_north, ice_watBC_north, rainwatBC_north, snowwatBC_north, graupelBC_north
560  real, dimension(:,:,:), pointer :: liq_watBC_south, ice_watBC_south, rainwatBC_south, snowwatBC_south, graupelBC_south
561 
562  real :: dp1, q_liq, q_sol, q_con = 0., cvm, pkz, rdg, cv_air
563 
564  integer :: i,j,k, istart, iend
565  integer :: liq_wat, ice_wat, rainwat, snowwat, graupel
566  real, parameter:: tice = 273.16
567  real, parameter:: t_i0 = 15.
568 
569  integer :: is, ie, js, je
570  integer :: isd, ied, jsd, jed
571 
572  is = bd%is
573  ie = bd%ie
574  js = bd%js
575  je = bd%je
576  isd = bd%isd
577  ied = bd%ied
578  jsd = bd%jsd
579  jed = bd%jed
580 
581  rdg = -rdgas / grav
582  cv_air = cp_air - rdgas
583 
584  liq_wat = get_tracer_index(model_atmos, 'liq_wat')
585  ice_wat = get_tracer_index(model_atmos, 'ice_wat')
586  rainwat = get_tracer_index(model_atmos, 'rainwat')
587  snowwat = get_tracer_index(model_atmos, 'snowwat')
588  graupel = get_tracer_index(model_atmos, 'graupel')
589 
590  if (is == 1) then
591  if (.not. allocated(dum_west)) then
592  allocate(dum_west(isd:0,jsd:jed,npz))
593 !$OMP parallel do default(none) shared(npz,isd,jsd,jed,dum_West)
594  do k=1,npz
595  do j=jsd,jed
596  do i=isd,0
597  dum_west(i,j,k) = 0.
598  enddo
599  enddo
600  enddo
601  endif
602  endif
603  if (js == 1) then
604  if (.not. allocated(dum_south)) then
605  allocate(dum_south(isd:ied,jsd:0,npz))
606 !$OMP parallel do default(none) shared(npz,isd,ied,jsd,dum_South)
607  do k=1,npz
608  do j=jsd,0
609  do i=isd,ied
610  dum_south(i,j,k) = 0.
611  enddo
612  enddo
613  enddo
614  endif
615  endif
616  if (ie == npx-1) then
617  if (.not. allocated(dum_east)) then
618  allocate(dum_east(npx:ied,jsd:jed,npz))
619 !$OMP parallel do default(none) shared(npx,npz,ied,jsd,jed,dum_East)
620  do k=1,npz
621  do j=jsd,jed
622  do i=npx,ied
623  dum_east(i,j,k) = 0.
624  enddo
625  enddo
626  enddo
627  endif
628  endif
629  if (je == npy-1) then
630  if (.not. allocated(dum_north)) then
631  allocate(dum_north(isd:ied,npy:jed,npz))
632 !$OMP parallel do default(none) shared(npy,npz,isd,ied,jed,dum_North)
633  do k=1,npz
634  do j=npy,jed
635  do i=isd,ied
636  dum_north(i,j,k) = 0.
637  enddo
638  enddo
639  enddo
640  endif
641  endif
642 
643  if (liq_wat > 0) then
644  liq_watbc_west => q_bc(liq_wat)%west_t1
645  liq_watbc_east => q_bc(liq_wat)%east_t1
646  liq_watbc_north => q_bc(liq_wat)%north_t1
647  liq_watbc_south => q_bc(liq_wat)%south_t1
648  else
649  liq_watbc_west => dum_west
650  liq_watbc_east => dum_east
651  liq_watbc_north => dum_north
652  liq_watbc_south => dum_south
653  endif
654  if (ice_wat > 0) then
655  ice_watbc_west => q_bc(ice_wat)%west_t1
656  ice_watbc_east => q_bc(ice_wat)%east_t1
657  ice_watbc_north => q_bc(ice_wat)%north_t1
658  ice_watbc_south => q_bc(ice_wat)%south_t1
659  else
660  ice_watbc_west => dum_west
661  ice_watbc_east => dum_east
662  ice_watbc_north => dum_north
663  ice_watbc_south => dum_south
664  endif
665  if (rainwat > 0) then
666  rainwatbc_west => q_bc(rainwat)%west_t1
667  rainwatbc_east => q_bc(rainwat)%east_t1
668  rainwatbc_north => q_bc(rainwat)%north_t1
669  rainwatbc_south => q_bc(rainwat)%south_t1
670  else
671  rainwatbc_west => dum_west
672  rainwatbc_east => dum_east
673  rainwatbc_north => dum_north
674  rainwatbc_south => dum_south
675  endif
676  if (snowwat > 0) then
677  snowwatbc_west => q_bc(snowwat)%west_t1
678  snowwatbc_east => q_bc(snowwat)%east_t1
679  snowwatbc_north => q_bc(snowwat)%north_t1
680  snowwatbc_south => q_bc(snowwat)%south_t1
681  else
682  snowwatbc_west => dum_west
683  snowwatbc_east => dum_east
684  snowwatbc_north => dum_north
685  snowwatbc_south => dum_south
686  endif
687  if (graupel > 0) then
688  graupelbc_west => q_bc(graupel)%west_t1
689  graupelbc_east => q_bc(graupel)%east_t1
690  graupelbc_north => q_bc(graupel)%north_t1
691  graupelbc_south => q_bc(graupel)%south_t1
692  else
693  graupelbc_west => dum_west
694  graupelbc_east => dum_east
695  graupelbc_north => dum_north
696  graupelbc_south => dum_south
697  endif
698 
699  if (is == 1) then
700  ptbc => pt_bc%west_t1
701  sphumbc => sphum_bc%west_t1
702 #ifdef USE_COND
703  qconbc => q_con_bc%west_t1
704 #ifdef MOIST_CAPPA
705  cappabc => cappa_bc%west_t1
706 #endif
707 #endif
708  delpbc => delp_bc%west_t1
709  delzbc => delz_bc%west_t1
710 
711 !$OMP parallel do default(none) shared(npz,jsd,jed,isd,zvir,sphumBC,liq_watBC_west,rainwatBC_west,ice_watBC_west,snowwatBC_west,graupelBC_west,qconBC,cappaBC, &
712 !$OMP rdg,cv_air,delpBC,delzBC,ptBC) &
713 !$OMP private(dp1,q_con,q_liq,q_sol,cvm,pkz)
714  do k=1,npz
715  do j=jsd,jed
716  do i=isd,0
717  dp1 = zvir*sphumbc(i,j,k)
718 #ifdef USE_COND
719 #ifdef GFS_PHYS
720  q_con = liq_watbc_west(i,j,k)
721  q_sol = q_con*max(min((tice-ptbc(i,j,k))/t_i0,1.),0.)
722  q_liq = q_con - q_sol
723 #else
724  q_liq = liq_watbc_west(i,j,k) + rainwatbc_west(i,j,k)
725  q_sol = ice_watbc_west(i,j,k) + snowwatbc_west(i,j,k) + graupelbc_west(i,j,k)
726  q_con = q_liq + q_sol
727 #endif
728  qconbc(i,j,k) = q_con
729 #ifdef MOIST_CAPPA
730  cvm = (1.-(sphumbc(i,j,k)+q_con))*cv_air+sphumbc(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice
731  cappabc(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1))
732  pkz = exp( cappabc(i,j,k)*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
733  (1.+dp1)*(1.-q_con)/delzbc(i,j,k)))
734 #else
735  pkz = exp( kappa*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
736  (1.+dp1)*(1.-q_con)/delzbc(i,j,k)))
737 #endif
738  ptbc(i,j,k) = ptbc(i,j,k)*(1.+dp1)*(1.-q_con)/pkz
739 #else
740  pkz = exp( kappa*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
741  (1.+dp1)/delzbc(i,j,k)))
742  ptbc(i,j,k) = ptbc(i,j,k)*(1.+dp1)/pkz
743 #endif
744  end do
745  end do
746  end do
747  end if
748 
749 
750  if (js == 1) then
751  ptbc => pt_bc%south_t1
752  sphumbc => sphum_bc%south_t1
753 #ifdef USE_COND
754  qconbc => q_con_bc%south_t1
755 #ifdef MOIST_CAPPA
756  cappabc => cappa_bc%south_t1
757 #endif
758 #endif
759  delpbc => delp_bc%south_t1
760  delzbc => delz_bc%south_t1
761  if (is == 1) then
762  istart = is
763  else
764  istart = isd
765  end if
766  if (ie == npx-1) then
767  iend = ie
768  else
769  iend = ied
770  end if
771 
772 !$OMP parallel do default(none) shared(npz,jsd,istart,iend,zvir,sphumBC, &
773 !$OMP liq_watBC_south,rainwatBC_south,ice_watBC_south,&
774 !$OMP snowwatBC_south,graupelBC_south,qconBC,cappaBC, &
775 !$OMP rdg,cv_air,delpBC,delzBC,ptBC) &
776 !$OMP private(dp1,q_con,q_liq,q_sol,cvm,pkz)
777  do k=1,npz
778  do j=jsd,0
779  do i=istart,iend
780  dp1 = zvir*sphumbc(i,j,k)
781 #ifdef USE_COND
782 #ifdef GFS_PHYS
783  q_con = liq_watbc_south(i,j,k)
784  q_sol = q_con*max(min((tice-ptbc(i,j,k))/t_i0,1.),0.)
785  q_liq = q_con - q_sol
786 #else
787  q_liq = liq_watbc_south(i,j,k) + rainwatbc_south(i,j,k)
788  q_sol = ice_watbc_south(i,j,k) + snowwatbc_south(i,j,k) + graupelbc_south(i,j,k)
789  q_con = q_liq + q_sol
790 #endif
791  qconbc(i,j,k) = q_con
792 #ifdef MOIST_CAPPA
793  cvm = (1.-(sphumbc(i,j,k)+q_con))*cv_air+sphumbc(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice
794  cappabc(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1))
795  pkz = exp( cappabc(i,j,k)*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
796  (1.+dp1)*(1.-q_con)/delzbc(i,j,k)))
797 #else
798  pkz = exp( kappa*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
799  (1.+dp1)*(1.-q_con)/delzbc(i,j,k)))
800 #endif
801  ptbc(i,j,k) = ptbc(i,j,k)*(1.+dp1)*(1.-q_con)/pkz
802 #else
803  pkz = exp( kappa*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
804  (1.+dp1)/delzbc(i,j,k)))
805  ptbc(i,j,k) = ptbc(i,j,k)*(1.+dp1)/pkz
806 #endif
807  end do
808  end do
809  end do
810  end if
811 
812 
813  if (ie == npx-1) then
814  ptbc => pt_bc%east_t1
815  sphumbc => sphum_bc%east_t1
816 #ifdef USE_COND
817  qconbc => q_con_bc%east_t1
818 #ifdef MOIST_CAPPA
819  cappabc => cappa_bc%east_t1
820 #endif
821 #endif
822  delpbc => delp_bc%east_t1
823  delzbc => delz_bc%east_t1
824 !$OMP parallel do default(none) shared(npz,jsd,jed,npx,ied,zvir,sphumBC, &
825 !$OMP liq_watBC_east,rainwatBC_east,ice_watBC_east,snowwatBC_east,graupelBC_east,qconBC,cappaBC, &
826 !$OMP rdg,cv_air,delpBC,delzBC,ptBC) &
827 !$OMP private(dp1,q_con,q_liq,q_sol,cvm,pkz)
828  do k=1,npz
829  do j=jsd,jed
830  do i=npx,ied
831  dp1 = zvir*sphumbc(i,j,k)
832 #ifdef USE_COND
833 #ifdef GFS_PHYS
834  q_con = liq_watbc_east(i,j,k)
835  q_sol = q_con*max(min((tice-ptbc(i,j,k))/t_i0,1.),0.)
836  q_liq = q_con - q_sol
837 #else
838  q_liq = liq_watbc_east(i,j,k) + rainwatbc_east(i,j,k)
839  q_sol = ice_watbc_east(i,j,k) + snowwatbc_east(i,j,k) + graupelbc_east(i,j,k)
840  q_con = q_liq + q_sol
841 #endif
842  qconbc(i,j,k) = q_con
843 #ifdef MOIST_CAPPA
844  cvm = (1.-(sphumbc(i,j,k)+q_con))*cv_air+sphumbc(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice
845  cappabc(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1))
846  pkz = exp( cappabc(i,j,k)*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
847  (1.+dp1)*(1.-q_con)/delzbc(i,j,k)))
848 #else
849  pkz = exp( kappa*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
850  (1.+dp1)*(1.-q_con)/delzbc(i,j,k)))
851 #endif
852  ptbc(i,j,k) = ptbc(i,j,k)*(1.+dp1)*(1.-q_con)/pkz
853 #else
854  pkz = exp( kappa*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
855  (1.+dp1)/delzbc(i,j,k)))
856  ptbc(i,j,k) = ptbc(i,j,k)*(1.+dp1)/pkz
857 #endif
858  end do
859  end do
860  end do
861  end if
862 
863  if (je == npy-1) then
864  ptbc => pt_bc%north_t1
865  sphumbc => sphum_bc%north_t1
866 #ifdef USE_COND
867  qconbc => q_con_bc%north_t1
868 #ifdef MOIST_CAPPA
869  cappabc => cappa_bc%north_t1
870 #endif
871 #endif
872  delpbc => delp_bc%north_t1
873  delzbc => delz_bc%north_t1
874  if (is == 1) then
875  istart = is
876  else
877  istart = isd
878  end if
879  if (ie == npx-1) then
880  iend = ie
881  else
882  iend = ied
883  end if
884 
885 !$OMP parallel do default(none) shared(npz,npy,jed,istart,iend,zvir, &
886 !$OMP sphumBC,liq_watBC_north,rainwatBC_north,ice_watBC_north,snowwatBC_north,graupelBC_north,qconBC,cappaBC, &
887 !$OMP rdg,cv_air,delpBC,delzBC,ptBC) &
888 !$OMP private(dp1,q_con,q_liq,q_sol,cvm,pkz)
889  do k=1,npz
890  do j=npy,jed
891  do i=istart,iend
892  dp1 = zvir*sphumbc(i,j,k)
893 #ifdef USE_COND
894 #ifdef GFS_PHYS
895  q_con = liq_watbc_north(i,j,k)
896  q_sol = q_con*max(min((tice-ptbc(i,j,k))/t_i0,1.),0.)
897  q_liq = q_con - q_sol
898 #else
899  q_liq = liq_watbc_north(i,j,k) + rainwatbc_north(i,j,k)
900  q_sol = ice_watbc_north(i,j,k) + snowwatbc_north(i,j,k) + graupelbc_north(i,j,k)
901  q_con = q_liq + q_sol
902 #endif
903  qconbc(i,j,k) = q_con
904 #ifdef MOIST_CAPPA
905  cvm = (1.-(sphumbc(i,j,k)+q_con))*cv_air+sphumbc(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice
906  cappabc(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1))
907  pkz = exp( cappabc(i,j,k)*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
908  (1.+dp1)*(1.-q_con)/delzbc(i,j,k)))
909 #else
910  pkz = exp( kappa*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
911  (1.+dp1)*(1.-q_con)/delzbc(i,j,k)))
912 #endif
913  ptbc(i,j,k) = ptbc(i,j,k)*(1.+dp1)*(1.-q_con)/pkz
914 #else
915  pkz = exp( kappa*log(rdg*delpbc(i,j,k)*ptbc(i,j,k) * &
916  (1.+dp1)/delzbc(i,j,k)))
917  ptbc(i,j,k) = ptbc(i,j,k)*(1.+dp1)/pkz
918 #endif
919  end do
920  end do
921  end do
922  end if
923 
924 
925 
926  end subroutine setup_pt_nh_bc
927 
928 
929  subroutine set_nh_bcs_t0(neststruct)
931  type(fv_nest_type), intent(INOUT) :: neststruct
932 
933 #ifndef SW_DYNAMICS
934  neststruct%delz_BC%east_t0 = neststruct%delz_BC%east_t1
935  neststruct%delz_BC%west_t0 = neststruct%delz_BC%west_t1
936  neststruct%delz_BC%north_t0 = neststruct%delz_BC%north_t1
937  neststruct%delz_BC%south_t0 = neststruct%delz_BC%south_t1
938 
939  neststruct%w_BC%east_t0 = neststruct%w_BC%east_t1
940  neststruct%w_BC%west_t0 = neststruct%w_BC%west_t1
941  neststruct%w_BC%north_t0 = neststruct%w_BC%north_t1
942  neststruct%w_BC%south_t0 = neststruct%w_BC%south_t1
943 #endif
944 
945  end subroutine set_nh_bcs_t0
946 
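!'set_bcs_t0' copies the newly received t1 boundary values into the t0
!slots, so the BC time interpolation starts from the current state.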
947  subroutine set_bcs_t0(ncnst, hydrostatic, neststruct)
949  integer, intent(IN) :: ncnst
950  logical, intent(IN) :: hydrostatic
951  type(fv_nest_type), intent(INOUT) :: neststruct
952 
953  integer :: n
954 
955  neststruct%delp_BC%east_t0 = neststruct%delp_BC%east_t1
956  neststruct%delp_BC%west_t0 = neststruct%delp_BC%west_t1
957  neststruct%delp_BC%north_t0 = neststruct%delp_BC%north_t1
958  neststruct%delp_BC%south_t0 = neststruct%delp_BC%south_t1
959  do n=1,ncnst
960  neststruct%q_BC(n)%east_t0 = neststruct%q_BC(n)%east_t1
961  neststruct%q_BC(n)%west_t0 = neststruct%q_BC(n)%west_t1
962  neststruct%q_BC(n)%north_t0 = neststruct%q_BC(n)%north_t1
963  neststruct%q_BC(n)%south_t0 = neststruct%q_BC(n)%south_t1
964  enddo
965 #ifndef SW_DYNAMICS
966  neststruct%pt_BC%east_t0 = neststruct%pt_BC%east_t1
967  neststruct%pt_BC%west_t0 = neststruct%pt_BC%west_t1
968  neststruct%pt_BC%north_t0 = neststruct%pt_BC%north_t1
969  neststruct%pt_BC%south_t0 = neststruct%pt_BC%south_t1
974 
975 #ifdef USE_COND
976  neststruct%q_con_BC%east_t0 = neststruct%q_con_BC%east_t1
977  neststruct%q_con_BC%west_t0 = neststruct%q_con_BC%west_t1
978  neststruct%q_con_BC%north_t0 = neststruct%q_con_BC%north_t1
979  neststruct%q_con_BC%south_t0 = neststruct%q_con_BC%south_t1
980 #ifdef MOIST_CAPPA
981  neststruct%cappa_BC%east_t0 = neststruct%cappa_BC%east_t1
982  neststruct%cappa_BC%west_t0 = neststruct%cappa_BC%west_t1
983  neststruct%cappa_BC%north_t0 = neststruct%cappa_BC%north_t1
984  neststruct%cappa_BC%south_t0 = neststruct%cappa_BC%south_t1
985 #endif
986 #endif
987 
988  if (.not. hydrostatic) then
989  call set_nh_bcs_t0(neststruct)
990  endif
991 #endif
992  neststruct%u_BC%east_t0 = neststruct%u_BC%east_t1
993  neststruct%u_BC%west_t0 = neststruct%u_BC%west_t1
994  neststruct%u_BC%north_t0 = neststruct%u_BC%north_t1
995  neststruct%u_BC%south_t0 = neststruct%u_BC%south_t1
996  neststruct%v_BC%east_t0 = neststruct%v_BC%east_t1
997  neststruct%v_BC%west_t0 = neststruct%v_BC%west_t1
998  neststruct%v_BC%north_t0 = neststruct%v_BC%north_t1
999  neststruct%v_BC%south_t0 = neststruct%v_BC%south_t1
1000 
1001 
1002  neststruct%vc_BC%east_t0 = neststruct%vc_BC%east_t1
1003  neststruct%vc_BC%west_t0 = neststruct%vc_BC%west_t1
1004  neststruct%vc_BC%north_t0 = neststruct%vc_BC%north_t1
1005  neststruct%vc_BC%south_t0 = neststruct%vc_BC%south_t1
1006  neststruct%uc_BC%east_t0 = neststruct%uc_BC%east_t1
1007  neststruct%uc_BC%west_t0 = neststruct%uc_BC%west_t1
1008  neststruct%uc_BC%north_t0 = neststruct%uc_BC%north_t1
1009  neststruct%uc_BC%south_t0 = neststruct%uc_BC%south_t1
1010 
1011  neststruct%divg_BC%east_t0 = neststruct%divg_BC%east_t1
1012  neststruct%divg_BC%west_t0 = neststruct%divg_BC%west_t1
1013  neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1
1014  neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1
1015 
1016  end subroutine set_bcs_t0
1017 
1018 
1019 !! nestupdate types
1020 !! 1 - Interpolation update on all variables
1021 !! 2 - Conserving update (over areas on cell-
1022 !! centered variables, over faces on winds) on all variables
1023 !! 3 - Interpolation update on winds only
1024 !! 4 - Interpolation update on all variables except delp (mass conserving)
1025 !! 5 - Remap interpolating update, delp not updated
1026 !! 6 - Remap conserving update, delp not updated
1027 !! 7 - Remap conserving update, delp and q not updated
1028 !! 8 - Remap conserving update, only winds updated
1029 
1030 !! Note that nestupdate > 3 will not update delp.
1031 
1032 !! "Remap update" remaps updated variables from the nested grid's
1033 !! vertical coordinate to that of the coarse grid. When delp is not
1034 !! updated (nestupdate > 3) the vertical coordinates differ on
1035 !! the two grids, because the surface pressure will be different
1036 !! on the two grids.
1037 !! Note: "conserving updates" do not guarantee global conservation
1038 !! unless flux nested grid BCs are specified, or if a quantity is
1039 !! not updated at all. This ability has not been implemented.
1040 !
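1041 !>@brief The subroutine 'twoway_nesting' performs a two-way update of
1042 !! nested-grid data onto the parent grid.
!The tests on 'nestupdate' in 'twoway_nest_update' below implement the
!table above; as a reading aid, an inactive sketch of the implied
!predicates (the local names here are illustrative only, not part of
!this module):
!!$ logical :: upd_delp, upd_q, upd_pt_w, do_remap
!!$ upd_delp = nestupdate < 3 !types 1 and 2 only
!!$ upd_q = nestupdate /= 3 .and. nestupdate /= 7 .and. nestupdate /= 8
!!$ upd_pt_w = nestupdate /= 3 .and. nestupdate /= 8
!!$ do_remap = nestupdate >= 5 !remap-update types 5-8 (when npz > 4)
!!$ !u and v are updated for every type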
1043 subroutine twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir)
1045  type(fv_atmos_type), intent(INOUT) :: atm(ngrids)
1046  integer, intent(IN) :: ngrids
1047  logical, intent(IN) :: grids_on_this_pe(ngrids)
1048  real, intent(IN) :: zvir
1049 
1050  integer :: n, p, sphum
1051 
1052 
1053  if (ngrids > 1) then
1054 
1055  do n=ngrids,2,-1 !loop backwards to allow information to propagate from finest to coarsest grids
1056 
1057  !two-way updating
1058  if (atm(n)%neststruct%twowaynest ) then
1059  if (grids_on_this_pe(n) .or. grids_on_this_pe(atm(n)%parent_grid%grid_number)) then
1060  sphum = get_tracer_index(model_atmos, 'sphum')
1061  call twoway_nest_update(atm(n)%npx, atm(n)%npy, atm(n)%npz, zvir, &
1062  atm(n)%ncnst, sphum, atm(n)%u, atm(n)%v, atm(n)%w, atm(n)%omga, &
1063  atm(n)%pt, atm(n)%delp, atm(n)%q, atm(n)%uc, atm(n)%vc, &
1064  atm(n)%pkz, atm(n)%delz, atm(n)%ps, atm(n)%ptop, &
1065  atm(n)%gridstruct, atm(n)%flagstruct, atm(n)%neststruct, atm(n)%parent_grid, atm(n)%bd, .false.)
1066  endif
1067  endif
1068 
1069  end do
1070 
1071  !NOTE: these routines need to be used with any grid that has been updated into (i.e., any parent of a two-way nest), not just the coarsest grid.
1072  do n=1,ngrids
1073  if (atm(n)%neststruct%parent_of_twoway .and. grids_on_this_pe(n)) then
1074  call after_twoway_nest_update( atm(n)%npx, atm(n)%npy, atm(n)%npz, atm(n)%ng, atm(n)%ncnst, &
1075  atm(n)%u, atm(n)%v, atm(n)%w, atm(n)%delz, &
1076  atm(n)%pt, atm(n)%delp, atm(n)%q, &
1077  atm(n)%ps, atm(n)%pe, atm(n)%pk, atm(n)%peln, atm(n)%pkz, &
1078  atm(n)%phis, atm(n)%ua, atm(n)%va, &
1079  atm(n)%ptop, atm(n)%gridstruct, atm(n)%flagstruct, &
1080  atm(n)%domain, atm(n)%bd)
1081  endif
1082  enddo
1083 
1084  endif ! ngrids > 1
1085 
1086  end subroutine twoway_nesting
1087 
1088 !!!CLEANUP: this routine assumes that the PARENT GRID has pt = (regular) temperature,
1089 !!!not potential temperature, which may cause problems when updating if this is not the case.
1090  subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, &
1091  u, v, w, omga, pt, delp, q, &
1092  uc, vc, pkz, delz, ps, ptop, &
1093  gridstruct, flagstruct, neststruct, &
1094  parent_grid, bd, conv_theta_in)
1096  real, intent(IN) :: zvir, ptop
1097 
1098  integer, intent(IN) :: npx, npy, npz
1099  integer, intent(IN) :: ncnst, sphum
1100  logical, intent(IN), OPTIONAL :: conv_theta_in
1101 
1102  type(fv_grid_bounds_type), intent(IN) :: bd
1103  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u
1104  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v
1105  real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1: )
1106  real, intent(inout) :: omga(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
1107  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
1108  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
1109  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst)
1110  real, intent(inout) :: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1111  real, intent(inout) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1112 
1113  real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz)
1114  real, intent(inout) :: delz(bd%isd: ,bd%jsd: ,1: )
1115  real, intent(inout) :: ps (bd%isd:bd%ied ,bd%jsd:bd%jed)
1116 
1117  type(fv_grid_type), intent(INOUT) :: gridstruct
1118  type(fv_flags_type), intent(INOUT) :: flagstruct
1119  type(fv_nest_type), intent(INOUT) :: neststruct
1120 
1121  type(fv_atmos_type), intent(INOUT) :: parent_grid
1122 
1123  real, allocatable :: t_nest(:,:,:), ps0(:,:)
1124  integer :: i,j,k,n
1125  integer :: isd_p, ied_p, jsd_p, jed_p, isc_p, iec_p, jsc_p, jec_p
1126  integer :: isg, ieg, jsg,jeg, npx_p, npy_p
1127  integer :: istart, iend
1128  real :: qmass_b, qmass_a, fix = 1.
1129  logical :: used, conv_theta=.true.
1130 
1131  real :: qdp( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
1132  real, allocatable :: qdp_coarse(:,:,:)
1133  real(kind=f_p), allocatable :: q_diff(:,:,:)
1134  real :: L_sum_b(npz), L_sum_a(npz)
1135 
1136  integer :: upoff
1137  integer :: is, ie, js, je
1138  integer :: isd, ied, jsd, jed
1139  integer :: isu, ieu, jsu, jeu
1140 
1141  is = bd%is
1142  ie = bd%ie
1143  js = bd%js
1144  je = bd%je
1145  isd = bd%isd
1146  ied = bd%ied
1147  jsd = bd%jsd
1148  jed = bd%jed
1149  isu = neststruct%isu
1150  ieu = neststruct%ieu
1151  jsu = neststruct%jsu
1152  jeu = neststruct%jeu
1153 
1154  upoff = neststruct%upoff
1155 
1156  !We update actual temperature, not theta.
1157  !If pt is actual temperature, set conv_theta to .false.
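 !(On the nested grid pt is virtual potential temperature; when conv_theta
 ! is true it is converted below via T = pt*pkz/(1.+zvir*q_sphum) before
 ! being gathered onto the parent grid.)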
1158  if (present(conv_theta_in)) conv_theta = conv_theta_in
1159 
1160  if ((.not. neststruct%parent_proc) .and. (.not. neststruct%child_proc)) return
1161 
1162  call mpp_get_data_domain( parent_grid%domain, &
1163  isd_p, ied_p, jsd_p, jed_p )
1164  call mpp_get_compute_domain( parent_grid%domain, &
1165  isc_p, iec_p, jsc_p, jec_p )
1166 
1167 
1168  !delp/ps
1169 
1170  if (neststruct%nestupdate < 3) then
1171 
1172  call update_coarse_grid(parent_grid%delp, delp, neststruct%nest_domain,&
1173  neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, &
1174  isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
1175  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
1176  npx, npy, npz, 0, 0, &
1177  neststruct%refinement, neststruct%nestupdate, upoff, 0, &
1178  neststruct%parent_proc, neststruct%child_proc, parent_grid)
1179 
1180  call mpp_sync!self
1181 
1182 #ifdef SW_DYNAMICS
1183  if (neststruct%parent_proc) then
1184  do j=jsd_p,jed_p
1185  do i=isd_p,ied_p
1186 
1187  parent_grid%ps(i,j) = &
1188  parent_grid%delp(i,j,1)/grav
1189 
1190  end do
1191  end do
1192  endif
1193 #endif
1194 
1195  end if
1196 
1197  !if (neststruct%nestupdate /= 3 .and. neststruct%nestbctype /= 3) then
1198  if (neststruct%nestupdate /= 3 .and. neststruct%nestupdate /= 7 .and. neststruct%nestupdate /= 8) then
1199 
1200  allocate(qdp_coarse(isd_p:ied_p,jsd_p:jed_p,npz))
1201  if (parent_grid%flagstruct%nwat > 0) then
1202  allocate(q_diff(isd_p:ied_p,jsd_p:jed_p,npz))
1203  q_diff = 0.
1204  endif
1205 
1206  do n=1,parent_grid%flagstruct%nwat
1207 
1208  qdp_coarse = 0.
1209  if (neststruct%child_proc) then
1210  do k=1,npz
1211  do j=jsd,jed
1212  do i=isd,ied
1213  qdp(i,j,k) = q(i,j,k,n)*delp(i,j,k)
1214  enddo
1215  enddo
1216  enddo
1217  else
1218  qdp = 0.
1219  endif
1220 
1221  if (neststruct%parent_proc) then
1222  !Add up ONLY the region being replaced by the nested grid
1223  do k=1,npz
1224  do j=jsu,jeu
1225  do i=isu,ieu
1226  qdp_coarse(i,j,k) = parent_grid%q(i,j,k,n)*parent_grid%delp(i,j,k)
1227  enddo
1228  enddo
1229  enddo
1230  call level_sum(qdp_coarse, parent_grid%gridstruct%area, parent_grid%domain, &
1231  parent_grid%bd, npz, l_sum_b)
1232  else
1233  qdp_coarse = 0.
1234  endif
1235  if (neststruct%parent_proc) then
1236  if (n <= parent_grid%flagstruct%nwat) then
1237  do k=1,npz
1238  do j=jsu,jeu
1239  do i=isu,ieu
1240  q_diff(i,j,k) = q_diff(i,j,k) - qdp_coarse(i,j,k)
1241  enddo
1242  enddo
1243  enddo
1244  endif
1245  endif
1246 
1247  call update_coarse_grid(qdp_coarse, qdp, neststruct%nest_domain, &
1248  neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, &
1249  isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
1250  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
1251  npx, npy, npz, 0, 0, &
1252  neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid)
1253 
1254  call mpp_sync!self
1255 
1256  if (neststruct%parent_proc) then
1257  call level_sum(qdp_coarse, parent_grid%gridstruct%area, parent_grid%domain, &
1258  parent_grid%bd, npz, l_sum_a)
1259  do k=1,npz
1260  if (l_sum_a(k) > 0.) then
1261  fix = l_sum_b(k)/l_sum_a(k)
1262  do j=jsu,jeu
1263  do i=isu,ieu
1264  !Normalization mass fixer
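 !fix rescales the updated field so that the area-weighted tracer mass
 !over the update region is unchanged: fix = l_sum_b/l_sum_a, with
 !l_sum_b computed before and l_sum_a after the nest update.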
1265  parent_grid%q(i,j,k,n) = qdp_coarse(i,j,k)*fix
1266  enddo
1267  enddo
1268  endif
1269  enddo
1270  if (n == 1) sphum_ll_fix = 1. - fix
1271  endif
1272  if (neststruct%parent_proc) then
1273  if (n <= parent_grid%flagstruct%nwat) then
1274  do k=1,npz
1275  do j=jsu,jeu
1276  do i=isu,ieu
1277  q_diff(i,j,k) = q_diff(i,j,k) + parent_grid%q(i,j,k,n)
1278  enddo
1279  enddo
1280  enddo
1281  endif
1282  endif
1283 
1284  end do
1285 
1286  if (neststruct%parent_proc) then
1287  if (parent_grid%flagstruct%nwat > 0) then
1288  do k=1,npz
1289  do j=jsu,jeu
1290  do i=isu,ieu
1291  parent_grid%delp(i,j,k) = parent_grid%delp(i,j,k) + q_diff(i,j,k)
1292  enddo
1293  enddo
1294  enddo
1295  endif
1296 
1297  do n=1,parent_grid%flagstruct%nwat
1298  do k=1,npz
1299  do j=jsu,jeu
1300  do i=isu,ieu
1301  parent_grid%q(i,j,k,n) = parent_grid%q(i,j,k,n)/parent_grid%delp(i,j,k)
1302  enddo
1303  enddo
1304  enddo
1305  enddo
1306  endif
1307 
1308  deallocate(qdp_coarse)
1309  if (allocated(q_diff)) deallocate(q_diff)
1310 
1311  endif
1312 
1313 #ifndef SW_DYNAMICS
1314  if (neststruct%nestupdate /= 3 .and. neststruct%nestupdate /= 8) then
1315 
1316  if (conv_theta) then
1317 
1318  if (neststruct%child_proc) then
1319  !pt is potential temperature on the nested grid, but actual
1320  !temperature on the coarse grid. Compute actual temperature
1321  !on the nested grid, then gather.
1322  allocate(t_nest(isd:ied,jsd:jed,1:npz))
1323 !$OMP parallel do default(none) shared(npz,js,je,is,ie,t_nest,pt,pkz,zvir,q,sphum)
1324  do k=1,npz
1325  do j=js,je
1326  do i=is,ie
1327  t_nest(i,j,k) = pt(i,j,k)*pkz(i,j,k)/(1.+zvir*q(i,j,k,sphum))
1328  enddo
1329  enddo
1330  enddo
1332  endif
1333 
1334  call update_coarse_grid(parent_grid%pt, &
1335  t_nest, neststruct%nest_domain, &
1336  neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, &
1337  isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
1338  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
1339  npx, npy, npz, 0, 0, &
1340  neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid)
1341  if (neststruct%child_proc) deallocate(t_nest)
1342  else
1342 
1343  call update_coarse_grid(parent_grid%pt, &
1344  pt, neststruct%nest_domain, &
1345  neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, &
1346  isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
1347  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
1348  npx, npy, npz, 0, 0, &
1349  neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid)
1350 
1351  endif !conv_theta
1352 
1353  call mpp_sync!self
1354 
1355 
1356  if (.not. flagstruct%hydrostatic) then
1357 
1358  call update_coarse_grid(parent_grid%w, w, neststruct%nest_domain, &
1359  neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, &
1360  isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
1361  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
1362  npx, npy, npz, 0, 0, &
1363  neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid)
1364  !Updating for delz not yet implemented; may be problematic
1365 !!$ call update_coarse_grid(parent_grid%delz, delz, neststruct%nest_domain, &
1366 !!$ neststruct%ind_update_h, &
1367 !!$ isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, npz, 0, 0, &
1368 !!$ neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc)
1369 
1370  call mpp_sync!self
1371 
1372  end if
1373 
1374  end if !Neststruct%nestupdate /= 3
1375 
1376 #endif
1377 
1378  call update_coarse_grid(parent_grid%u, u, neststruct%nest_domain, &
1379  neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, &
1380  isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
1381  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
1382  npx, npy, npz, 0, 1, &
1383  neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid)
1384 
1385  call update_coarse_grid(parent_grid%v, v, neststruct%nest_domain, &
1386  neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, &
1387  isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
1388  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
1389  npx, npy, npz, 1, 0, &
1390  neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid)
1391 
1392  call mpp_sync!self
1393 
1394 
1395 #ifndef SW_DYNAMICS
1396  if (neststruct%nestupdate >= 5 .and. npz > 4) then
1397 
1398  !Use PS0 from nested grid, NOT the full delp. Also we assume the same number of levels on both grids.
1399  !PS0 should be initially set to be ps so that this routine does NOTHING outside of the update region
1400 
1401  !Re-compute nested (AND COARSE) grid ps
1402 
1403  allocate(ps0(isd_p:ied_p,jsd_p:jed_p))
1404  if (neststruct%parent_proc) then
1405 
1406  parent_grid%ps = parent_grid%ptop
1407 !This loop appears to cause problems with OMP
1408 !$OMP parallel do default(none) shared(npz,jsd_p,jed_p,isd_p,ied_p,parent_grid)
1409  do j=jsd_p,jed_p
1410  do k=1,npz
1411  do i=isd_p,ied_p
1412  parent_grid%ps(i,j) = parent_grid%ps(i,j) + &
1413  parent_grid%delp(i,j,k)
1414  end do
1415  end do
1416  end do
1417 
1418  ps0 = parent_grid%ps
1419  endif
1420 
1421  if (neststruct%child_proc) then
1422 
1423  ps = ptop
1424 !$OMP parallel do default(none) shared(npz,jsd,jed,isd,ied,ps,delp)
1425  do j=jsd,jed
1426  do k=1,npz
1427  do i=isd,ied
1428  ps(i,j) = ps(i,j) + delp(i,j,k)
1429  end do
1430  end do
1431  end do
1432  endif
1433 
1434  call update_coarse_grid(ps0, ps, neststruct%nest_domain, &
1435  neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, &
1436  isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, &
1437  neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, &
1438  npx, npy, 0, 0, &
1439  neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid)
1440 
1441  !!! The mpp version of update_coarse_grid does not return a consistent value of ps
1442  !!! across PEs, as it does not go into the haloes of a given coarse-grid PE. This
1443  !!! update_domains call takes care of the problem.
1444 
1445  if (neststruct%parent_proc) then
1446  call mpp_update_domains(parent_grid%ps, parent_grid%domain, complete=.false.)
1447  call mpp_update_domains(ps0, parent_grid%domain, complete=.true.)
1448  endif
1449 
1450 
1451  call mpp_sync!self
1452 
1453  if (parent_grid%tile == neststruct%parent_tile) then
1454 
1455  if (neststruct%parent_proc) then
1456 
1457  !comment out if statement to always remap theta instead of t in the remap-update.
1458  !(In LtE typically we use remap_t = .true.: remapping t is better (except in
1459  !idealized simulations with a background uniform theta) since near the top
1460  !boundary theta is exponential, which is hard to interpolate accurately with a spline.)
1461  if (.not. parent_grid%flagstruct%remap_t) then
1462 !$OMP parallel do default(none) shared(npz,jsc_p,jec_p,isc_p,iec_p,parent_grid,zvir,sphum)
1463  do k=1,npz
1464  do j=jsc_p,jec_p
1465  do i=isc_p,iec_p
1466  parent_grid%pt(i,j,k) = &
1467  parent_grid%pt(i,j,k)/parent_grid%pkz(i,j,k)*&
1468  (1.+zvir*parent_grid%q(i,j,k,sphum))
1469  end do
1470  end do
1471  end do
1472  end if
1473  call update_remap_tqw(npz, parent_grid%ak, parent_grid%bk, &
1474  parent_grid%ps, parent_grid%delp, &
1475  parent_grid%pt, parent_grid%q, parent_grid%w, &
1476  parent_grid%flagstruct%hydrostatic, &
1477  npz, ps0, zvir, parent_grid%ptop, ncnst, &
1478  parent_grid%flagstruct%kord_tm, parent_grid%flagstruct%kord_tr, &
1479  parent_grid%flagstruct%kord_wz, &
1480  isc_p, iec_p, jsc_p, jec_p, isd_p, ied_p, jsd_p, jed_p, .false. ) !neststruct%nestupdate < 7)
1481  if (.not. parent_grid%flagstruct%remap_t) then
1482 !$OMP parallel do default(none) shared(npz,jsc_p,jec_p,isc_p,iec_p,parent_grid,zvir,sphum)
1483  do k=1,npz
1484  do j=jsc_p,jec_p
1485  do i=isc_p,iec_p
1486  parent_grid%pt(i,j,k) = &
1487  parent_grid%pt(i,j,k)*parent_grid%pkz(i,j,k) / &
1488  (1.+zvir*parent_grid%q(i,j,k,sphum))
1489  end do
1490  end do
1491  end do
1492  end if
1493 
1494  call update_remap_uv(npz, parent_grid%ak, parent_grid%bk, &
1495  parent_grid%ps, &
1496  parent_grid%u, &
1497  parent_grid%v, npz, ps0, parent_grid%flagstruct%kord_mt, &
1498  isc_p, iec_p, jsc_p, jec_p, isd_p, ied_p, jsd_p, jed_p, parent_grid%ptop)
1499 
1500  endif !neststruct%parent_proc
1501 
1502  end if
1503 
1504  if (allocated(ps0)) deallocate(ps0)
1505 
1506  end if
1507 
1508 #endif
1509 
1510  end subroutine twoway_nest_update
1511 
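!'level_sum' computes the area-weighted sum of q on each model level,
!reducing across PEs with mp_reduce_sum.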
1512  subroutine level_sum(q, area, domain, bd, npz, L_sum)
1514  integer, intent(IN) :: npz
1515  type(fv_grid_bounds_type), intent(IN) :: bd
1516  real, intent(in) :: area( bd%isd:bd%ied ,bd%jsd:bd%jed)
1517  real, intent(in) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
1518  real, intent(OUT) :: L_sum( npz )
1519  type(domain2d), intent(IN) :: domain
1520 
1521  integer :: i, j, k, n
1522  real :: qA!(bd%is:bd%ie, bd%js:bd%je)
1523 
1524  do k=1,npz
1525  qa = 0.
1526  do j=bd%js,bd%je
1527  do i=bd%is,bd%ie
1528  !qA(i,j) = q(i,j,k)*area(i,j)
1529  qa = qa + q(i,j,k)*area(i,j)
1530  enddo
1531  enddo
1532  call mp_reduce_sum(qa)
1533  l_sum(k) = qa
1534 ! L_sum(k) = mpp_global_sum(domain, qA, flags=BITWISE_EXACT_SUM)
1535 ! L_sum(k) = mpp_global_sum(domain, qA, flags=BITWISE_EFP_SUM) ! doesn't work??
1536  enddo
1537 
1538  end subroutine level_sum
1539 
1540 
1541  subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, &
1542  u, v, w, delz, pt, delp, q, &
1543  ps, pe, pk, peln, pkz, phis, ua, va, &
1544  ptop, gridstruct, flagstruct, &
1545  domain, bd)
1547  type(fv_grid_bounds_type), intent(IN) :: bd
1548  real, intent(IN) :: ptop
1549 
1550  integer, intent(IN) :: ng, npx, npy, npz
1551  integer, intent(IN) :: ncnst
1552 
1553  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u
1554  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v
1555  real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1: )
1556  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
1557  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
1558  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst)
1559  real, intent(inout) :: delz(bd%isd: ,bd%jsd: ,1: )
1560 
1561 !-----------------------------------------------------------------------
1562 ! Auxiliary pressure arrays:
1563 ! The 5 vars below can be re-computed from delp and ptop.
1564 !-----------------------------------------------------------------------
1565 ! dyn_aux:
1566  real, intent(inout) :: ps (bd%isd:bd%ied ,bd%jsd:bd%jed)
1567  real, intent(inout) :: pe (bd%is-1:bd%ie+1, npz+1,bd%js-1:bd%je+1)
1568  real, intent(inout) :: pk (bd%is:bd%ie,bd%js:bd%je, npz+1)
1569  real, intent(inout) :: peln(bd%is:bd%ie,npz+1,bd%js:bd%je)
1570  real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz)
1571 
1572 !-----------------------------------------------------------------------
1573 ! Others:
1574 !-----------------------------------------------------------------------
1575  real, intent(inout) :: phis(bd%isd:bd%ied,bd%jsd:bd%jed)
1576 
1577  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz):: ua, va
1578  type(fv_grid_type), intent(IN) :: gridstruct
1579  type(fv_flags_type), intent(IN) :: flagstruct
1580  type(domain2d), intent(INOUT) :: domain
1581 
1582  logical :: bad_range
1583 
1584  integer :: is, ie, js, je
1585  integer :: isd, ied, jsd, jed
1586 
1587  is = bd%is
1588  ie = bd%ie
1589  js = bd%js
1590  je = bd%je
1591  isd = bd%isd
1592  ied = bd%ied
1593  jsd = bd%jsd
1594  jed = bd%jed
1595 
1596  call cubed_to_latlon(u, v, ua, va, &
1597  gridstruct, npx, npy, npz, &
1598  1, gridstruct%grid_type, domain, &
1599  gridstruct%nested, flagstruct%c2l_ord, bd)
1600 
1601 #ifndef SW_DYNAMICS
1602 
1603  !To get coarse grid pkz, etc right after a two-way update so
1604  !that it is consistent across a restart:
1605  !(should only be called after doing such an update)
1606 
1607  !! CLEANUP: move to twoway_nest_update??
1608  call p_var(npz, is, ie, js, je, ptop, ptop_min, &
1609  delp, delz, &
1610  pt, ps, &
1611  pe, peln, &
1612  pk, pkz, kappa, &
1613  q, ng, flagstruct%ncnst, gridstruct%area_64, 0., &
1614  .false., .false., & !mountain argument not used
1615  flagstruct%moist_phys, flagstruct%hydrostatic, &
1616  flagstruct%nwat, domain, .false.)
1617 
1618 #endif
1619 
1620  if (flagstruct%range_warn) then
1621  call range_check('TA update', pt, is, ie, js, je, ng, npz, gridstruct%agrid, 130., 350., bad_range)
1622  call range_check('UA update', ua, is, ie, js, je, ng, npz, gridstruct%agrid, -220., 250., bad_range)
1623  call range_check('VA update', va, is, ie, js, je, ng, npz, gridstruct%agrid, -220., 220., bad_range)
1624  if (.not. flagstruct%hydrostatic) then
1625  call range_check('W update', w, is, ie, js, je, ng, npz, gridstruct%agrid, -50., 100., bad_range)
1626  endif
1627  endif
1628 
1629 
1630 
1631  end subroutine after_twoway_nest_update
1632 
1633 !>@brief 'update_remap_tqw' vertically remaps the updated temperature,
1634 !! tracers, and w on the coarse grid after a two-way update.
1635  !This does not yet do anything for the tracers (it is currently called with do_q=.false.)
1636  subroutine update_remap_tqw( npz, ak, bk, ps, delp, t, q, w, hydrostatic, &
1637  kmd, ps0, zvir, ptop, nq, kord_tm, kord_tr, kord_wz, &
1638  is, ie, js, je, isd, ied, jsd, jed, do_q)
1639  integer, intent(in):: npz, kmd, nq, kord_tm, kord_tr, kord_wz
1640  real, intent(in):: zvir, ptop
1641  real, intent(in):: ak(npz+1), bk(npz+1)
1642  real, intent(in), dimension(isd:ied,jsd:jed):: ps0
1643  real, intent(in), dimension(isd:ied,jsd:jed):: ps
1644  real, intent(in), dimension(isd:ied,jsd:jed,npz):: delp
1645  real, intent(inout), dimension(isd:ied,jsd:jed,npz):: t, w
1646  real, intent(inout), dimension(isd:ied,jsd:jed,npz,nq):: q
1647  integer, intent(in) :: is, ie, js, je, isd, ied, jsd, jed
1648  logical, intent(in) :: hydrostatic, do_q
1649 ! local:
1650  real, dimension(is:ie,kmd):: tp, qp
1651  real, dimension(is:ie,kmd+1):: pe0, pn0
1652  real, dimension(is:ie,npz):: qn1
1653  real, dimension(is:ie,npz+1):: pe1, pn1
1654  integer i,j,k,iq
1655 
1656 !$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak,bk,ps0,q,npz,ptop,do_q,&
1657 !$OMP t,w,ps,nq,hydrostatic,kord_tm,kord_tr,kord_wz) &
1658 !$OMP private(pe0,pn0,pe1,pn1,qp,tp,qn1)
1659  do 5000 j=js,je
1660 
1661  do k=1,kmd+1
1662  do i=is,ie
1663  pe0(i,k) = ak(k) + bk(k)*ps0(i,j)
1664  pn0(i,k) = log(pe0(i,k))
1665  enddo
1666  enddo
1667  do k=1,kmd+1
1668  do i=is,ie
1669  pe1(i,k) = ak(k) + bk(k)*ps(i,j)
1670  pn1(i,k) = log(pe1(i,k))
1671  enddo
1672  enddo
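 ! [Illustrative note, not in the original source] Both sets of edge pressures
 ! follow the hybrid sigma-pressure definition p(k) = ak(k) + bk(k)*ps:
 ! pe0/pn0 use the input surface pressure ps0 (source levels), while
 ! pe1/pn1 use ps (target levels).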
1673  if (do_q) then
1674  do iq=1,nq
1675  do k=1,kmd
1676  do i=is,ie
1677  qp(i,k) = q(i,j,k,iq)
1678  enddo
1679  enddo
1680  call mappm(kmd, pe0, qp, npz, pe1, qn1, is,ie, 0, kord_tr, ptop)
1681  do k=1,npz
1682  do i=is,ie
1683  q(i,j,k,iq) = qn1(i,k)
1684  enddo
1685  enddo
1686  enddo
1687  endif
1688 
1689  do k=1,kmd
1690  do i=is,ie
1691  tp(i,k) = t(i,j,k)
1692  enddo
1693  enddo
1694  !Remap T using log(p)
1695  call mappm(kmd, pn0, tp, npz, pn1, qn1, is,ie, 1, abs(kord_tm), ptop)
1696 
1697  do k=1,npz
1698  do i=is,ie
1699  t(i,j,k) = qn1(i,k)
1700  enddo
1701  enddo
1702 
1703  if (.not. hydrostatic) then
1704  do k=1,kmd
1705  do i=is,ie
1706  tp(i,k) = w(i,j,k)
1707  enddo
1708  enddo
1709  !Remap w using p
1710  !Using iv == -1 instead of -2
1711  call mappm(kmd, pe0, tp, npz, pe1, qn1, is,ie, -1, kord_wz, ptop)
1712 
1713  do k=1,npz
1714  do i=is,ie
1715  w(i,j,k) = qn1(i,k)
1716  enddo
1717  enddo
1718  endif
1719 
1720 5000 continue
1721 
1722  end subroutine update_remap_tqw
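 ! [Hypothetical usage sketch, not in the original source] After a two-way
 ! update changes the surface pressure from ps0 to ps, the updated state can
 ! be remapped back onto the model's hybrid levels with, e.g.:
 !   call update_remap_tqw(npz, ak, bk, ps, delp, pt, q, w, hydrostatic, &
 !                         npz, ps0, zvir, ptop, ncnst, kord_tm, kord_tr, kord_wz, &
 !                         is, ie, js, je, isd, ied, jsd, jed, .true.)
 ! where kmd = npz because the source and target level counts match.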
1723 
1724  !remap_uv as-is remaps only A-grid velocities; the routine below handles the staggered D-grid winds.
1725  subroutine update_remap_uv(npz, ak, bk, ps, u, v, kmd, ps0, kord_mt, &
1726  is, ie, js, je, isd, ied, jsd, jed, ptop)
1727  integer, intent(in):: npz
1728  real, intent(in):: ak(npz+1), bk(npz+1)
1729  real, intent(in):: ps(isd:ied,jsd:jed)
1730  real, intent(inout), dimension(isd:ied,jsd:jed+1,npz):: u
1731  real, intent(inout), dimension(isd:ied+1,jsd:jed,npz):: v
1732 !
1733  integer, intent(in):: kmd, kord_mt
1734  real, intent(IN) :: ptop
1735  real, intent(in):: ps0(isd:ied,jsd:jed)
1736  integer, intent(in) :: is, ie, js, je, isd, ied, jsd, jed
1737 !
1738 ! local:
1739  real, dimension(is:ie+1,kmd+1):: pe0
1740  real, dimension(is:ie+1,npz+1):: pe1
1741  real, dimension(is:ie+1,kmd):: qt
1742  real, dimension(is:ie+1,npz):: qn1
1743  integer i,j,k
1744 
1745 !------
1746 ! map u
1747 !------
1748 !$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak,bk,ps,ps0,npz,u,ptop,kord_mt) &
1749 !$OMP private(pe0,pe1,qt,qn1)
1750  do j=js,je+1
1751 !------
1752 ! Data
1753 !------
1754  do k=1,kmd+1
1755  do i=is,ie
1756  pe0(i,k) = ak(k) + bk(k)*0.5*(ps0(i,j)+ps0(i,j-1))
1757  enddo
1758  enddo
1759 !------
1760 ! Model
1761 !------
1762  do k=1,kmd+1
1763  do i=is,ie
1764  pe1(i,k) = ak(k) + bk(k)*0.5*(ps(i,j)+ps(i,j-1))
1765  enddo
1766  enddo
1767 !------
1768 !Do map
1769 !------
1770  qt = 0.
1771  do k=1,kmd
1772  do i=is,ie
1773  qt(i,k) = u(i,j,k)
1774  enddo
1775  enddo
1776  qn1 = 0.
1777  call mappm(kmd, pe0(is:ie,:), qt(is:ie,:), npz, pe1(is:ie,:), qn1(is:ie,:), is,ie, -1, kord_mt, ptop)
1778  do k=1,npz
1779  do i=is,ie
1780  u(i,j,k) = qn1(i,k)
1781  enddo
1782  enddo
1783 
1784  end do
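 ! [Illustrative note, not in the original source] On the D grid, u lies on the
 ! j-faces, so the edge pressures above average the surface pressures of the
 ! two cells sharing each face, ps(i,j) and ps(i,j-1); the v remap below
 ! averages ps(i,j) and ps(i-1,j) in i instead.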
1785 
1786 !------
1787 ! map v
1788 !------
1789 !$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak,bk,ps,ps0,npz,v,ptop) &
1790 !$OMP private(pe0,pe1,qt,qn1)
1791  do j=js,je
1792 !------
1793 ! Data
1794 !------
1795  do k=1,kmd+1
1796  do i=is,ie+1
1797  pe0(i,k) = ak(k) + bk(k)*0.5*(ps0(i,j)+ps0(i-1,j))
1798  enddo
1799  enddo
1800 !------
1801 ! Model
1802 !------
1803  do k=1,kmd+1
1804  do i=is,ie+1
1805  pe1(i,k) = ak(k) + bk(k)*0.5*(ps(i,j)+ps(i-1,j))
1806  enddo
1807  enddo
1808 !------
1809 !Do map
1810 !------
1811  qt = 0.
1812  do k=1,kmd
1813  do i=is,ie+1
1814  qt(i,k) = v(i,j,k)
1815  enddo
1816  enddo
1817  qn1 = 0.
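 ! Note: unlike the u remap above, this call hard-codes kord = 8 rather than
 ! passing kord_mt.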
1818  call mappm(kmd, pe0(is:ie+1,:), qt(is:ie+1,:), npz, pe1(is:ie+1,:), qn1(is:ie+1,:), is,ie+1, -1, 8, ptop)
1819  do k=1,npz
1820  do i=is,ie+1
1821  v(i,j,k) = qn1(i,k)
1822  enddo
1823  enddo
1824  end do
1825 
1826  end subroutine update_remap_uv
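 ! [Illustrative sketch, not part of the original FV3 source] The remaps above
 ! all reduce to conservative 1-D remapping between two sets of pressure edges,
 ! which 'mappm' performs with high-order reconstructions and monotonicity
 ! constraints. A minimal first-order (piecewise-constant) version of that
 ! operation, preserving mass-weighted column integrals, might look like:
 subroutine remap1d_sketch(km, pe1, q1, kn, pe2, q2)
   integer, intent(in)  :: km, kn
   real,    intent(in)  :: pe1(km+1)   ! source layer edges, increasing in p
   real,    intent(in)  :: q1(km)      ! source layer-mean values
   real,    intent(in)  :: pe2(kn+1)   ! target layer edges, increasing in p
   real,    intent(out) :: q2(kn)      ! target layer-mean values
   integer :: k, l
   real :: overlap
   do k = 1, kn
      q2(k) = 0.
      do l = 1, km
         ! pressure-thickness overlap of source layer l with target layer k
         overlap = max(0., min(pe1(l+1), pe2(k+1)) - max(pe1(l), pe2(k)))
         q2(k) = q2(k) + q1(l)*overlap
      enddo
      q2(k) = q2(k)/(pe2(k+1) - pe2(k))  ! normalize by target layer thickness
   enddo
 end subroutine remap1d_sketch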
1827 
1828 
1829 
1830 end module fv_nesting_mod