FV3DYCORE Version 1.0.0
dyn_core.F90
1 
2 !***********************************************************************
3 !* GNU Lesser General Public License
4 !*
5 !* This file is part of the FV3 dynamical core.
6 !*
7 !* The FV3 dynamical core is free software: you can redistribute it
8 !* and/or modify it under the terms of the
9 !* GNU Lesser General Public License as published by the
10 !* Free Software Foundation, either version 3 of the License, or
11 !* (at your option) any later version.
12 !* The FV3 dynamical core is distributed in the hope that it will be
13 !* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
14 !* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 !* See the GNU General Public License for more details.
16 !* You should have received a copy of the GNU Lesser General Public
17 !* License along with the FV3 dynamical core.
18 !* If not, see <http://www.gnu.org/licenses/>.
19 !***********************************************************************
20 !
27 
29 
30 ! <table>
31 ! <tr>
32 ! <th>Module Name</th>
33 ! <th>Functions Included</th>
34 ! </tr>
35 ! <tr>
36 ! <td>a2b_edge_mod</td>
37 ! <td>a2b_ord2, a2b_ord4</td>
38 ! </tr>
39 ! <tr>
40 ! <td>boundary_mod</td>
41 ! <td>extrapolation_BC, nested_grid_BC_apply_intT</td>
42 ! </tr>
43 ! <tr>
44 ! <td>constants_mod</td>
45 ! <td>rdgas, radius, cp_air, pi</td>
46 ! </tr>
47 ! <tr>
48 ! <td>diag_manager_mod</td>
49 ! <td>send_data</td>
50 ! </tr>
51 ! <tr>
52 ! <td>fv_ada_nudge_mod</td>
53 ! <td>breed_slp_inline_ada</td>
54 ! </tr>
55 ! <tr>
56 ! <td>fv_arrays_mod</td>
57 ! <td>fv_grid_type, fv_flags_type, fv_nest_type,
58 ! fv_diag_type, fv_grid_bounds_type, R_GRID </td>
59 ! </tr>
60 ! <tr>
61 ! <td>fv_diagnostics_mod</td>
62 ! <td>prt_maxmin, fv_time, prt_mxm</td>
63 ! </tr>
64 ! <tr>
65 ! <td>fv_mp_mod</td>
66 ! <td>is_master, start_group_halo_update,
67 ! complete_group_halo_update, group_halo_update_type</td>
68 ! </tr>
69 ! <tr>
70 ! <td>fv_nwp_nudge_mod</td>
71 ! <td>breed_slp_inline, do_adiabatic_init</td>
72 ! </tr>
73 ! <tr>
74 ! <td>fv_timing_mod</td>
75 ! <td>timing_on, timing_off</td>
76 ! </tr>
77 ! <tr>
78 ! <td>fv_update_phys_mod</td>
79 ! <td>update_dwinds_phys</td>
80 ! </tr>
81 ! <tr>
82 ! <td>mpp_mod</td>
83 ! <td>mpp_pe </td>
84 ! </tr>
85 ! <tr>
86 ! <td>mpp_domains_mod</td>
87 ! <td>CGRID_NE, DGRID_NE, mpp_get_boundary, mpp_update_domains, domain2d</td>
88 ! </tr>
89 ! <tr>
90 ! <td>mpp_parameter_mod</td>
91 ! <td>CORNER</td>
92 ! </tr>
93 ! <tr>
94 ! <td>nh_core_mod</td>
95 ! <td>Riem_Solver3, Riem_Solver_C, update_dz_c, update_dz_d, nest_halo_nh</td>
96 ! </tr>
97 ! <tr>
98 ! <td>test_cases_mod</td>
99 ! <td>test_case, case9_forcing1, case9_forcing2</td>
100 ! </tr>
101 ! <tr>
102 ! <td>tp_core_mod</td>
103 ! <td>copy_corners</td>
104 ! </tr>
105 ! </table>
106 
107  use constants_mod, only: rdgas, radius, cp_air, pi
108  use mpp_mod, only: mpp_pe
109  use mpp_domains_mod, only: cgrid_ne, dgrid_ne, mpp_get_boundary, mpp_update_domains, &
110  domain2d
111  use mpp_parameter_mod, only: corner
112  use fv_mp_mod, only: is_master
113  use fv_mp_mod, only: start_group_halo_update, complete_group_halo_update
114  use fv_mp_mod, only: group_halo_update_type
115  use sw_core_mod, only: c_sw, d_sw
116  use a2b_edge_mod, only: a2b_ord2, a2b_ord4
117  use nh_core_mod, only: riem_solver3, riem_solver_c, update_dz_c, update_dz_d, nest_halo_nh
118  use tp_core_mod, only: copy_corners
121 #ifdef ROT3
122  use fv_update_phys_mod, only: update_dwinds_phys
123 #endif
124 #if defined (ADA_NUDGE)
125  use fv_ada_nudge_mod, only: breed_slp_inline_ada
126 #else
127  use fv_nwp_nudge_mod, only: breed_slp_inline, do_adiabatic_init
128 #endif
129  use diag_manager_mod, only: send_data
132 
136 
137 #ifdef SW_DYNAMICS
138  use test_cases_mod, only: test_case, case9_forcing1, case9_forcing2
139 #endif
140 #ifdef MULTI_GASES
141  use multi_gases_mod, only: virqd, vicvqd
142 #endif
145 
146 implicit none
147 private
148 
149 public :: dyn_core, del2_cubed, init_ijk_mem
150 
151  real :: ptk, peln1, rgrav
152  real :: d3_damp
153  real, allocatable, dimension(:,:,:) :: ut, vt, crx, cry, xfx, yfx, divgd, &
154  zh, du, dv, pkc, delpc, pk3, ptc, gz
155 ! real, parameter:: delt_max = 1.e-1 ! Max dissipative heating/cooling rate
156  ! 6 deg per 10-min
157  real(kind=R_GRID), parameter :: cnst_0p20=0.20d0
158 
159  real, allocatable :: rf(:)
160  integer:: k_rf = 0
161  logical:: rff_initialized = .false.
162  logical:: first_call = .true.
163  integer :: kmax=1
164 
165 contains
166 
167 !-----------------------------------------------------------------------
168 ! dyn_core :: FV Lagrangian dynamics driver
169 !-----------------------------------------------------------------------
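! Overview (informal note): each call advances the dynamics by one large
! (vertically Lagrangian) step bdt, internally split into n_split acoustic
! sub-steps of length dt = bdt/n_split. Roughly, each sub-step below performs
! a C-grid half step (c_sw, plus Riem_Solver_C when nonhydrostatic), the
! C-grid pressure-gradient update (p_grad_c), a full D-grid step (d_sw,
! update_dz_d, Riem_Solver3), and the D-grid pressure-gradient force, with
! halo updates and nested/regional boundary conditions interleaved.
!-----------------------------------------------------------------------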
170 
171  subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, cappa, &
172 #ifdef MULTI_GASES
173  kapad, &
174 #endif
175  grav, hydrostatic, &
176  u, v, w, delz, pt, q, delp, pe, pk, phis, ws, omga, ptop, pfull, ua, va, &
177  uc, vc, mfx, mfy, cx, cy, pkz, peln, q_con, ak, bk, &
178  ks, gridstruct, flagstruct, neststruct, idiag, bd, domain, &
179  init_step, i_pack, end_step, diss_est,time_total)
181  integer, intent(IN) :: npx
182  integer, intent(IN) :: npy
183  integer, intent(IN) :: npz
184  integer, intent(IN) :: ng, nq, sphum
185  integer, intent(IN) :: n_split
186  real , intent(IN) :: bdt
187  real , intent(IN) :: zvir, cp, akap, grav
188  real , intent(IN) :: ptop
189  logical, intent(IN) :: hydrostatic
190  logical, intent(IN) :: init_step, end_step
191  real, intent(in) :: pfull(npz)
192  real, intent(in), dimension(npz+1) :: ak, bk
193  integer, intent(IN) :: ks
194  type(group_halo_update_type), intent(inout) :: i_pack(*)
195  type(fv_grid_bounds_type), intent(IN) :: bd
196  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz):: u
197  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz):: v
198  real, intent(inout) :: w( bd%isd:,bd%jsd:,1:)
199  real, intent(inout) :: delz(bd%isd:,bd%jsd:,1:)
200  real, intent(inout) :: cappa(bd%isd:,bd%jsd:,1:)
201 #ifdef MULTI_GASES
202  real, intent(inout) :: kapad(bd%isd:bd%ied,bd%jsd:bd%jed,1:npz)
203 #endif
204  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
205  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
206  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, nq) !
207  real, intent(in), optional:: time_total
208  real, intent(inout) :: diss_est(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
209 
210 !-----------------------------------------------------------------------
211 ! Auxiliary pressure arrays:
212 ! The 5 vars below can be re-computed from delp and ptop.
213 !-----------------------------------------------------------------------
214 ! dyn_aux:
215  real, intent(inout):: phis(bd%isd:bd%ied,bd%jsd:bd%jed)
216  real, intent(inout):: pe(bd%is-1:bd%ie+1, npz+1,bd%js-1:bd%je+1)
217  real, intent(inout):: peln(bd%is:bd%ie,npz+1,bd%js:bd%je)
218  real, intent(inout):: pk(bd%is:bd%ie,bd%js:bd%je, npz+1)
219 
220 !-----------------------------------------------------------------------
221 ! Others:
222  real, parameter:: near0 = 1.e-8
223 #ifdef OVERLOAD_R4
224  real, parameter:: huge_r = 1.e8
225 #else
226  real, parameter:: huge_r = 1.e40
227 #endif
228 !-----------------------------------------------------------------------
229  real, intent(out ):: ws(bd%is:bd%ie,bd%js:bd%je)
230  real, intent(inout):: omga(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
231  real, intent(inout):: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
232  real, intent(inout):: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
233  real, intent(inout), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz):: ua, va
234  real, intent(inout):: q_con(bd%isd:, bd%jsd:, 1:)
235 
236 ! The Flux capacitors: accumulated Mass flux arrays
237  real, intent(inout):: mfx(bd%is:bd%ie+1, bd%js:bd%je, npz)
238  real, intent(inout):: mfy(bd%is:bd%ie , bd%js:bd%je+1, npz)
239 ! Accumulated Courant number arrays
240  real, intent(inout):: cx(bd%is:bd%ie+1, bd%jsd:bd%jed, npz)
241  real, intent(inout):: cy(bd%isd:bd%ied ,bd%js:bd%je+1, npz)
242  real, intent(inout),dimension(bd%is:bd%ie,bd%js:bd%je,npz):: pkz
243 
244  type(fv_grid_type), intent(INOUT), target :: gridstruct
245  type(fv_flags_type), intent(IN), target :: flagstruct
246  type(fv_nest_type), intent(INOUT) :: neststruct
247  type(fv_diag_type), intent(IN) :: idiag
248  type(domain2d), intent(INOUT) :: domain
249 
250  real, allocatable, dimension(:,:,:):: pem, heat_source
251 ! Auto 1D & 2D arrays:
252  real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: ws3, z_rat
253  real:: dp_ref(npz)
254  real:: zs(bd%isd:bd%ied,bd%jsd:bd%jed)
255  real:: p1d(bd%is:bd%ie)
256  real:: om2d(bd%is:bd%ie,npz)
257  real wbuffer(npy+2,npz)
258  real ebuffer(npy+2,npz)
259  real nbuffer(npx+2,npz)
260  real sbuffer(npx+2,npz)
261 ! ---- For external mode:
262  real divg2(bd%is:bd%ie+1,bd%js:bd%je+1)
263  real wk(bd%isd:bd%ied,bd%jsd:bd%jed)
264  real fz(bd%is: bd%ie+1,bd%js: bd%je+1)
265  real heat_s(bd%is:bd%ie,bd%js:bd%je)
266 ! new array for stochastic kinetic energy backscatter (SKEB)
267  real diss_e(bd%is:bd%ie,bd%js:bd%je)
268  real damp_vt(npz+1)
269  integer nord_v(npz+1)
270 !-------------------------------------
271  integer :: hord_m, hord_v, hord_t, hord_p
272  integer :: nord_k, nord_w, nord_t
273  integer :: ms
274 !---------------------------------------
275  integer :: i,j,k, it, iq, n_con, nf_ke
276  integer :: iep1, jep1
277  real :: beta, beta_d, d_con_k, damp_w, damp_t, kgb, cv_air
278  real :: dt, dt2, rdt
279  real :: d2_divg
280  real :: k1k, rdg, dtmp, delt
281  real :: recip_k_split_n_split
282  real :: reg_bc_update_time
283  logical :: last_step, remap_step
284  logical used
285  real :: split_timestep_bc
286 
287  integer :: is, ie, js, je
288  integer :: isd, ied, jsd, jed
289 
290  is = bd%is
291  ie = bd%ie
292  js = bd%js
293  je = bd%je
294  isd = bd%isd
295  ied = bd%ied
296  jsd = bd%jsd
297  jed = bd%jed
298 
299 #ifdef SW_DYNAMICS
300  peln1 = 0.
301 #else
302  peln1 = log(ptop)
303 #endif
304  ptk = ptop ** akap
305  dt = bdt / real(n_split)
306  dt2 = 0.5*dt
307  rdt = 1.0/dt
308  ms = max(1, flagstruct%m_split/2)
309  beta = flagstruct%beta
310  rdg = -rdgas / grav
311  cv_air = cp_air - rdgas
312  recip_k_split_n_split=1./real(flagstruct%k_split*n_split)
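! Note: dt = bdt/n_split is the acoustic (small) time step; dt2 is the half
! step used by the C-grid calls (c_sw, Riem_Solver_C, p_grad_c) below, and
! rdt = 1/dt converts accumulated quantities back to rates.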
313 
314 ! Indexes:
315  iep1 = ie + 1
316  jep1 = je + 1
317 
318  if ( .not.hydrostatic ) then
319 
320  rgrav = 1.0/grav
321  k1k = akap / (1.-akap) ! rg/Cv=0.4
322 
323 !$OMP parallel do default(none) shared(npz,dp_ref,ak,bk)
324  do k=1,npz
325  dp_ref(k) = ak(k+1)-ak(k) + (bk(k+1)-bk(k))*1.e5
326  enddo
327 
328 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,zs,phis,rgrav)
329  do j=jsd,jed
330  do i=isd,ied
331  zs(i,j) = phis(i,j) * rgrav
332  enddo
333  enddo
334  endif
335 
336 !
337  if ( init_step ) then ! Start of the big dynamic time stepping
338 
339  allocate( gz(isd:ied, jsd:jed ,npz+1) )
340  call init_ijk_mem(isd,ied, jsd,jed, npz+1, gz, huge_r)
341  allocate( pkc(isd:ied, jsd:jed ,npz+1) )
342  allocate( ptc(isd:ied, jsd:jed ,npz ) )
343  allocate( crx(is :ie+1, jsd:jed, npz) )
344  allocate( xfx(is :ie+1, jsd:jed, npz) )
345  allocate( cry(isd:ied, js :je+1, npz) )
346  allocate( yfx(isd:ied, js :je+1, npz) )
347  allocate( divgd(isd:ied+1,jsd:jed+1,npz) )
348  allocate( delpc(isd:ied, jsd:jed ,npz ) )
349 ! call init_ijk_mem(isd,ied, jsd,jed, npz, delpc, 0.)
350  allocate( ut(isd:ied, jsd:jed, npz) )
351 ! call init_ijk_mem(isd,ied, jsd,jed, npz, ut, 0.)
352  allocate( vt(isd:ied, jsd:jed, npz) )
353 ! call init_ijk_mem(isd,ied, jsd,jed, npz, vt, 0.)
354 
355  if ( .not. hydrostatic ) then
356  allocate( zh(isd:ied, jsd:jed, npz+1) )
357 ! call init_ijk_mem(isd,ied, jsd,jed, npz+1, zh, huge_r )
358  allocate ( pk3(isd:ied,jsd:jed,npz+1) )
359  call init_ijk_mem(isd,ied, jsd,jed, npz+1, pk3, huge_r )
360  endif
361  if ( beta > near0 ) then
362  allocate( du(isd:ied, jsd:jed+1,npz) )
363  call init_ijk_mem(isd,ied, jsd,jed+1, npz, du, 0.)
364  allocate( dv(isd:ied+1,jsd:jed, npz) )
365  call init_ijk_mem(isd,ied+1, jsd,jed , npz, dv, 0.)
366  endif
367 !$OMP parallel do default(none) shared(is,ie,js,je,npz,diss_est)
368  do k=1,npz
369  do j=js,je
370  do i=is,ie
371  diss_est(i,j,k) = 0.
372  enddo
373  enddo
374  enddo
375  endif ! end init_step
376 
377 ! Empty the "flux capacitors"
378  call init_ijk_mem(is, ie+1, js, je, npz, mfx, 0.)
379  call init_ijk_mem(is, ie , js, je+1, npz, mfy, 0.)
380  call init_ijk_mem(is, ie+1, jsd, jed, npz, cx, 0.)
381  call init_ijk_mem(isd, ied, js, je+1, npz, cy, 0.)
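! Note: mfx/mfy and cx/cy are zeroed here and then accumulated by d_sw over
! all n_split sub-steps; the accumulated mass fluxes and Courant numbers are
! presumably consumed by the tracer transport and vertical remapping
! performed outside this routine.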
382 
383  if ( flagstruct%d_con > 1.0e-5 ) then
384  allocate( heat_source(isd:ied, jsd:jed, npz) )
385  call init_ijk_mem(isd, ied, jsd, jed, npz, heat_source, 0.)
386  endif
387 
388  if ( flagstruct%convert_ke .or. flagstruct%vtdm4> 1.e-4 ) then
389  n_con = npz
390  else
391  if ( flagstruct%d2_bg_k1 < 1.e-3 ) then
392  n_con = 0
393  else
394  if ( flagstruct%d2_bg_k2 < 1.e-3 ) then
395  n_con = 1
396  else
397  n_con = 2
398  endif
399  endif
400  endif
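! n_con is the number of layers, counted from the model top, to which the
! dissipative-heating term accumulated in heat_source is applied after the
! acoustic loop (see the d_con block near the end of this routine).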
401 
402 !-----------------------------------------------------
403  do it=1,n_split
404 !-----------------------------------------------------
405  n_step = it
406 #ifdef ROT3
407  call start_group_halo_update(i_pack(8), u, v, domain, gridtype=dgrid_ne)
408 #endif
409  if ( flagstruct%breed_vortex_inline .or. it==n_split ) then
410  remap_step = .true.
411  else
412  remap_step = .false.
413  endif
414 
415  if ( flagstruct%fv_debug ) then
416  if(is_master()) write(*,*) 'n_split loop, it=', it
417  if ( .not. flagstruct%hydrostatic ) &
418  call prt_mxm('delz', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
419  endif
420 
421  if (gridstruct%nested) then
422  !First split timestep has split_timestep_BC = n_split*k_split
423  ! to do time-extrapolation on BCs.
424  split_timestep_bc = real(n_split*flagstruct%k_split+neststruct%nest_timestep)
425  endif
426 
427  if ( nq > 0 ) then
428  call timing_on('COMM_TOTAL')
429  call timing_on('COMM_TRACER')
430  if ( flagstruct%inline_q ) then
431  call start_group_halo_update(i_pack(10), q, domain)
432  endif
433  call timing_off('COMM_TRACER')
434  call timing_off('COMM_TOTAL')
435  endif
436 
437  if ( .not. hydrostatic ) then
438  call timing_on('COMM_TOTAL')
439  call start_group_halo_update(i_pack(7), w, domain)
440  call timing_off('COMM_TOTAL')
441 
442  if ( it==1 ) then
443  if (gridstruct%nested .or. gridstruct%regional) then
444 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,gz,zs,delz)
445  do j=jsd,jed
446  do i=isd,ied
447  gz(i,j,npz+1) = zs(i,j)
448  enddo
449  do k=npz,1,-1
450  do i=isd,ied
451  gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k)
452  enddo
453  enddo
454  enddo
455  else
456 !$OMP parallel do default(none) shared(is,ie,js,je,npz,gz,zs,delz)
457  do j=js,je
458  do i=is,ie
459  gz(i,j,npz+1) = zs(i,j)
460  enddo
461  do k=npz,1,-1
462  do i=is,ie
463  gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k)
464  enddo
465  enddo
466  enddo
467  endif
468  call timing_on('COMM_TOTAL')
469  call start_group_halo_update(i_pack(5), gz, domain)
470  call timing_off('COMM_TOTAL')
471  endif
472 
473  endif
474 
475 #ifdef SW_DYNAMICS
476  if (test_case>1) then
477 #ifdef USE_OLD
478  if (test_case==9) call case9_forcing1(phis, time_total)
479 #endif
480 #endif
481 
482  if ( it==1 ) then
483  call timing_on('COMM_TOTAL')
484  call complete_group_halo_update(i_pack(1), domain)
485  call timing_off('COMM_TOTAL')
486  beta_d = 0.
487  else
488  beta_d = beta
489  endif
490 
491  if ( it==n_split .and. end_step ) then
492  if ( flagstruct%use_old_omega ) then
493  allocate ( pem(is-1:ie+1,npz+1,js-1:je+1) )
494 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pem,delp,ptop)
495  do j=js-1,je+1
496  do i=is-1,ie+1
497  pem(i,1,j) = ptop
498  enddo
499  do k=1,npz
500  do i=is-1,ie+1
501  pem(i,k+1,j) = pem(i,k,j) + delp(i,j,k)
502  enddo
503  enddo
504  enddo
505  endif
506  last_step = .true.
507  else
508  last_step = .false.
509  endif
510 
511  call timing_on('COMM_TOTAL')
512  call complete_group_halo_update(i_pack(8), domain)
513  if( .not. hydrostatic ) &
514  call complete_group_halo_update(i_pack(7), domain)
515  call timing_off('COMM_TOTAL')
516 
517  call timing_on('c_sw')
518 !$OMP parallel do default(none) shared(npz,isd,jsd,delpc,delp,ptc,pt,u,v,w,uc,vc,ua,va, &
519 !$OMP omga,ut,vt,divgd,flagstruct,dt2,hydrostatic,bd, &
520 !$OMP gridstruct)
521  do k=1,npz
522  call c_sw(delpc(isd,jsd,k), delp(isd,jsd,k), ptc(isd,jsd,k), &
523  pt(isd,jsd,k), u(isd,jsd,k), v(isd,jsd,k), &
524  w(isd:,jsd:,k), uc(isd,jsd,k), vc(isd,jsd,k), &
525  ua(isd,jsd,k), va(isd,jsd,k), omga(isd,jsd,k), &
526  ut(isd,jsd,k), vt(isd,jsd,k), divgd(isd,jsd,k), &
527  flagstruct%nord, dt2, hydrostatic, .true., bd, &
528  gridstruct, flagstruct)
529  enddo
530  call timing_off('c_sw')
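! Note: the C-grid winds uc/vc advanced above over the half step dt2 (and
! corrected by p_grad_c below) serve as time-centered advective winds for
! the full D-grid step in d_sw.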
531  if ( flagstruct%nord > 0 ) then
532  call timing_on('COMM_TOTAL')
533  call start_group_halo_update(i_pack(3), divgd, domain, position=corner)
534  call timing_off('COMM_TOTAL')
535  endif
536 
537  if (gridstruct%nested) then
538  call nested_grid_bc_apply_intt(delpc, &
539  0, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
540  neststruct%delp_bc, bctype=neststruct%nestbctype)
541 #ifndef SW_DYNAMICS
542  call nested_grid_bc_apply_intt(ptc, &
543  0, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
544  neststruct%pt_bc, bctype=neststruct%nestbctype )
545 #endif
546  endif
547 
548  if (flagstruct%regional) then
549  reg_bc_update_time=current_time_in_seconds+(0.5+(it-1))*dt
550  call regional_boundary_update(delpc, 'delp', &
551  isd, ied, jsd, jed, npz, &
552  is, ie, js, je, &
553  isd, ied, jsd, jed, &
554  reg_bc_update_time )
555 #ifndef SW_DYNAMICS
556  call regional_boundary_update(ptc, 'pt', &
557  isd, ied, jsd, jed, npz, &
558  is, ie, js, je, &
559  isd, ied, jsd, jed, &
560  reg_bc_update_time )
561 #endif
562  endif
563 
564  if ( hydrostatic ) then
565  call geopk(ptop, pe, peln, delpc, pkc, gz, phis, ptc, &
566 #ifdef MULTI_GASES
567  kapad, &
568 #endif
569  q_con, pkz, npz, akap, .true., &
570  gridstruct%nested, .false., npx, npy, flagstruct%a2b_ord, bd)
571  else
572 #ifndef SW_DYNAMICS
573  if ( it == 1 ) then
574 
575  call timing_on('COMM_TOTAL')
576  call complete_group_halo_update(i_pack(5), domain)
577  call timing_off('COMM_TOTAL')
578 
579 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,zh,gz)
580  do k=1,npz+1
581  do j=jsd,jed
582  do i=isd,ied
583 ! Save edge heights for update_dz_d
584  zh(i,j,k) = gz(i,j,k)
585  enddo
586  enddo
587  enddo
588 
589  else
590 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,zh,gz)
591  do k=1, npz+1
592  do j=jsd,jed
593  do i=isd,ied
594  gz(i,j,k) = zh(i,j,k)
595  enddo
596  enddo
597  enddo
598  endif
599  call timing_on('UPDATE_DZ_C')
600  call update_dz_c(is, ie, js, je, npz, ng, dt2, dp_ref, zs, gridstruct%area, ut, vt, gz, ws3, &
601  npx, npy, gridstruct%sw_corner, gridstruct%se_corner, &
602  gridstruct%ne_corner, gridstruct%nw_corner, bd, gridstruct%grid_type)
603  call timing_off('UPDATE_DZ_C')
604 
605  call timing_on('Riem_Solver')
606  call riem_solver_c( ms, dt2, is, ie, js, je, npz, ng, &
607  akap, cappa, cp, &
608 #ifdef MULTI_GASES
609  kapad, &
610 #endif
611  ptop, phis, omga, ptc, &
612  q_con, delpc, gz, pkc, ws3, flagstruct%p_fac, &
613  flagstruct%a_imp, flagstruct%scale_z )
614  call timing_off('Riem_Solver')
615 
616  if (gridstruct%nested) then
617  call nested_grid_bc_apply_intt(delz, &
618  0, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
619  neststruct%delz_bc, bctype=neststruct%nestbctype )
620  endif
621 
622  if (flagstruct%regional) then
623  reg_bc_update_time=current_time_in_seconds+(0.5+(it-1))*dt
624  call regional_boundary_update(delz, 'delz', &
625  isd, ied, jsd, jed, ubound(delz,3), &
626  is, ie, js, je, &
627  isd, ied, jsd, jed, &
628  reg_bc_update_time )
629  endif
630 
631  if (gridstruct%nested .or. flagstruct%regional) then
632  !Compute gz/pkc
633  !NOTE: nominally only need to compute quantities one out in the halo for p_grad_c
634  !(instead of entire halo)
635 
636  call nest_halo_nh(ptop, grav, akap, cp, delpc, delz, ptc, phis, &
637 #ifdef MULTI_GASES
638  q, &
639 #endif
640 #ifdef USE_COND
641  q_con, &
642 #ifdef MOIST_CAPPA
643  cappa, &
644 #endif
645 #endif
646  pkc, gz, pk3, &
647  npx, npy, npz, gridstruct%nested, .false., .false., .false., bd, flagstruct%regional)
648  endif
649 #endif SW_DYNAMICS
650 
651  endif ! end hydro check
652 
653  call p_grad_c(dt2, npz, delpc, pkc, gz, uc, vc, bd, gridstruct%rdxc, gridstruct%rdyc, hydrostatic)
654 
655  call timing_on('COMM_TOTAL')
656  call start_group_halo_update(i_pack(9), uc, vc, domain, gridtype=cgrid_ne)
657  call timing_off('COMM_TOTAL')
658 #ifdef SW_DYNAMICS
659 #ifdef USE_OLD
660  if (test_case==9) call case9_forcing2(phis)
661 #endif
662  endif !test_case>1
663 #endif
664 
665  call timing_on('COMM_TOTAL')
666  if (flagstruct%inline_q .and. nq>0) call complete_group_halo_update(i_pack(10), domain)
667  if (flagstruct%nord > 0) call complete_group_halo_update(i_pack(3), domain)
668  call complete_group_halo_update(i_pack(9), domain)
669 
670  call timing_off('COMM_TOTAL')
671  if (gridstruct%nested) then
672  !On a nested grid we have to do SOMETHING with uc and vc in
673  ! the boundary halo, particularly at the corners of the
674  ! domain and of each processor element. We must either
675  ! apply an interpolated BC, or extrapolate into the
676  ! boundary halo
677  ! NOTE:
678  !The update_domains calls for uc and vc need to go BEFORE the BCs to ensure cross-restart
679  !bitwise-consistent solutions when doing the spatial extrapolation; should not make a
680  !difference for interpolated BCs from the coarse grid.
681 
682 
683  call nested_grid_bc_apply_intt(vc, &
684  0, 1, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
685  neststruct%vc_bc, bctype=neststruct%nestbctype )
686  call nested_grid_bc_apply_intt(uc, &
687  1, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
688  neststruct%uc_bc, bctype=neststruct%nestbctype )
689 
690  call nested_grid_bc_apply_intt(divgd, &
691  1, 1, npx, npy, npz, bd, split_timestep_bc, real(n_split*flagstruct%k_split), &
692  neststruct%divg_bc, bctype=neststruct%nestbctype )
693 
694  end if
695 
696  if (flagstruct%regional) then
697 
698  call exch_uv(domain, bd, npz, vc, uc)
699 
700  reg_bc_update_time=current_time_in_seconds+(0.5+(it-1))*dt
701  call regional_boundary_update(vc, 'vc', &
702  isd, ied, jsd, jed+1, npz, &
703  is, ie, js, je, &
704  isd, ied, jsd, jed, &
705  reg_bc_update_time )
706  call regional_boundary_update(uc, 'uc', &
707  isd, ied+1, jsd, jed, npz, &
708  is, ie, js, je, &
709  isd, ied, jsd, jed, &
710  reg_bc_update_time )
711 !!! Currently divgd is always 0.0 in the regional domain boundary area.
712  reg_bc_update_time=current_time_in_seconds+(it-1)*dt
713  call regional_boundary_update(divgd, 'divgd', &
714  isd, ied+1, jsd, jed+1, npz, &
715  is, ie, js, je, &
716  isd, ied, jsd, jed, &
717  reg_bc_update_time )
718  endif
719 
720  if ( flagstruct%inline_q ) then
721  if ( gridstruct%nested ) then
722  do iq=1,nq
723  call nested_grid_bc_apply_intt(q(isd:ied,jsd:jed,:,iq), &
724  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
725  neststruct%q_bc(iq), bctype=neststruct%nestbctype )
726  end do
727  endif
728 
729  if (flagstruct%regional) then
730  reg_bc_update_time=current_time_in_seconds+(it-1)*dt
731  do iq=1,nq
732  call regional_boundary_update(q(:,:,:,iq), 'q', &
733  isd, ied, jsd, jed, npz, &
734  is, ie, js, je, &
735  isd, ied, jsd, jed, &
736  reg_bc_update_time )
737  enddo
738  endif
739 
740  endif
741 
742  if (flagstruct%regional) call exch_uv(domain, bd, npz, vc, uc)
743  if (first_call .and. is_master() .and. last_step) write(6,*) 'Sponge layer divergence damping coefficient:'
744 
745  call timing_on('d_sw')
746 !$OMP parallel do default(none) shared(npz,flagstruct,nord_v,pfull,damp_vt,hydrostatic,last_step, &
747 !$OMP is,ie,js,je,isd,ied,jsd,jed,omga,delp,gridstruct,npx,npy, &
748 !$OMP ng,zh,vt,ptc,pt,u,v,w,uc,vc,ua,va,divgd,mfx,mfy,cx,cy, &
749 !$OMP crx,cry,xfx,yfx,q_con,zvir,sphum,nq,q,dt,bd,rdt,iep1,jep1, &
750 !$OMP heat_source,diss_est,ptop,first_call) &
751 !$OMP private(nord_k, nord_w, nord_t, damp_w, damp_t, d2_divg, &
752 !$OMP d_con_k,kgb, hord_m, hord_v, hord_t, hord_p, wk, heat_s,diss_e, z_rat)
753  do k=1,npz
754  hord_m = flagstruct%hord_mt
755  hord_t = flagstruct%hord_tm
756  hord_v = flagstruct%hord_vt
757  hord_p = flagstruct%hord_dp
758  nord_k = flagstruct%nord
759 
760 ! if ( k==npz ) then
761  kgb = flagstruct%ke_bg
762 ! else
763 ! kgb = 0.
764 ! endif
765 
766  nord_v(k) = min(2, flagstruct%nord)
767 ! d2_divg = min(0.20, flagstruct%d2_bg*(1.-3.*tanh(0.1*log(pfull(k)/pfull(npz)))))
768  d2_divg = min(0.20, flagstruct%d2_bg)
769 
770  if ( flagstruct%do_vort_damp ) then
771  damp_vt(k) = flagstruct%vtdm4 ! for delp, delz, and vorticity
772  else
773  damp_vt(k) = 0.
774  endif
775 
776  nord_w = nord_v(k)
777  nord_t = nord_v(k)
778  damp_w = damp_vt(k)
779  damp_t = damp_vt(k)
780  d_con_k = flagstruct%d_con
781 
782  if ( npz==1 .or. flagstruct%n_sponge<0 ) then
783  d2_divg = flagstruct%d2_bg
784  else
785 ! Sponge layers with del-2 damping on divergence, vorticity, w, z, and air mass (delp).
786 ! no special damping of potential temperature in sponge layers
787 ! enhanced del-2 divergence damping has same vertical structure as Rayleigh
788 ! damping if d2_bg_k2<=0.
789  if (flagstruct%d2_bg_k2 > 0) then ! old version, only applied at top two or three levels
790 
791  if ( k==1 ) then
792 ! Divergence damping:
793  nord_k=0; d2_divg = max(0.01, flagstruct%d2_bg, flagstruct%d2_bg_k1)
794 ! Vertical velocity:
795  nord_w=0; damp_w = d2_divg
796  if ( flagstruct%do_vort_damp ) then
797 ! damping on delp and vorticity:
798  nord_v(k)=0;
799 #ifndef HIWPP
800  damp_vt(k) = 0.5*d2_divg
801 #endif
802  endif
803  d_con_k = 0.
804  elseif ( k==2 .and. flagstruct%d2_bg_k2>0.01 ) then
805  nord_k=0; d2_divg = max(flagstruct%d2_bg, flagstruct%d2_bg_k2)
806  nord_w=0; damp_w = d2_divg
807  if ( flagstruct%do_vort_damp ) then
808  nord_v(k)=0;
809 #ifndef HIWPP
810  damp_vt(k) = 0.5*d2_divg
811 #endif
812  endif
813  d_con_k = 0.
814  elseif ( k==3 .and. flagstruct%d2_bg_k2>0.05 ) then
815  nord_k=0; d2_divg = max(flagstruct%d2_bg, 0.2*flagstruct%d2_bg_k2)
816  nord_w=0; damp_w = d2_divg
817  d_con_k = 0.
818  endif
819  else ! new version, uses d2_bg_k1 and sponge layer vertical structure
820  if ( pfull(k) < flagstruct%rf_cutoff ) then
821  nord_k=0; nord_w=0
822  d2_divg = max(flagstruct%d2_bg, flagstruct%d2_bg_k1* &
823  sin(0.5*pi*log(flagstruct%rf_cutoff/pfull(k))/log(flagstruct%rf_cutoff/ptop))**2)
824  if (first_call .and. is_master() .and. last_step) write(6,*) k, 0.01*pfull(k), d2_divg
825  damp_w = d2_divg
826  if ( flagstruct%do_vort_damp ) then
827 ! damping on delp and vorticity
828  nord_v(k)=0
829  damp_vt(k) = 0.5*d2_divg
830  endif
831  endif
832  endif
833 
834  endif
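! In the "new version" branch above, the sponge-layer divergence damping
! coefficient follows
!   d2_divg = max( d2_bg, d2_bg_k1 *
!             sin( 0.5*pi*log(rf_cutoff/pfull(k))/log(rf_cutoff/ptop) )**2 )
! for pfull(k) < rf_cutoff, i.e. the same vertical profile as the Rayleigh
! damping, as noted in the comment above.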
835 
836  if( hydrostatic .and. (.not.flagstruct%use_old_omega) .and. last_step ) then
837 ! Average horizontal "convergence" to cell center
838  do j=js,je
839  do i=is,ie
840  omga(i,j,k) = delp(i,j,k)
841  enddo
842  enddo
843  endif
844 
845 !--- external mode divergence damping ---
846  if ( flagstruct%d_ext > 0. ) &
847  call a2b_ord2(delp(isd,jsd,k), wk, gridstruct, npx, npy, is, &
848  ie, js, je, ng, .false.)
849 
850  if ( .not.hydrostatic .and. flagstruct%do_f3d ) then
851 ! Correction factor for 3D Coriolis force
852  do j=jsd,jed
853  do i=isd,ied
854  z_rat(i,j) = 1. + (zh(i,j,k)+zh(i,j,k+1))/radius
855  enddo
856  enddo
857  endif
858 
859  call d_sw(vt(isd,jsd,k), delp(isd,jsd,k), ptc(isd,jsd,k), pt(isd,jsd,k), &
860  u(isd,jsd,k), v(isd,jsd,k), w(isd:,jsd:,k), uc(isd,jsd,k), &
861  vc(isd,jsd,k), ua(isd,jsd,k), va(isd,jsd,k), divgd(isd,jsd,k), &
862  mfx(is, js, k), mfy(is, js, k), cx(is, jsd,k), cy(isd,js, k), &
863  crx(is, jsd,k), cry(isd,js, k), xfx(is, jsd,k), yfx(isd,js, k), &
864 #ifdef USE_COND
865  q_con(isd:,jsd:,k), z_rat(isd,jsd), &
866 #else
867  q_con(isd:,jsd:,1), z_rat(isd,jsd), &
868 #endif
869  kgb, heat_s, diss_e,zvir, sphum, nq, q, k, npz, flagstruct%inline_q, dt, &
870  flagstruct%hord_tr, hord_m, hord_v, hord_t, hord_p, &
871  nord_k, nord_v(k), nord_w, nord_t, flagstruct%dddmp, d2_divg, flagstruct%d4_bg, &
872  damp_vt(k), damp_w, damp_t, d_con_k, hydrostatic, gridstruct, flagstruct, bd)
873 
874  if( hydrostatic .and. (.not.flagstruct%use_old_omega) .and. last_step ) then
875 ! Average horizontal "convergence" to cell center
876  do j=js,je
877  do i=is,ie
878  omga(i,j,k) = omga(i,j,k)*(xfx(i,j,k)-xfx(i+1,j,k)+yfx(i,j,k)-yfx(i,j+1,k))*gridstruct%rarea(i,j)*rdt
879  enddo
880  enddo
881  endif
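! Note: when use_old_omega is .false., omga was preloaded with delp just
! before d_sw and is converted here to an omega estimate: delp times the
! horizontal flux convergence per unit area (rarea) per unit time (rdt).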
882 
883  if ( flagstruct%d_ext > 0. ) then
884  do j=js,jep1
885  do i=is,iep1
886  ptc(i,j,k) = wk(i,j) ! delp at cell corners
887  enddo
888  enddo
889  endif
890  if ( flagstruct%d_con > 1.0e-5 .OR. flagstruct%do_skeb ) then
891 ! if ( flagstruct%d_con > 1.0E-5 ) then
892 ! Average horizontal "convergence" to cell center
893  do j=js,je
894  do i=is,ie
895  heat_source(i,j,k) = heat_source(i,j,k) + heat_s(i,j)
896  diss_est(i,j,k) = diss_est(i,j,k) + diss_e(i,j)
897  enddo
898  enddo
899  endif
900  enddo ! end OpenMP k-loop
901 
902  if (flagstruct%regional) then
903  call exch_uv(domain, bd, npz, vc, uc)
904  call exch_uv(domain, bd, npz, u, v )
905  endif
906  call timing_off('d_sw')
907 
908  if( flagstruct%fill_dp ) call mix_dp(hydrostatic, w, delp, pt, npz, ak, bk, .false., flagstruct%fv_debug, bd)
909 
910  call timing_on('COMM_TOTAL')
911  call start_group_halo_update(i_pack(1), delp, domain, complete=.false.)
912  call start_group_halo_update(i_pack(1), pt, domain, complete=.true.)
913 #ifdef USE_COND
914  call start_group_halo_update(i_pack(11), q_con, domain)
915 #endif
916  call timing_off('COMM_TOTAL')
917 
918  if ( flagstruct%d_ext > 0. ) then
919  d2_divg = flagstruct%d_ext * gridstruct%da_min_c
920 !$OMP parallel do default(none) shared(is,iep1,js,jep1,npz,wk,ptc,divg2,vt,d2_divg)
921  do j=js,jep1
922  do i=is,iep1
923  wk(i,j) = ptc(i,j,1)
924  divg2(i,j) = wk(i,j)*vt(i,j,1)
925  enddo
926  do k=2,npz
927  do i=is,iep1
928  wk(i,j) = wk(i,j) + ptc(i,j,k)
929  divg2(i,j) = divg2(i,j) + ptc(i,j,k)*vt(i,j,k)
930  enddo
931  enddo
932  do i=is,iep1
933  divg2(i,j) = d2_divg*divg2(i,j)/wk(i,j)
934  enddo
935  enddo
936  else
937  divg2(:,:) = 0.
938  endif
939 
940  call timing_on('COMM_TOTAL')
941  call complete_group_halo_update(i_pack(1), domain)
942 #ifdef USE_COND
943  call complete_group_halo_update(i_pack(11), domain)
944 #endif
945  call timing_off('COMM_TOTAL')
946  if ( flagstruct%fv_debug ) then
947  if ( .not. flagstruct%hydrostatic ) &
948  call prt_mxm('delz', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
949  endif
950 
951  !Want to move this block into the hydro/nonhydro branch above and merge the two if structures
952  if (gridstruct%nested) then
953  call nested_grid_bc_apply_intt(delp, &
954  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
955  neststruct%delp_bc, bctype=neststruct%nestbctype )
956 
957 #ifndef SW_DYNAMICS
958 
959  call nested_grid_bc_apply_intt(pt, &
960  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
961  neststruct%pt_bc, bctype=neststruct%nestbctype )
962 
963 #ifdef USE_COND
964  call nested_grid_bc_apply_intt(q_con, &
965  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
966  neststruct%q_con_bc, bctype=neststruct%nestbctype )
967 #endif
968 
969 #endif
970 
971  end if
972 
973  if (flagstruct%regional) then
974  reg_bc_update_time=current_time_in_seconds+bdt+(it-1)*dt
975  call regional_boundary_update(delp, 'delp', &
976  isd, ied, jsd, jed, npz, &
977  is, ie, js, je, &
978  isd, ied, jsd, jed, &
979  reg_bc_update_time )
980 #ifndef SW_DYNAMICS
981  call regional_boundary_update(pt, 'pt', &
982  isd, ied, jsd, jed, npz, &
983  is, ie, js, je, &
984  isd, ied, jsd, jed, &
985  reg_bc_update_time )
986 
987 #ifdef USE_COND
988  call regional_boundary_update(q_con, 'q_con', &
989  isd, ied, jsd, jed, npz, &
990  is, ie, js, je, &
991  isd, ied, jsd, jed, &
992  reg_bc_update_time )
993 #endif
994 
995 #endif
996  endif
997 
998  if ( hydrostatic ) then
999  call geopk(ptop, pe, peln, delp, pkc, gz, phis, pt, &
1000 #ifdef MULTI_GASES
1001  kapad, &
1002 #endif
1003  q_con, pkz, npz, akap, .false., &
1004  gridstruct%nested, .true., npx, npy, flagstruct%a2b_ord, bd)
1005  else
1006 #ifndef SW_DYNAMICS
1007  call timing_on('UPDATE_DZ')
1008  call update_dz_d(nord_v, damp_vt, flagstruct%hord_tm, is, ie, js, je, npz, ng, npx, npy, gridstruct%area, &
1009  gridstruct%rarea, dp_ref, zs, zh, crx, cry, xfx, yfx, delz, ws, rdt, gridstruct, bd, flagstruct%lim_fac, &
1010  flagstruct%regional)
1011  call timing_off('UPDATE_DZ')
1012  if ( flagstruct%fv_debug ) then
1013  if ( .not. flagstruct%hydrostatic ) &
1014  call prt_mxm('delz updated', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
1015  endif
1016 
1017  if (idiag%id_ws>0 .and. last_step) then
1018 ! call prt_maxmin('WS', ws, is, ie, js, je, 0, 1, 1., master)
1019  used=send_data(idiag%id_ws, ws, fv_time)
1020  endif
1021  call timing_on('Riem_Solver')
1022 
1023  call riem_solver3(flagstruct%m_split, dt, is, ie, js, je, npz, ng, &
1024  isd, ied, jsd, jed, &
1025  akap, cappa, cp, &
1026 #ifdef MULTI_GASES
1027  kapad, &
1028 #endif
1029  ptop, zs, q_con, w, delz, pt, delp, zh, &
1030  pe, pkc, pk3, pk, peln, ws, &
1031  flagstruct%scale_z, flagstruct%p_fac, flagstruct%a_imp, &
1032  flagstruct%use_logp, remap_step, beta<-0.1)
1033  call timing_off('Riem_Solver')
1034 
1035  call timing_on('COMM_TOTAL')
1036  if ( gridstruct%square_domain ) then
1037  call start_group_halo_update(i_pack(4), zh , domain)
1038  call start_group_halo_update(i_pack(5), pkc, domain, whalo=2, ehalo=2, shalo=2, nhalo=2)
1039  else
1040  call start_group_halo_update(i_pack(4), zh , domain, complete=.false.)
1041  call start_group_halo_update(i_pack(4), pkc, domain, complete=.true.)
1042  endif
1043  call timing_off('COMM_TOTAL')
1044  if ( remap_step ) &
1045  call pe_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pe, delp)
1046 
1047  if ( flagstruct%use_logp ) then
1048  call pln_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pk3, delp)
1049  else
1050  call pk3_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, akap, pk3, delp)
1051  endif
1052  if (gridstruct%nested) then
1053  call nested_grid_bc_apply_intt(delz, &
1054  0, 0, npx, npy, npz, bd, split_timestep_bc+1., real(n_split*flagstruct%k_split), &
1055  neststruct%delz_bc, bctype=neststruct%nestbctype )
1056  endif
1057 
1058  if (flagstruct%regional) then
1059  reg_bc_update_time=current_time_in_seconds+it*dt
1060  call regional_boundary_update(delz, 'delz', &
1061  isd, ied, jsd, jed, ubound(delz,3), &
1062  is, ie, js, je, &
1063  isd, ied, jsd, jed, &
1064  reg_bc_update_time )
1065  endif
1066 
1067  if (gridstruct%nested .or. flagstruct%regional) then
1068  !Compute gz/pkc/pk3; note that now pkc should be nonhydro pert'n pressure
1069 
1070  call nest_halo_nh(ptop, grav, akap, cp, delp, delz, pt, phis, &
1071 #ifdef MULTI_GASES
1072  q, &
1073 #endif
1074 #ifdef USE_COND
1075  q_con, &
1076 #ifdef MOIST_CAPPA
1077  cappa, &
1078 #endif
1079 #endif
1080  pkc, gz, pk3, npx, npy, npz, gridstruct%nested, .true., .true., .true., bd, flagstruct%regional)
1081  endif
1082 
1083  call timing_on('COMM_TOTAL')
1084  call complete_group_halo_update(i_pack(4), domain)
1085  call timing_off('COMM_TOTAL')
1086 !$OMP parallel do default(none) shared(is,ie,js,je,npz,gz,zh,grav)
1087  do k=1,npz+1
1088  do j=js-2,je+2
1089  do i=is-2,ie+2
1090  gz(i,j,k) = zh(i,j,k)*grav
1091  enddo
1092  enddo
1093  enddo
1094  if ( gridstruct%square_domain ) then
1095  call timing_on('COMM_TOTAL')
1096  call complete_group_halo_update(i_pack(5), domain)
1097  call timing_off('COMM_TOTAL')
1098  endif
1099 #endif SW_DYNAMICS
1100  endif ! end hydro check
1101 
1102 #ifdef SW_DYNAMICS
1103  if (test_case > 1) then
1104 #else
1105  if ( remap_step .and. hydrostatic ) then
1106 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pk,pkc)
1107  do k=1,npz+1
1108  do j=js,je
1109  do i=is,ie
1110  pk(i,j,k) = pkc(i,j,k)
1111  enddo
1112  enddo
1113  enddo
1114  endif
1115 #endif
1116 
1117 !----------------------------
1118 ! Compute pressure gradient:
1119 !----------------------------
1120  call timing_on('PG_D')
1121  if ( hydrostatic ) then
1122  if ( beta > 0. ) then
1123  call grad1_p_update(divg2, u, v, pkc, gz, dt, ng, gridstruct, bd, npx, npy, npz, ptop, beta_d, flagstruct%a2b_ord)
1124  else
1125  call one_grad_p(u, v, pkc, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, ptop, hydrostatic, flagstruct%a2b_ord, flagstruct%d_ext)
1126  endif
1127 
1128  else
1129 
1130 
1131  if ( beta > 0. ) then
1132  call split_p_grad( u, v, pkc, gz, delp, pk3, beta_d, dt, ng, gridstruct, bd, npx, npy, npz, flagstruct%use_logp)
1133  elseif ( beta < -0.1 ) then
1134  call one_grad_p(u, v, pkc, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, ptop, hydrostatic, flagstruct%a2b_ord, flagstruct%d_ext)
1135  else
1136  call nh_p_grad(u, v, pkc, gz, delp, pk3, dt, ng, gridstruct, bd, npx, npy, npz, flagstruct%use_logp)
1137  endif
1138 
1139 #ifdef ROT3
1140  if ( flagstruct%do_f3d ) then
1141 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ua,gridstruct,w,va,isd,ied,jsd,jed)
1142  do k=1,npz
1143  do j=js,je
1144  do i=is,ie
1145  ua(i,j,k) = -gridstruct%w00(i,j)*w(i,j,k)
1146  enddo
1147  enddo
1148  do j=jsd,jed
1149  do i=isd,ied
1150  va(i,j,k) = 0.
1151  enddo
1152  enddo
1153  enddo
1154  call mpp_update_domains(ua, domain, complete=.true.)
1155  call update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, ua, va, u, v, gridstruct, npx, npy, npz, domain)
1156  endif
1157 #endif
1158  endif
1159  call timing_off('PG_D')
1160 
1161 ! *** Inline Rayleigh friction here?
1162  if( flagstruct%RF_fast .and. flagstruct%tau > 0. ) &
1163  call ray_fast(abs(dt), npx, npy, npz, pfull, flagstruct%tau, u, v, w, &
1164  ks, dp_ref, ptop, hydrostatic, flagstruct%rf_cutoff, bd)
1165 
1166 !-------------------------------------------------------------------------------------------------------
1167  if ( flagstruct%breed_vortex_inline ) then
1168  if ( .not. hydrostatic ) then
1169 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pkz,cappa,rdg,delp,delz,pt,k1k)
1170  do k=1,npz
1171  do j=js,je
1172  do i=is,ie
1173 ! Note: pt at this stage is Theta_m
1174 #ifdef MOIST_CAPPA
1175  pkz(i,j,k) = exp(cappa(i,j,k)/(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1176 #else
1177 #ifdef MULTI_GASES
1178  pkz(i,j,k) = exp( k1k*virqd(q(i,j,k,:))/vicvqd(q(i,j,k,:))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1179 #else
1180  pkz(i,j,k) = exp( k1k*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1181 #endif
1182 #endif
1183  enddo
1184  enddo
1185  enddo
1186  endif
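! Note: pkz recomputed above (nonhydrostatic case) is p**kappa diagnosed
! from the equation of state using delp/delz and pt (theta_m at this stage),
! as needed by the inline vortex breeding call below.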
1187 #if defined (ADA_NUDGE)
1188  call breed_slp_inline_ada( it, dt, npz, ak, bk, phis, pe, pk, peln, pkz, &
1189  delp, u, v, pt, q, flagstruct%nwat, zvir, gridstruct, ks, domain, bd )
1190 #else
1191  call breed_slp_inline( it, dt, npz, ak, bk, phis, pe, pk, peln, pkz, delp, u, v, pt, q, &
1192  flagstruct%nwat, zvir, gridstruct, ks, domain, bd, hydrostatic )
1193 #endif
1194  endif
1195 !-------------------------------------------------------------------------------------------------------
1196 
1197  call timing_on('COMM_TOTAL')
1198  if( it==n_split .and. gridstruct%grid_type<4 .and. .not. (gridstruct%nested .or. gridstruct%regional)) then
1199 ! Prevent accumulation of rounding errors at overlapped domain edges:
1200  call mpp_get_boundary(u, v, domain, ebuffery=ebuffer, &
1201  nbufferx=nbuffer, gridtype=dgrid_ne )
1202 !$OMP parallel do default(none) shared(is,ie,js,je,npz,u,nbuffer,v,ebuffer)
1203  do k=1,npz
1204  do i=is,ie
1205  u(i,je+1,k) = nbuffer(i-is+1,k)
1206  enddo
1207  do j=js,je
1208  v(ie+1,j,k) = ebuffer(j-js+1,k)
1209  enddo
1210  enddo
1211 
1212  endif
1213 
1214 #ifndef ROT3
1215  if ( it/=n_split) &
1216  call start_group_halo_update(i_pack(8), u, v, domain, gridtype=dgrid_ne)
1217 #endif
1218  call timing_off('COMM_TOTAL')
1219 
1220 #ifdef SW_DYNAMICS
1221  endif
1222 #endif
1223  if ( gridstruct%nested ) then
1224  neststruct%nest_timestep = neststruct%nest_timestep + 1
1225  endif
1226 
1227 #ifdef SW_DYNAMICS
1228 #else
1229  if ( hydrostatic .and. last_step ) then
1230  if ( flagstruct%use_old_omega ) then
1231 !$OMP parallel do default(none) shared(is,ie,js,je,npz,omga,pe,pem,rdt)
1232  do k=1,npz
1233  do j=js,je
1234  do i=is,ie
1235  omga(i,j,k) = (pe(i,k+1,j) - pem(i,k+1,j)) * rdt
1236  enddo
1237  enddo
1238  enddo
1239 !------------------------------
1240 ! Compute the "advective term"
1241 !------------------------------
1242  call adv_pe(ua, va, pem, omga, gridstruct, bd, npx, npy, npz, ng)
1243  else
1244 !$OMP parallel do default(none) shared(is,ie,js,je,npz,omga) private(om2d)
1245  do j=js,je
1246  do k=1,npz
1247  do i=is,ie
1248  om2d(i,k) = omga(i,j,k)
1249  enddo
1250  enddo
1251  do k=2,npz
1252  do i=is,ie
1253  om2d(i,k) = om2d(i,k-1) + omga(i,j,k)
1254  enddo
1255  enddo
1256  do k=2,npz
1257  do i=is,ie
1258  omga(i,j,k) = om2d(i,k)
1259  enddo
1260  enddo
1261  enddo
1262  endif
1263  if (idiag%id_ws>0 .and. hydrostatic) then
1264 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ws,delz,delp,omga)
1265  do j=js,je
1266  do i=is,ie
1267  ws(i,j) = delz(i,j,npz)/delp(i,j,npz) * omga(i,j,npz)
1268  enddo
1269  enddo
1270  used=send_data(idiag%id_ws, ws, fv_time)
1271  endif
1272  endif
1273 #endif
1274 
1275  if (gridstruct%nested) then
1276 
1277 
1278 
1279 #ifndef SW_DYNAMICS
1280  if (.not. hydrostatic) then
1281  call nested_grid_bc_apply_intt(w, &
1282  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
1283  neststruct%w_bc, bctype=neststruct%nestbctype )
1284  end if
1285 #endif SW_DYNAMICS
1286  call nested_grid_bc_apply_intt(u, &
1287  0, 1, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
1288  neststruct%u_bc, bctype=neststruct%nestbctype )
1289  call nested_grid_bc_apply_intt(v, &
1290  1, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
1291  neststruct%v_bc, bctype=neststruct%nestbctype )
1292 
1293  end if
1294 
1295  if (flagstruct%regional) then
1296 
1297 #ifndef SW_DYNAMICS
1298  if (.not. hydrostatic) then
1299  reg_bc_update_time=current_time_in_seconds+it*dt
1300  call regional_boundary_update(w, 'w', &
1301  isd, ied, jsd, jed, ubound(w,3), &
1302  is, ie, js, je, &
1303  isd, ied, jsd, jed, &
1304  reg_bc_update_time )
1305  endif
1306 #endif SW_DYNAMICS
1307 
1308  call regional_boundary_update(u, 'u', &
1309  isd, ied, jsd, jed+1, npz, &
1310  is, ie, js, je, &
1311  isd, ied, jsd, jed, &
1312  reg_bc_update_time )
1313  call regional_boundary_update(v, 'v', &
1314  isd, ied+1, jsd, jed, npz, &
1315  is, ie, js, je, &
1316  isd, ied, jsd, jed, &
1317  reg_bc_update_time )
1318 
1319  call exch_uv(domain, bd, npz, u, v )
1320  endif
1321 
1322 !-----------------------------------------------------
1323  enddo ! time split loop
1324  first_call=.false.
1325 !-----------------------------------------------------
1326  if ( nq > 0 .and. .not. flagstruct%inline_q ) then
1327  call timing_on('COMM_TOTAL')
1328  call timing_on('COMM_TRACER')
1329  call start_group_halo_update(i_pack(10), q, domain)
1330  call timing_off('COMM_TRACER')
1331  call timing_off('COMM_TOTAL')
1332  endif
1333 
1334  if ( flagstruct%fv_debug ) then
1335  if(is_master()) write(*,*) 'End of n_split loop'
1336  endif
1337 
1338 
1339  if ( n_con/=0 .and. flagstruct%d_con > 1.e-5 ) then
1340  nf_ke = min(3, flagstruct%nord+1)
1341  call del2_cubed(heat_source, cnst_0p20*gridstruct%da_min, gridstruct, domain, npx, npy, npz, nf_ke, bd)
1342 
1343 ! Note: pt here is cp_air*(Virtual_Temperature/pkz), cp_air is constant
1344  if ( hydrostatic ) then
1345 !
1346 ! del(Cp_air*Tm) = - del(KE)
1347 !
1348 !$OMP parallel do default(none) shared(flagstruct,is,ie,js,je,n_con,pt,heat_source,delp,pkz,bdt) &
1349 !$OMP private(dtmp)
1350  do j=js,je
1351  do k=1,n_con ! n_con is usually less than 3;
1352  if ( k<3 ) then
1353  do i=is,ie
1354  pt(i,j,k) = pt(i,j,k) + heat_source(i,j,k)/(cp_air*delp(i,j,k)*pkz(i,j,k))
1355  enddo
1356  else
1357  do i=is,ie
1358  dtmp = heat_source(i,j,k) / (cp_air*delp(i,j,k))
1359  pt(i,j,k) = pt(i,j,k) + sign(min(abs(bdt)*flagstruct%delt_max,abs(dtmp)), dtmp)/pkz(i,j,k)
1360  enddo
1361  endif
1362  enddo
1363  enddo
1364  else
1365 !$OMP parallel do default(none) shared(flagstruct,is,ie,js,je,n_con,pkz,cappa,rdg,delp,delz,pt, &
1366 !$OMP heat_source,k1k,cv_air,bdt) &
1367 !$OMP private(dtmp, delt)
1368  do k=1,n_con
1369  delt = abs(bdt*flagstruct%delt_max)
1370 ! Sponge layers:
1371  if ( k == 1 ) delt = 0.1*delt
1372  if ( k == 2 ) delt = 0.5*delt
1373  do j=js,je
1374  do i=is,ie
1375 #ifdef MOIST_CAPPA
1376  pkz(i,j,k) = exp( cappa(i,j,k)/(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1377 #else
1378 #ifdef MULTI_GASES
1379  pkz(i,j,k) = exp( k1k*virqd(q(i,j,k,:))/vicvqd(q(i,j,k,:))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1380 #else
1381  pkz(i,j,k) = exp( k1k*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1382 #endif
1383 #endif
1384  dtmp = heat_source(i,j,k) / (cv_air*delp(i,j,k))
1385  pt(i,j,k) = pt(i,j,k) + sign(min(delt, abs(dtmp)),dtmp) / pkz(i,j,k)
1386  enddo
1387  enddo
1388  enddo
1389  endif
1390 
1391  endif
1392  if (allocated(heat_source)) deallocate( heat_source ) !If ncon == 0 but d_con > 1.e-5, this would not be deallocated in earlier versions of the code
1393 
1394  if ( end_step ) then
1395  deallocate( gz )
1396  deallocate( ptc )
1397  deallocate( crx )
1398  deallocate( xfx )
1399  deallocate( cry )
1400  deallocate( yfx )
1401  deallocate( divgd )
1402  deallocate( pkc )
1403  deallocate( delpc )
1404 
1405  if( allocated(ut)) deallocate( ut )
1406  if( allocated(vt)) deallocate( vt )
1407  if ( allocated (du) ) deallocate( du )
1408  if ( allocated (dv) ) deallocate( dv )
1409  if ( .not. hydrostatic ) then
1410  deallocate( zh )
1411  if( allocated(pk3) ) deallocate ( pk3 )
1412  endif
1413 
1414  endif
1415  if( allocated(pem) ) deallocate ( pem )
1416 
1417 end subroutine dyn_core
1418 
1419 subroutine pk3_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, akap, pk3, delp)
1420 integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed, npz
1421 real, intent(in):: ptop, akap
1422 real, intent(in ), dimension(isd:ied,jsd:jed,npz):: delp
1423 real, intent(inout), dimension(isd:ied,jsd:jed,npz+1):: pk3
1424 ! Local:
1425 real:: pei(isd:ied)
1426 real:: pej(jsd:jed)
1427 integer:: i,j,k
1428 
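! Note: the two-row halo values of pk3 (pe**akap here) are rebuilt
! hydrostatically by summing delp downward from ptop, first along the
! west/east edges, then along the south/north edges; pln_halo and pe_halo
! below follow the same pattern for log(pe) and pe respectively.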
1429 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,npz,ptop,delp,pk3,akap) &
1430 !$OMP private(pei)
1431  do j=js,je
1432  pei(is-2) = ptop
1433  pei(is-1) = ptop
1434  do k=1,npz
1435  pei(is-2) = pei(is-2) + delp(is-2,j,k)
1436  pei(is-1) = pei(is-1) + delp(is-1,j,k)
1437  pk3(is-2,j,k+1) = exp(akap*log(pei(is-2)))
1438  pk3(is-1,j,k+1) = exp(akap*log(pei(is-1)))
1439  enddo
1440  pei(ie+1) = ptop
1441  pei(ie+2) = ptop
1442  do k=1,npz
1443  pei(ie+1) = pei(ie+1) + delp(ie+1,j,k)
1444  pei(ie+2) = pei(ie+2) + delp(ie+2,j,k)
1445  pk3(ie+1,j,k+1) = exp(akap*log(pei(ie+1)))
1446  pk3(ie+2,j,k+1) = exp(akap*log(pei(ie+2)))
1447  enddo
1448  enddo
1449 
1450 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ptop,delp,pk3,akap) &
1451 !$OMP private(pej)
1452  do i=is-2,ie+2
1453  pej(js-2) = ptop
1454  pej(js-1) = ptop
1455  do k=1,npz
1456  pej(js-2) = pej(js-2) + delp(i,js-2,k)
1457  pej(js-1) = pej(js-1) + delp(i,js-1,k)
1458  pk3(i,js-2,k+1) = exp(akap*log(pej(js-2)))
1459  pk3(i,js-1,k+1) = exp(akap*log(pej(js-1)))
1460  enddo
1461  pej(je+1) = ptop
1462  pej(je+2) = ptop
1463  do k=1,npz
1464  pej(je+1) = pej(je+1) + delp(i,je+1,k)
1465  pej(je+2) = pej(je+2) + delp(i,je+2,k)
1466  pk3(i,je+1,k+1) = exp(akap*log(pej(je+1)))
1467  pk3(i,je+2,k+1) = exp(akap*log(pej(je+2)))
1468  enddo
1469  enddo
1470 
1471 end subroutine pk3_halo
1472 
1473 subroutine pln_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pk3, delp)
1474 integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed, npz
1475 real, intent(in):: ptop
1476 real, intent(in ), dimension(isd:ied,jsd:jed,npz):: delp
1477 real, intent(inout), dimension(isd:ied,jsd:jed,npz+1):: pk3
1478 ! Local:
1479 real:: pet
1480 integer:: i,j,k
1481 
1482 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,npz,ptop,delp,pk3) &
1483 !$OMP private(pet)
1484  do j=js,je
1485  do i=is-2,is-1
1486  pet = ptop
1487  do k=1,npz
1488  pet = pet + delp(i,j,k)
1489  pk3(i,j,k+1) = log(pet)
1490  enddo
1491  enddo
1492  do i=ie+1,ie+2
1493  pet = ptop
1494  do k=1,npz
1495  pet = pet + delp(i,j,k)
1496  pk3(i,j,k+1) = log(pet)
1497  enddo
1498  enddo
1499  enddo
1500 
1501 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ptop,delp,pk3) &
1502 !$OMP private(pet)
1503  do i=is-2,ie+2
1504  do j=js-2,js-1
1505  pet = ptop
1506  do k=1,npz
1507  pet = pet + delp(i,j,k)
1508  pk3(i,j,k+1) = log(pet)
1509  enddo
1510  enddo
1511  do j=je+1,je+2
1512  pet = ptop
1513  do k=1,npz
1514  pet = pet + delp(i,j,k)
1515  pk3(i,j,k+1) = log(pet)
1516  enddo
1517  enddo
1518  enddo
1519 
1520 end subroutine pln_halo
1521 
1522 subroutine pe_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pe, delp)
1523 integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed, npz
1524 real, intent(in):: ptop
1525 real, intent(in ), dimension(isd:ied,jsd:jed,npz):: delp
1526 real, intent(inout), dimension(is-1:ie+1,npz+1,js-1:je+1):: pe
1527 ! Local:
1528 integer:: i,j,k
1529 
1530 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,ptop)
1531  do j=js,je
1532  pe(is-1,1,j) = ptop
1533  pe(ie+1,1,j) = ptop
1534  do k=1,npz
1535  pe(is-1,k+1,j) = pe(is-1,k,j) + delp(is-1,j,k)
1536  pe(ie+1,k+1,j) = pe(ie+1,k,j) + delp(ie+1,j,k)
1537  enddo
1538  enddo
1539 
1540 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,ptop)
1541  do i=is-1,ie+1
1542  pe(i,1,js-1) = ptop
1543  pe(i,1,je+1) = ptop
1544  do k=1,npz
1545  pe(i,k+1,js-1) = pe(i,k,js-1) + delp(i,js-1,k)
1546  pe(i,k+1,je+1) = pe(i,k,je+1) + delp(i,je+1,k)
1547  enddo
1548  enddo
1549 
1550 end subroutine pe_halo
1551 
1552 
1553 subroutine adv_pe(ua, va, pem, om, gridstruct, bd, npx, npy, npz, ng)
1555 integer, intent(in) :: npx, npy, npz, ng
1556 type(fv_grid_bounds_type), intent(IN) :: bd
1557 ! Contra-variant wind components:
1558 real, intent(in), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz):: ua, va
1559 ! Pressure at edges:
1560 real, intent(in) :: pem(bd%is-1:bd%ie+1,1:npz+1,bd%js-1:bd%je+1)
1561 real, intent(inout) :: om(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
1562 type(fv_grid_type), intent(INOUT), target :: gridstruct
1563 
1564 ! Local:
1565 real, dimension(bd%is:bd%ie,bd%js:bd%je):: up, vp
1566 real v3(3,bd%is:bd%ie,bd%js:bd%je)
1567 
1568 real pin(bd%isd:bd%ied,bd%jsd:bd%jed)
1569 real pb(bd%isd:bd%ied,bd%jsd:bd%jed)
1570 
1571 real grad(3,bd%is:bd%ie,bd%js:bd%je)
1572 real pdx(3,bd%is:bd%ie,bd%js:bd%je+1)
1573 real pdy(3,bd%is:bd%ie+1,bd%js:bd%je)
1574 
1575 integer :: i,j,k, n
1576 integer :: is, ie, js, je
1577 
1578  is = bd%is
1579  ie = bd%ie
1580  js = bd%js
1581  je = bd%je
1582 
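! Note: this routine adds the advective term V.grad(pe) to the local
! pressure tendency already stored in om by the caller, completing the
! diagnosis of omega = Dp/Dt; grad(pe) is obtained per cell from the
! Green's-theorem line integral built from pdx/pdy below.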
1583 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ua,va,gridstruct,pem,npx,npy,ng,om) &
1584 !$OMP private(n, pdx, pdy, pin, pb, up, vp, grad, v3)
1585 do k=1,npz
1586  if ( k==npz ) then
1587  do j=js,je
1588  do i=is,ie
1589  up(i,j) = ua(i,j,npz)
1590  vp(i,j) = va(i,j,npz)
1591  enddo
1592  enddo
1593  else
1594  do j=js,je
1595  do i=is,ie
1596  up(i,j) = 0.5*(ua(i,j,k)+ua(i,j,k+1))
1597  vp(i,j) = 0.5*(va(i,j,k)+va(i,j,k+1))
1598  enddo
1599  enddo
1600  endif
1601 
1602  ! Compute Vect wind:
1603  do j=js,je
1604  do i=is,ie
1605  do n=1,3
1606  v3(n,i,j) = up(i,j)*gridstruct%ec1(n,i,j) + vp(i,j)*gridstruct%ec2(n,i,j)
1607  enddo
1608  enddo
1609  enddo
1610 
1611  do j=js-1,je+1
1612  do i=is-1,ie+1
1613  pin(i,j) = pem(i,k+1,j)
1614  enddo
1615  enddo
1616 
1617  ! Compute pe at 4 cell corners:
1618  call a2b_ord2(pin, pb, gridstruct, npx, npy, is, ie, js, je, ng)
1619 
1620 
1621  do j=js,je+1
1622  do i=is,ie
1623  do n=1,3
1624  pdx(n,i,j) = (pb(i,j)+pb(i+1,j))*gridstruct%dx(i,j)*gridstruct%en1(n,i,j)
1625  enddo
1626  enddo
1627  enddo
1628  do j=js,je
1629  do i=is,ie+1
1630  do n=1,3
1631  pdy(n,i,j) = (pb(i,j)+pb(i,j+1))*gridstruct%dy(i,j)*gridstruct%en2(n,i,j)
1632  enddo
1633  enddo
1634  enddo
1635 
1636  ! Compute grad (pe) by Green's theorem
1637  do j=js,je
1638  do i=is,ie
1639  do n=1,3
1640  grad(n,i,j) = pdx(n,i,j+1) - pdx(n,i,j) - pdy(n,i,j) + pdy(n,i+1,j)
1641  enddo
1642  enddo
1643  enddo
1644 
1645  ! Compute inner product: V3 * grad (pe)
1646  do j=js,je
1647  do i=is,ie
1648  om(i,j,k) = om(i,j,k) + 0.5*gridstruct%rarea(i,j)*(v3(1,i,j)*grad(1,i,j) + &
1649  v3(2,i,j)*grad(2,i,j) + v3(3,i,j)*grad(3,i,j))
1650  enddo
1651  enddo
1652 enddo
1653 
1654 end subroutine adv_pe
1655 
1656 
1657 
1658 
1659 subroutine p_grad_c(dt2, npz, delpc, pkc, gz, uc, vc, bd, rdxc, rdyc, hydrostatic)
1661 integer, intent(in):: npz
1662 real, intent(in):: dt2
1663 type(fv_grid_bounds_type), intent(IN) :: bd
1664 real, intent(in), dimension(bd%isd:, bd%jsd: ,: ):: delpc
1667 real, intent(in), dimension(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1):: pkc, gz
1668 real, intent(inout):: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1669 real, intent(inout):: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1670 real, intent(IN) :: rdxc(bd%isd:bd%ied+1,bd%jsd:bd%jed+1)
1671 real, intent(IN) :: rdyc(bd%isd:bd%ied ,bd%jsd:bd%jed)
1672 logical, intent(in):: hydrostatic
1673 ! Local:
1674 real:: wk(bd%is-1:bd%ie+1,bd%js-1:bd%je+1)
1675 integer:: i,j,k
1676 
1677 integer :: is, ie, js, je
1678 
1679  is = bd%is
1680  ie = bd%ie
1681  js = bd%js
1682  je = bd%je
1683 
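! Note: wk holds the layer weight, pkc(k+1)-pkc(k) in the hydrostatic case
! or delpc when nonhydrostatic, and the cross-differenced gz/pkc products
! below form the finite-volume pressure-gradient force used to update the
! C-grid winds over the half step dt2.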
1684 !$OMP parallel do default(none) shared(is,ie,js,je,npz,hydrostatic,pkc,delpc,uc,dt2,rdxc,gz,vc,rdyc) &
1685 !$OMP private(wk)
1686 do k=1,npz
1687 
1688  if ( hydrostatic ) then
1689  do j=js-1,je+1
1690  do i=is-1,ie+1
1691  wk(i,j) = pkc(i,j,k+1) - pkc(i,j,k)
1692  enddo
1693  enddo
1694  else
1695  do j=js-1,je+1
1696  do i=is-1,ie+1
1697  wk(i,j) = delpc(i,j,k)
1698  enddo
1699  enddo
1700  endif
1701 
1702  do j=js,je
1703  do i=is,ie+1
1704  uc(i,j,k) = uc(i,j,k) + dt2*rdxc(i,j) / (wk(i-1,j)+wk(i,j)) * &
1705  ( (gz(i-1,j,k+1)-gz(i,j,k ))*(pkc(i,j,k+1)-pkc(i-1,j,k)) &
1706  + (gz(i-1,j,k) - gz(i,j,k+1))*(pkc(i-1,j,k+1)-pkc(i,j,k)) )
1707  enddo
1708  enddo
1709  do j=js,je+1
1710  do i=is,ie
1711  vc(i,j,k) = vc(i,j,k) + dt2*rdyc(i,j) / (wk(i,j-1)+wk(i,j)) * &
1712  ( (gz(i,j-1,k+1)-gz(i,j,k ))*(pkc(i,j,k+1)-pkc(i,j-1,k)) &
1713  + (gz(i,j-1,k) - gz(i,j,k+1))*(pkc(i,j-1,k+1)-pkc(i,j,k)) )
1714  enddo
1715  enddo
1716 enddo
1717 
1718 end subroutine p_grad_c
1719 
1720 
1721 subroutine nh_p_grad(u, v, pp, gz, delp, pk, dt, ng, gridstruct, bd, npx, npy, npz, use_logp)
1722 integer, intent(IN) :: ng, npx, npy, npz
1723 real, intent(IN) :: dt
1724 logical, intent(in) :: use_logp
1725 type(fv_grid_bounds_type), intent(IN) :: bd
1726 real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
1727 real, intent(inout) :: pp(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1728 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1729 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1730 real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz)
1731 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz)
1732 type(fv_grid_type), intent(INOUT), target :: gridstruct
1733 ! Local:
1734 real wk1(bd%isd:bd%ied, bd%jsd:bd%jed)
1735 real wk(bd%is: bd%ie+1,bd%js: bd%je+1)
1736 real du1, dv1, top_value
1737 integer i,j,k
1738 integer :: is, ie, js, je
1739 integer :: isd, ied, jsd, jed
1740 
1741  is = bd%is
1742  ie = bd%ie
1743  js = bd%js
1744  je = bd%je
1745  isd = bd%isd
1746  ied = bd%ied
1747  jsd = bd%jsd
1748  jed = bd%jed
1749 
1750 if ( use_logp ) then
1751  top_value = peln1
1752 else
1753  top_value = ptk
1754 endif
1755 
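! Note: the full pressure-gradient force is split into a "hydrostatic" part
! built from pk (p**kappa, or log(p) if use_logp) and a nonhydrostatic part
! built from the perturbation pressure pp; pp, pk, and gz are first
! interpolated from cell centers to corners with a2b_ord4, and delp (wk1)
! provides the nonhydrostatic layer weight.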
1756 !$OMP parallel do default(none) shared(top_value,isd,jsd,npz,pp,gridstruct,npx,npy,is,ie,js,je,ng,pk,gz) &
1757 !$OMP private(wk1)
1758 do k=1,npz+1
1759  if ( k==1 ) then
1760  do j=js,je+1
1761  do i=is,ie+1
1762  pp(i,j,1) = 0.
1763  pk(i,j,1) = top_value
1764  enddo
1765  enddo
1766  else
1767  call a2b_ord4(pp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1768  call a2b_ord4(pk(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1769  endif
1770  call a2b_ord4( gz(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1771 enddo
1772 
1773 !$OMP parallel do default(none) shared(is,ie,js,je,npz,delp,gridstruct,npx,npy,ng,isd,jsd, &
1774 !$OMP pk,dt,gz,u,pp,v) &
1775 !$OMP private(wk1, wk, du1, dv1)
1776 do k=1,npz
1777  call a2b_ord4(delp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng)
1778  do j=js,je+1
1779  do i=is,ie+1
1780  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
1781  enddo
1782  enddo
1783  do j=js,je+1
1784  do i=is,ie
1785  ! Gradient from the "hydrostatic" components (pk, gz); the
1786  ! non-hydrostatic (pp) contribution is added just below.
1787  du1 = dt / (wk(i,j)+wk(i+1,j)) * &
1788  ( (gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) + &
1789  (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k)) )
1790 #ifdef GAS_HYDRO_P
1791  du1 = (1.-0.5*(q_con(i,j-1,k)+q_con(i,j,k)))*du1
1792 #endif
1793  ! Non-hydrostatic contribution
1794  u(i,j,k) = (u(i,j,k) + du1 + dt/(wk1(i,j)+wk1(i+1,j)) * &
1795  ((gz(i,j,k+1)-gz(i+1,j,k))*(pp(i+1,j,k+1)-pp(i,j,k)) &
1796  + (gz(i,j,k)-gz(i+1,j,k+1))*(pp(i,j,k+1)-pp(i+1,j,k))))*gridstruct%rdx(i,j)
1797  enddo
1798  enddo
1799  do j=js,je
1800  do i=is,ie+1
1801  ! Current gradient from "hydrostatic" components:
1802  dv1 = dt / (wk(i,j)+wk(i,j+1)) * &
1803  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) + &
1804  (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k)))
1805 #ifdef GAS_HYDRO_P
1806  dv1 = (1.-0.5*(q_con(i-1,j,k)+q_con(i,j,k)))*dv1
1807 #endif
1808  ! Non-hydrostatic contribution
1809  v(i,j,k) = (v(i,j,k) + dv1 + dt/(wk1(i,j)+wk1(i,j+1)) * &
1810  ((gz(i,j,k+1)-gz(i,j+1,k))*(pp(i,j+1,k+1)-pp(i,j,k)) &
1811  + (gz(i,j,k)-gz(i,j+1,k+1))*(pp(i,j,k+1)-pp(i,j+1,k))))*gridstruct%rdy(i,j)
1812  enddo
1813  enddo
1814 
1815 enddo ! end k-loop
1816 end subroutine nh_p_grad
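!---------------------------------------------------------------------------
! Schematic reading of nh_p_grad (interpretive note; du_hydro, du_nonhydro
! and the bracketed "circulation" are shorthand for this note only, not
! identifiers in the code): the wind update is split into a hydrostatic and
! a non-hydrostatic part, both evaluated from corner (B-grid) values
! produced by a2b_ord4:
!
!   u <- ( u + du_hydro + du_nonhydro ) * rdx
!
!   du_hydro    = dt * [circulation of gz vs pk] / ( wk(i,j)  + wk(i+1,j)  )
!   du_nonhydro = dt * [circulation of gz vs pp] / ( wk1(i,j) + wk1(i+1,j) )
!
! with wk = pk(k+1)-pk(k) (corner p**kappa thickness) and wk1 the
! corner-interpolated delp (mass thickness).  At the model top the k==1
! branch sets pp = 0 and pk = peln1 (use_logp) or ptk.
!---------------------------------------------------------------------------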
1817 
1818 
1819 subroutine split_p_grad( u, v, pp, gz, delp, pk, beta, dt, ng, gridstruct, bd, npx, npy, npz, use_logp)
1820 integer, intent(IN) :: ng, npx, npy, npz
1821 real, intent(IN) :: beta, dt
1822 logical, intent(in):: use_logp
1823 type(fv_grid_bounds_type), intent(IN) :: bd
1824 real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
1825 real, intent(inout) :: pp(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1826 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1827 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1828 ! real, intent(inout) :: du(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1829 ! real, intent(inout) :: dv(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1830 real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz)
1831 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz)
1832 type(fv_grid_type), intent(INOUT), target :: gridstruct
1833 ! Local:
1834 real wk1(bd%isd:bd%ied, bd%jsd:bd%jed)
1835 real wk(bd%is: bd%ie+1,bd%js: bd%je+1)
1836 real alpha, top_value
1837 integer i,j,k
1838 integer :: is, ie, js, je
1839 integer :: isd, ied, jsd, jed
1840 
1841  is = bd%is
1842  ie = bd%ie
1843  js = bd%js
1844  je = bd%je
1845  isd = bd%isd
1846  ied = bd%ied
1847  jsd = bd%jsd
1848  jed = bd%jed
1849 
1850 if ( use_logp ) then
1851  top_value = peln1
1852 else
1853  top_value = ptk
1854 endif
1855 
1856 alpha = 1. - beta
1857 
1858 !$OMP parallel do default(none) shared(is,ie,js,je,pp,pk,top_value)
1859 do j=js,je+1
1860  do i=is,ie+1
1861  pp(i,j,1) = 0.
1862  pk(i,j,1) = top_value
1863  enddo
1864 enddo
1865 
1866 !$OMP parallel do default(none) shared(isd,jsd,npz,pp,gridstruct,npx,npy,is,ie,js,je,ng,pk,gz) &
1867 !$OMP private(wk1)
1868 do k=1,npz+1
1869  if ( k/=1 ) then
1870  call a2b_ord4(pp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1871  call a2b_ord4(pk(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1872  endif
1873  call a2b_ord4( gz(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1874 enddo
1875 
1876 !$OMP parallel do default(none) shared(is,ie,js,je,isd,jsd,npz,delp,gridstruct,npx,npy,ng, &
1877 !$OMP pk,u,beta,du,dt,gz,alpha,pp,v,dv) &
1878 !$OMP private(wk1, wk)
1879 do k=1,npz
1880  call a2b_ord4(delp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng)
1881 
1882  do j=js,je+1
1883  do i=is,ie+1
1884  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
1885  enddo
1886  enddo
1887 
1888  do j=js,je+1
1889  do i=is,ie
1890  u(i,j,k) = u(i,j,k) + beta*du(i,j,k)
1891  ! hydrostatic contributions from past time-step already added in the "beta" part
1892  ! Current gradient from "hydrostatic" components:
1893  !---------------------------------------------------------------------------------
1894  du(i,j,k) = dt / (wk(i,j)+wk(i+1,j)) * &
1895  ((gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) + &
1896  (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k)))
1897 #ifdef GAS_HYDRO_P
1898  du(i,j,k) = (1.-0.5*(q_con(i,j-1,k)+q_con(i,j,k)))*du(i,j,k)
1899 #endif
1900  !---------------------------------------------------------------------------------
1901  ! Non-hydrostatic contribution
1902  u(i,j,k) = (u(i,j,k) + alpha*du(i,j,k) + dt/(wk1(i,j)+wk1(i+1,j)) * &
1903  ((gz(i,j,k+1)-gz(i+1,j,k))*(pp(i+1,j,k+1)-pp(i,j,k)) &
1904  + (gz(i,j,k)-gz(i+1,j,k+1))*(pp(i,j,k+1)-pp(i+1,j,k))))*gridstruct%rdx(i,j)
1905  enddo
1906  enddo
1907  do j=js,je
1908  do i=is,ie+1
1909  v(i,j,k) = v(i,j,k) + beta*dv(i,j,k)
1910  ! Current gradient from "hydrostatic" components:
1911  !---------------------------------------------------------------------------------
1912  dv(i,j,k) = dt / (wk(i,j)+wk(i,j+1)) * &
1913  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) + &
1914  (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k)))
1915 #ifdef GAS_HYDRO_P
1916  dv(i,j,k) = (1.-0.5*(q_con(i-1,j,k)+q_con(i,j,k)))*dv(i,j,k)
1917 #endif
1918  !---------------------------------------------------------------------------------
1919  ! Non-hydrostatic contribution
1920  v(i,j,k) = (v(i,j,k) + alpha*dv(i,j,k) + dt/(wk1(i,j)+wk1(i,j+1)) * &
1921  ((gz(i,j,k+1)-gz(i,j+1,k))*(pp(i,j+1,k+1)-pp(i,j,k)) &
1922  + (gz(i,j,k)-gz(i,j+1,k+1))*(pp(i,j,k+1)-pp(i,j+1,k))))*gridstruct%rdy(i,j)
1923  enddo
1924  enddo
1925 
1926 enddo ! end k-loop
1927 
1928 
1929 end subroutine split_p_grad
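!---------------------------------------------------------------------------
! Schematic reading of the beta-weighted splitting above (interpretive note;
! du_old, du_new, PGF_hydro and PGF_nonhydro are shorthand for this note
! only; beta is supplied by the caller and alpha = 1 - beta):
!
!   u      <- u + beta*du_old           (du_old: hydrostatic PGF saved from
!                                        the previous acoustic sub-step)
!   du_new  = dt * PGF_hydro(pk, gz)    (stored back into du for reuse)
!   u      <- ( u + alpha*du_new + dt*PGF_nonhydro(pp, gz) ) * rdx
!
! so the hydrostatic pressure gradient is time-averaged with weights
! (beta, 1-beta) across consecutive sub-steps (e.g. beta = 0.4 weights the
! previous gradient by 0.4 and the current one by 0.6), while the
! non-hydrostatic (pp) part is always applied at the current sub-step.
!---------------------------------------------------------------------------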
1930 
1931 
1932 
1933 subroutine one_grad_p(u, v, pk, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, &
1934  ptop, hydrostatic, a2b_ord, d_ext)
1936 integer, intent(IN) :: ng, npx, npy, npz, a2b_ord
1937 real, intent(IN) :: dt, ptop, d_ext
1938 logical, intent(in) :: hydrostatic
1939 type(fv_grid_bounds_type), intent(IN) :: bd
1940 real, intent(in) :: divg2(bd%is:bd%ie+1,bd%js:bd%je+1)
1941 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
1942 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
1943 real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed ,npz)
1944 real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1945 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1946 type(fv_grid_type), intent(INOUT), target :: gridstruct
1947 ! Local:
1948 real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: wk
1949 real:: wk1(bd%is:bd%ie+1,bd%js:bd%je+1)
1950 real:: wk2(bd%is:bd%ie,bd%js:bd%je+1)
1951 real top_value
1952 integer i,j,k
1953 
1954 integer :: is, ie, js, je
1955 integer :: isd, ied, jsd, jed
1956 
1957  is = bd%is
1958  ie = bd%ie
1959  js = bd%js
1960  je = bd%je
1961  isd = bd%isd
1962  ied = bd%ied
1963  jsd = bd%jsd
1964  jed = bd%jed
1965 
1966 if ( hydrostatic ) then
1967  ! pk is pe**kappa if hydrostatic
1968  top_value = ptk
1969 else
1970  ! pk is full pressure if non-hydrostatic
1971  top_value = ptop
1972 endif
1973 
1974 !$OMP parallel do default(none) shared(is,ie,js,je,pk,top_value)
1975 do j=js,je+1
1976  do i=is,ie+1
1977  pk(i,j,1) = top_value
1978  enddo
1979 enddo
1980 
1981 !$OMP parallel do default(none) shared(npz,isd,jsd,pk,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
1982 !$OMP private(wk)
1983 do k=2,npz+1
1984  if ( a2b_ord==4 ) then
1985  call a2b_ord4(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1986  else
1987  call a2b_ord2(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1988  endif
1989 enddo
1990 
1991 !$OMP parallel do default(none) shared(npz,isd,jsd,gz,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
1992 !$OMP private(wk)
1993 do k=1,npz+1
1994  if ( a2b_ord==4 ) then
1995  call a2b_ord4( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1996  else
1997  call a2b_ord2( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1998  endif
1999 enddo
2000 
2001 if ( d_ext > 0. ) then
2002 
2003  !$OMP parallel do default(none) shared(is,ie,js,je,wk2,divg2)
2004  do j=js,je+1
2005  do i=is,ie
2006  wk2(i,j) = divg2(i,j)-divg2(i+1,j)
2007  enddo
2008  enddo
2009 
2010  !$OMP parallel do default(none) shared(is,ie,js,je,wk1,divg2)
2011  do j=js,je
2012  do i=is,ie+1
2013  wk1(i,j) = divg2(i,j)-divg2(i,j+1)
2014  enddo
2015  enddo
2016 
2017 else
2018 
2019  !$OMP parallel do default(none) shared(is,ie,js,je,wk1,wk2)
2020  do j=js,je+1
2021  do i=is,ie
2022  wk2(i,j) = 0.
2023  enddo
2024  do i=is,ie+1
2025  wk1(i,j) = 0.
2026  enddo
2027  enddo
2028 
2029 endif
2030 
2031 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pk,delp,hydrostatic,a2b_ord,gridstruct, &
2032 !$OMP npx,npy,isd,jsd,ng,u,v,wk2,dt,gz,wk1) &
2033 !$OMP private(wk)
2034 do k=1,npz
2035 
2036  if ( hydrostatic ) then
2037  do j=js,je+1
2038  do i=is,ie+1
2039  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
2040  enddo
2041  enddo
2042  else
2043  if ( a2b_ord==4 ) then
2044  call a2b_ord4(delp(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng)
2045  else
2046  call a2b_ord2(delp(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng)
2047  endif
2048  endif
2049 
2050  do j=js,je+1
2051  do i=is,ie
2052  u(i,j,k) = gridstruct%rdx(i,j)*(wk2(i,j)+u(i,j,k) + dt/(wk(i,j)+wk(i+1,j)) * &
2053  ((gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) &
2054  + (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k))))
2055  enddo
2056  enddo
2057  do j=js,je
2058  do i=is,ie+1
2059  v(i,j,k) = gridstruct%rdy(i,j)*(wk1(i,j)+v(i,j,k) + dt/(wk(i,j)+wk(i,j+1)) * &
2060  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) &
2061  + (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k))))
2062  enddo
2063  enddo
2064 enddo ! end k-loop
2065 
2066 end subroutine one_grad_p
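!---------------------------------------------------------------------------
! Schematic reading of one_grad_p (interpretive note; the bracketed
! "circulation" is shorthand for the cross-diagonal form coded above): a
! single, unsplit pressure-gradient update.  When d_ext > 0, wk2 and wk1
! carry the external-mode divergence-damping increments (differences of
! divg2); otherwise they are zero.  The update then has the form
!
!   u <- rdx * ( u + wk2 + dt * [circulation of gz vs pk] / (wk(i)+wk(i+1)) )
!
! where wk = pk(k+1)-pk(k) in the hydrostatic case (pk = pe**kappa) or the
! corner-interpolated delp in the non-hydrostatic case (pk = full pressure),
! as noted in the comments above.
!---------------------------------------------------------------------------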
2067 
2068 
2069 subroutine grad1_p_update(divg2, u, v, pk, gz, dt, ng, gridstruct, bd, npx, npy, npz, ptop, beta, a2b_ord)
2071 integer, intent(in) :: ng, npx, npy, npz, a2b_ord
2072 real, intent(in) :: dt, ptop, beta
2073 type(fv_grid_bounds_type), intent(IN) :: bd
2074 real, intent(in):: divg2(bd%is:bd%ie+1,bd%js:bd%je+1)
2075 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
2076 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
2077 real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
2078 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
2079 type(fv_grid_type), intent(INOUT), target :: gridstruct
2080 
2081 ! Local:
2082 real:: wk(bd%isd:bd%ied,bd%jsd:bd%jed)
2083 real top_value, alpha
2084 integer i,j,k
2085 
2086  integer :: is, ie, js, je
2087  integer :: isd, ied, jsd, jed
2088 
2089  is = bd%is
2090  ie = bd%ie
2091  js = bd%js
2092  je = bd%je
2093  isd = bd%isd
2094  ied = bd%ied
2095  jsd = bd%jsd
2096  jed = bd%jed
2097 
2098 alpha = 1. - beta
2099 
2100 ! pk is pe**kappa if hydrostatic
2101 top_value = ptk
2102 
2103 !$OMP parallel do default(none) shared(is,ie,js,je,pk,top_value)
2104 do j=js,je+1
2105  do i=is,ie+1
2106  pk(i,j,1) = top_value
2107  enddo
2108 enddo
2109 !$OMP parallel do default(none) shared(npz,isd,jsd,pk,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
2110 !$OMP private(wk)
2111 do k=2,npz+1
2112  if ( a2b_ord==4 ) then
2113  call a2b_ord4(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2114  else
2115  call a2b_ord2(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2116  endif
2117 enddo
2118 
2119 !$OMP parallel do default(none) shared(npz,isd,jsd,gz,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
2120 !$OMP private(wk)
2121 do k=1,npz+1
2122  if ( a2b_ord==4 ) then
2123  call a2b_ord4( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2124  else
2125  call a2b_ord2( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2126  endif
2127 enddo
2128 
2129 !$OMP parallel do default(none) shared(npz,is,ie,js,je,pk,u,beta,gz,divg2,alpha, &
2130 !$OMP gridstruct,v,dt,du,dv) &
2131 !$OMP private(wk)
2132 do k=1,npz
2133 
2134  do j=js,je+1
2135  do i=is,ie+1
2136  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
2137  enddo
2138  enddo
2139 
2140  do j=js,je+1
2141  do i=is,ie
2142  u(i,j,k) = u(i,j,k) + beta*du(i,j,k)
2143  du(i,j,k) = dt/(wk(i,j)+wk(i+1,j)) * &
2144  ((gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) &
2145  + (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k)))
2146  u(i,j,k) = (u(i,j,k) + divg2(i,j)-divg2(i+1,j) + alpha*du(i,j,k))*gridstruct%rdx(i,j)
2147  enddo
2148  enddo
2149  do j=js,je
2150  do i=is,ie+1
2151  v(i,j,k) = v(i,j,k) + beta*dv(i,j,k)
2152  dv(i,j,k) = dt/(wk(i,j)+wk(i,j+1)) * &
2153  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) &
2154  + (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k)))
2155  v(i,j,k) = (v(i,j,k) + divg2(i,j)-divg2(i,j+1) + alpha*dv(i,j,k))*gridstruct%rdy(i,j)
2156  enddo
2157  enddo
2158 enddo ! end k-loop
2159 
2160 end subroutine grad1_p_update
2161 
2162 
2163 subroutine mix_dp(hydrostatic, w, delp, pt, km, ak, bk, CG, fv_debug, bd)
2164 integer, intent(IN) :: km
2165 real , intent(IN) :: ak(km+1), bk(km+1)
2166 type(fv_grid_bounds_type), intent(IN) :: bd
2167 real, intent(INOUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km):: pt, delp
2168 real, intent(INOUT), dimension(bd%isd:,bd%jsd:,1:):: w
2169 logical, intent(IN) :: hydrostatic, CG, fv_debug
2170 ! Local:
2171 real dp, dpmin
2172 integer i, j, k, ip
2173 integer ifirst, ilast
2174 integer jfirst, jlast
2175 
2176 integer :: is, ie, js, je
2177 integer :: isd, ied, jsd, jed
2178 
2179  is = bd%is
2180  ie = bd%ie
2181  js = bd%js
2182  je = bd%je
2183  isd = bd%isd
2184  ied = bd%ied
2185  jsd = bd%jsd
2186  jed = bd%jed
2187 
2188 
2189 if ( cg ) then
2190  ifirst = is-1; ilast = ie+1
2191  jfirst = js-1; jlast = je+1
2192 else
2193  ifirst = is; ilast = ie
2194  jfirst = js; jlast = je
2195 endif
2196 
2197 
2198 !$OMP parallel do default(none) shared(jfirst,jlast,km,ifirst,ilast,delp,ak,bk,pt, &
2199 !$OMP hydrostatic,w,fv_debug) &
2200 !$OMP private(ip, dpmin, dp)
2201 do 1000 j=jfirst,jlast
2202 
2203  ip = 0
2204 
2205  do k=1, km-1
2206  dpmin = 0.01 * ( ak(k+1)-ak(k) + (bk(k+1)-bk(k))*1.e5 )
2207  do i=ifirst, ilast
2208  if(delp(i,j,k) < dpmin) then
2209  if (fv_debug) write(*,*) 'Mix_dp: ', i, j, k, mpp_pe(), delp(i,j,k), pt(i,j,k)
2210  ! Remap from below and mix pt
2211  dp = dpmin - delp(i,j,k)
2212  pt(i,j,k) = (pt(i,j,k)*delp(i,j,k) + pt(i,j,k+1)*dp) / dpmin
2213  if ( .not.hydrostatic ) w(i,j,k) = (w(i,j,k)*delp(i,j,k) + w(i,j,k+1)*dp) / dpmin
2214  delp(i,j,k) = dpmin
2215  delp(i,j,k+1) = delp(i,j,k+1) - dp
2216  ip = ip + 1
2217  endif
2218  enddo
2219  enddo
2220 
2221  ! Bottom (k=km):
2222  dpmin = 0.01 * ( ak(km+1)-ak(km) + (bk(km+1)-bk(km))*1.e5 )
2223  do i=ifirst, ilast
2224  if(delp(i,j,km) < dpmin) then
2225  if (fv_debug) write(*,*) 'Mix_dp: ', i, j, km, mpp_pe(), delp(i,j,km), pt(i,j,km)
2226  ! Remap from above and mix pt
2227  dp = dpmin - delp(i,j,km)
2228  pt(i,j,km) = (pt(i,j,km)*delp(i,j,km) + pt(i,j,km-1)*dp)/dpmin
2229  if ( .not.hydrostatic ) w(i,j,km) = (w(i,j,km)*delp(i,j,km) + w(i,j,km-1)*dp) / dpmin
2230  delp(i,j,km) = dpmin
2231  delp(i,j,km-1) = delp(i,j,km-1) - dp
2232  ip = ip + 1
2233  endif
2234  enddo
2235  if ( fv_debug .and. ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip
2236  ! if ( ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip
2237 1000 continue
2238 
2239  end subroutine mix_dp
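!---------------------------------------------------------------------------
! Worked example for the mix_dp threshold (illustrative numbers only):
!
!   dpmin = 0.01 * ( ak(k+1)-ak(k) + (bk(k+1)-bk(k))*1.e5 )
!
! i.e. 1% of the layer's nominal thickness for a 1.e5 Pa surface pressure.
! For a pure sigma layer with ak(k+1)-ak(k) = 0 and bk(k+1)-bk(k) = 0.01,
! dpmin = 0.01 * (0. + 0.01*1.e5) = 10 Pa.  Where delp falls below dpmin,
! the deficit dp = dpmin - delp is borrowed from the layer below (from the
! layer above at k = km), and pt (and w when non-hydrostatic) are mixed
! mass-weighted, so pt*delp summed over the two layers is unchanged.
!---------------------------------------------------------------------------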
2240 
2242  subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, &
2243 #ifdef MULTI_GASES
2244  kapad, &
2245 #endif
2246  q_con, pkz, km, akap, CG, nested, computehalo, npx, npy, a2b_ord, bd)
2248  integer, intent(IN) :: km, npx, npy, a2b_ord
2249  real , intent(IN) :: akap, ptop
2250  type(fv_grid_bounds_type), intent(IN) :: bd
2251  real , intent(IN) :: hs(bd%isd:bd%ied,bd%jsd:bd%jed)
2252  real, intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km):: pt, delp
2253 #ifdef MULTI_GASES
2254  real, intent(IN) :: kapad(bd%isd:bd%ied,bd%jsd:bd%jed,km)
2255 #endif
2256  real, intent(IN), dimension(bd%isd:,bd%jsd:,1:):: q_con
2257  logical, intent(IN) :: CG, nested, computehalo
2258  ! !OUTPUT PARAMETERS
2259  real, intent(OUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km+1):: gz, pk
2260  real, intent(OUT) :: pe(bd%is-1:bd%ie+1,km+1,bd%js-1:bd%je+1)
2261  real, intent(out) :: peln(bd%is:bd%ie,km+1,bd%js:bd%je)
2262  real, intent(out) :: pkz(bd%is:bd%ie,bd%js:bd%je,km)
2263  ! !DESCRIPTION:
2264  ! Calculates the geopotential (gz) and the pressure raised to the power kappa (pk = p**kappa).
2265  ! Local:
2266  real peg(bd%isd:bd%ied,km+1)
2267  real pkg(bd%isd:bd%ied,km+1)
2268  real p1d(bd%isd:bd%ied)
2269  real logp(bd%isd:bd%ied)
2270 #ifdef MULTI_GASES
2271  real pkx (bd%isd:bd%ied,km)
2272  real pkgx(bd%isd:bd%ied,km)
2273  real akapx
2274  integer n
2275 #endif
2276  integer i, j, k
2277  integer ifirst, ilast
2278  integer jfirst, jlast
2279 
2280  integer :: is, ie, js, je
2281  integer :: isd, ied, jsd, jed
2282 
2283  is = bd%is
2284  ie = bd%ie
2285  js = bd%js
2286  je = bd%je
2287  isd = bd%isd
2288  ied = bd%ied
2289  jsd = bd%jsd
2290  jed = bd%jed
2291 
2292  if ( (.not. cg .and. a2b_ord==4) .or. (nested .and. .not. cg) ) then ! D-Grid
2293  ifirst = is-2; ilast = ie+2
2294  jfirst = js-2; jlast = je+2
2295  else
2296  ifirst = is-1; ilast = ie+1
2297  jfirst = js-1; jlast = je+1
2298  endif
2299 
2300  if (nested .and. computehalo) then
2301  if (is == 1) ifirst = isd
2302  if (ie == npx-1) ilast = ied
2303  if (js == 1) jfirst = jsd
2304  if (je == npy-1) jlast = jed
2305  end if
2306 
2307 #ifdef MULTI_GASES
2308 !$OMP parallel do default(none) shared(jfirst,jlast,ifirst,ilast,pk,km,gz,hs,ptop,ptk,kapad, &
2309 !$OMP js,je,is,ie,peln,peln1,pe,delp,akap,pt,CG,pkz,q_con) &
2310 !$OMP private(peg, pkx, pkgx, akapx, pkg, p1d, logp)
2311 #else
2312 !$OMP parallel do default(none) shared(jfirst,jlast,ifirst,ilast,pk,km,gz,hs,ptop,ptk, &
2313 !$OMP js,je,is,ie,peln,peln1,pe,delp,akap,pt,CG,pkz,q_con) &
2314 !$OMP private(peg, pkg, p1d, logp)
2315 #endif
2316  do 2000 j=jfirst,jlast
2317 
2318  do i=ifirst, ilast
2319  p1d(i) = ptop
2320  pk(i,j,1) = ptk
2321  gz(i,j,km+1) = hs(i,j)
2322 #ifdef USE_COND
2323  peg(i,1) = ptop
2324  pkg(i,1) = ptk
2325 #endif
2326  enddo
2327 
2328 #ifndef SW_DYNAMICS
2329  if( j>=js .and. j<=je) then
2330  do i=is,ie
2331  peln(i,1,j) = peln1
2332  enddo
2333  endif
2334 #endif
2335 
2336  if( j>(js-2) .and. j<(je+2) ) then
2337  do i=max(ifirst,is-1), min(ilast,ie+1)
2338  pe(i,1,j) = ptop
2339  enddo
2340  endif
2341 
2342  ! Top down
2343  do k=2,km+1
2344  do i=ifirst, ilast
2345  p1d(i) = p1d(i) + delp(i,j,k-1)
2346  logp(i) = log(p1d(i))
2347  pk(i,j,k) = exp( akap*logp(i) )
2348 #ifdef USE_COND
2349  peg(i,k) = peg(i,k-1) + delp(i,j,k-1)*(1.-q_con(i,j,k-1))
2350  pkg(i,k) = exp( akap*log(peg(i,k)) )
2351 #endif
2352  enddo
2353 
2354  if( j>(js-2) .and. j<(je+2) ) then
2355  do i=max(ifirst,is-1), min(ilast,ie+1)
2356  pe(i,k,j) = p1d(i)
2357  enddo
2358  if( j>=js .and. j<=je) then
2359  do i=is,ie
2360  peln(i,k,j) = logp(i)
2361  enddo
2362  endif
2363  endif
2364  enddo
2365 
2366 #ifdef MULTI_GASES
2367  do k=1,km
2368  do i=ifirst, ilast
2369  akapx = (kapad(i,j,k)-akap)/akap
2370 #ifdef USE_COND
2371  pkgx(i,k) = (pkg(i,k+1)-pkg(i,k))/(akap*(log(peg(i,k+1))-log(peg(i,k))))
2372  pkgx(i,k) = exp( akapx*log(pkgx(i,k)) )
2373 #else
2374  pkx(i,k) = (pk(i,j,k+1)-pk(i,j,k))/(akap*(peln(i,k+1,j)-peln(i,k,j)))
2375  pkx(i,k) = exp( akapx*log(pkx(i,k)) )
2376 #endif
2377  enddo
2378  enddo
2379 #endif
2380 
2381  ! Bottom up
2382  do k=km,1,-1
2383  do i=ifirst, ilast
2384 #ifdef SW_DYNAMICS
2385  gz(i,j,k) = gz(i,j,k+1) + pt(i,j,k)*(pk(i,j,k+1)-pk(i,j,k))
2386 #else
2387 #ifdef USE_COND
2388 #ifdef MULTI_GASES
2389  gz(i,j,k) = gz(i,j,k+1) + cp_air*pkgx(i,k)*pt(i,j,k)*(pkg(i,k+1)-pkg(i,k))
2390 #else
2391  gz(i,j,k) = gz(i,j,k+1) + cp_air*pt(i,j,k)*(pkg(i,k+1)-pkg(i,k))
2392 #endif
2393 #else
2394 #ifdef MULTI_GASES
2395  gz(i,j,k) = gz(i,j,k+1) + cp_air*pkx(i,k)*pt(i,j,k)*(pk(i,j,k+1)-pk(i,j,k))
2396 #else
2397  gz(i,j,k) = gz(i,j,k+1) + cp_air*pt(i,j,k)*(pk(i,j,k+1)-pk(i,j,k))
2398 #endif
2399 #endif
2400 #endif
2401  enddo
2402  enddo
2403 
2404  if ( .not. cg .and. j .ge. js .and. j .le. je ) then
2405  do k=1,km
2406  do i=is,ie
2407  pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(akap*(peln(i,k+1,j)-peln(i,k,j)))
2408 #ifdef MULTI_GASES
2409  akapx = kapad(i,j,k) / akap
2410  pkz(i,j,k) = exp( akapx * log( pkz(i,j,k) ) )
2411 #endif
2412  enddo
2413  enddo
2414  endif
2415 
2416 2000 continue
2417  end subroutine geopk
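!---------------------------------------------------------------------------
! Schematic reading of geopk (interpretive note): pressures are accumulated
! top-down, pe(k) = pe(k-1) + delp(k-1) with pk = pe**akap, and the
! geopotential is integrated bottom-up from the surface height hs via the
! discrete hydrostatic relation
!
!   gz(k) = gz(k+1) + cp_air * pt(k) * ( pk(k+1) - pk(k) )
!
! (the USE_COND / MULTI_GASES branches adjust which pressure and which
! effective kappa enter this relation).  The layer-mean value returned in
! pkz is
!
!   pkz(k) = ( pk(k+1) - pk(k) ) / ( akap * ( peln(k+1) - peln(k) ) )
!---------------------------------------------------------------------------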
2418 
2420  subroutine del2_cubed(q, cd, gridstruct, domain, npx, npy, km, nmax, bd)
2421  !---------------------------------------------------------------
2422  ! This routine is for filtering the omega field for the physics
2423  !---------------------------------------------------------------
2424  integer, intent(in):: npx, npy, km, nmax
2425  real(kind=R_GRID), intent(in):: cd
2426  type(fv_grid_bounds_type), intent(IN) :: bd
2427  real, intent(inout):: q(bd%isd:bd%ied,bd%jsd:bd%jed,km)
2428  type(fv_grid_type), intent(IN), target :: gridstruct
2429  type(domain2d), intent(INOUT) :: domain
2430  real, parameter:: r3 = 1./3.
2431  real :: fx(bd%isd:bd%ied+1,bd%jsd:bd%jed), fy(bd%isd:bd%ied,bd%jsd:bd%jed+1)
2432  real :: q2(bd%isd:bd%ied,bd%jsd:bd%jed)
2433  integer i,j,k, n, nt, ntimes
2434  integer :: is, ie, js, je
2435  integer :: isd, ied, jsd, jed
2436 
2437  !Local routine pointers
2438 ! real, pointer, dimension(:,:) :: rarea
2439 ! real, pointer, dimension(:,:) :: del6_u, del6_v
2440 ! logical, pointer :: sw_corner, se_corner, ne_corner, nw_corner
2441 
2442  is = bd%is
2443  ie = bd%ie
2444  js = bd%js
2445  je = bd%je
2446  isd = bd%isd
2447  ied = bd%ied
2448  jsd = bd%jsd
2449  jed = bd%jed
2450 
2451 ! rarea => gridstruct%rarea
2452 ! del6_u => gridstruct%del6_u
2453 ! del6_v => gridstruct%del6_v
2454 
2455 ! sw_corner => gridstruct%sw_corner
2456 ! nw_corner => gridstruct%nw_corner
2457 ! se_corner => gridstruct%se_corner
2458 ! ne_corner => gridstruct%ne_corner
2459 
2460  ntimes = min(3, nmax)
2461 
2462  call timing_on('COMM_TOTAL')
2463  call mpp_update_domains(q, domain, complete=.true.)
2464  call timing_off('COMM_TOTAL')
2465 
2466 
2467  do n=1,ntimes
2468  nt = ntimes - n
2469 
2470 !$OMP parallel do default(none) shared(km,q,is,ie,js,je,npx,npy, &
2471 !$OMP nt,isd,jsd,gridstruct,bd, &
2472 !$OMP cd) &
2473 !$OMP private(fx, fy)
2474  do k=1,km
2475 
2476  if ( gridstruct%sw_corner ) then
2477  q(1,1,k) = (q(1,1,k)+q(0,1,k)+q(1,0,k)) * r3
2478  q(0,1,k) = q(1,1,k)
2479  q(1,0,k) = q(1,1,k)
2480  endif
2481  if ( gridstruct%se_corner ) then
2482  q(ie, 1,k) = (q(ie,1,k)+q(npx,1,k)+q(ie,0,k)) * r3
2483  q(npx,1,k) = q(ie,1,k)
2484  q(ie, 0,k) = q(ie,1,k)
2485  endif
2486  if ( gridstruct%ne_corner ) then
2487  q(ie, je,k) = (q(ie,je,k)+q(npx,je,k)+q(ie,npy,k)) * r3
2488  q(npx,je,k) = q(ie,je,k)
2489  q(ie,npy,k) = q(ie,je,k)
2490  endif
2491  if ( gridstruct%nw_corner ) then
2492  q(1, je,k) = (q(1,je,k)+q(0,je,k)+q(1,npy,k)) * r3
2493  q(0, je,k) = q(1,je,k)
2494  q(1,npy,k) = q(1,je,k)
2495  endif
2496 
2497  if(nt>0 .and. (.not. gridstruct%regional)) call copy_corners(q(isd,jsd,k), npx, npy, 1, gridstruct%nested, bd, &
2498  gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner )
2499  do j=js-nt,je+nt
2500  do i=is-nt,ie+1+nt
2501 #ifdef USE_SG
2502  fx(i,j) = gridstruct%dy(i,j)*gridstruct%sina_u(i,j)*(q(i-1,j,k)-q(i,j,k))*gridstruct%rdxc(i,j)
2503 #else
2504  fx(i,j) = gridstruct%del6_v(i,j)*(q(i-1,j,k)-q(i,j,k))
2505 #endif
2506  enddo
2507  enddo
2508 
2509  if(nt>0 .and. (.not. gridstruct%regional)) call copy_corners(q(isd,jsd,k), npx, npy, 2, gridstruct%nested, bd, &
2510  gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner)
2511  do j=js-nt,je+1+nt
2512  do i=is-nt,ie+nt
2513 #ifdef USE_SG
2514  fy(i,j) = gridstruct%dx(i,j)*gridstruct%sina_v(i,j)*(q(i,j-1,k)-q(i,j,k))*gridstruct%rdyc(i,j)
2515 #else
2516  fy(i,j) = gridstruct%del6_u(i,j)*(q(i,j-1,k)-q(i,j,k))
2517 #endif
2518  enddo
2519  enddo
2520 
2521  do j=js-nt,je+nt
2522  do i=is-nt,ie+nt
2523  q(i,j,k) = q(i,j,k) + cd*gridstruct%rarea(i,j)*(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))
2524  enddo
2525  enddo
2526  enddo
2527  enddo
2528 
2529  end subroutine del2_cubed
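!---------------------------------------------------------------------------
! Schematic reading of del2_cubed (interpretive note): each pass applies an
! explicit Laplacian-type smoother in flux form,
!
!   q <- q + cd * rarea * ( fx(i) - fx(i+1) + fy(j) - fy(j+1) )
!   fx = del6_v * ( q(i-1) - q(i) ),   fy = del6_u * ( q(j-1) - q(j) )
!
! repeated ntimes = min(3, nmax) times.  The stencil offset nt shrinks by
! one each pass, so the single halo update at entry suffices for all
! passes, and the three-point corner averaging above handles the special
! cubed-sphere corner points.
!---------------------------------------------------------------------------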
2530 
2531  subroutine init_ijk_mem(i1, i2, j1, j2, km, array, var)
2532  integer, intent(in):: i1, i2, j1, j2, km
2533  real, intent(inout):: array(i1:i2,j1:j2,km)
2534  real, intent(in):: var
2535  integer:: i, j, k
2536 
2537 !$OMP parallel do default(none) shared(i1,i2,j1,j2,km,array,var)
2538  do k=1,km
2539  do j=j1,j2
2540  do i=i1,i2
2541  array(i,j,k) = var
2542  enddo
2543  enddo
2544  enddo
2545 
2546  end subroutine init_ijk_mem
2547 
2549  subroutine ray_fast(dt, npx, npy, npz, pfull, tau, u, v, w, &
2550  ks, dp, ptop, hydrostatic, rf_cutoff, bd)
2551 ! Simple "inline" version of the Rayleigh friction
2552  real, intent(in):: dt
2553  real, intent(in):: tau
2554  real, intent(in):: ptop, rf_cutoff
2555  real, intent(in), dimension(npz):: pfull
2556  integer, intent(in):: npx, npy, npz, ks
2557  logical, intent(in):: hydrostatic
2558  type(fv_grid_bounds_type), intent(IN) :: bd
2559  real, intent(inout):: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
2560  real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz)
2561  real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: )
2562  real, intent(in):: dp(npz)
2563 !
2564  real(kind=R_GRID):: rff(npz)
2565  real, parameter:: sday = 86400.
2566  real, dimension(bd%is:bd%ie+1):: dmv
2567  real, dimension(bd%is:bd%ie):: dmu
2568  real:: tau0, dm
2569  integer i, j, k
2570 
2571  integer :: is, ie, js, je
2572  integer :: isd, ied, jsd, jed
2573 
2574  is = bd%is
2575  ie = bd%ie
2576  js = bd%js
2577  je = bd%je
2578  isd = bd%isd
2579  ied = bd%ied
2580  jsd = bd%jsd
2581  jed = bd%jed
2582 
2583  if ( .not. rff_initialized ) then
2584  tau0 = tau * sday
2585  allocate( rf(npz) )
2586  rf(:) = 1.
2587 
2588  if( is_master() ) write(6,*) 'Fast Rayleigh friction E-folding time (days):'
2589  do k=1, npz
2590  if ( pfull(k) < rf_cutoff ) then
2591  rff(k) = dt/tau0*sin(0.5*pi*log(rf_cutoff/pfull(k))/log(rf_cutoff/ptop))**2
2592 ! Re-FACTOR rf
2593  if( is_master() ) write(6,*) k, 0.01*pfull(k), dt/(rff(k)*sday)
2594  kmax = k
2595  rff(k) = 1.d0 / (1.0d0+rff(k))
2596  rf(k) = rff(k)
2597  else
2598  exit
2599  endif
2600  enddo
2601  dm = 0.
2602  do k=1,ks
2603  if ( pfull(k) < rf_cutoff + min(100., 10.*ptop) ) then
2604  dm = dm + dp(k)
2605  k_rf = k
2606  else
2607  exit
2608  endif
2609  enddo
2610  if( is_master() ) write(6,*) 'k_rf=', k_rf, 0.01*pfull(k_rf), 'dm=', dm
2611  rff_initialized = .true.
2612  endif
2613 
2614 !$OMP parallel do default(none) shared(k_rf,is,ie,js,je,kmax,pfull,rf_cutoff,w,rf,dp,u,v,hydrostatic) &
2615 !$OMP private(dm, dmu, dmv)
2616  do j=js,je+1
2617 
2618  dm = 0.
2619  do k=1, k_rf
2620  dm = dm + dp(k)
2621  enddo
2622 
2623  dmu(:) = 0.
2624  dmv(:) = 0.
2625  do k=1,kmax
2626  do i=is,ie
2627  dmu(i) = dmu(i) + (1.-rf(k))*dp(k)*u(i,j,k)
2628  u(i,j,k) = rf(k)*u(i,j,k)
2629  enddo
2630  if ( j/=je+1 ) then
2631  do i=is,ie+1
2632  dmv(i) = dmv(i) + (1.-rf(k))*dp(k)*v(i,j,k)
2633  v(i,j,k) = rf(k)*v(i,j,k)
2634  enddo
2635  if ( .not. hydrostatic ) then
2636  do i=is,ie
2637  w(i,j,k) = rf(k)*w(i,j,k)
2638  enddo
2639  endif
2640  endif
2641  enddo
2642 
2643  do i=is,ie
2644  dmu(i) = dmu(i) / dm
2645  enddo
2646  if ( j/=je+1 ) then
2647  do i=is,ie+1
2648  dmv(i) = dmv(i) / dm
2649  enddo
2650  endif
2651 
2652  do k=1, k_rf
2653  do i=is,ie
2654  u(i,j,k) = u(i,j,k) + dmu(i)
2655  enddo
2656  if ( j/=je+1 ) then
2657  do i=is,ie+1
2658  v(i,j,k) = v(i,j,k) + dmv(i)
2659  enddo
2660  endif
2661  enddo
2662 
2663  enddo
2664 
2665  end subroutine ray_fast
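!---------------------------------------------------------------------------
! Schematic reading of ray_fast (interpretive note): for levels with
! pfull(k) < rf_cutoff the winds are relaxed every acoustic step by
!
!   rf(k) = 1 / ( 1 + (dt/tau0) * sin( 0.5*pi * log(rf_cutoff/pfull(k))
!                                            / log(rf_cutoff/ptop) )**2 )
!
! with tau0 = tau * 86400 s, so the damping is strongest near the model top
! and vanishes at the cutoff.  The momentum removed from the damped layers,
! dmu = sum_k (1-rf(k))*dp(k)*u(k), divided by the mass dm of the top k_rf
! layers, is added back uniformly to those layers, so the column-integrated
! momentum is approximately preserved.
!---------------------------------------------------------------------------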
2666 
2667 
2668 end module dyn_core_mod