FV3DYCORE  Version 2.0.0
dyn_core.F90
1 
2 !***********************************************************************
3 !* GNU Lesser General Public License
4 !*
5 !* This file is part of the FV3 dynamical core.
6 !*
7 !* The FV3 dynamical core is free software: you can redistribute it
8 !* and/or modify it under the terms of the
9 !* GNU Lesser General Public License as published by the
10 !* Free Software Foundation, either version 3 of the License, or
11 !* (at your option) any later version.
12 !* The FV3 dynamical core is distributed in the hope that it will be
13 !* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
14 !* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 !* See the GNU General Public License for more details.
16 !* You should have received a copy of the GNU Lesser General Public
17 !* License along with the FV3 dynamical core.
18 !* If not, see <http://www.gnu.org/licenses/>.
19 !***********************************************************************
20 !
27 
29 
30 ! <table>
31 ! <tr>
32 ! <th>Module Name</th>
33 ! <th>Functions Included</th>
34 ! </tr>
35 ! <tr>
36 ! <td>a2b_edge_mod</td>
37 ! <td>a2b_ord2, a2b_ord4</td>
38 ! </tr>
39 ! <tr>
40 ! <td>boundary_mod</td>
41 ! <td>extrapolation_BC, nested_grid_BC_apply_intT</td>
42 ! </tr>
43 ! <tr>
44 ! <td>constants_mod</td>
45 ! <td>rdgas, radius, cp_air, pi</td>
46 ! </tr>
47 ! <tr>
48 ! <td>diag_manager_mod</td>
49 ! <td>send_data</td>
50 ! </tr>
51 ! <tr>
52 ! <td>fv_ada_nudge_mod</td>
53 ! <td>breed_slp_inline_ada</td>
54 ! </tr>
55 ! <tr>
56 ! <td>fv_arrays_mod</td>
57 ! <td>fv_grid_type, fv_flags_type, fv_nest_type,
58 ! fv_diag_type, fv_grid_bounds_type, R_GRID </td>
59 ! </tr>
60 ! <tr>
61 ! <td>fv_diagnostics_mod</td>
62 ! <td>prt_maxmin, fv_time, prt_mxm</td>
63 ! </tr>
64 ! <tr>
65 ! <td>fv_mp_mod</td>
66 ! <td>is_master, start_group_halo_update,
67 ! complete_group_halo_update, group_halo_update_type</td>
68 ! </tr>
69 ! <tr>
70 ! <td>fv_nwp_nudge_mod</td>
71 ! <td>breed_slp_inline, do_adiabatic_init</td>
72 ! </tr>
73 ! <tr>
74 ! <td>fv_timing_mod</td>
75 ! <td>timing_on, timing_off</td>
76 ! </tr>
77 ! <tr>
78 ! <td>fv_update_phys_mod</td>
79 ! <td>update_dwinds_phys</td>
80 ! </tr>
81 ! <tr>
82 ! <td>mpp_mod</td>
83 ! <td>mpp_pe </td>
84 ! </tr>
85 ! <tr>
86 ! <td>mpp_domains_mod</td>
87 ! <td>CGRID_NE, DGRID_NE, mpp_get_boundary, mpp_update_domains, domain2d</td>
88 ! </tr>
89 ! <tr>
90 ! <td>mpp_parameter_mod</td>
91 ! <td>CORNER</td>
92 ! </tr>
93 ! <tr>
94 ! <td>nh_core_mod</td>
95 ! <td>Riem_Solver3, Riem_Solver_C, update_dz_c, update_dz_d, nest_halo_nh</td>
96 ! </tr>
97 ! <tr>
98 ! <td>test_cases_mod</td>
99 ! <td>test_case, case9_forcing1, case9_forcing2</td>
100 ! </tr>
101 ! <tr>
102 ! <td>tp_core_mod</td>
103 ! <td>copy_corners</td>
104 ! </tr>
105 ! </table>
106 
107  use constants_mod, only: rdgas, radius, cp_air, pi
108  use mpp_mod, only: mpp_pe
109  use mpp_domains_mod, only: cgrid_ne, dgrid_ne, mpp_get_boundary, mpp_update_domains, &
110  domain2d
111  use mpp_parameter_mod, only: corner
112  use fv_mp_mod, only: is_master
113  use fv_mp_mod, only: start_group_halo_update, complete_group_halo_update
114  use fv_mp_mod, only: group_halo_update_type
115  use sw_core_mod, only: c_sw, d_sw
116  use a2b_edge_mod, only: a2b_ord2, a2b_ord4
117  use nh_core_mod, only: riem_solver3, riem_solver_c, update_dz_c, update_dz_d, nh_bc
118  use tp_core_mod, only: copy_corners
119  use fv_timing_mod, only: timing_on, timing_off
121 #ifdef ROT3
122  use fv_update_phys_mod, only: update_dwinds_phys
123 #endif
124 #if defined (ADA_NUDGE)
125  use fv_ada_nudge_mod, only: breed_slp_inline_ada
126 #else
127  use fv_nwp_nudge_mod, only: breed_slp_inline, do_adiabatic_init
128 #endif
129  use diag_manager_mod, only: send_data
130  use fv_diagnostics_mod, only: prt_maxmin, fv_time, prt_mxm
131  use fv_arrays_mod, only: fv_grid_type, fv_flags_type, fv_nest_type, &
132  fv_diag_type, fv_grid_bounds_type, R_GRID
133  use boundary_mod, only: extrapolation_BC, nested_grid_BC_apply_intT
134  use fv_regional_mod, only: regional_boundary_update, current_time_in_seconds
135  use fv_regional_mod, only: bc_time_interval
136  use fv_regional_mod, only: delz_regbc ! TEMPORARY --- lmh
137 
138 #ifdef SW_DYNAMICS
139  use test_cases_mod, only: test_case, case9_forcing1, case9_forcing2
140 #endif
141 #ifdef MULTI_GASES
142  use multi_gases_mod, only: virqd, vicvqd
143 #endif
146 
147 implicit none
148 private
149 
150 public :: dyn_core, del2_cubed, init_ijk_mem
151 
152  real :: ptk, peln1, rgrav
153  real :: d3_damp
154  real, allocatable, dimension(:,:,:) :: ut, vt, crx, cry, xfx, yfx, divgd, &
155  zh, du, dv, pkc, delpc, pk3, ptc, gz
156 ! real, parameter:: delt_max = 1.e-1 ! Max dissipative heating/cooling rate
157  ! 6 deg per 10-min
158  real(kind=R_GRID), parameter :: cnst_0p20=0.20d0
159 
160  real, allocatable :: rf(:)
161  integer:: k_rf = 0
162  logical:: rff_initialized = .false.
163  logical:: first_call = .true.
164  integer :: kmax=1
165 
166 contains
167 
168 !-----------------------------------------------------------------------
169 ! dyn_core :: FV Lagrangian dynamics driver
170 !-----------------------------------------------------------------------
171 
172  subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, akap, cappa, &
173 #ifdef MULTI_GASES
174  kapad, &
175 #endif
176  grav, hydrostatic, &
177  u, v, w, delz, pt, q, delp, pe, pk, phis, ws, omga, ptop, pfull, ua, va, &
178  uc, vc, mfx, mfy, cx, cy, pkz, peln, q_con, ak, bk, &
179  ks, gridstruct, flagstruct, neststruct, idiag, bd, domain, &
180  init_step, i_pack, end_step, diss_est,time_total)
182  integer, intent(IN) :: npx
183  integer, intent(IN) :: npy
184  integer, intent(IN) :: npz
185  integer, intent(IN) :: ng, nq, sphum
186  integer, intent(IN) :: n_map, n_split
187  real , intent(IN) :: bdt
188  real , intent(IN) :: zvir, cp, akap, grav
189  real , intent(IN) :: ptop
190  logical, intent(IN) :: hydrostatic
191  logical, intent(IN) :: init_step, end_step
192  real, intent(in) :: pfull(npz)
193  real, intent(in), dimension(npz+1) :: ak, bk
194  integer, intent(IN) :: ks
195  type(group_halo_update_type), intent(inout) :: i_pack(*)
196  type(fv_grid_bounds_type), intent(IN) :: bd
197  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz):: u
198  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz):: v
199  real, intent(inout) :: w( bd%isd:,bd%jsd:,1:)
200  real, intent(inout) :: delz(bd%is:,bd%js:,1:)
201  real, intent(inout) :: cappa(bd%isd:,bd%jsd:,1:)
202 #ifdef MULTI_GASES
203  real, intent(inout) :: kapad(bd%isd:bd%ied,bd%jsd:bd%jed,1:npz)
204 #endif
205  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
206  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
207  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, nq) !
208  real, intent(in), optional:: time_total
209  real, intent(inout) :: diss_est(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
210 
211 !-----------------------------------------------------------------------
212 ! Auxiliary pressure arrays:
213 ! The 5 vars below can be re-computed from delp and ptop.
214 !-----------------------------------------------------------------------
215 ! dyn_aux:
216  real, intent(inout):: phis(bd%isd:bd%ied,bd%jsd:bd%jed)
217  real, intent(inout):: pe(bd%is-1:bd%ie+1, npz+1,bd%js-1:bd%je+1)
218  real, intent(inout):: peln(bd%is:bd%ie,npz+1,bd%js:bd%je)
219  real, intent(inout):: pk(bd%is:bd%ie,bd%js:bd%je, npz+1)
220 
221 !-----------------------------------------------------------------------
222 ! Others:
223  real, parameter:: near0 = 1.e-8
224 #ifdef OVERLOAD_R4
225  real, parameter:: huge_r = 1.e8
226 #else
227  real, parameter:: huge_r = 1.e40
228 #endif
229 !-----------------------------------------------------------------------
230  real, intent(out ):: ws(bd%is:bd%ie,bd%js:bd%je)
231  real, intent(inout):: omga(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
232  real, intent(inout):: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
233  real, intent(inout):: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
234  real, intent(inout), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz):: ua, va
235  real, intent(inout):: q_con(bd%isd:, bd%jsd:, 1:)
236 
237 ! The Flux capacitors: accumulated Mass flux arrays
238  real, intent(inout):: mfx(bd%is:bd%ie+1, bd%js:bd%je, npz)
239  real, intent(inout):: mfy(bd%is:bd%ie , bd%js:bd%je+1, npz)
240 ! Accumulated Courant number arrays
241  real, intent(inout):: cx(bd%is:bd%ie+1, bd%jsd:bd%jed, npz)
242  real, intent(inout):: cy(bd%isd:bd%ied ,bd%js:bd%je+1, npz)
243  real, intent(inout),dimension(bd%is:bd%ie,bd%js:bd%je,npz):: pkz
244 
245  type(fv_grid_type), intent(INOUT), target :: gridstruct
246  type(fv_flags_type), intent(IN), target :: flagstruct
247  type(fv_nest_type), intent(INOUT) :: neststruct
248  type(fv_diag_type), intent(IN) :: idiag
249  type(domain2d), intent(INOUT) :: domain
250 
251  real, allocatable, dimension(:,:,:):: pem, heat_source
252 ! Auto 1D & 2D arrays:
253  real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: ws3, z_rat
254  real:: dp_ref(npz)
255  real:: zs(bd%isd:bd%ied,bd%jsd:bd%jed)
256  real:: p1d(bd%is:bd%ie)
257  real:: om2d(bd%is:bd%ie,npz)
258  real wbuffer(npy+2,npz)
259  real ebuffer(npy+2,npz)
260  real nbuffer(npx+2,npz)
261  real sbuffer(npx+2,npz)
262 ! ---- For external mode:
263  real divg2(bd%is:bd%ie+1,bd%js:bd%je+1)
264  real wk(bd%isd:bd%ied,bd%jsd:bd%jed)
265  real fz(bd%is: bd%ie+1,bd%js: bd%je+1)
266  real heat_s(bd%is:bd%ie,bd%js:bd%je)
267 ! new array for stochastic kinetic energy backscatter (SKEB)
268  real diss_e(bd%is:bd%ie,bd%js:bd%je)
269  real damp_vt(npz+1)
270  integer nord_v(npz+1)
271 !-------------------------------------
272  integer :: hord_m, hord_v, hord_t, hord_p
273  integer :: nord_k, nord_w, nord_t
274  integer :: ms
275 !---------------------------------------
276  integer :: i,j,k, it, iq, n_con, nf_ke
277  integer :: iep1, jep1
278  real :: beta, beta_d, d_con_k, damp_w, damp_t, kgb, cv_air
279  real :: dt, dt2, rdt
280  real :: d2_divg
281  real :: k1k, rdg, dtmp, delt
282  real :: recip_k_split_n_split
283  real :: reg_bc_update_time
284  logical :: last_step, remap_step
285  logical used
286  real :: split_timestep_bc
287 
288  integer :: is, ie, js, je
289  integer :: isd, ied, jsd, jed
290 
291  is = bd%is
292  ie = bd%ie
293  js = bd%js
294  je = bd%je
295  isd = bd%isd
296  ied = bd%ied
297  jsd = bd%jsd
298  jed = bd%jed
299 
300 #ifdef SW_DYNAMICS
301  peln1 = 0.
302 #else
303  peln1 = log(ptop)
304 #endif
305  ptk = ptop ** akap
306  dt = bdt / real(n_split)
307  dt2 = 0.5*dt
308  rdt = 1.0/dt
309  ms = max(1, flagstruct%m_split/2)
310  beta = flagstruct%beta
311  rdg = -rdgas / grav
312  cv_air = cp_air - rdgas
313  recip_k_split_n_split=1./real(flagstruct%k_split*n_split)
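! Time-step hierarchy: bdt is the time step passed in by the caller, dt = bdt/n_split is
! the Lagrangian (acoustic) sub-step used by the D-grid solver, and dt2 = dt/2 is the
! half step used by the C-grid (c_sw) stage and by riem_solver_c.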
314 
315 ! Indexes:
316  iep1 = ie + 1
317  jep1 = je + 1
318 
319  if ( .not.hydrostatic ) then
320 
321  rgrav = 1.0/grav
322  k1k = akap / (1.-akap) ! rg/Cv=0.4
323 
324 !$OMP parallel do default(none) shared(npz,dp_ref,ak,bk)
325  do k=1,npz
326  dp_ref(k) = ak(k+1)-ak(k) + (bk(k+1)-bk(k))*1.e5
327  enddo
328 
329 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,zs,phis,rgrav)
330  do j=jsd,jed
331  do i=isd,ied
332  zs(i,j) = phis(i,j) * rgrav
333  enddo
334  enddo
335  endif
336 
337 !
338  if ( init_step ) then ! Start of the big dynamic time stepping
339 
340  allocate( gz(isd:ied, jsd:jed ,npz+1) )
341  call init_ijk_mem(isd,ied, jsd,jed, npz+1, gz, huge_r)
342  allocate( pkc(isd:ied, jsd:jed ,npz+1) )
343  allocate( ptc(isd:ied, jsd:jed ,npz ) )
344  allocate( crx(is :ie+1, jsd:jed, npz) )
345  allocate( xfx(is :ie+1, jsd:jed, npz) )
346  allocate( cry(isd:ied, js :je+1, npz) )
347  allocate( yfx(isd:ied, js :je+1, npz) )
348  allocate( divgd(isd:ied+1,jsd:jed+1,npz) )
349  allocate( delpc(isd:ied, jsd:jed ,npz ) )
350 ! call init_ijk_mem(isd,ied, jsd,jed, npz, delpc, 0.)
351  allocate( ut(isd:ied, jsd:jed, npz) )
352 ! call init_ijk_mem(isd,ied, jsd,jed, npz, ut, 0.)
353  allocate( vt(isd:ied, jsd:jed, npz) )
354 ! call init_ijk_mem(isd,ied, jsd,jed, npz, vt, 0.)
355 
356  if ( .not. hydrostatic ) then
357  allocate( zh(isd:ied, jsd:jed, npz+1) )
358 ! call init_ijk_mem(isd,ied, jsd,jed, npz+1, zh, huge_r )
359  allocate ( pk3(isd:ied,jsd:jed,npz+1) )
360  call init_ijk_mem(isd,ied, jsd,jed, npz+1, pk3, huge_r )
361  endif
362  if ( beta > near0 ) then
363  allocate( du(isd:ied, jsd:jed+1,npz) )
364  call init_ijk_mem(isd,ied, jsd,jed+1, npz, du, 0.)
365  allocate( dv(isd:ied+1,jsd:jed, npz) )
366  call init_ijk_mem(isd,ied+1, jsd,jed , npz, dv, 0.)
367  endif
368 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,diss_est)
369  do k=1,npz
370  do j=jsd,jed
371  do i=isd,ied
372  diss_est(i,j,k) = 0.
373  enddo
374  enddo
375  enddo
376  endif ! end init_step
377 
378 ! Empty the "flux capacitors"
379  call init_ijk_mem(is, ie+1, js, je, npz, mfx, 0.)
380  call init_ijk_mem(is, ie , js, je+1, npz, mfy, 0.)
381  call init_ijk_mem(is, ie+1, jsd, jed, npz, cx, 0.)
382  call init_ijk_mem(isd, ied, js, je+1, npz, cy, 0.)
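! mfx/mfy (mass fluxes) and cx/cy (Courant numbers) are zeroed here and accumulated by
! d_sw over the n_split sub-steps; the accumulated values are used for tracer transport
! and vertical remapping after dyn_core returns.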
383 
384  if ( flagstruct%d_con > 1.0e-5 ) then
385  allocate( heat_source(isd:ied, jsd:jed, npz) )
386  call init_ijk_mem(isd, ied, jsd, jed, npz, heat_source, 0.)
387  endif
388 
389  if ( flagstruct%convert_ke .or. flagstruct%vtdm4> 1.e-4 ) then
390  n_con = npz
391  else
392  if ( flagstruct%d2_bg_k1 < 1.e-3 ) then
393  n_con = 0
394  else
395  if ( flagstruct%d2_bg_k2 < 1.e-3 ) then
396  n_con = 1
397  else
398  n_con = 2
399  endif
400  endif
401  endif
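! n_con is the number of top (sponge) layers in which the dissipated kinetic energy
! collected in heat_source is converted back to heat at the end of this routine.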
402 
403 !-----------------------------------------------------
404  do it=1,n_split
405 !-----------------------------------------------------
406  n_step = it
407 #ifdef ROT3
408  call start_group_halo_update(i_pack(8), u, v, domain, gridtype=dgrid_ne)
409 #endif
410  if ( flagstruct%breed_vortex_inline .or. it==n_split ) then
411  remap_step = .true.
412  else
413  remap_step = .false.
414  endif
415 
416  if ( flagstruct%fv_debug ) then
417  if(is_master()) write(*,*) 'n_split loop, it=', it
418  if ( .not. flagstruct%hydrostatic ) &
419  call prt_mxm('delz', delz, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain)
420  endif
421 
422  if (gridstruct%nested) then
423  !First split timestep has split_timestep_BC = n_split*k_split
424  ! to do time-extrapolation on BCs.
425  split_timestep_bc = real(n_split*flagstruct%k_split+neststruct%nest_timestep)
426  endif
427 
428  if ( nq > 0 ) then
429  call timing_on('COMM_TOTAL')
430  call timing_on('COMM_TRACER')
431  if ( flagstruct%inline_q ) then
432  call start_group_halo_update(i_pack(10), q, domain)
433  endif
434  call timing_off('COMM_TRACER')
435  call timing_off('COMM_TOTAL')
436  endif
437 
438  if ( .not. hydrostatic ) then
439  call timing_on('COMM_TOTAL')
440  call start_group_halo_update(i_pack(7), w, domain)
441  call timing_off('COMM_TOTAL')
442 
443  if ( it==1 ) then
444  if (gridstruct%bounded_domain) then
445 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,gz,zs,npz)
446  do j=jsd,jed
447  do i=isd,ied
448  gz(i,j,npz+1) = zs(i,j)
449  enddo
450  enddo
451  if (gridstruct%nested) then
452  call gz_bc(gz,neststruct%delz_BC,bd,npx,npy,npz,split_timestep_bc, real(n_split*flagstruct%k_split))
453  endif
454  if (gridstruct%regional) then
455  reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt
456  if (is_master() .and. flagstruct%fv_debug) print*, ' REG_BC_UPDATE_TIME: ', it, current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt
457  call gz_bc(gz, delz_regbc,bd,npx,npy,npz,mod(reg_bc_update_time,bc_time_interval*3600.), bc_time_interval*3600.)
458  endif
459  else
460 !$OMP parallel do default(none) shared(is,ie,js,je,gz,zs,npz)
461  do j=js,je
462  do i=is,ie
463  gz(i,j,npz+1) = zs(i,j)
464  enddo
465  enddo
466  endif
467 
468 !$OMP parallel do default(none) shared(is,ie,js,je,npz,gz,delz)
469  do j=js,je
470  do k=npz,1,-1
471  do i=is,ie
472  gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k)
473  enddo
474  enddo
475  enddo
476  call timing_on('COMM_TOTAL')
477  call start_group_halo_update(i_pack(5), gz, domain)
478  call timing_off('COMM_TOTAL')
479  endif
480 
481  endif
482 
483 
484 #ifdef SW_DYNAMICS
485  if (test_case>1) then
486 #ifdef USE_OLD
487  if (test_case==9) call case9_forcing1(phis, time_total)
488 #endif
489 #endif
490 
491  if ( it==1 ) then
492  call timing_on('COMM_TOTAL')
493  call complete_group_halo_update(i_pack(1), domain)
494  call timing_off('COMM_TOTAL')
495  beta_d = 0.
496  else
497  beta_d = beta
498  endif
499 
500  if ( it==n_split .and. end_step ) then
501  if ( flagstruct%use_old_omega ) then
502  allocate ( pem(is-1:ie+1,npz+1,js-1:je+1) )
503 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pem,delp,ptop)
504  do j=js-1,je+1
505  do i=is-1,ie+1
506  pem(i,1,j) = ptop
507  enddo
508  do k=1,npz
509  do i=is-1,ie+1
510  pem(i,k+1,j) = pem(i,k,j) + delp(i,j,k)
511  enddo
512  enddo
513  enddo
514  endif
515  last_step = .true.
516  else
517  last_step = .false.
518  endif
519 
520  call timing_on('COMM_TOTAL')
521  call complete_group_halo_update(i_pack(8), domain)
522  if( .not. hydrostatic ) &
523  call complete_group_halo_update(i_pack(7), domain)
524  call timing_off('COMM_TOTAL')
525 
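! C-grid (half-step) stage of the two-grid forward-backward scheme: c_sw advances the
! provisional delpc/ptc and the C-grid winds by dt2; after the C-grid pressure-gradient
! update (p_grad_c), these time-centered winds are used by the D-grid stage (d_sw) below
! for the full step dt.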
526  call timing_on('c_sw')
527 !$OMP parallel do default(none) shared(npz,isd,jsd,delpc,delp,ptc,pt,u,v,w,uc,vc,ua,va, &
528 !$OMP omga,ut,vt,divgd,flagstruct,dt2,hydrostatic,bd, &
529 !$OMP gridstruct)
530  do k=1,npz
531  call c_sw(delpc(isd,jsd,k), delp(isd,jsd,k), ptc(isd,jsd,k), &
532  pt(isd,jsd,k), u(isd,jsd,k), v(isd,jsd,k), &
533  w(isd:,jsd:,k), uc(isd,jsd,k), vc(isd,jsd,k), &
534  ua(isd,jsd,k), va(isd,jsd,k), omga(isd,jsd,k), &
535  ut(isd,jsd,k), vt(isd,jsd,k), divgd(isd,jsd,k), &
536  flagstruct%nord, dt2, hydrostatic, .true., bd, &
537  gridstruct, flagstruct)
538  enddo
539  call timing_off('c_sw')
540  if ( flagstruct%nord > 0 ) then
541  call timing_on('COMM_TOTAL')
542  call start_group_halo_update(i_pack(3), divgd, domain, position=corner)
543  call timing_off('COMM_TOTAL')
544  endif
545 
546  if (gridstruct%nested) then
547  call nested_grid_bc_apply_intt(delpc, &
548  0, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
549  neststruct%delp_BC, bctype=neststruct%nestbctype)
550 #ifndef SW_DYNAMICS
551  call nested_grid_bc_apply_intt(ptc, &
552  0, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
553  neststruct%pt_BC, bctype=neststruct%nestbctype )
554 #endif
555  endif
556 
557  if (flagstruct%regional) then
558  call timing_on('COMM_TOTAL')
559  reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(0.5+(it-1))*dt
560  call regional_boundary_update(delpc, 'delp', &
561  isd, ied, jsd, jed, npz, &
562  is, ie, js, je, &
563  isd, ied, jsd, jed, &
564  reg_bc_update_time )
565  call mpp_update_domains(delpc, domain, complete=.true.)
566 #ifndef SW_DYNAMICS
567  call regional_boundary_update(ptc, 'pt', &
568  isd, ied, jsd, jed, npz, &
569  is, ie, js, je, &
570  isd, ied, jsd, jed, &
571  reg_bc_update_time )
572  call mpp_update_domains(ptc, domain, complete=.true.)
573 #endif
574  call timing_off('COMM_TOTAL')
575  endif
576 
577  if ( hydrostatic ) then
578  call geopk(ptop, pe, peln, delpc, pkc, gz, phis, ptc, &
579 #ifdef MULTI_GASES
580  kapad, &
581 #endif
582  q_con, pkz, npz, akap, .true., &
583  gridstruct%bounded_domain, .false., npx, npy, flagstruct%a2b_ord, bd)
584  else
585 #ifndef SW_DYNAMICS
586  if ( it == 1 ) then
587 
588  call timing_on('COMM_TOTAL')
589  call complete_group_halo_update(i_pack(5), domain)
590  call timing_off('COMM_TOTAL')
591 
592 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,zh,gz)
593  do k=1,npz+1
594  do j=jsd,jed
595  do i=isd,ied
596 ! Save edge heights for update_dz_d
597  zh(i,j,k) = gz(i,j,k)
598  enddo
599  enddo
600  enddo
601 
602  else
603 
604  if (gridstruct%bounded_domain) then
605  if (gridstruct%nested) then
606  call gz_bc(gz,neststruct%delz_BC,bd,npx,npy,npz,split_timestep_bc, real(n_split*flagstruct%k_split))
607  endif
608  if (gridstruct%regional) then
609  reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt
610  if (is_master() .and. flagstruct%fv_debug) print*, ' REG_BC_UPDATE_TIME: ', it, current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt
611  call gz_bc(gz, delz_regbc,bd,npx,npy,npz,mod(reg_bc_update_time,bc_time_interval*3600.), bc_time_interval*3600.)
612  endif
613  endif
614 
615 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,zh,gz)
616  do k=1, npz+1
617  do j=jsd,jed
618  do i=isd,ied
619  gz(i,j,k) = zh(i,j,k)
620  enddo
621  enddo
622  enddo
623 
624  endif
625  call timing_on('UPDATE_DZ_C')
626  call update_dz_c(is, ie, js, je, npz, ng, dt2, dp_ref, zs, gridstruct%area, ut, vt, gz, ws3, &
627  npx, npy, gridstruct%sw_corner, gridstruct%se_corner, &
628  gridstruct%ne_corner, gridstruct%nw_corner, bd, gridstruct%grid_type)
629  call timing_off('UPDATE_DZ_C')
630 
631  call timing_on('Riem_Solver')
632  call riem_solver_c( ms, dt2, is, ie, js, je, npz, ng, &
633  akap, cappa, cp, &
634 #ifdef MULTI_GASES
635  kapad, &
636 #endif
637  ptop, phis, omga, ptc, &
638  q_con, delpc, gz, pkc, ws3, flagstruct%p_fac, &
639  flagstruct%a_imp, flagstruct%scale_z )
640  call timing_off('Riem_Solver')
641 
642  if (gridstruct%nested) then
643  call nh_bc(ptop, grav, akap, cp, delpc, neststruct%delz_BC, ptc, phis, &
644 #ifdef MULTI_GASES
645  q, &
646 #endif
647 #ifdef USE_COND
648  q_con, &
649 #ifdef MOIST_CAPPA
650  cappa, &
651 #endif
652 #endif
653  pkc, gz, pk3, &
654  split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
655  npx, npy, npz, gridstruct%bounded_domain, .false., .false., .false., bd)
656  endif
657 
658  if (flagstruct%regional) then
659 
660  reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(0.5+(it-1))*dt
661  call nh_bc(ptop, grav, akap, cp, delpc, delz_regbc, ptc, phis, &
662 #ifdef MULTI_GASES
663  q, &
664 #endif
665 #ifdef USE_COND
666  q_con, &
667 #ifdef MOIST_CAPPA
668  cappa, &
669 #endif
670 #endif
671  pkc, gz, pk3, &
672  mod(reg_bc_update_time,bc_time_interval*3600.), bc_time_interval*3600., &
673  npx, npy, npz, gridstruct%bounded_domain, .false., .false., .false., bd)
674 
675  endif
676 
677 #endif
678 
679  endif ! end hydro check
680 
681  call p_grad_c(dt2, npz, delpc, pkc, gz, uc, vc, bd, gridstruct%rdxc, gridstruct%rdyc, hydrostatic)
682 
683  call timing_on('COMM_TOTAL')
684  call start_group_halo_update(i_pack(9), uc, vc, domain, gridtype=cgrid_ne)
685  call timing_off('COMM_TOTAL')
686 #ifdef SW_DYNAMICS
687 #ifdef USE_OLD
688  if (test_case==9) call case9_forcing2(phis)
689 #endif
690  endif !test_case>1
691 #endif
692 
693  call timing_on('COMM_TOTAL')
694  if (flagstruct%inline_q .and. nq>0) call complete_group_halo_update(i_pack(10), domain)
695  if (flagstruct%nord > 0) call complete_group_halo_update(i_pack(3), domain)
696  call complete_group_halo_update(i_pack(9), domain)
697 
698  call timing_off('COMM_TOTAL')
699  if (gridstruct%nested) then
700  !On a nested grid we have to do SOMETHING with uc and vc in
701  ! the boundary halo, particularly at the corners of the
702  ! domain and of each processor element. We must either
703  ! apply an interpolated BC, or extrapolate into the
704  ! boundary halo
705  ! NOTE:
706  !The update_domains calls for uc and vc need to go BEFORE the BCs to ensure cross-restart
707  !bitwise-consistent solutions when doing the spatial extrapolation; should not make a
708  !difference for interpolated BCs from the coarse grid.
709 
710 
711  call nested_grid_bc_apply_intt(vc, &
712  0, 1, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
713  neststruct%vc_BC, bctype=neststruct%nestbctype )
714  call nested_grid_bc_apply_intt(uc, &
715  1, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
716  neststruct%uc_BC, bctype=neststruct%nestbctype )
717 
718  call nested_grid_bc_apply_intt(divgd, &
719  1, 1, npx, npy, npz, bd, split_timestep_bc, real(n_split*flagstruct%k_split), &
720  neststruct%divg_BC, bctype=neststruct%nestbctype )
721 
722  end if
723 
724  if (flagstruct%regional) then
725 
726  call mpp_update_domains(uc, vc, domain, gridtype=cgrid_ne)
727  reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(0.5+(it-1))*dt
728  call regional_boundary_update(vc, 'vc', &
729  isd, ied, jsd, jed+1, npz, &
730  is, ie, js, je, &
731  isd, ied, jsd, jed, &
732  reg_bc_update_time )
733  call regional_boundary_update(uc, 'uc', &
734  isd, ied+1, jsd, jed, npz, &
735  is, ie, js, je, &
736  isd, ied, jsd, jed, &
737  reg_bc_update_time )
738  call mpp_update_domains(uc, vc, domain, gridtype=cgrid_ne)
739 
740 !!! Currently divgd is always 0.0 in the regional domain boundary area.
741 ! DH* 2021-02-03: no need to execute this code, because divgd is always
742 ! 0.0 in the regional domain boundary area. This code breaks b4b reproducibility
743 ! when changing the MPI decomposition for regional runs.
744 ! reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt
745 ! call regional_boundary_update(divgd, 'divgd', &
746 ! isd, ied+1, jsd, jed+1, npz, &
747 ! is, ie, js, je, &
748 ! isd, ied, jsd, jed, &
749 ! reg_bc_update_time )
750 ! *DH
751  endif
752 
753  if ( flagstruct%inline_q ) then
754  if ( gridstruct%nested ) then
755  do iq=1,nq
756  call nested_grid_bc_apply_intt(q(isd:ied,jsd:jed,:,iq), &
757  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
758  neststruct%q_BC(iq), bctype=neststruct%nestbctype )
759  end do
760  endif
761  if (flagstruct%regional) then
762  reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt
763  do iq=1,nq
764  call regional_boundary_update(q(:,:,:,iq), 'q', &
765  isd, ied, jsd, jed, npz, &
766  is, ie, js, je, &
767  isd, ied, jsd, jed, &
768  reg_bc_update_time )
769  enddo
770  endif
771 
772  endif
773 
774 
775  call timing_on('d_sw')
776 !$OMP parallel do default(none) shared(npz,flagstruct,nord_v,pfull,damp_vt,hydrostatic,last_step, &
777 !$OMP is,ie,js,je,isd,ied,jsd,jed,omga,delp,gridstruct,npx,npy, &
778 !$OMP ng,zh,vt,ptc,pt,u,v,w,uc,vc,ua,va,divgd,mfx,mfy,cx,cy, &
779 !$OMP crx,cry,xfx,yfx,q_con,zvir,sphum,nq,q,dt,bd,rdt,iep1,jep1, &
780 !$OMP heat_source,diss_est,ptop,first_call) &
781 !$OMP private(nord_k, nord_w, nord_t, damp_w, damp_t, d2_divg, &
782 !$OMP d_con_k,kgb, hord_m, hord_v, hord_t, hord_p, wk, heat_s,diss_e, z_rat)
783  do k=1,npz
784  hord_m = flagstruct%hord_mt
785  hord_t = flagstruct%hord_tm
786  hord_v = flagstruct%hord_vt
787  hord_p = flagstruct%hord_dp
788  nord_k = flagstruct%nord
789 
790 ! if ( k==npz ) then
791  kgb = flagstruct%ke_bg
792 ! else
793 ! kgb = 0.
794 ! endif
795 
796  nord_v(k) = min(2, flagstruct%nord)
797 ! d2_divg = min(0.20, flagstruct%d2_bg*(1.-3.*tanh(0.1*log(pfull(k)/pfull(npz)))))
798  d2_divg = min(0.20, flagstruct%d2_bg)
799 
800  if ( flagstruct%do_vort_damp ) then
801  damp_vt(k) = flagstruct%vtdm4 ! for delp, delz, and vorticity
802  else
803  damp_vt(k) = 0.
804  endif
805 
806  nord_w = nord_v(k)
807  nord_t = nord_v(k)
808  damp_w = damp_vt(k)
809  damp_t = damp_vt(k)
810  d_con_k = flagstruct%d_con
811 
812  if ( npz==1 .or. flagstruct%n_sponge<0 ) then
813  d2_divg = flagstruct%d2_bg
814  else
815 ! Sponge layers with del-2 damping on divergence, vorticity, w, z, and air mass (delp).
816 ! no special damping of potential temperature in sponge layers
817 ! enhanced del-2 divergence damping has same vertical structure as Rayleigh
818 ! damping if d2_bg_k2<=0.
819  if (flagstruct%d2_bg_k2 > 0) then ! old version, only applied at top two or three levels
820 
821  if ( k==1 ) then
822 ! Divergence damping:
823  nord_k=0; d2_divg = max(0.01, flagstruct%d2_bg, flagstruct%d2_bg_k1)
824 ! Vertical velocity:
825  nord_w=0; damp_w = d2_divg
826  if ( flagstruct%do_vort_damp ) then
827 ! damping on delp and vorticity:
828  nord_v(k)=0;
829 #ifndef HIWPP
830  damp_vt(k) = 0.5*d2_divg
831 #endif
832  endif
833  d_con_k = 0.
834  elseif ( k==2 .and. flagstruct%d2_bg_k2>0.01 ) then
835  nord_k=0; d2_divg = max(flagstruct%d2_bg, flagstruct%d2_bg_k2)
836  nord_w=0; damp_w = d2_divg
837  if ( flagstruct%do_vort_damp ) then
838  nord_v(k)=0;
839 #ifndef HIWPP
840  damp_vt(k) = 0.5*d2_divg
841 #endif
842  endif
843  d_con_k = 0.
844  elseif ( k==3 .and. flagstruct%d2_bg_k2>0.05 ) then
845  nord_k=0; d2_divg = max(flagstruct%d2_bg, 0.2*flagstruct%d2_bg_k2)
846  nord_w=0; damp_w = d2_divg
847  d_con_k = 0.
848  endif
849  else ! new version, uses d2_bg_k1 and sponge layer vertical structure
850  if ( pfull(k) < flagstruct%rf_cutoff ) then
851  nord_k=0; nord_w=0
852  d2_divg = max(flagstruct%d2_bg, flagstruct%d2_bg_k1* &
853  sin(0.5*pi*log(flagstruct%rf_cutoff/pfull(k))/log(flagstruct%rf_cutoff/ptop))**2)
854  if (first_call .and. is_master() .and. last_step) write(6,*) k, 0.01*pfull(k), d2_divg
855  damp_w = d2_divg
856  if ( flagstruct%do_vort_damp ) then
857 ! damping on delp and vorticity
858  nord_v(k)=0
859  damp_vt(k) = 0.5*d2_divg
860  endif
861  endif
862  endif
863 
864  endif
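! With the "new version" above, the del-2 damping coefficient ramps smoothly (a sin**2
! profile in log-p) from d2_bg at p = rf_cutoff to roughly d2_bg_k1 at the model top,
! matching the vertical structure of the Rayleigh damping.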
865 
866  if( hydrostatic .and. (.not.flagstruct%use_old_omega) .and. last_step ) then
867 ! Average horizontal "convergence" to cell center
868  do j=js,je
869  do i=is,ie
870  omga(i,j,k) = delp(i,j,k)
871  enddo
872  enddo
873  endif
874 
875 !--- external mode divergence damping ---
876  if ( flagstruct%d_ext > 0. ) &
877  call a2b_ord2(delp(isd,jsd,k), wk, gridstruct, npx, npy, is, &
878  ie, js, je, ng, .false.)
879 
880  if ( .not.hydrostatic .and. flagstruct%do_f3d ) then
881 ! Correction factor for 3D Coriolis force
882  do j=jsd,jed
883  do i=isd,ied
884  z_rat(i,j) = 1. + (zh(i,j,k)+zh(i,j,k+1))/radius
885  enddo
886  enddo
887  endif
888 
889  call d_sw(vt(isd,jsd,k), delp(isd,jsd,k), ptc(isd,jsd,k), pt(isd,jsd,k), &
890  u(isd,jsd,k), v(isd,jsd,k), w(isd:,jsd:,k), uc(isd,jsd,k), &
891  vc(isd,jsd,k), ua(isd,jsd,k), va(isd,jsd,k), divgd(isd,jsd,k), &
892  mfx(is, js, k), mfy(is, js, k), cx(is, jsd,k), cy(isd,js, k), &
893  crx(is, jsd,k), cry(isd,js, k), xfx(is, jsd,k), yfx(isd,js, k), &
894 #ifdef USE_COND
895  q_con(isd:,jsd:,k), z_rat(isd,jsd), &
896 #else
897  q_con(isd:,jsd:,1), z_rat(isd,jsd), &
898 #endif
899  kgb, heat_s, diss_e,zvir, sphum, nq, q, k, npz, flagstruct%inline_q, dt, &
900  flagstruct%hord_tr, hord_m, hord_v, hord_t, hord_p, &
901  nord_k, nord_v(k), nord_w, nord_t, flagstruct%dddmp, d2_divg, flagstruct%d4_bg, &
902  damp_vt(k), damp_w, damp_t, d_con_k, hydrostatic, gridstruct, flagstruct, bd)
903 
904  if( hydrostatic .and. (.not.flagstruct%use_old_omega) .and. last_step ) then
905 ! Average horizontal "convergence" to cell center
906  do j=js,je
907  do i=is,ie
908  omga(i,j,k) = omga(i,j,k)*(xfx(i,j,k)-xfx(i+1,j,k)+yfx(i,j,k)-yfx(i,j+1,k))*gridstruct%rarea(i,j)*rdt
909  enddo
910  enddo
911  endif
912 
913  if ( flagstruct%d_ext > 0. ) then
914  do j=js,jep1
915  do i=is,iep1
916  ptc(i,j,k) = wk(i,j) ! delp at cell corners
917  enddo
918  enddo
919  endif
920  if ( flagstruct%d_con > 1.0e-5 .OR. flagstruct%do_skeb ) then
921 ! if ( flagstruct%d_con > 1.0E-5 ) then
922 ! Average horizontal "convergence" to cell center
923  do j=js,je
924  do i=is,ie
925  heat_source(i,j,k) = heat_source(i,j,k) + heat_s(i,j)
926  diss_est(i,j,k) = diss_est(i,j,k) + diss_e(i,j)
927  enddo
928  enddo
929  endif
930  enddo ! end openMP k-loop
931 
932  if (flagstruct%regional) then
933  call mpp_update_domains(uc, vc, domain, gridtype=cgrid_ne)
934  call mpp_update_domains(u , v , domain, gridtype=dgrid_ne)
935  endif
936  call timing_off('d_sw')
937 
938  if( flagstruct%fill_dp ) call mix_dp(hydrostatic, w, delp, pt, npz, ak, bk, .false., flagstruct%fv_debug, bd)
939 
940  call timing_on('COMM_TOTAL')
941  call start_group_halo_update(i_pack(1), delp, domain, complete=.false.)
942  call start_group_halo_update(i_pack(1), pt, domain, complete=.true.)
943 #ifdef USE_COND
944  call start_group_halo_update(i_pack(11), q_con, domain)
945 #endif
946  call timing_off('COMM_TOTAL')
947 
948  if ( flagstruct%d_ext > 0. ) then
949  d2_divg = flagstruct%d_ext * gridstruct%da_min_c
950 !$OMP parallel do default(none) shared(is,iep1,js,jep1,npz,wk,ptc,divg2,vt,d2_divg)
951  do j=js,jep1
952  do i=is,iep1
953  wk(i,j) = ptc(i,j,1)
954  divg2(i,j) = wk(i,j)*vt(i,j,1)
955  enddo
956  do k=2,npz
957  do i=is,iep1
958  wk(i,j) = wk(i,j) + ptc(i,j,k)
959  divg2(i,j) = divg2(i,j) + ptc(i,j,k)*vt(i,j,k)
960  enddo
961  enddo
962  do i=is,iep1
963  divg2(i,j) = d2_divg*divg2(i,j)/wk(i,j)
964  enddo
965  enddo
966  else
967  divg2(:,:) = 0.
968  endif
969 
970  call timing_on('COMM_TOTAL')
971  call complete_group_halo_update(i_pack(1), domain)
972 #ifdef USE_COND
973  call complete_group_halo_update(i_pack(11), domain)
974 #endif
975  call timing_off('COMM_TOTAL')
976  if ( flagstruct%fv_debug ) then
977  if ( .not. flagstruct%hydrostatic ) &
978  call prt_mxm('delz', delz, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain)
979  endif
980 
981  !Want to move this block into the hydro/nonhydro branch above and merge the two if structures
982  if (gridstruct%nested) then
983  call nested_grid_bc_apply_intt(delp, &
984  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
985  neststruct%delp_BC, bctype=neststruct%nestbctype )
986 
987 #ifndef SW_DYNAMICS
988 
989  call nested_grid_bc_apply_intt(pt, &
990  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
991  neststruct%pt_BC, bctype=neststruct%nestbctype )
992 
993 #ifdef USE_COND
994  call nested_grid_bc_apply_intt(q_con, &
995  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
996  neststruct%q_con_BC, bctype=neststruct%nestbctype )
997 #endif
998 
999 #endif
1000 
1001  end if
1002 
1003  if (flagstruct%regional) then
1004  call timing_on('COMM_TOTAL')
1005  reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt
1006  call regional_boundary_update(delp, 'delp', &
1007  isd, ied, jsd, jed, npz, &
1008  is, ie, js, je, &
1009  isd, ied, jsd, jed, &
1010  reg_bc_update_time )
1011  call mpp_update_domains(delp, domain, complete=.true.)
1012 #ifndef SW_DYNAMICS
1013  call regional_boundary_update(pt, 'pt', &
1014  isd, ied, jsd, jed, npz, &
1015  is, ie, js, je, &
1016  isd, ied, jsd, jed, &
1017  reg_bc_update_time )
1018  call mpp_update_domains(pt, domain, complete=.true.)
1019 #ifdef USE_COND
1020  call regional_boundary_update(q_con, 'q_con', &
1021  isd, ied, jsd, jed, npz, &
1022  is, ie, js, je, &
1023  isd, ied, jsd, jed, &
1024  reg_bc_update_time )
1025  call mpp_update_domains(q_con, domain, complete=.true.)
1026 #endif
1027 #endif
1028  call timing_off('COMM_TOTAL')
1029  endif
1030 
1031  if ( hydrostatic ) then
1032  call geopk(ptop, pe, peln, delp, pkc, gz, phis, pt, &
1033 #ifdef MULTI_GASES
1034  kapad, &
1035 #endif
1036  q_con, pkz, npz, akap, .false., &
1037  gridstruct%bounded_domain, .true., npx, npy, flagstruct%a2b_ord, bd)
1038  else
1039 #ifndef SW_DYNAMICS
1040  call timing_on('UPDATE_DZ')
1041  call update_dz_d(nord_v, damp_vt, flagstruct%hord_tm, is, ie, js, je, npz, ng, npx, npy, gridstruct%area, &
1042  gridstruct%rarea, dp_ref, zs, zh, crx, cry, xfx, yfx, ws, rdt, gridstruct, bd, flagstruct%lim_fac)
1043  call timing_off('UPDATE_DZ')
1044  if ( flagstruct%fv_debug ) then
1045  if ( .not. flagstruct%hydrostatic ) &
1046  call prt_mxm('delz updated', delz, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain)
1047  endif
1048 
1049  if (idiag%id_ws>0 .and. last_step) then
1050 ! call prt_maxmin('WS', ws, is, ie, js, je, 0, 1, 1., master)
1051  used=send_data(idiag%id_ws, ws, fv_time)
1052  endif
1053  call timing_on('Riem_Solver')
1054 
1055  call riem_solver3(flagstruct%m_split, dt, is, ie, js, je, npz, ng, &
1056  isd, ied, jsd, jed, &
1057  akap, cappa, cp, &
1058 #ifdef MULTI_GASES
1059  kapad, &
1060 #endif
1061  ptop, zs, q_con, w, delz, pt, delp, zh, &
1062  pe, pkc, pk3, pk, peln, ws, &
1063  flagstruct%scale_z, flagstruct%p_fac, flagstruct%a_imp, &
1064  flagstruct%use_logp, remap_step, beta<-0.1)
1065  call timing_off('Riem_Solver')
1066 
1067  call timing_on('COMM_TOTAL')
1068  if ( gridstruct%square_domain ) then
1069  call start_group_halo_update(i_pack(4), zh , domain)
1070  call start_group_halo_update(i_pack(5), pkc, domain, whalo=2, ehalo=2, shalo=2, nhalo=2)
1071  else
1072  call start_group_halo_update(i_pack(4), zh , domain, complete=.false.)
1073  call start_group_halo_update(i_pack(4), pkc, domain, complete=.true.)
1074  endif
1075  call timing_off('COMM_TOTAL')
1076  if ( remap_step ) &
1077  call pe_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pe, delp)
1078 
1079  if ( flagstruct%use_logp ) then
1080  call pln_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pk3, delp)
1081  else
1082  call pk3_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, akap, pk3, delp)
1083  endif
1084 
1085  if (gridstruct%nested) then
1086  call nh_bc(ptop, grav, akap, cp, delp, neststruct%delz_BC, pt, phis, &
1087 #ifdef MULTI_GASES
1088  q, &
1089 #endif
1090 #ifdef USE_COND
1091  q_con, &
1092 #ifdef MOIST_CAPPA
1093  cappa, &
1094 #endif
1095 #endif
1096  pkc, gz, pk3, &
1097  split_timestep_bc+1., real(n_split*flagstruct%k_split), &
1098  npx, npy, npz, gridstruct%bounded_domain, .true., .true., .true., bd)
1099  endif
1100 
1101  if (flagstruct%regional) then
1102  reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+it*dt
1103  call nh_bc(ptop, grav, akap, cp, delp, delz_regbc, pt, phis, &
1104 #ifdef MULTI_GASES
1105  q, &
1106 #endif
1107 #ifdef USE_COND
1108  q_con, &
1109 #ifdef MOIST_CAPPA
1110  cappa, &
1111 #endif
1112 #endif
1113  pkc, gz, pk3, &
1114  mod(reg_bc_update_time,bc_time_interval*3600.), bc_time_interval*3600., &
1115  npx, npy, npz, gridstruct%bounded_domain, .true., .true., .true., bd)
1116 
1117  endif
1118 
1119  call timing_on('COMM_TOTAL')
1120  call complete_group_halo_update(i_pack(4), domain)
1121  call timing_off('COMM_TOTAL')
1122 !$OMP parallel do default(none) shared(is,ie,js,je,npz,gz,zh,grav)
1123  do k=1,npz+1
1124  do j=js-2,je+2
1125  do i=is-2,ie+2
1126  gz(i,j,k) = zh(i,j,k)*grav
1127  enddo
1128  enddo
1129  enddo
1130  if ( gridstruct%square_domain ) then
1131  call timing_on('COMM_TOTAL')
1132  call complete_group_halo_update(i_pack(5), domain)
1133  call timing_off('COMM_TOTAL')
1134  endif
1135 #endif
1136  endif ! end hydro check
1137 
1138 #ifdef SW_DYNAMICS
1139  if (test_case > 1) then
1140 #else
1141  if ( remap_step .and. hydrostatic ) then
1142 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pk,pkc)
1143  do k=1,npz+1
1144  do j=js,je
1145  do i=is,ie
1146  pk(i,j,k) = pkc(i,j,k)
1147  enddo
1148  enddo
1149  enddo
1150  endif
1151 #endif
1152 
1153 !----------------------------
1154 ! Compute pressure gradient:
1155 !----------------------------
1156  call timing_on('PG_D')
1157  if ( hydrostatic ) then
1158  if ( beta > 0. ) then
1159  call grad1_p_update(divg2, u, v, pkc, gz, dt, ng, gridstruct, bd, npx, npy, npz, ptop, beta_d, flagstruct%a2b_ord)
1160  else
1161  call one_grad_p(u, v, pkc, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, ptop, hydrostatic, flagstruct%a2b_ord, flagstruct%d_ext)
1162  endif
1163 
1164  else
1165 
1166 
1167  if ( beta > 0. ) then
1168  call split_p_grad( u, v, pkc, gz, delp, pk3, beta_d, dt, ng, gridstruct, bd, npx, npy, npz, flagstruct%use_logp)
1169  elseif ( beta < -0.1 ) then
1170  call one_grad_p(u, v, pkc, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, ptop, hydrostatic, flagstruct%a2b_ord, flagstruct%d_ext)
1171  else
1172  call nh_p_grad(u, v, pkc, gz, delp, pk3, dt, ng, gridstruct, bd, npx, npy, npz, flagstruct%use_logp)
1173  endif
1174 
1175 #ifdef ROT3
1176  if ( flagstruct%do_f3d ) then
1177 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ua,gridstruct,w,va,isd,ied,jsd,jed)
1178  do k=1,npz
1179  do j=js,je
1180  do i=is,ie
1181  ua(i,j,k) = -gridstruct%w00(i,j)*w(i,j,k)
1182  enddo
1183  enddo
1184  do j=jsd,jed
1185  do i=isd,ied
1186  va(i,j,k) = 0.
1187  enddo
1188  enddo
1189  enddo
1190  call mpp_update_domains(ua, domain, complete=.true.)
1191  call update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, ua, va, u, v, gridstruct, npx, npy, npz, domain)
1192  endif
1193 #endif
1194  endif
1195  call timing_off('PG_D')
1196 
1197 ! *** Inline Rayleigh friction here?
1198  if( flagstruct%RF_fast .and. flagstruct%tau > 0. ) &
1199  call ray_fast(abs(dt), npx, npy, npz, pfull, flagstruct%tau, u, v, w, &
1200  ks, dp_ref, ptop, hydrostatic, flagstruct%rf_cutoff, bd)
1201 
1202 !-------------------------------------------------------------------------------------------------------
1203  if ( flagstruct%breed_vortex_inline ) then
1204  if ( .not. hydrostatic ) then
1205 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pkz,cappa,rdg,delp,delz,pt,k1k)
1206  do k=1,npz
1207  do j=js,je
1208  do i=is,ie
1209 ! Note: pt at this stage is Theta_m
1210 #ifdef MOIST_CAPPA
1211  pkz(i,j,k) = exp(cappa(i,j,k)/(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1212 #else
1213 #ifdef MULTI_GASES
1214  pkz(i,j,k) = exp( k1k*virqd(q(i,j,k,:))/vicvqd(q(i,j,k,:))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1215 #else
1216  pkz(i,j,k) = exp( k1k*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1217 #endif
1218 #endif
1219  enddo
1220  enddo
1221  enddo
1222  endif
1223 #if defined (ADA_NUDGE)
1224  call breed_slp_inline_ada( it, dt, npz, ak, bk, phis, pe, pk, peln, pkz, &
1225  delp, u, v, pt, q, flagstruct%nwat, zvir, gridstruct, ks, domain, bd )
1226 #else
1227  call breed_slp_inline( it, dt, npz, ak, bk, phis, pe, pk, peln, pkz, delp, u, v, pt, q, &
1228  flagstruct%nwat, zvir, gridstruct, ks, domain, bd, hydrostatic )
1229 #endif
1230  endif
1231 !-------------------------------------------------------------------------------------------------------
1232 
1233  call timing_on('COMM_TOTAL')
1234  if( it==n_split .and. gridstruct%grid_type<4 .and. .not. gridstruct%bounded_domain) then
1235 ! Prevent accumulation of rounding errors at overlapped domain edges:
1236  call mpp_get_boundary(u, v, domain, ebuffery=ebuffer, &
1237  nbufferx=nbuffer, gridtype=dgrid_ne )
1238 !$OMP parallel do default(none) shared(is,ie,js,je,npz,u,nbuffer,v,ebuffer)
1239  do k=1,npz
1240  do i=is,ie
1241  u(i,je+1,k) = nbuffer(i-is+1,k)
1242  enddo
1243  do j=js,je
1244  v(ie+1,j,k) = ebuffer(j-js+1,k)
1245  enddo
1246  enddo
1247 
1248  endif
1249 
1250 #ifndef ROT3
1251  if (.not. flagstruct%regional .and. it/=n_split) &
1252  call start_group_halo_update(i_pack(8), u, v, domain, gridtype=dgrid_ne)
1253 #endif
1254  call timing_off('COMM_TOTAL')
1255 
1256 #ifdef SW_DYNAMICS
1257  endif
1258 #endif
1259  if ( gridstruct%nested ) then
1260  neststruct%nest_timestep = neststruct%nest_timestep + 1
1261  endif
1262 
1263 #ifdef SW_DYNAMICS
1264 #else
1265  if ( hydrostatic .and. last_step ) then
1266  if ( flagstruct%use_old_omega ) then
1267 !$OMP parallel do default(none) shared(is,ie,js,je,npz,omga,pe,pem,rdt)
1268  do k=1,npz
1269  do j=js,je
1270  do i=is,ie
1271  omga(i,j,k) = (pe(i,k+1,j) - pem(i,k+1,j)) * rdt
1272  enddo
1273  enddo
1274  enddo
1275 !------------------------------
1276 ! Compute the "advective term"
1277 !------------------------------
1278  call adv_pe(ua, va, pem, omga, gridstruct, bd, npx, npy, npz, ng)
1279  else
1280 !$OMP parallel do default(none) shared(is,ie,js,je,npz,omga) private(om2d)
1281  do j=js,je
1282  do k=1,npz
1283  do i=is,ie
1284  om2d(i,k) = omga(i,j,k)
1285  enddo
1286  enddo
1287  do k=2,npz
1288  do i=is,ie
1289  om2d(i,k) = om2d(i,k-1) + omga(i,j,k)
1290  enddo
1291  enddo
1292  do k=2,npz
1293  do i=is,ie
1294  omga(i,j,k) = om2d(i,k)
1295  enddo
1296  enddo
1297  enddo
1298  endif
1299  if (idiag%id_ws>0 .and. hydrostatic) then
1300 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ws,delz,delp,omga)
1301  do j=js,je
1302  do i=is,ie
1303  ws(i,j) = delz(i,j,npz)/delp(i,j,npz) * omga(i,j,npz)
1304  enddo
1305  enddo
1306  used=send_data(idiag%id_ws, ws, fv_time)
1307  endif
1308  endif
1309 #endif
1310 
1311  if (gridstruct%nested) then
1312 
1313 
1314 
1315 #ifndef SW_DYNAMICS
1316  if (.not. hydrostatic) then
1317  call nested_grid_bc_apply_intt(w, &
1318  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
1319  neststruct%w_BC, bctype=neststruct%nestbctype )
1320  end if
1321 #endif
1322  call nested_grid_bc_apply_intt(u, &
1323  0, 1, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
1324  neststruct%u_BC, bctype=neststruct%nestbctype )
1325  call nested_grid_bc_apply_intt(v, &
1326  1, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
1327  neststruct%v_BC, bctype=neststruct%nestbctype )
1328 
1329  end if
1330 
1331  if (flagstruct%regional) then
1332  call timing_on('COMM_TOTAL')
1333 #ifndef SW_DYNAMICS
1334  if (.not. hydrostatic) then
1335  reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+it*dt
1336  call regional_boundary_update(w, 'w', &
1337  isd, ied, jsd, jed, ubound(w,3), &
1338  is, ie, js, je, &
1339  isd, ied, jsd, jed, &
1340  reg_bc_update_time )
1341  endif
1342 #endif
1343 
1344  call regional_boundary_update(u, 'u', &
1345  isd, ied, jsd, jed+1, npz, &
1346  is, ie, js, je, &
1347  isd, ied, jsd, jed, &
1348  reg_bc_update_time )
1349  call regional_boundary_update(v, 'v', &
1350  isd, ied+1, jsd, jed, npz, &
1351  is, ie, js, je, &
1352  isd, ied, jsd, jed, &
1353  reg_bc_update_time )
1354  call mpp_update_domains(u, v, domain, gridtype=dgrid_ne)
1355 #ifndef ROT3
1356  if (it/=n_split) &
1357  call start_group_halo_update(i_pack(8), u, v, domain, gridtype=dgrid_ne)
1358 #endif
1359  call timing_off('COMM_TOTAL')
1360  end if
1361 
1362 !-----------------------------------------------------
1363  enddo ! time split loop
1364  first_call=.false.
1365 !-----------------------------------------------------
1366  if ( nq > 0 .and. .not. flagstruct%inline_q ) then
1367  call timing_on('COMM_TOTAL')
1368  call timing_on('COMM_TRACER')
1369  call start_group_halo_update(i_pack(10), q, domain)
1370  call timing_off('COMM_TRACER')
1371  call timing_off('COMM_TOTAL')
1372  endif
1373 
1374  if ( flagstruct%fv_debug ) then
1375  if(is_master()) write(*,*) 'End of n_split loop'
1376  endif
1377 
1378 
1379  if ( n_con/=0 .and. flagstruct%d_con > 1.e-5 ) then
1380  nf_ke = min(3, flagstruct%nord+1)
1381  call del2_cubed(heat_source, cnst_0p20*gridstruct%da_min, gridstruct, domain, npx, npy, npz, nf_ke, bd)
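! heat_source holds the fraction d_con of the kinetic energy removed by the damping in
! d_sw; it is smoothed by del2_cubed (nf_ke iterations) and applied below as a
! temperature increment, limited by delt_max, over the top n_con layers.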
1382 
1383 ! Note: pt here is cp_air*(Virtual_Temperature/pkz), cp_air is constant
1384  if ( hydrostatic ) then
1385 !
1386 ! del(Cp_air*Tm) = - del(KE)
1387 !
1388 !$OMP parallel do default(none) shared(flagstruct,is,ie,js,je,n_con,pt,heat_source,delp,pkz,bdt) &
1389 !$OMP private(dtmp)
1390  do j=js,je
1391  do k=1,n_con ! n_con is usually less than 3;
1392  if ( k<3 ) then
1393  do i=is,ie
1394  pt(i,j,k) = pt(i,j,k) + heat_source(i,j,k)/(cp_air*delp(i,j,k)*pkz(i,j,k))
1395  enddo
1396  else
1397  do i=is,ie
1398  dtmp = heat_source(i,j,k) / (cp_air*delp(i,j,k))
1399  pt(i,j,k) = pt(i,j,k) + sign(min(abs(bdt)*flagstruct%delt_max,abs(dtmp)), dtmp)/pkz(i,j,k)
1400  enddo
1401  endif
1402  enddo
1403  enddo
1404  else
1405 !$OMP parallel do default(none) shared(flagstruct,is,ie,js,je,n_con,pkz,cappa,rdg,delp,delz,pt, &
1406 !$OMP heat_source,k1k,cv_air,bdt) &
1407 !$OMP private(dtmp, delt)
1408  do k=1,n_con
1409  delt = abs(bdt*flagstruct%delt_max)
1410 ! Sponge layers:
1411  if ( k == 1 ) delt = 0.1*delt
1412  if ( k == 2 ) delt = 0.5*delt
1413  do j=js,je
1414  do i=is,ie
1415 #ifdef MOIST_CAPPA
1416  pkz(i,j,k) = exp( cappa(i,j,k)/(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1417 #else
1418 #ifdef MULTI_GASES
1419  pkz(i,j,k) = exp( k1k*virqd(q(i,j,k,:))/vicvqd(q(i,j,k,:))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1420 #else
1421  pkz(i,j,k) = exp( k1k*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1422 #endif
1423 #endif
1424  dtmp = heat_source(i,j,k) / (cv_air*delp(i,j,k))
1425  pt(i,j,k) = pt(i,j,k) + sign(min(delt, abs(dtmp)),dtmp) / pkz(i,j,k)
1426  enddo
1427  enddo
1428  enddo
1429  endif
1430 
1431  endif
1432  if (allocated(heat_source)) deallocate( heat_source ) !If ncon == 0 but d_con > 1.e-5, this would not be deallocated in earlier versions of the code
1433 
1434  if ( end_step ) then
1435  deallocate( gz )
1436  deallocate( ptc )
1437  deallocate( crx )
1438  deallocate( xfx )
1439  deallocate( cry )
1440  deallocate( yfx )
1441  deallocate( divgd )
1442  deallocate( pkc )
1443  deallocate( delpc )
1444 
1445  if( allocated(ut)) deallocate( ut )
1446  if( allocated(vt)) deallocate( vt )
1447  if ( allocated (du) ) deallocate( du )
1448  if ( allocated (dv) ) deallocate( dv )
1449  if ( .not. hydrostatic ) then
1450  deallocate( zh )
1451  if( allocated(pk3) ) deallocate ( pk3 )
1452  endif
1453 
1454  endif
1455  if( allocated(pem) ) deallocate ( pem )
1456 
1457 end subroutine dyn_core
1458 
1459 subroutine pk3_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, akap, pk3, delp)
1460 integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed, npz
1461 real, intent(in):: ptop, akap
1462 real, intent(in ), dimension(isd:ied,jsd:jed,npz):: delp
1463 real, intent(inout), dimension(isd:ied,jsd:jed,npz+1):: pk3
1464 ! Local:
1465 real:: pei(isd:ied)
1466 real:: pej(jsd:jed)
1467 integer:: i,j,k
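! Fill a 2-point-wide halo of pk3 (p**kappa at layer interfaces) by integrating delp
! hydrostatically downward from ptop, avoiding a full halo update of pk3.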
1468 
1469 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,npz,ptop,delp,pk3,akap) &
1470 !$OMP private(pei)
1471  do j=js,je
1472  pei(is-2) = ptop
1473  pei(is-1) = ptop
1474  do k=1,npz
1475  pei(is-2) = pei(is-2) + delp(is-2,j,k)
1476  pei(is-1) = pei(is-1) + delp(is-1,j,k)
1477  pk3(is-2,j,k+1) = exp(akap*log(pei(is-2)))
1478  pk3(is-1,j,k+1) = exp(akap*log(pei(is-1)))
1479  enddo
1480  pei(ie+1) = ptop
1481  pei(ie+2) = ptop
1482  do k=1,npz
1483  pei(ie+1) = pei(ie+1) + delp(ie+1,j,k)
1484  pei(ie+2) = pei(ie+2) + delp(ie+2,j,k)
1485  pk3(ie+1,j,k+1) = exp(akap*log(pei(ie+1)))
1486  pk3(ie+2,j,k+1) = exp(akap*log(pei(ie+2)))
1487  enddo
1488  enddo
1489 
1490 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ptop,delp,pk3,akap) &
1491 !$OMP private(pej)
1492  do i=is-2,ie+2
1493  pej(js-2) = ptop
1494  pej(js-1) = ptop
1495  do k=1,npz
1496  pej(js-2) = pej(js-2) + delp(i,js-2,k)
1497  pej(js-1) = pej(js-1) + delp(i,js-1,k)
1498  pk3(i,js-2,k+1) = exp(akap*log(pej(js-2)))
1499  pk3(i,js-1,k+1) = exp(akap*log(pej(js-1)))
1500  enddo
1501  pej(je+1) = ptop
1502  pej(je+2) = ptop
1503  do k=1,npz
1504  pej(je+1) = pej(je+1) + delp(i,je+1,k)
1505  pej(je+2) = pej(je+2) + delp(i,je+2,k)
1506  pk3(i,je+1,k+1) = exp(akap*log(pej(je+1)))
1507  pk3(i,je+2,k+1) = exp(akap*log(pej(je+2)))
1508  enddo
1509  enddo
1510 
1511 end subroutine pk3_halo
1512 
1513 subroutine pln_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pk3, delp)
1514 integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed, npz
1515 real, intent(in):: ptop
1516 real, intent(in ), dimension(isd:ied,jsd:jed,npz):: delp
1517 real, intent(inout), dimension(isd:ied,jsd:jed,npz+1):: pk3
1518 ! Local:
1519 real:: pet
1520 integer:: i,j,k
1521 
1522 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,npz,ptop,delp,pk3) &
1523 !$OMP private(pet)
1524  do j=js,je
1525  do i=is-2,is-1
1526  pet = ptop
1527  do k=1,npz
1528  pet = pet + delp(i,j,k)
1529  pk3(i,j,k+1) = log(pet)
1530  enddo
1531  enddo
1532  do i=ie+1,ie+2
1533  pet = ptop
1534  do k=1,npz
1535  pet = pet + delp(i,j,k)
1536  pk3(i,j,k+1) = log(pet)
1537  enddo
1538  enddo
1539  enddo
1540 
1541 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ptop,delp,pk3) &
1542 !$OMP private(pet)
1543  do i=is-2,ie+2
1544  do j=js-2,js-1
1545  pet = ptop
1546  do k=1,npz
1547  pet = pet + delp(i,j,k)
1548  pk3(i,j,k+1) = log(pet)
1549  enddo
1550  enddo
1551  do j=je+1,je+2
1552  pet = ptop
1553  do k=1,npz
1554  pet = pet + delp(i,j,k)
1555  pk3(i,j,k+1) = log(pet)
1556  enddo
1557  enddo
1558  enddo
1559 
1560 end subroutine pln_halo
1561 
1562 subroutine pe_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pe, delp)
1563 integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed, npz
1564 real, intent(in):: ptop
1565 real, intent(in ), dimension(isd:ied,jsd:jed,npz):: delp
1566 real, intent(inout), dimension(is-1:ie+1,npz+1,js-1:je+1):: pe
1567 ! Local:
1568 integer:: i,j,k
1569 
1570 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,ptop)
1571  do j=js,je
1572  pe(is-1,1,j) = ptop
1573  pe(ie+1,1,j) = ptop
1574  do k=1,npz
1575  pe(is-1,k+1,j) = pe(is-1,k,j) + delp(is-1,j,k)
1576  pe(ie+1,k+1,j) = pe(ie+1,k,j) + delp(ie+1,j,k)
1577  enddo
1578  enddo
1579 
1580 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,ptop)
1581  do i=is-1,ie+1
1582  pe(i,1,js-1) = ptop
1583  pe(i,1,je+1) = ptop
1584  do k=1,npz
1585  pe(i,k+1,js-1) = pe(i,k,js-1) + delp(i,js-1,k)
1586  pe(i,k+1,je+1) = pe(i,k,je+1) + delp(i,je+1,k)
1587  enddo
1588  enddo
1589 
1590 end subroutine pe_halo
1591 
1592 
1593 subroutine adv_pe(ua, va, pem, om, gridstruct, bd, npx, npy, npz, ng)
1595 integer, intent(in) :: npx, npy, npz, ng
1596 type(fv_grid_bounds_type), intent(IN) :: bd
1597 ! Contra-variant wind components:
1598 real, intent(in), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz):: ua, va
1599 ! Pressure at edges:
1600 real, intent(in) :: pem(bd%is-1:bd%ie+1,1:npz+1,bd%js-1:bd%je+1)
1601 real, intent(inout) :: om(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
1602 type(fv_grid_type), intent(INOUT), target :: gridstruct
1603 
1604 ! Local:
1605 real, dimension(bd%is:bd%ie,bd%js:bd%je):: up, vp
1606 real v3(3,bd%is:bd%ie,bd%js:bd%je)
1607 
1608 real pin(bd%isd:bd%ied,bd%jsd:bd%jed)
1609 real pb(bd%isd:bd%ied,bd%jsd:bd%jed)
1610 
1611 real grad(3,bd%is:bd%ie,bd%js:bd%je)
1612 real pdx(3,bd%is:bd%ie,bd%js:bd%je+1)
1613 real pdy(3,bd%is:bd%ie+1,bd%js:bd%je)
1614 
1615 integer :: i,j,k, n
1616 integer :: is, ie, js, je
1617 
1618  is = bd%is
1619  ie = bd%ie
1620  js = bd%js
1621  je = bd%je
1622 
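! adv_pe adds the horizontal advective term V*grad(p_e) to om so that om becomes the
! total omega = Dp/Dt: layer winds are averaged to the layer's lower interface, the
! interface pressure pem is interpolated to cell corners (a2b_ord2), and grad(p_e) is
! evaluated for each cell via Green's theorem using the edge vectors en1/en2.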
1623 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ua,va,gridstruct,pem,npx,npy,ng,om) &
1624 !$OMP private(n, pdx, pdy, pin, pb, up, vp, grad, v3)
1625 do k=1,npz
1626  if ( k==npz ) then
1627  do j=js,je
1628  do i=is,ie
1629  up(i,j) = ua(i,j,npz)
1630  vp(i,j) = va(i,j,npz)
1631  enddo
1632  enddo
1633  else
1634  do j=js,je
1635  do i=is,ie
1636  up(i,j) = 0.5*(ua(i,j,k)+ua(i,j,k+1))
1637  vp(i,j) = 0.5*(va(i,j,k)+va(i,j,k+1))
1638  enddo
1639  enddo
1640  endif
1641 
1642  ! Compute Vect wind:
1643  do j=js,je
1644  do i=is,ie
1645  do n=1,3
1646  v3(n,i,j) = up(i,j)*gridstruct%ec1(n,i,j) + vp(i,j)*gridstruct%ec2(n,i,j)
1647  enddo
1648  enddo
1649  enddo
1650 
1651  do j=js-1,je+1
1652  do i=is-1,ie+1
1653  pin(i,j) = pem(i,k+1,j)
1654  enddo
1655  enddo
1656 
1657  ! Compute pe at 4 cell corners:
1658  call a2b_ord2(pin, pb, gridstruct, npx, npy, is, ie, js, je, ng)
1659 
1660 
1661  do j=js,je+1
1662  do i=is,ie
1663  do n=1,3
1664  pdx(n,i,j) = (pb(i,j)+pb(i+1,j))*gridstruct%dx(i,j)*gridstruct%en1(n,i,j)
1665  enddo
1666  enddo
1667  enddo
1668  do j=js,je
1669  do i=is,ie+1
1670  do n=1,3
1671  pdy(n,i,j) = (pb(i,j)+pb(i,j+1))*gridstruct%dy(i,j)*gridstruct%en2(n,i,j)
1672  enddo
1673  enddo
1674  enddo
1675 
1676  ! Compute grad (pe) by Green's theorem
1677  do j=js,je
1678  do i=is,ie
1679  do n=1,3
1680  grad(n,i,j) = pdx(n,i,j+1) - pdx(n,i,j) - pdy(n,i,j) + pdy(n,i+1,j)
1681  enddo
1682  enddo
1683  enddo
1684 
1685  ! Compute inner product: V3 * grad (pe)
1686  do j=js,je
1687  do i=is,ie
1688  om(i,j,k) = om(i,j,k) + 0.5*gridstruct%rarea(i,j)*(v3(1,i,j)*grad(1,i,j) + &
1689  v3(2,i,j)*grad(2,i,j) + v3(3,i,j)*grad(3,i,j))
1690  enddo
1691  enddo
1692 enddo
1693 
1694 end subroutine adv_pe
1695 
1696 
1697 
1698 
1699 subroutine p_grad_c(dt2, npz, delpc, pkc, gz, uc, vc, bd, rdxc, rdyc, hydrostatic)
1701 integer, intent(in):: npz
1702 real, intent(in):: dt2
1703 type(fv_grid_bounds_type), intent(IN) :: bd
1704 real, intent(in), dimension(bd%isd:, bd%jsd: ,: ):: delpc
1707 real, intent(in), dimension(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1):: pkc, gz
1708 real, intent(inout):: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1709 real, intent(inout):: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1710 real, intent(IN) :: rdxc(bd%isd:bd%ied+1,bd%jsd:bd%jed+1)
1711 real, intent(IN) :: rdyc(bd%isd:bd%ied ,bd%jsd:bd%jed)
1712 logical, intent(in):: hydrostatic
1713 ! Local:
1714 real:: wk(bd%is-1:bd%ie+1,bd%js-1:bd%je+1)
1715 integer:: i,j,k
1716 
1717 integer :: is, ie, js, je
1718 
1719  is = bd%is
1720  ie = bd%ie
1721  js = bd%js
1722  je = bd%je
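! Apply a half-step (dt2) pressure-gradient acceleration to the C-grid winds uc and vc;
! wk is the vertical weight of the finite-volume gradient: the interface pkc difference
! in the hydrostatic case, delpc in the nonhydrostatic case.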
1723 
1724 !$OMP parallel do default(none) shared(is,ie,js,je,npz,hydrostatic,pkc,delpc,uc,dt2,rdxc,gz,vc,rdyc) &
1725 !$OMP private(wk)
1726 do k=1,npz
1727 
1728  if ( hydrostatic ) then
1729  do j=js-1,je+1
1730  do i=is-1,ie+1
1731  wk(i,j) = pkc(i,j,k+1) - pkc(i,j,k)
1732  enddo
1733  enddo
1734  else
1735  do j=js-1,je+1
1736  do i=is-1,ie+1
1737  wk(i,j) = delpc(i,j,k)
1738  enddo
1739  enddo
1740  endif
1741 
1742  do j=js,je
1743  do i=is,ie+1
1744  uc(i,j,k) = uc(i,j,k) + dt2*rdxc(i,j) / (wk(i-1,j)+wk(i,j)) * &
1745  ( (gz(i-1,j,k+1)-gz(i,j,k ))*(pkc(i,j,k+1)-pkc(i-1,j,k)) &
1746  + (gz(i-1,j,k) - gz(i,j,k+1))*(pkc(i-1,j,k+1)-pkc(i,j,k)) )
1747  enddo
1748  enddo
1749  do j=js,je+1
1750  do i=is,ie
1751  vc(i,j,k) = vc(i,j,k) + dt2*rdyc(i,j) / (wk(i,j-1)+wk(i,j)) * &
1752  ( (gz(i,j-1,k+1)-gz(i,j,k ))*(pkc(i,j,k+1)-pkc(i,j-1,k)) &
1753  + (gz(i,j-1,k) - gz(i,j,k+1))*(pkc(i,j-1,k+1)-pkc(i,j,k)) )
1754  enddo
1755  enddo
1756 enddo
1757 
1758 end subroutine p_grad_c
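! Schematically, this is a half-step (dt2) C-grid pressure-gradient update.
! The mass weight wk is pkc(k+1)-pkc(k) when hydrostatic and delpc otherwise,
! and each wind component receives the finite-volume cross-difference form,
! e.g. for uc:
!   uc(i,j,k) = uc(i,j,k) + dt2*rdxc(i,j)/(wk(i-1,j)+wk(i,j)) *
!               ( (gz(i-1,j,k+1)-gz(i,j,k  ))*(pkc(i,j,k+1)-pkc(i-1,j,k))
!               + (gz(i-1,j,k  )-gz(i,j,k+1))*(pkc(i-1,j,k+1)-pkc(i,j,k)) )
! with the analogous j-differences for vc.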
1759 
1760 
1761 subroutine nh_p_grad(u, v, pp, gz, delp, pk, dt, ng, gridstruct, bd, npx, npy, npz, use_logp)
1762 integer, intent(IN) :: ng, npx, npy, npz
1763 real, intent(IN) :: dt
1764 logical, intent(in) :: use_logp
1765 type(fv_grid_bounds_type), intent(IN) :: bd
1766 real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
1767 real, intent(inout) :: pp(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1768 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1769 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1770 real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz)
1771 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz)
1772 type(fv_grid_type), intent(INOUT), target :: gridstruct
1773 ! Local:
1774 real wk1(bd%isd:bd%ied, bd%jsd:bd%jed)
1775 real wk(bd%is: bd%ie+1,bd%js: bd%je+1)
1776 real du1, dv1, top_value
1777 integer i,j,k
1778 integer :: is, ie, js, je
1779 integer :: isd, ied, jsd, jed
1780 
1781  is = bd%is
1782  ie = bd%ie
1783  js = bd%js
1784  je = bd%je
1785  isd = bd%isd
1786  ied = bd%ied
1787  jsd = bd%jsd
1788  jed = bd%jed
1789 
1790 if ( use_logp ) then
1791  top_value = peln1
1792 else
1793  top_value = ptk
1794 endif
1795 
1796 !$OMP parallel do default(none) shared(top_value,isd,jsd,npz,pp,gridstruct,npx,npy,is,ie,js,je,ng,pk,gz) &
1797 !$OMP private(wk1)
1798 do k=1,npz+1
1799  if ( k==1 ) then
1800  do j=js,je+1
1801  do i=is,ie+1
1802  pp(i,j,1) = 0.
1803  pk(i,j,1) = top_value
1804  enddo
1805  enddo
1806  else
1807  call a2b_ord4(pp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1808  call a2b_ord4(pk(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1809  endif
1810  call a2b_ord4( gz(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1811 enddo
1812 
1813 !$OMP parallel do default(none) shared(is,ie,js,je,npz,delp,gridstruct,npx,npy,ng,isd,jsd, &
1814 !$OMP pk,dt,gz,u,pp,v) &
1815 !$OMP private(wk1, wk, du1, dv1)
1816 do k=1,npz
1817  call a2b_ord4(delp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng)
1818  do j=js,je+1
1819  do i=is,ie+1
1820  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
1821  enddo
1822  enddo
1823  do j=js,je+1
1824  do i=is,ie
1825  ! hydrostatic contributions from past time-step already added in the "beta" part
1826  ! Current gradient from "hydrostatic" components:
1827  du1 = dt / (wk(i,j)+wk(i+1,j)) * &
1828  ( (gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) + &
1829  (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k)) )
1830 #ifdef GAS_HYDRO_P
1831  du1 = (1.-0.5*(q_con(i,j-1,k)+q_con(i,j,k)))*du1
1832 #endif
1833  ! Non-hydrostatic contribution
1834  u(i,j,k) = (u(i,j,k) + du1 + dt/(wk1(i,j)+wk1(i+1,j)) * &
1835  ((gz(i,j,k+1)-gz(i+1,j,k))*(pp(i+1,j,k+1)-pp(i,j,k)) &
1836  + (gz(i,j,k)-gz(i+1,j,k+1))*(pp(i,j,k+1)-pp(i+1,j,k))))*gridstruct%rdx(i,j)
1837  enddo
1838  enddo
1839  do j=js,je
1840  do i=is,ie+1
1841  ! Current gradient from "hydrostatic" components:
1842  dv1 = dt / (wk(i,j)+wk(i,j+1)) * &
1843  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) + &
1844  (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k)))
1845 #ifdef GAS_HYDRO_P
1846  dv1 = (1.-0.5*(q_con(i-1,j,k)+q_con(i,j,k)))*dv1
1847 #endif
1848  ! Non-hydrostatic contribution
1849  v(i,j,k) = (v(i,j,k) + dv1 + dt/(wk1(i,j)+wk1(i,j+1)) * &
1850  ((gz(i,j,k+1)-gz(i,j+1,k))*(pp(i,j+1,k+1)-pp(i,j,k)) &
1851  + (gz(i,j,k)-gz(i,j+1,k+1))*(pp(i,j,k+1)-pp(i,j+1,k))))*gridstruct%rdy(i,j)
1852  enddo
1853  enddo
1854 
1855 enddo ! end k-loop
1856 end subroutine nh_p_grad
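! In outline: the full D-grid pressure-gradient update of the non-hydrostatic
! solver.  pk (pe**kappa, or log-p when use_logp, hence top_value = peln1 or
! ptk), the pressure perturbation pp, and gz are first interpolated to cell
! corners with a2b_ord4.  Each wind component then gets a "hydrostatic"
! increment weighted by wk = pk(k+1)-pk(k) plus a non-hydrostatic increment
! weighted by wk1 (corner-interpolated delp), both in the same cross-difference
! form as p_grad_c, and is finally scaled by rdx (u) or rdy (v).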
1857 
1858 
1859 subroutine split_p_grad( u, v, pp, gz, delp, pk, beta, dt, ng, gridstruct, bd, npx, npy, npz, use_logp)
1860 integer, intent(IN) :: ng, npx, npy, npz
1861 real, intent(IN) :: beta, dt
1862 logical, intent(in):: use_logp
1863 type(fv_grid_bounds_type), intent(IN) :: bd
1864 real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
1865 real, intent(inout) :: pp(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1866 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1867 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1)
1868 ! real, intent(inout) :: du(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1869 ! real, intent(inout) :: dv(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1870 real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz)
1871 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz)
1872 type(fv_grid_type), intent(INOUT), target :: gridstruct
1873 ! Local:
1874 real wk1(bd%isd:bd%ied, bd%jsd:bd%jed)
1875 real wk(bd%is: bd%ie+1,bd%js: bd%je+1)
1876 real alpha, top_value
1877 integer i,j,k
1878 integer :: is, ie, js, je
1879 integer :: isd, ied, jsd, jed
1880 
1881  is = bd%is
1882  ie = bd%ie
1883  js = bd%js
1884  je = bd%je
1885  isd = bd%isd
1886  ied = bd%ied
1887  jsd = bd%jsd
1888  jed = bd%jed
1889 
1890 if ( use_logp ) then
1891  top_value = peln1
1892 else
1893  top_value = ptk
1894 endif
1895 
1896 alpha = 1. - beta
1897 
1898 !$OMP parallel do default(none) shared(is,ie,js,je,pp,pk,top_value)
1899 do j=js,je+1
1900  do i=is,ie+1
1901  pp(i,j,1) = 0.
1902  pk(i,j,1) = top_value
1903  enddo
1904 enddo
1905 
1906 !$OMP parallel do default(none) shared(isd,jsd,npz,pp,gridstruct,npx,npy,is,ie,js,je,ng,pk,gz) &
1907 !$OMP private(wk1)
1908 do k=1,npz+1
1909  if ( k/=1 ) then
1910  call a2b_ord4(pp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1911  call a2b_ord4(pk(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1912  endif
1913  call a2b_ord4( gz(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1914 enddo
1915 
1916 !$OMP parallel do default(none) shared(is,ie,js,je,isd,jsd,npz,delp,gridstruct,npx,npy,ng, &
1917 !$OMP pk,u,beta,du,dt,gz,alpha,pp,v,dv) &
1918 !$OMP private(wk1, wk)
1919 do k=1,npz
1920  call a2b_ord4(delp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng)
1921 
1922  do j=js,je+1
1923  do i=is,ie+1
1924  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
1925  enddo
1926  enddo
1927 
1928  do j=js,je+1
1929  do i=is,ie
1930  u(i,j,k) = u(i,j,k) + beta*du(i,j,k)
1931  ! hydrostatic contributions from past time-step already added in the "beta" part
1932  ! Current gradient from "hydrostatic" components:
1933  !---------------------------------------------------------------------------------
1934  du(i,j,k) = dt / (wk(i,j)+wk(i+1,j)) * &
1935  ((gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) + &
1936  (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k)))
1937 #ifdef GAS_HYDRO_P
1938  du(i,j,k) = (1.-0.5*(q_con(i,j-1,k)+q_con(i,j,k)))*du(i,j,k)
1939 #endif
1940  !---------------------------------------------------------------------------------
1941  ! Non-hydrostatic contribution
1942  u(i,j,k) = (u(i,j,k) + alpha*du(i,j,k) + dt/(wk1(i,j)+wk1(i+1,j)) * &
1943  ((gz(i,j,k+1)-gz(i+1,j,k))*(pp(i+1,j,k+1)-pp(i,j,k)) &
1944  + (gz(i,j,k)-gz(i+1,j,k+1))*(pp(i,j,k+1)-pp(i+1,j,k))))*gridstruct%rdx(i,j)
1945  enddo
1946  enddo
1947  do j=js,je
1948  do i=is,ie+1
1949  v(i,j,k) = v(i,j,k) + beta*dv(i,j,k)
1950  ! Current gradient from "hydrostatic" components:
1951  !---------------------------------------------------------------------------------
1952  dv(i,j,k) = dt / (wk(i,j)+wk(i,j+1)) * &
1953  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) + &
1954  (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k)))
1955 #ifdef GAS_HYDRO_P
1956  dv(i,j,k) = (1.-0.5*(q_con(i-1,j,k)+q_con(i,j,k)))*dv(i,j,k)
1957 #endif
1958  !---------------------------------------------------------------------------------
1959  ! Non-hydrostatic contribution
1960  v(i,j,k) = (v(i,j,k) + alpha*dv(i,j,k) + dt/(wk1(i,j)+wk1(i,j+1)) * &
1961  ((gz(i,j,k+1)-gz(i,j+1,k))*(pp(i,j+1,k+1)-pp(i,j,k)) &
1962  + (gz(i,j,k)-gz(i,j+1,k+1))*(pp(i,j,k+1)-pp(i,j+1,k))))*gridstruct%rdy(i,j)
1963  enddo
1964  enddo
1965 
1966 enddo ! end k-loop
1967 
1968 
1969 end subroutine split_p_grad
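! Schematically: the time-split variant of nh_p_grad, with alpha = 1 - beta.
! The hydrostatic increments du, dv saved from the previous acoustic step are
! applied with weight beta, a fresh hydrostatic increment is computed and
! applied with weight alpha, and the non-hydrostatic (pp) term is applied at
! full weight; for beta = 0 this is effectively the same update as nh_p_grad.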
1970 
1971 
1972 
1973 subroutine one_grad_p(u, v, pk, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, &
1974  ptop, hydrostatic, a2b_ord, d_ext)
1976 integer, intent(IN) :: ng, npx, npy, npz, a2b_ord
1977 real, intent(IN) :: dt, ptop, d_ext
1978 logical, intent(in) :: hydrostatic
1979 type(fv_grid_bounds_type), intent(IN) :: bd
1980 real, intent(in) :: divg2(bd%is:bd%ie+1,bd%js:bd%je+1)
1981 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
1982 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
1983 real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed ,npz)
1984 real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1985 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1986 type(fv_grid_type), intent(INOUT), target :: gridstruct
1987 ! Local:
1988 real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: wk
1989 real:: wk1(bd%is:bd%ie+1,bd%js:bd%je+1)
1990 real:: wk2(bd%is:bd%ie,bd%js:bd%je+1)
1991 real top_value
1992 integer i,j,k
1993 
1994 integer :: is, ie, js, je
1995 integer :: isd, ied, jsd, jed
1996 
1997  is = bd%is
1998  ie = bd%ie
1999  js = bd%js
2000  je = bd%je
2001  isd = bd%isd
2002  ied = bd%ied
2003  jsd = bd%jsd
2004  jed = bd%jed
2005 
2006 if ( hydrostatic ) then
2007  ! pk is pe**kappa if hydrostatic
2008  top_value = ptk
2009 else
2010  ! pk is full pressure if non-hydrostatic
2011  top_value = ptop
2012 endif
2013 
2014 !$OMP parallel do default(none) shared(is,ie,js,je,pk,top_value)
2015 do j=js,je+1
2016  do i=is,ie+1
2017  pk(i,j,1) = top_value
2018  enddo
2019 enddo
2020 
2021 !$OMP parallel do default(none) shared(npz,isd,jsd,pk,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
2022 !$OMP private(wk)
2023 do k=2,npz+1
2024  if ( a2b_ord==4 ) then
2025  call a2b_ord4(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2026  else
2027  call a2b_ord2(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2028  endif
2029 enddo
2030 
2031 !$OMP parallel do default(none) shared(npz,isd,jsd,gz,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
2032 !$OMP private(wk)
2033 do k=1,npz+1
2034  if ( a2b_ord==4 ) then
2035  call a2b_ord4( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2036  else
2037  call a2b_ord2( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2038  endif
2039 enddo
2040 
2041 if ( d_ext > 0. ) then
2042 
2043  !$OMP parallel do default(none) shared(is,ie,js,je,wk2,divg2)
2044  do j=js,je+1
2045  do i=is,ie
2046  wk2(i,j) = divg2(i,j)-divg2(i+1,j)
2047  enddo
2048  enddo
2049 
2050  !$OMP parallel do default(none) shared(is,ie,js,je,wk1,divg2)
2051  do j=js,je
2052  do i=is,ie+1
2053  wk1(i,j) = divg2(i,j)-divg2(i,j+1)
2054  enddo
2055  enddo
2056 
2057 else
2058 
2059  !$OMP parallel do default(none) shared(is,ie,js,je,wk1,wk2)
2060  do j=js,je+1
2061  do i=is,ie
2062  wk2(i,j) = 0.
2063  enddo
2064  do i=is,ie+1
2065  wk1(i,j) = 0.
2066  enddo
2067  enddo
2068 
2069 endif
2070 
2071 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pk,delp,hydrostatic,a2b_ord,gridstruct, &
2072 !$OMP npx,npy,isd,jsd,ng,u,v,wk2,dt,gz,wk1) &
2073 !$OMP private(wk)
2074 do k=1,npz
2075 
2076  if ( hydrostatic ) then
2077  do j=js,je+1
2078  do i=is,ie+1
2079  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
2080  enddo
2081  enddo
2082  else
2083  if ( a2b_ord==4 ) then
2084  call a2b_ord4(delp(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng)
2085  else
2086  call a2b_ord2(delp(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng)
2087  endif
2088  endif
2089 
2090  do j=js,je+1
2091  do i=is,ie
2092  u(i,j,k) = gridstruct%rdx(i,j)*(wk2(i,j)+u(i,j,k) + dt/(wk(i,j)+wk(i+1,j)) * &
2093  ((gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) &
2094  + (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k))))
2095  enddo
2096  enddo
2097  do j=js,je
2098  do i=is,ie+1
2099  v(i,j,k) = gridstruct%rdy(i,j)*(wk1(i,j)+v(i,j,k) + dt/(wk(i,j)+wk(i,j+1)) * &
2100  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) &
2101  + (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k))))
2102  enddo
2103  enddo
2104 enddo ! end k-loop
2105 
2106 end subroutine one_grad_p
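! In outline: the single (unsplit) pressure-gradient update used by the
! hydrostatic core.  The weight wk is pk(k+1)-pk(k) when hydrostatic, or
! corner-interpolated delp otherwise; when d_ext > 0 the external-mode damping
! field divg2 is differenced into wk2 (for u) and wk1 (for v) and folded into
! the same update before the rdx/rdy scaling.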
2107 
2108 
2109 subroutine grad1_p_update(divg2, u, v, pk, gz, dt, ng, gridstruct, bd, npx, npy, npz, ptop, beta, a2b_ord)
2111 integer, intent(in) :: ng, npx, npy, npz, a2b_ord
2112 real, intent(in) :: dt, ptop, beta
2113 type(fv_grid_bounds_type), intent(IN) :: bd
2114 real, intent(in):: divg2(bd%is:bd%ie+1,bd%js:bd%je+1)
2115 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
2116 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
2117 real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
2118 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
2119 type(fv_grid_type), intent(INOUT), target :: gridstruct
2120 
2121 ! Local:
2122 real:: wk(bd%isd:bd%ied,bd%jsd:bd%jed)
2123 real top_value, alpha
2124 integer i,j,k
2125 
2126  integer :: is, ie, js, je
2127  integer :: isd, ied, jsd, jed
2128 
2129  is = bd%is
2130  ie = bd%ie
2131  js = bd%js
2132  je = bd%je
2133  isd = bd%isd
2134  ied = bd%ied
2135  jsd = bd%jsd
2136  jed = bd%jed
2137 
2138 alpha = 1. - beta
2139 
2140 ! pk is pe**kappa if hydrostatic
2141 top_value = ptk
2142 
2143 !$OMP parallel do default(none) shared(is,ie,js,je,pk,top_value)
2144 do j=js,je+1
2145  do i=is,ie+1
2146  pk(i,j,1) = top_value
2147  enddo
2148 enddo
2149 !$OMP parallel do default(none) shared(npz,isd,jsd,pk,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
2150 !$OMP private(wk)
2151 do k=2,npz+1
2152  if ( a2b_ord==4 ) then
2153  call a2b_ord4(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2154  else
2155  call a2b_ord2(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2156  endif
2157 enddo
2158 
2159 !$OMP parallel do default(none) shared(npz,isd,jsd,gz,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
2160 !$OMP private(wk)
2161 do k=1,npz+1
2162  if ( a2b_ord==4 ) then
2163  call a2b_ord4( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2164  else
2165  call a2b_ord2( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
2166  endif
2167 enddo
2168 
2169 !$OMP parallel do default(none) shared(npz,is,ie,js,je,pk,u,beta,gz,divg2,alpha, &
2170 !$OMP gridstruct,v,dt,du,dv) &
2171 !$OMP private(wk)
2172 do k=1,npz
2173 
2174  do j=js,je+1
2175  do i=is,ie+1
2176  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
2177  enddo
2178  enddo
2179 
2180  do j=js,je+1
2181  do i=is,ie
2182  u(i,j,k) = u(i,j,k) + beta*du(i,j,k)
2183  du(i,j,k) = dt/(wk(i,j)+wk(i+1,j)) * &
2184  ((gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) &
2185  + (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k)))
2186  u(i,j,k) = (u(i,j,k) + divg2(i,j)-divg2(i+1,j) + alpha*du(i,j,k))*gridstruct%rdx(i,j)
2187  enddo
2188  enddo
2189  do j=js,je
2190  do i=is,ie+1
2191  v(i,j,k) = v(i,j,k) + beta*dv(i,j,k)
2192  dv(i,j,k) = dt/(wk(i,j)+wk(i,j+1)) * &
2193  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) &
2194  + (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k)))
2195  v(i,j,k) = (v(i,j,k) + divg2(i,j)-divg2(i,j+1) + alpha*dv(i,j,k))*gridstruct%rdy(i,j)
2196  enddo
2197  enddo
2198 enddo ! end k-loop
2199 
2200 end subroutine grad1_p_update
2201 
2202 
2203 subroutine mix_dp(hydrostatic, w, delp, pt, km, ak, bk, CG, fv_debug, bd)
2204 integer, intent(IN) :: km
2205 real , intent(IN) :: ak(km+1), bk(km+1)
2206 type(fv_grid_bounds_type), intent(IN) :: bd
2207 real, intent(INOUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km):: pt, delp
2208 real, intent(INOUT), dimension(bd%isd:,bd%jsd:,1:):: w
2209 logical, intent(IN) :: hydrostatic, CG, fv_debug
2210 ! Local:
2211 real dp, dpmin
2212 integer i, j, k, ip
2213 integer ifirst, ilast
2214 integer jfirst, jlast
2215 
2216 integer :: is, ie, js, je
2217 integer :: isd, ied, jsd, jed
2218 
2219  is = bd%is
2220  ie = bd%ie
2221  js = bd%js
2222  je = bd%je
2223  isd = bd%isd
2224  ied = bd%ied
2225  jsd = bd%jsd
2226  jed = bd%jed
2227 
2228 
2229 if ( cg ) then
2230  ifirst = is-1; ilast = ie+1
2231  jfirst = js-1; jlast = je+1
2232 else
2233  ifirst = is; ilast = ie
2234  jfirst = js; jlast = je
2235 endif
2236 
2237 
2238 !$OMP parallel do default(none) shared(jfirst,jlast,km,ifirst,ilast,delp,ak,bk,pt, &
2239 !$OMP hydrostatic,w,fv_debug) &
2240 !$OMP private(ip, dpmin, dp)
2241 do 1000 j=jfirst,jlast
2242 
2243  ip = 0
2244 
2245  do k=1, km-1
2246  dpmin = 0.01 * ( ak(k+1)-ak(k) + (bk(k+1)-bk(k))*1.e5 )
2247  do i=ifirst, ilast
2248  if(delp(i,j,k) < dpmin) then
2249  if (fv_debug) write(*,*) 'Mix_dp: ', i, j, k, mpp_pe(), delp(i,j,k), pt(i,j,k)
2250  ! Remap from below and mix pt
2251  dp = dpmin - delp(i,j,k)
2252  pt(i,j,k) = (pt(i,j,k)*delp(i,j,k) + pt(i,j,k+1)*dp) / dpmin
2253  if ( .not.hydrostatic ) w(i,j,k) = (w(i,j,k)*delp(i,j,k) + w(i,j,k+1)*dp) / dpmin
2254  delp(i,j,k) = dpmin
2255  delp(i,j,k+1) = delp(i,j,k+1) - dp
2256  ip = ip + 1
2257  endif
2258  enddo
2259  enddo
2260 
2261  ! Bottom (k=km):
2262  dpmin = 0.01 * ( ak(km+1)-ak(km) + (bk(km+1)-bk(km))*1.e5 )
2263  do i=ifirst, ilast
2264  if(delp(i,j,km) < dpmin) then
2265  if (fv_debug) write(*,*) 'Mix_dp: ', i, j, km, mpp_pe(), delp(i,j,km), pt(i,j,km)
2266  ! Remap from above and mix pt
2267  dp = dpmin - delp(i,j,km)
2268  pt(i,j,km) = (pt(i,j,km)*delp(i,j,km) + pt(i,j,km-1)*dp)/dpmin
2269  if ( .not.hydrostatic ) w(i,j,km) = (w(i,j,km)*delp(i,j,km) + w(i,j,km-1)*dp) / dpmin
2270  delp(i,j,km) = dpmin
2271  delp(i,j,km-1) = delp(i,j,km-1) - dp
2272  ip = ip + 1
2273  endif
2274  enddo
2275  if ( fv_debug .and. ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip
2276  ! if ( ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip
2277 1000 continue
2278 
2279  end subroutine mix_dp
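! Schematically: a layer is flagged as too thin when
!   delp(i,j,k) < dpmin = 0.01*( ak(k+1)-ak(k) + (bk(k+1)-bk(k))*1.e5 ),
! i.e. 1% of its nominal thickness for a 1.e5 Pa surface pressure.  The deficit
! dp = dpmin - delp is borrowed from the layer below (from the layer above for
! k = km), and pt (and w when non-hydrostatic) are mixed with mass weighting,
! so the column-integrated delp is unchanged.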
2280 
2282  subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, &
2283 #ifdef MULTI_GASES
2284  kapad, &
2285 #endif
2286  q_con, pkz, km, akap, CG, bounded_domain, computehalo, npx, npy, a2b_ord, bd)
2287 
2288  integer, intent(IN) :: km, npx, npy, a2b_ord
2289  real , intent(IN) :: akap, ptop
2290  type(fv_grid_bounds_type), intent(IN) :: bd
2291  real , intent(IN) :: hs(bd%isd:bd%ied,bd%jsd:bd%jed)
2292  real, intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km):: pt, delp
2293 #ifdef MULTI_GASES
2294  real, intent(IN) :: kapad(bd%isd:bd%ied,bd%jsd:bd%jed,km)
2295 #endif
2296  real, intent(IN), dimension(bd%isd:,bd%jsd:,1:):: q_con
2297  logical, intent(IN) :: CG, bounded_domain, computehalo
2298  ! !OUTPUT PARAMETERS
2299  real, intent(OUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km+1):: gz, pk
2300  real, intent(OUT) :: pe(bd%is-1:bd%ie+1,km+1,bd%js-1:bd%je+1)
2301  real, intent(out) :: peln(bd%is:bd%ie,km+1,bd%js:bd%je)
2302  real, intent(out) :: pkz(bd%is:bd%ie,bd%js:bd%je,km)
2303  ! !DESCRIPTION:
2304  ! Calculates geopotential and pressure to the kappa.
2305  ! Local:
2306  real peg(bd%isd:bd%ied,km+1)
2307  real pkg(bd%isd:bd%ied,km+1)
2308  real p1d(bd%isd:bd%ied)
2309  real logp(bd%isd:bd%ied)
2310 #ifdef MULTI_GASES
2311  real pkx (bd%isd:bd%ied,km)
2312  real pkgx(bd%isd:bd%ied,km)
2313  real akapx
2314  integer n
2315 #endif
2316  integer i, j, k
2317  integer ifirst, ilast
2318  integer jfirst, jlast
2319 
2320  integer :: is, ie, js, je
2321  integer :: isd, ied, jsd, jed
2322 
2323  is = bd%is
2324  ie = bd%ie
2325  js = bd%js
2326  je = bd%je
2327  isd = bd%isd
2328  ied = bd%ied
2329  jsd = bd%jsd
2330  jed = bd%jed
2331 
2332  if ( (.not. cg .and. a2b_ord==4) .or. (bounded_domain .and. .not. cg) ) then ! D-Grid
2333  ifirst = is-2; ilast = ie+2
2334  jfirst = js-2; jlast = je+2
2335  else
2336  ifirst = is-1; ilast = ie+1
2337  jfirst = js-1; jlast = je+1
2338  endif
2339 
2340  if (bounded_domain .and. computehalo) then
2341  if (is == 1) ifirst = isd
2342  if (ie == npx-1) ilast = ied
2343  if (js == 1) jfirst = jsd
2344  if (je == npy-1) jlast = jed
2345  end if
2346 
2347 #ifdef MULTI_GASES
2348 !$OMP parallel do default(none) shared(jfirst,jlast,ifirst,ilast,pk,km,gz,hs,ptop,ptk,kapad, &
2349 !$OMP js,je,is,ie,peln,peln1,pe,delp,akap,pt,CG,pkz,q_con) &
2350 !$OMP private(peg, pkx, pkgx, akapx, pkg, p1d, logp)
2351 #else
2352 !$OMP parallel do default(none) shared(jfirst,jlast,ifirst,ilast,pk,km,gz,hs,ptop,ptk, &
2353 !$OMP js,je,is,ie,peln,peln1,pe,delp,akap,pt,CG,pkz,q_con) &
2354 !$OMP private(peg, pkg, p1d, logp)
2355 #endif
2356  do 2000 j=jfirst,jlast
2357 
2358  do i=ifirst, ilast
2359  p1d(i) = ptop
2360  pk(i,j,1) = ptk
2361  gz(i,j,km+1) = hs(i,j)
2362 #ifdef USE_COND
2363  peg(i,1) = ptop
2364  pkg(i,1) = ptk
2365 #endif
2366  enddo
2367 
2368 #ifndef SW_DYNAMICS
2369  if( j>=js .and. j<=je) then
2370  do i=is,ie
2371  peln(i,1,j) = peln1
2372  enddo
2373  endif
2374 #endif
2375 
2376  if( j>(js-2) .and. j<(je+2) ) then
2377  do i=max(ifirst,is-1), min(ilast,ie+1)
2378  pe(i,1,j) = ptop
2379  enddo
2380  endif
2381 
2382  ! Top down
2383  do k=2,km+1
2384  do i=ifirst, ilast
2385  p1d(i) = p1d(i) + delp(i,j,k-1)
2386  logp(i) = log(p1d(i))
2387  pk(i,j,k) = exp( akap*logp(i) )
2388 #ifdef USE_COND
2389  peg(i,k) = peg(i,k-1) + delp(i,j,k-1)*(1.-q_con(i,j,k-1))
2390  pkg(i,k) = exp( akap*log(peg(i,k)) )
2391 #endif
2392  enddo
2393 
2394  if( j>(js-2) .and. j<(je+2) ) then
2395  do i=max(ifirst,is-1), min(ilast,ie+1)
2396  pe(i,k,j) = p1d(i)
2397  enddo
2398  if( j>=js .and. j<=je) then
2399  do i=is,ie
2400  peln(i,k,j) = logp(i)
2401  enddo
2402  endif
2403  endif
2404  enddo
2405 
2406 #ifdef MULTI_GASES
2407  do k=1,km
2408  do i=ifirst, ilast
2409  akapx = (kapad(i,j,k)-akap)/akap
2410 #ifdef USE_COND
2411  pkgx(i,k) = (pkg(i,k+1)-pkg(i,k))/(akap*(log(peg(i,k+1))-log(peg(i,k))))
2412  pkgx(i,k) = exp( akapx*log(pkgx(i,k)) )
2413 #else
2414  pkx(i,k) = (pk(i,j,k+1)-pk(i,j,k))/(akap*(peln(i,k+1,j)-peln(i,k,j)))
2415  pkx(i,k) = exp( akapx*log(pkx(i,k)) )
2416 #endif
2417  enddo
2418  enddo
2419 #endif
2420 
2421  ! Bottom up
2422  do k=km,1,-1
2423  do i=ifirst, ilast
2424 #ifdef SW_DYNAMICS
2425  gz(i,j,k) = gz(i,j,k+1) + pt(i,j,k)*(pk(i,j,k+1)-pk(i,j,k))
2426 #else
2427 #ifdef USE_COND
2428 #ifdef MULTI_GASES
2429  gz(i,j,k) = gz(i,j,k+1) + cp_air*pkgx(i,k)*pt(i,j,k)*(pkg(i,k+1)-pkg(i,k))
2430 #else
2431  gz(i,j,k) = gz(i,j,k+1) + cp_air*pt(i,j,k)*(pkg(i,k+1)-pkg(i,k))
2432 #endif
2433 #else
2434 #ifdef MULTI_GASES
2435  gz(i,j,k) = gz(i,j,k+1) + cp_air*pkx(i,k)*pt(i,j,k)*(pk(i,j,k+1)-pk(i,j,k))
2436 #else
2437  gz(i,j,k) = gz(i,j,k+1) + cp_air*pt(i,j,k)*(pk(i,j,k+1)-pk(i,j,k))
2438 #endif
2439 #endif
2440 #endif
2441  enddo
2442  enddo
2443 
2444  if ( .not. cg .and. j .ge. js .and. j .le. je ) then
2445  do k=1,km
2446  do i=is,ie
2447  pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(akap*(peln(i,k+1,j)-peln(i,k,j)))
2448 #ifdef MULTI_GASES
2449  akapx = kapad(i,j,k) / akap
2450  pkz(i,j,k) = exp( akapx * log( pkz(i,j,k) ) )
2451 #endif
2452  enddo
2453  enddo
2454  endif
2455 
2456 2000 continue
2457  end subroutine geopk
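! In outline: interface pressures are accumulated top-down,
!   pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1),  pk = pe**akap,  starting at ptop,
! and the geopotential is integrated bottom-up from gz(i,j,km+1) = hs(i,j)
! using the hydrostatic relation (single-gas, non-SW, no-condensate branch):
!   gz(i,j,k) = gz(i,j,k+1) + cp_air*pt(i,j,k)*(pk(i,j,k+1)-pk(i,j,k))
! On the compute domain the layer-mean value returned is
!   pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k)) / (akap*(peln(i,k+1,j)-peln(i,k,j)))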
2458 
2460  subroutine del2_cubed(q, cd, gridstruct, domain, npx, npy, km, nmax, bd)
2461  !---------------------------------------------------------------
2462  ! This routine is for filtering the omega field for the physics
2463  !---------------------------------------------------------------
2464  integer, intent(in):: npx, npy, km, nmax
2465  real(kind=R_GRID), intent(in):: cd
2466  type(fv_grid_bounds_type), intent(IN) :: bd
2467  real, intent(inout):: q(bd%isd:bd%ied,bd%jsd:bd%jed,km)
2468  type(fv_grid_type), intent(IN), target :: gridstruct
2469  type(domain2d), intent(INOUT) :: domain
2470  real, parameter:: r3 = 1./3.
2471  real :: fx(bd%isd:bd%ied+1,bd%jsd:bd%jed), fy(bd%isd:bd%ied,bd%jsd:bd%jed+1)
2472  real :: q2(bd%isd:bd%ied,bd%jsd:bd%jed)
2473  integer i,j,k, n, nt, ntimes
2474  integer :: is, ie, js, je
2475  integer :: isd, ied, jsd, jed
2476 
2477  !Local routine pointers
2478 ! real, pointer, dimension(:,:) :: rarea
2479 ! real, pointer, dimension(:,:) :: del6_u, del6_v
2480 ! logical, pointer :: sw_corner, se_corner, ne_corner, nw_corner
2481 
2482  is = bd%is
2483  ie = bd%ie
2484  js = bd%js
2485  je = bd%je
2486  isd = bd%isd
2487  ied = bd%ied
2488  jsd = bd%jsd
2489  jed = bd%jed
2490 
2491 ! rarea => gridstruct%rarea
2492 ! del6_u => gridstruct%del6_u
2493 ! del6_v => gridstruct%del6_v
2494 
2495 ! sw_corner => gridstruct%sw_corner
2496 ! nw_corner => gridstruct%nw_corner
2497 ! se_corner => gridstruct%se_corner
2498 ! ne_corner => gridstruct%ne_corner
2499 
2500  ntimes = min(3, nmax)
2501 
2502  call timing_on('COMM_TOTAL')
2503  call mpp_update_domains(q, domain, complete=.true.)
2504  call timing_off('COMM_TOTAL')
2505 
2506 
2507  do n=1,ntimes
2508  nt = ntimes - n
2509 
2510 !$OMP parallel do default(none) shared(km,q,is,ie,js,je,npx,npy, &
2511 !$OMP nt,isd,jsd,gridstruct,bd, &
2512 !$OMP cd) &
2513 !$OMP private(fx, fy)
2514  do k=1,km
2515 
2516  if ( gridstruct%sw_corner ) then
2517  q(1,1,k) = (q(1,1,k)+q(0,1,k)+q(1,0,k)) * r3
2518  q(0,1,k) = q(1,1,k)
2519  q(1,0,k) = q(1,1,k)
2520  endif
2521  if ( gridstruct%se_corner ) then
2522  q(ie, 1,k) = (q(ie,1,k)+q(npx,1,k)+q(ie,0,k)) * r3
2523  q(npx,1,k) = q(ie,1,k)
2524  q(ie, 0,k) = q(ie,1,k)
2525  endif
2526  if ( gridstruct%ne_corner ) then
2527  q(ie, je,k) = (q(ie,je,k)+q(npx,je,k)+q(ie,npy,k)) * r3
2528  q(npx,je,k) = q(ie,je,k)
2529  q(ie,npy,k) = q(ie,je,k)
2530  endif
2531  if ( gridstruct%nw_corner ) then
2532  q(1, je,k) = (q(1,je,k)+q(0,je,k)+q(1,npy,k)) * r3
2533  q(0, je,k) = q(1,je,k)
2534  q(1,npy,k) = q(1,je,k)
2535  endif
2536 
2537  if(nt>0 .and. (.not. gridstruct%bounded_domain)) call copy_corners(q(isd,jsd,k), npx, npy, 1, gridstruct%bounded_domain, bd, &
2538  gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner )
2539  do j=js-nt,je+nt
2540  do i=is-nt,ie+1+nt
2541 #ifdef USE_SG
2542  fx(i,j) = gridstruct%dy(i,j)*gridstruct%sina_u(i,j)*(q(i-1,j,k)-q(i,j,k))*gridstruct%rdxc(i,j)
2543 #else
2544  fx(i,j) = gridstruct%del6_v(i,j)*(q(i-1,j,k)-q(i,j,k))
2545 #endif
2546  enddo
2547  enddo
2548 
2549  if(nt>0 .and. (.not. gridstruct%bounded_domain)) call copy_corners(q(isd,jsd,k), npx, npy, 2, gridstruct%bounded_domain, bd, &
2550  gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner)
2551  do j=js-nt,je+1+nt
2552  do i=is-nt,ie+nt
2553 #ifdef USE_SG
2554  fy(i,j) = gridstruct%dx(i,j)*gridstruct%sina_v(i,j)*(q(i,j-1,k)-q(i,j,k))*gridstruct%rdyc(i,j)
2555 #else
2556  fy(i,j) = gridstruct%del6_u(i,j)*(q(i,j-1,k)-q(i,j,k))
2557 #endif
2558  enddo
2559  enddo
2560 
2561  do j=js-nt,je+nt
2562  do i=is-nt,ie+nt
2563  q(i,j,k) = q(i,j,k) + cd*gridstruct%rarea(i,j)*(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))
2564  enddo
2565  enddo
2566  enddo
2567  enddo
2568 
2569  end subroutine del2_cubed
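! Schematically: up to ntimes = min(3,nmax) diffusion passes follow a single
! halo update.  Each pass first averages the three cell values meeting at a
! cube corner, then forms the fluxes
!   fx(i,j) = del6_v(i,j)*(q(i-1,j,k)-q(i,j,k))
!   fy(i,j) = del6_u(i,j)*(q(i,j-1,k)-q(i,j,k))
! (or the sina/rdxc form under USE_SG) and applies
!   q(i,j,k) = q(i,j,k) + cd*rarea(i,j)*(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))
! The working halo depth nt shrinks by one each pass, which is what allows a
! single mpp_update_domains call to cover all passes.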
2570 
2571  subroutine init_ijk_mem(i1, i2, j1, j2, km, array, var)
2572  integer, intent(in):: i1, i2, j1, j2, km
2573  real, intent(inout):: array(i1:i2,j1:j2,km)
2574  real, intent(in):: var
2575  integer:: i, j, k
2576 
2577 !$OMP parallel do default(none) shared(i1,i2,j1,j2,km,array,var)
2578  do k=1,km
2579  do j=j1,j2
2580  do i=i1,i2
2581  array(i,j,k) = var
2582  enddo
2583  enddo
2584  enddo
2585 
2586  end subroutine init_ijk_mem
2587 
2589  subroutine ray_fast(dt, npx, npy, npz, pfull, tau, u, v, w, &
2590  ks, dp, ptop, hydrostatic, rf_cutoff, bd)
2591 ! Simple "inline" version of the Rayleigh friction
2592  real, intent(in):: dt
2593  real, intent(in):: tau
2594  real, intent(in):: ptop, rf_cutoff
2595  real, intent(in), dimension(npz):: pfull
2596  integer, intent(in):: npx, npy, npz, ks
2597  logical, intent(in):: hydrostatic
2598  type(fv_grid_bounds_type), intent(IN) :: bd
2599  real, intent(inout):: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
2600  real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz)
2601  real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: )
2602  real, intent(in):: dp(npz)
2603 !
2604  real(kind=R_GRID):: rff(npz)
2605  real, parameter:: sday = 86400.
2606  real, dimension(bd%is:bd%ie+1):: dmv
2607  real, dimension(bd%is:bd%ie):: dmu
2608  real:: tau0, dm
2609  integer i, j, k
2610 
2611  integer :: is, ie, js, je
2612  integer :: isd, ied, jsd, jed
2613 
2614  is = bd%is
2615  ie = bd%ie
2616  js = bd%js
2617  je = bd%je
2618  isd = bd%isd
2619  ied = bd%ied
2620  jsd = bd%jsd
2621  jed = bd%jed
2622 
2623  if ( .not. rff_initialized ) then
2624  tau0 = tau * sday
2625  allocate( rf(npz) )
2626  rf(:) = 1.
2627 
2628  if( is_master() ) write(6,*) 'Fast Rayleigh friction E-folding time (days):'
2629  do k=1, npz
2630  if ( pfull(k) < rf_cutoff ) then
2631  rff(k) = dt/tau0*sin(0.5*pi*log(rf_cutoff/pfull(k))/log(rf_cutoff/ptop))**2
2632 ! Convert the damping rate rff into a multiplicative relaxation factor: rf = 1/(1+rff)
2633  if( is_master() ) write(6,*) k, 0.01*pfull(k), dt/(rff(k)*sday)
2634  kmax = k
2635  rff(k) = 1.d0 / (1.0d0+rff(k))
2636  rf(k) = rff(k)
2637  else
2638  exit
2639  endif
2640  enddo
2641  dm = 0.
2642  do k=1,ks
2643  if ( pfull(k) < rf_cutoff + min(100., 10.*ptop) ) then
2644  dm = dm + dp(k)
2645  k_rf = k
2646  else
2647  exit
2648  endif
2649  enddo
2650  if( is_master() ) write(6,*) 'k_rf=', k_rf, 0.01*pfull(k_rf), 'dm=', dm
2651  rff_initialized = .true.
2652  endif
2653 
2654 !$OMP parallel do default(none) shared(k_rf,is,ie,js,je,kmax,pfull,rf_cutoff,w,rf,dp,u,v,hydrostatic) &
2655 !$OMP private(dm, dmu, dmv)
2656  do j=js,je+1
2657 
2658  dm = 0.
2659  do k=1, k_rf
2660  dm = dm + dp(k)
2661  enddo
2662 
2663  dmu(:) = 0.
2664  dmv(:) = 0.
2665  do k=1,kmax
2666  do i=is,ie
2667  dmu(i) = dmu(i) + (1.-rf(k))*dp(k)*u(i,j,k)
2668  u(i,j,k) = rf(k)*u(i,j,k)
2669  enddo
2670  if ( j/=je+1 ) then
2671  do i=is,ie+1
2672  dmv(i) = dmv(i) + (1.-rf(k))*dp(k)*v(i,j,k)
2673  v(i,j,k) = rf(k)*v(i,j,k)
2674  enddo
2675  if ( .not. hydrostatic ) then
2676  do i=is,ie
2677  w(i,j,k) = rf(k)*w(i,j,k)
2678  enddo
2679  endif
2680  endif
2681  enddo
2682 
2683  do i=is,ie
2684  dmu(i) = dmu(i) / dm
2685  enddo
2686  if ( j/=je+1 ) then
2687  do i=is,ie+1
2688  dmv(i) = dmv(i) / dm
2689  enddo
2690  endif
2691 
2692  do k=1, k_rf
2693  do i=is,ie
2694  u(i,j,k) = u(i,j,k) + dmu(i)
2695  enddo
2696  if ( j/=je+1 ) then
2697  do i=is,ie+1
2698  v(i,j,k) = v(i,j,k) + dmv(i)
2699  enddo
2700  endif
2701  enddo
2702 
2703  enddo
2704 
2705  end subroutine ray_fast
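! In outline: on first use the damping factors are computed once; for every
! level with pfull(k) < rf_cutoff,
!   rff(k) = dt/(tau*86400.)*sin(0.5*pi*log(rf_cutoff/pfull(k))/log(rf_cutoff/ptop))**2
!   rf(k)  = 1./(1.+rff(k))
! u, v (and w when non-hydrostatic) are then multiplied by rf(k), and the
! mass-weighted momentum removed over layers 1..kmax (dmu, dmv) is deposited
! back uniformly into the top k_rf layers (after division by their mass dm),
! so the sponge approximately conserves column momentum.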
2706 
2707  subroutine gz_bc(gz,delzBC,bd,npx,npy,npz,step,split)
2709  type(fv_grid_bounds_type), intent(IN) :: bd
2710  integer, intent(IN) :: npx, npy, npz
2711  real, intent(INOUT) :: gz(bd%isd:bd%ied,bd%jsd:bd%jed,npz+1)
2712  type(fv_nest_bc_type_3d), intent(IN) :: delzBC
2713  real, intent(IN) :: step, split
2714 
2715  real :: a1, a2
2716  integer i, j, k
2717 
2718  integer :: is, ie, js, je
2719  integer :: isd, ied, jsd, jed
2720 
2721  integer :: istart, iend
2722 
2723  is = bd%is
2724  ie = bd%ie
2725  js = bd%js
2726  je = bd%je
2727  isd = bd%isd
2728  ied = bd%ied
2729  jsd = bd%jsd
2730  jed = bd%jed
2731 
2732  a1 = (split-step)/split
2733  a2 = step/split
2734 
2735  if (is == 1) then
2736 !$OMP parallel do default(none) shared(jsd,jed,npz,isd,delzBC,gz,a1,a2)
2737  do j=jsd,jed
2738  do k=npz,1,-1
2739  do i=isd,0
2740  gz(i,j,k) = gz(i,j,k+1) - (delzbc%west_t1(i,j,k)*a2 + delzbc%west_t0(i,j,k)*a1)
2741  enddo
2742  enddo
2743  enddo
2744  endif
2745 
2746  if (ie == npx-1) then
2747 !$OMP parallel do default(none) shared(jsd,jed,npz,npx,ied,delzBC,gz,a1,a2)
2748  do j=jsd,jed
2749  do k=npz,1,-1
2750  do i=npx,ied
2751  gz(i,j,k) = gz(i,j,k+1) - (delzbc%east_t1(i,j,k)*a2 + delzbc%east_t0(i,j,k)*a1)
2752  enddo
2753  enddo
2754  enddo
2755  endif
2756 
2757  if (is == 1) then
2758  istart = is
2759  else
2760  istart = isd
2761  end if
2762  if (ie == npx-1) then
2763  iend = ie
2764  else
2765  iend = ied
2766  end if
2767 
2768  if (js == 1) then
2769 !$OMP parallel do default(none) shared(jsd,npz,istart,iend,delzBC,gz,a1,a2)
2770  do j=jsd,0
2771  do k=npz,1,-1
2772  do i=istart,iend
2773  gz(i,j,k) = gz(i,j,k+1) - (delzbc%south_t1(i,j,k)*a2 + delzbc%south_t0(i,j,k)*a1)
2774  !if (gz(i,j,k) <= gz(i,j,k+1) .or. abs(gz(i,j,k)) > 1.e6) print*, ' BAD GZ (bc): ', i, j, k, gz(i,j,k:k+1), delzBC%west_t1(i,j,k), delzBC%west_t0(i,j,k)
2775  enddo
2776  enddo
2777  enddo
2778  endif
2779 
2780  if (je == npy-1) then
2781 !$OMP parallel do default(none) shared(npy,jed,npz,istart,iend,delzBC,gz,a1,a2)
2782  do j=npy,jed
2783  do k=npz,1,-1
2784  do i=istart,iend
2785  gz(i,j,k) = gz(i,j,k+1) - (delzbc%north_t1(i,j,k)*a2 + delzbc%north_t0(i,j,k)*a1)
2786  !if (gz(i,j,k) <= gz(i,j,k+1) .or. abs(gz(i,j,k)) > 1.e6) print*, ' BAD GZ (bc): ', i, j, k, gz(i,j,k:k+1), delzBC%west_t1(i,j,k), delzBC%west_t0(i,j,k)
2787  enddo
2788  enddo
2789  enddo
2790  endif
2791 
2792  end subroutine gz_bc
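! Schematically: the boundary values of gz on a nested/regional domain are
! rebuilt from the delz boundary arrays by integrating upward from the surface,
!   gz(i,j,k) = gz(i,j,k+1) - ( delzbc_side_t1*a2 + delzbc_side_t0*a1 )
! (side = west/east/south/north as appropriate), where a1 = (split-step)/split
! and a2 = step/split interpolate linearly in time between the two stored
! boundary states.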
2793 
2794 
2795 end module dyn_core_mod