FV3DYCORE Version 1.0.0
fv_dynamics.F90
1 
2 !***********************************************************************
3 !* GNU Lesser General Public License
4 !*
5 !* This file is part of the FV3 dynamical core.
6 !*
7 !* The FV3 dynamical core is free software: you can redistribute it
8 !* and/or modify it under the terms of the
9 !* GNU Lesser General Public License as published by the
10 !* Free Software Foundation, either version 3 of the License, or
11 !* (at your option) any later version.
12 !*
13 !* The FV3 dynamical core is distributed in the hope that it will be
14 !* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
15 !* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16 !* See the GNU General Public License for more details.
17 !*
18 !* You should have received a copy of the GNU Lesser General Public
19 !* License along with the FV3 dynamical core.
20 !* If not, see <http://www.gnu.org/licenses/>.
21 !***********************************************************************
22 
 25 !>@brief The module 'fv_dynamics' is the top-level routine for the dynamical core.
 26 module fv_dynamics_mod
 27 
28 ! Modules Included:
29 ! <table>
30 ! <tr>
31 ! <th>Module Name</th>
32 ! <th>Functions Included</th>
33 ! </tr>
34 ! <tr>
35 ! <td>boundary_mod</td>
36 ! <td>nested_grid_BC_apply_intT</td>
37 ! </tr>
38 ! <tr>
39 ! <td>constants_mod</td>
40 ! <td>grav, pi=>pi_8, radius, hlv, rdgas, omega, rvgas, cp_vapor</td>
41 ! </tr>
42 ! <tr>
43 ! <td>diag_manager_mod</td>
44 ! <td>send_data</td>
45 ! </tr>
46 ! <tr>
47 ! <td>dyn_core_mod</td>
48 ! <td>dyn_core, del2_cubed, init_ijk_mem</td>
49 ! </tr>
50 ! <tr>
51 ! <td>field_manager_mod</td>
52 ! <td>MODEL_ATMOS</td>
53 ! </tr>
54 ! <tr>
55 ! <td>fv_arrays_mod</td>
56 ! <td>fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type, fv_diag_type, fv_grid_bounds_type</td>
57 ! </tr>
58 ! <tr>
59 ! <td>fv_diagnostics_mod</td>
60 ! <td>fv_time, prt_mxm, range_check, prt_minmax</td>
61 ! </tr>
62 ! <tr>
63 ! <td>fv_fill_mod</td>
64 ! <td>fill2D</td>
65 ! </tr>
66 ! <tr>
67 ! <td>fv_grid_utils_mod</td>
68 ! <td>cubed_to_latlon, c2l_ord2, g_sum</td>
69 ! </tr>
70 ! <tr>
71 ! <td>fv_mapz_mod</td>
72 ! <td>compute_total_energy, Lagrangian_to_Eulerian, moist_cv, moist_cp</td>
73 ! </tr>
74 ! <tr>
75 ! <td>fv_mp_mod</td>
76 ! <td>is_master,group_halo_update_type, start_group_halo_update, complete_group_halo_update</td>
77 ! </tr>
78 ! <tr>
79 ! <td>fv_nesting_mod</td>
80 ! <td>setup_nested_grid_BCs</td>
81 ! </tr>
82 ! <tr>
83 ! <td>fv_nwp_mod</td>
84 ! <td>do_adiabatic_init</td>
85 ! </tr>
86 ! <tr>
87 ! <td>fv_restart_mod</td>
88 ! <td>d2a_setup, d2c_setup</td>
89 ! </tr>
90 ! <tr>
91 ! <td>fv_sg_mod</td>
92 ! <td>neg_adj3</td>
93 ! </tr>
94 ! <tr>
95 ! <td>fv_sg_mod</td>
96 ! <td>neg_adj2</td>
97 ! </tr>
98 ! <tr>
99 ! <td>fv_timing_mod</td>
100 ! <td>timing_on, timing_off</td>
101 ! </tr>
102 ! <tr>
103 ! <td>fv_tracer2d_mod</td>
104 ! <td>tracer_2d, tracer_2d_1L, tracer_2d_nested</td>
105 ! </tr>
106 ! <tr>
 107 ! <td>mpp_mod</td>
108 ! <td>mpp_pe</td>
109 ! </tr>
110 ! <tr>
 111 ! <td>mpp_domains_mod</td>
112 ! <td>DGRID_NE, CGRID_NE, mpp_update_domains, domain2D</td>
113 ! </tr>
118 ! <tr>
119 ! <td>tracer_manager_mod</td>
120 ! <td>get_tracer_index</td>
121 ! </tr>
122 ! </table>
123 
124  use constants_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, omega, rvgas, cp_vapor
129  use fv_fill_mod, only: fill2d
130  use fv_mp_mod, only: is_master
131  use fv_mp_mod, only: group_halo_update_type
132  use fv_mp_mod, only: start_group_halo_update, complete_group_halo_update
134  use diag_manager_mod, only: send_data
136  use mpp_domains_mod, only: dgrid_ne, cgrid_ne, mpp_update_domains, domain2d
137  use mpp_mod, only: mpp_pe
138  use field_manager_mod, only: model_atmos
139  use tracer_manager_mod, only: get_tracer_index
140  use fv_sg_mod, only: neg_adj3, neg_adj2
144  use fv_regional_mod, only: a_step, p_step, k_step
149 #ifdef MULTI_GASES
150  use multi_gases_mod, only: virq, virqd, vicpqd
151 #endif
152 
153 implicit none
154  logical :: rf_initialized = .false.
155  logical :: pt_initialized = .false.
156  logical :: bad_range = .false.
157  real, allocatable :: rf(:)
158  integer :: kmax=1
159  integer :: k_rf = 0
160 
161  real :: agrav
162 #ifdef HIWPP
163  real, allocatable:: u00(:,:,:), v00(:,:,:)
164 #endif
165 private
166 public :: fv_dynamics
167 
168 contains
169 
170 !-----------------------------------------------------------------------
171 ! fv_dynamics :: FV dynamical core driver
172 !-----------------------------------------------------------------------
173 
174  subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, &
175  reproduce_sum, kappa, cp_air, zvir, ptop, ks, ncnst, n_split, &
176  q_split, u, v, w, delz, hydrostatic, pt, delp, q, &
177  ps, pe, pk, peln, pkz, phis, q_con, omga, ua, va, uc, vc, &
178  ak, bk, mfx, mfy, cx, cy, ze0, hybrid_z, &
179  gridstruct, flagstruct, neststruct, idiag, bd, &
180  parent_grid, domain, diss_est, time_total)
182 #ifdef CCPP
183  use mpp_mod, only: fatal, mpp_error
184  use ccpp_data, only: ccpp_interstitial
185 #endif
186 
187  real, intent(IN) :: bdt
188  real, intent(IN) :: consv_te
189  real, intent(IN) :: kappa, cp_air
190  real, intent(IN) :: zvir, ptop
191  real, intent(IN), optional :: time_total
192 
193  integer, intent(IN) :: npx
194  integer, intent(IN) :: npy
195  integer, intent(IN) :: npz
196  integer, intent(IN) :: nq_tot
197  integer, intent(IN) :: ng
198  integer, intent(IN) :: ks
199  integer, intent(IN) :: ncnst
200  integer, intent(IN) :: n_split
201  integer, intent(IN) :: q_split
202  logical, intent(IN) :: fill
203  logical, intent(IN) :: reproduce_sum
204  logical, intent(IN) :: hydrostatic
205  logical, intent(IN) :: hybrid_z
206 
207  type(fv_grid_bounds_type), intent(IN) :: bd
208  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u
209  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v
210  real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1:)
211  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
212  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
213  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst)
214  real, intent(inout) :: delz(bd%isd:,bd%jsd:,1:)
215  real, intent(inout) :: ze0(bd%is:, bd%js: ,1:)
 216  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed,npz) :: diss_est ! diffusion estimate for SKEB
217 ! ze0 no longer used
218 
219 !-----------------------------------------------------------------------
 220 ! Auxiliary pressure arrays:
221 ! The 5 vars below can be re-computed from delp and ptop.
222 !-----------------------------------------------------------------------
223 ! dyn_aux:
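! For reference (approximate hydrostatic relations, not executed here):
!   pe(i,1,j) = ptop;  pe(i,k+1,j) = pe(i,k,j) + delp(i,j,k);  ps(i,j) = pe(i,npz+1,j)
!   peln = log(pe);  pk = pe**kappa;  pkz is the layer-mean of pk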
224  real, intent(inout) :: ps (bd%isd:bd%ied ,bd%jsd:bd%jed)
225  real, intent(inout) :: pe (bd%is-1:bd%ie+1, npz+1,bd%js-1:bd%je+1)
226  real, intent(inout) :: pk (bd%is:bd%ie,bd%js:bd%je, npz+1)
227  real, intent(inout) :: peln(bd%is:bd%ie,npz+1,bd%js:bd%je)
228  real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz)
229  real, intent(inout):: q_con(bd%isd:, bd%jsd:, 1:)
230 
231 !-----------------------------------------------------------------------
232 ! Others:
233 !-----------------------------------------------------------------------
234  real, intent(inout) :: phis(bd%isd:bd%ied,bd%jsd:bd%jed)
235  real, intent(inout) :: omga(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
236  real, intent(inout) :: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
237  real, intent(inout) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
238 
239  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz):: ua, va
240  real, intent(in), dimension(npz+1):: ak, bk
241 
242 ! Accumulated Mass flux arrays: the "Flux Capacitor"
243  real, intent(inout) :: mfx(bd%is:bd%ie+1, bd%js:bd%je, npz)
244  real, intent(inout) :: mfy(bd%is:bd%ie , bd%js:bd%je+1, npz)
245 ! Accumulated Courant number arrays
246  real, intent(inout) :: cx(bd%is:bd%ie+1, bd%jsd:bd%jed, npz)
247  real, intent(inout) :: cy(bd%isd:bd%ied ,bd%js:bd%je+1, npz)
248 
249  type(fv_grid_type), intent(inout), target :: gridstruct
250  type(fv_flags_type), intent(INOUT) :: flagstruct
251  type(fv_nest_type), intent(INOUT) :: neststruct
252  type(domain2d), intent(INOUT) :: domain
253  type(fv_atmos_type), pointer, intent(IN) :: parent_grid
254  type(fv_diag_type), intent(IN) :: idiag
255 
256 ! Local Arrays
257  real :: ws(bd%is:bd%ie,bd%js:bd%je)
258 #ifndef CCPP
259  real :: te_2d(bd%is:bd%ie,bd%js:bd%je)
260 #endif
261  real :: teq(bd%is:bd%ie,bd%js:bd%je)
262  real :: ps2(bd%isd:bd%ied,bd%jsd:bd%jed)
263  real :: m_fac(bd%is:bd%ie,bd%js:bd%je)
264  real :: pfull(npz)
265  real, dimension(bd%is:bd%ie):: cvm
266 #ifndef CCPP
267  real, allocatable :: dp1(:,:,:), dtdt_m(:,:,:), cappa(:,:,:)
268 #endif
269 #ifdef MULTI_GASES
270  real, allocatable :: kapad(:,:,:)
271 #endif
272  real:: akap, rdg, ph1, ph2, mdt, gam, amdt, u0
273  real:: recip_k_split,reg_bc_update_time
274  integer :: kord_tracer(ncnst)
275  integer :: i,j,k, n, iq, n_map, nq, nwat, k_split
276  integer :: sphum, liq_wat = -999, ice_wat = -999 ! GFDL physics
277  integer :: rainwat = -999, snowwat = -999, graupel = -999, cld_amt = -999
278  integer :: theta_d = -999
279 #ifdef CCPP
280  logical used, do_omega
281 #else
282  logical used, last_step, do_omega
283 #endif
284  integer, parameter :: max_packs=12
285  type(group_halo_update_type), save :: i_pack(max_packs)
286  integer :: is, ie, js, je
287  integer :: isd, ied, jsd, jed
288  real :: dt2
289 #ifdef CCPP
290  integer :: ierr
291 #endif
292 
293 #ifdef CCPP
294  ccpp_associate: associate( cappa => ccpp_interstitial%cappa, &
295  dp1 => ccpp_interstitial%te0, &
296  dtdt_m => ccpp_interstitial%dtdt, &
297  last_step => ccpp_interstitial%last_step, &
298  te_2d => ccpp_interstitial%te0_2d )
299 #endif
300 
301  is = bd%is
302  ie = bd%ie
303  js = bd%js
304  je = bd%je
305  isd = bd%isd
306  ied = bd%ied
307  jsd = bd%jsd
308  jed = bd%jed
309 
310 
311 ! cv_air = cp_air - rdgas
312  agrav = 1. / grav
313  dt2 = 0.5*bdt
314  k_split = flagstruct%k_split
315  recip_k_split=1./real(k_split)
316  nwat = flagstruct%nwat
317  nq = nq_tot - flagstruct%dnats
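! nq is the number of tracers advected by the dynamics; the last dnats tracers
! in q are carried but not advected here.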
318  rdg = -rdgas * agrav
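! Note: rdg = -Rd/g; with delz < 0 by convention, rdg*delp*pt/delz = Rd*rho*T is
! the (positive) full pressure from the ideal gas law, used in the
! non-hydrostatic pkz computation below.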
319 
320 #ifdef CCPP
321 
322  ! Reset all interstitial variables for CCPP version
323  ! of fast physics, and manually set runtime parameters
324  call ccpp_interstitial%reset()
325  if (flagstruct%do_sat_adj) then
326  ccpp_interstitial%out_dt = (idiag%id_mdt > 0)
327  end if
328 
329 #else
330  te_2d = 0.
331 
332  allocate ( dp1(isd:ied, jsd:jed, 1:npz) )
333  call init_ijk_mem(isd,ied, jsd,jed, npz, dp1, 0.)
334 
335 #ifdef MOIST_CAPPA
336  allocate ( cappa(isd:ied,jsd:jed,npz) )
337  call init_ijk_mem(isd,ied, jsd,jed, npz, cappa, 0.)
338 #else
339  allocate ( cappa(isd:isd,jsd:jsd,1) )
340  cappa = 0.
341 #endif
342 #endif
343 
344 #ifdef MULTI_GASES
345  allocate ( kapad(isd:ied, jsd:jed, npz) )
346  call init_ijk_mem(isd,ied, jsd,jed, npz, kapad, kappa)
347 #endif
348 
349  !We call this BEFORE converting pt to virtual potential temperature,
350  !since we interpolate on (regular) temperature rather than theta.
351  if (gridstruct%nested .or. any(neststruct%child_grids)) then
352  call timing_on('NEST_BCs')
353  call setup_nested_grid_bcs(npx, npy, npz, zvir, ncnst, &
354  u, v, w, pt, delp, delz, q, uc, vc, pkz, &
355  neststruct%nested, flagstruct%inline_q, flagstruct%make_nh, ng, &
356  gridstruct, flagstruct, neststruct, &
357  neststruct%nest_timestep, neststruct%tracer_nest_timestep, &
358  domain, bd, nwat)
359 
360 #ifndef SW_DYNAMICS
361  if (gridstruct%nested) then
362  !Correct halo values have now been set up for BCs; we can go ahead and apply them too...
363  call nested_grid_bc_apply_intt(pt, &
364  0, 0, npx, npy, npz, bd, 1., 1., &
365  neststruct%pt_BC, bctype=neststruct%nestbctype )
366 #ifdef USE_COND
367  call nested_grid_bc_apply_intt(q_con, &
368  0, 0, npx, npy, npz, bd, 1., 1., &
369  neststruct%q_con_BC, bctype=neststruct%nestbctype )
370 #ifdef MOIST_CAPPA
371  call nested_grid_bc_apply_intt(cappa, &
372  0, 0, npx, npy, npz, bd, 1., 1., &
373  neststruct%cappa_BC, bctype=neststruct%nestbctype )
374 #endif
375 #endif
376  endif
377 #endif
378  call timing_off('NEST_BCs')
379  endif
380 
 381  ! For the regional domain set values valid at the beginning of the
382  ! current large timestep at the boundary points of the pertinent
383  ! prognostic arrays.
384 
385  if (flagstruct%regional) then
386  call timing_on('Regional_BCs')
387 
388  reg_bc_update_time=current_time_in_seconds
389  call set_regional_bcs &
390  (delp,delz,w,pt &
391 #ifdef USE_COND
392  ,q_con &
393 #endif
394 #ifdef MOIST_CAPPA
395  ,cappa &
396 #endif
397  ,q,u,v,uc,vc, bd, npz, reg_bc_update_time )
398 
399  call timing_off('Regional_BCs')
400  endif
401 
402  if ( flagstruct%no_dycore ) then
403  if ( nwat.eq.2 .and. (.not.hydrostatic) ) then
404  sphum = get_tracer_index(model_atmos, 'sphum')
405  endif
406  goto 911
407  endif
408 
409  if ( nwat==0 ) then
410  sphum = 1
411  cld_amt = -1 ! to cause trouble if (mis)used
412  else
413  sphum = get_tracer_index(model_atmos, 'sphum')
414  liq_wat = get_tracer_index(model_atmos, 'liq_wat')
415  ice_wat = get_tracer_index(model_atmos, 'ice_wat')
416  rainwat = get_tracer_index(model_atmos, 'rainwat')
417  snowwat = get_tracer_index(model_atmos, 'snowwat')
418  graupel = get_tracer_index(model_atmos, 'graupel')
419  cld_amt = get_tracer_index(model_atmos, 'cld_amt')
420  endif
421 
422  theta_d = get_tracer_index(model_atmos, 'theta_d')
423 
424 #ifdef SW_DYNAMICS
425  akap = 1.
426  pfull(1) = 0.5*flagstruct%p_ref
427 #else
428  akap = kappa
429 
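! Reference full-level pressures: pfull(k) is the logarithmic layer mean
! (ph2-ph1)/log(ph2/ph1) of the interface pressures given by ak, bk and p_ref.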
430 !$OMP parallel do default(none) shared(npz,ak,bk,flagstruct,pfull) &
431 !$OMP private(ph1, ph2,k)
432  do k=1,npz
433  ph1 = ak(k ) + bk(k )*flagstruct%p_ref
434  ph2 = ak(k+1) + bk(k+1)*flagstruct%p_ref
435  pfull(k) = (ph2 - ph1) / log(ph2/ph1)
436  enddo
437 
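! dp1 is used below as scratch for the virtual-temperature factor, dp1 = zvir*sphum
! (or virq(q)-1 with MULTI_GASES), so that Tv = T*(1.+dp1).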
438  if ( hydrostatic ) then
439 #if defined(CCPP) && defined(__GFORTRAN__)
440 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,zvir,nwat,q,q_con,sphum,liq_wat, &
441 #else
442 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,zvir,nwat,q,q_con,sphum,liq_wat, &
443 #endif
444 !$OMP rainwat,ice_wat,snowwat,graupel) private(cvm,i,j,k)
445  do k=1,npz
446  do j=js,je
447 #ifdef USE_COND
448  call moist_cp(is,ie,isd,ied,jsd,jed, npz, j, k, nwat, sphum, liq_wat, rainwat, &
449  ice_wat, snowwat, graupel, q, q_con(is:ie,j,k), cvm)
450 #endif
451  do i=is,ie
452  dp1(i,j,k) = zvir*q(i,j,k,sphum)
453  enddo
454  enddo
455  enddo
456  else
457 #if defined(CCPP) && defined(__GFORTRAN__)
458 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,zvir,q,q_con,sphum,liq_wat, &
459 #else
460 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,zvir,q,q_con,sphum,liq_wat, &
461 #endif
462 !$OMP rainwat,ice_wat,snowwat,graupel,pkz,flagstruct, &
463 #ifdef MULTI_GASES
464 !$OMP kapad, &
465 #endif
466 #if defined(CCPP) && defined(__GFORTRAN__)
467 !$OMP kappa,rdg,delp,pt,delz,nwat) &
468 #else
469 !$OMP cappa,kappa,rdg,delp,pt,delz,nwat) &
470 #endif
471 !$OMP private(cvm,i,j,k)
472  do k=1,npz
473  if ( flagstruct%moist_phys ) then
474  do j=js,je
475 #ifdef MOIST_CAPPA
476  call moist_cv(is,ie,isd,ied,jsd,jed, npz, j, k, nwat, sphum, liq_wat, rainwat, &
477  ice_wat, snowwat, graupel, q, q_con(is:ie,j,k), cvm)
478 #endif
479  do i=is,ie
480 #ifdef MULTI_GASES
481  dp1(i,j,k) = virq(q(i,j,k,:))-1.
482  kapad(i,j,k)= kappa * (virqd(q(i,j,k,:))/vicpqd(q(i,j,k,:)))
483 #else
484  dp1(i,j,k) = zvir*q(i,j,k,sphum)
485 #endif
486 
487 #ifdef MOIST_CAPPA
488  cappa(i,j,k) = rdgas/(rdgas + cvm(i)/(1.+dp1(i,j,k)))
489  pkz(i,j,k) = exp(cappa(i,j,k)*log(rdg*delp(i,j,k)*pt(i,j,k)* &
490 #ifdef MULTI_GASES
491  (1.+dp1(i,j,k)) /delz(i,j,k)) )
492 #else
493  (1.+dp1(i,j,k))*(1.-q_con(i,j,k))/delz(i,j,k)) )
494 #endif
495 #else
496  pkz(i,j,k) = exp( kappa*log(rdg*delp(i,j,k)*pt(i,j,k)* &
497  (1.+dp1(i,j,k))/delz(i,j,k)) )
498 ! Using dry pressure for the definition of the virtual potential temperature
499 ! pkz(i,j,k) = exp( kappa*log(rdg*delp(i,j,k)*pt(i,j,k)* &
500 ! (1.-q(i,j,k,sphum))/delz(i,j,k)) )
501 #endif
502 
503  enddo
504  enddo
505  else
506  do j=js,je
507  do i=is,ie
508  dp1(i,j,k) = 0.
509 #ifdef MULTI_GASES
510  kapad(i,j,k)= kappa * (virqd(q(i,j,k,:))/vicpqd(q(i,j,k,:)))
511  pkz(i,j,k) = exp(kapad(i,j,k)*log(rdg*virqd(q(i,j,k,:))*delp(i,j,k)*pt(i,j,k)/delz(i,j,k)))
512 #else
513  pkz(i,j,k) = exp(kappa*log(rdg*delp(i,j,k)*pt(i,j,k)/delz(i,j,k)))
514 #endif
515  enddo
516  enddo
517  endif
518  enddo
519  endif
520  if ( flagstruct%fv_debug ) then
521 #ifdef MOIST_CAPPA
522  call prt_mxm('cappa', cappa, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
523 #endif
524  call prt_mxm('PS', ps, is, ie, js, je, ng, 1, 0.01, gridstruct%area_64, domain)
525  call prt_mxm('T_dyn_b', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
526  if ( .not. hydrostatic) call prt_mxm('delz', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
527  call prt_mxm('delp_b ', delp, is, ie, js, je, ng, npz, 0.01, gridstruct%area_64, domain)
528  call prt_mxm('pk_b', pk, is, ie, js, je, 0, npz+1, 1.,gridstruct%area_64, domain)
529  call prt_mxm('pkz_b', pkz,is, ie, js, je, 0, npz, 1.,gridstruct%area_64, domain)
530  endif
531 
532 !---------------------
533 ! Compute Total Energy
534 !---------------------
535 
536  if ( consv_te > 0. .and. (.not.do_adiabatic_init) ) then
537  call compute_total_energy(is, ie, js, je, isd, ied, jsd, jed, npz, &
538  u, v, w, delz, pt, delp, q, dp1, pe, peln, phis, &
539  gridstruct%rsin2, gridstruct%cosa_s, &
540  zvir, cp_air, rdgas, hlv, te_2d, ua, va, teq, &
541  flagstruct%moist_phys, nwat, sphum, liq_wat, rainwat, &
542  ice_wat, snowwat, graupel, hydrostatic, idiag%id_te)
543  if( idiag%id_te>0 ) then
544  used = send_data(idiag%id_te, teq, fv_time)
545 ! te_den=1.E-9*g_sum(teq, is, ie, js, je, ng, area, 0)/(grav*4.*pi*radius**2)
546 ! if(is_master()) write(*,*) 'Total Energy Density (Giga J/m**2)=',te_den
547  endif
548  endif
549 
550  if( (flagstruct%consv_am.or.idiag%id_amdt>0) .and. (.not.do_adiabatic_init) ) then
551  call compute_aam(npz, is, ie, js, je, isd, ied, jsd, jed, gridstruct, bd, &
552  ptop, ua, va, u, v, delp, teq, ps2, m_fac)
553  endif
554 
555  if( .not.flagstruct%RF_fast .and. flagstruct%tau .ne. 0. ) then
556  if ( gridstruct%grid_type<4 ) then
557 ! if ( flagstruct%RF_fast ) then
558 ! call Ray_fast(abs(dt), npx, npy, npz, pfull, flagstruct%tau, u, v, w, &
559 ! dp_ref, ptop, hydrostatic, flagstruct%rf_cutoff, bd)
560 ! else
561  call rayleigh_super(abs(bdt), npx, npy, npz, ks, pfull, phis, flagstruct%tau, u, v, w, pt, &
562  ua, va, delz, gridstruct%agrid, cp_air, rdgas, ptop, hydrostatic, &
563  (.not. (neststruct%nested .or. flagstruct%regional)), flagstruct%rf_cutoff, gridstruct, domain, bd)
564 ! endif
565  else
566  call rayleigh_friction(abs(bdt), npx, npy, npz, ks, pfull, flagstruct%tau, u, v, w, pt, &
567  ua, va, delz, cp_air, rdgas, ptop, hydrostatic, .true., flagstruct%rf_cutoff, gridstruct, domain, bd)
568  endif
569  endif
570 
571 #endif
572 
573 #ifndef SW_DYNAMICS
574 ! Convert pt to virtual potential temperature on the first timestep
575  if ( flagstruct%adiabatic .and. flagstruct%kord_tm>0 ) then
576  if ( .not.pt_initialized )then
577 !$OMP parallel do default(none) shared(theta_d,is,ie,js,je,npz,pt,pkz,q)
578  do k=1,npz
579  do j=js,je
580  do i=is,ie
581  pt(i,j,k) = pt(i,j,k)/pkz(i,j,k)
582  enddo
583  enddo
584  if ( theta_d>0 ) then
585  do j=js,je
586  do i=is,ie
587  q(i,j,k,theta_d) = pt(i,j,k)
588  enddo
589  enddo
590  endif
591  enddo
592  pt_initialized = .true.
593  endif
594  else
595 #if defined(CCPP) && defined(__GFORTRAN__)
596 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pt,pkz,q_con)
597 #else
598 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pt,dp1,pkz,q_con)
599 #endif
600  do k=1,npz
601  do j=js,je
602  do i=is,ie
603 
604 #ifdef MULTI_GASES
605  pt(i,j,k) = pt(i,j,k)*(1.+dp1(i,j,k))/pkz(i,j,k)
606 #else
607 #ifdef USE_COND
608  pt(i,j,k) = pt(i,j,k)*(1.+dp1(i,j,k))*(1.-q_con(i,j,k))/pkz(i,j,k)
609 #else
610  pt(i,j,k) = pt(i,j,k)*(1.+dp1(i,j,k))/pkz(i,j,k)
611 #endif
612 #endif
613  enddo
614  enddo
615  enddo
616  endif
617 #endif
618 
619 
620  last_step = .false.
621  mdt = bdt / real(k_split)
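! Time-split structure: the large timestep bdt is divided into k_split remapping
! sub-steps of length mdt, each of which dyn_core further divides into n_split
! acoustic steps.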
622 
623 #ifndef CCPP
624  if ( idiag%id_mdt > 0 .and. (.not. do_adiabatic_init) ) then
625  allocate ( dtdt_m(is:ie,js:je,npz) )
626 !$OMP parallel do default(none) shared(is,ie,js,je,npz,dtdt_m)
627  do k=1,npz
628  do j=js,je
629  do i=is,ie
630  dtdt_m(i,j,k) = 0.
631  enddo
632  enddo
633  enddo
634  endif
635 #endif
636 
637  call timing_on('FV_DYN_LOOP')
638  do n_map=1, k_split ! first level of time-split
639  k_step = n_map
640  call timing_on('COMM_TOTAL')
641 #ifdef USE_COND
642  call start_group_halo_update(i_pack(11), q_con, domain)
643 #ifdef MOIST_CAPPA
644  call start_group_halo_update(i_pack(12), cappa, domain)
645 #endif
646 #endif
647 #ifdef MULTI_GASES
648  call start_group_halo_update(i_pack(13), kapad, domain)
649 #endif
650  call start_group_halo_update(i_pack(1), delp, domain, complete=.false.)
651  call start_group_halo_update(i_pack(1), pt, domain, complete=.true.)
652 #ifndef ROT3
653  call start_group_halo_update(i_pack(8), u, v, domain, gridtype=dgrid_ne)
654 #endif
655  call timing_off('COMM_TOTAL')
656 #if defined(CCPP) && defined(__GFORTRAN__)
657 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,delp)
658 #else
659 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,dp1,delp)
660 #endif
661  do k=1,npz
662  do j=jsd,jed
663  do i=isd,ied
664  dp1(i,j,k) = delp(i,j,k)
665  enddo
666  enddo
667  enddo
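! dp1 now holds the pressure thickness at the start of this remapping sub-step;
! it is the air-mass field advanced by the tracer transport (tracer_2d*) below
! using the accumulated mass fluxes.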
668 
669  if ( n_map==k_split ) last_step = .true.
670 
671 #ifdef USE_COND
672  call timing_on('COMM_TOTAL')
673  call complete_group_halo_update(i_pack(11), domain)
674 #ifdef MOIST_CAPPA
675  call complete_group_halo_update(i_pack(12), domain)
676 #endif
677  call timing_off('COMM_TOTAL')
678 #endif
679 #ifdef MULTI_GASES
680  call complete_group_halo_update(i_pack(13), domain)
681 #endif
682 
683  call timing_on('DYN_CORE')
684  call dyn_core(npx, npy, npz, ng, sphum, nq, mdt, n_split, zvir, cp_air, akap, cappa, &
685 #ifdef MULTI_GASES
686  kapad, &
687 #endif
688  grav, hydrostatic, &
689  u, v, w, delz, pt, q, delp, pe, pk, phis, ws, omga, ptop, pfull, ua, va, &
690  uc, vc, mfx, mfy, cx, cy, pkz, peln, q_con, ak, bk, ks, &
691  gridstruct, flagstruct, neststruct, idiag, bd, &
692  domain, n_map==1, i_pack, last_step, diss_est,time_total)
693  call timing_off('DYN_CORE')
694 
695 #ifdef SW_DYNAMICS
696 !!$OMP parallel do default(none) shared(is,ie,js,je,ps,delp,agrav)
697  do j=js,je
698  do i=is,ie
699  ps(i,j) = delp(i,j,1) * agrav
700  enddo
701  enddo
702 #else
703  if( .not. flagstruct%inline_q .and. nq /= 0 ) then
704 !--------------------------------------------------------
705 ! Perform large-time-step scalar transport using the accumulated CFL and
706 ! mass fluxes
707  call timing_on('tracer_2d')
708  !!! CLEANUP: merge these two calls?
709  if (gridstruct%nested .or. flagstruct%regional) then
710  call tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, nq, &
711  flagstruct%hord_tr, q_split, mdt, idiag%id_divg, i_pack(10), &
712  flagstruct%nord_tr, flagstruct%trdm2, &
713  k_split, neststruct, parent_grid, flagstruct%lim_fac,flagstruct%regional)
714  else
715  if ( flagstruct%z_tracer ) then
716  call tracer_2d_1l(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, nq, &
717  flagstruct%hord_tr, q_split, mdt, idiag%id_divg, i_pack(10), &
718  flagstruct%nord_tr, flagstruct%trdm2, flagstruct%lim_fac,flagstruct%regional)
719  else
720  call tracer_2d(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, nq, &
721  flagstruct%hord_tr, q_split, mdt, idiag%id_divg, i_pack(10), &
722  flagstruct%nord_tr, flagstruct%trdm2, flagstruct%lim_fac,flagstruct%regional)
723  endif
724  endif
725  call timing_off('tracer_2d')
726 
727 #ifdef FILL2D
728  if ( flagstruct%hord_tr<8 .and. flagstruct%moist_phys ) then
729  call timing_on('Fill2D')
730  if ( liq_wat > 0 ) &
731  call fill2d(is, ie, js, je, ng, npz, q(isd,jsd,1,liq_wat), delp, gridstruct%area, domain, neststruct%nested, gridstruct%regional, npx, npy)
732  if ( rainwat > 0 ) &
733  call fill2d(is, ie, js, je, ng, npz, q(isd,jsd,1,rainwat), delp, gridstruct%area, domain, neststruct%nested, gridstruct%regional, npx, npy)
734  if ( ice_wat > 0 ) &
735  call fill2d(is, ie, js, je, ng, npz, q(isd,jsd,1,ice_wat), delp, gridstruct%area, domain, neststruct%nested, gridstruct%regional, npx, npy)
736  if ( snowwat > 0 ) &
737  call fill2d(is, ie, js, je, ng, npz, q(isd,jsd,1,snowwat), delp, gridstruct%area, domain, neststruct%nested, gridstruct%regional, npx, npy)
738  if ( graupel > 0 ) &
739  call fill2d(is, ie, js, je, ng, npz, q(isd,jsd,1,graupel), delp, gridstruct%area, domain, neststruct%nested, gridstruct%regional, npx, npy)
740  call timing_off('Fill2D')
741  endif
742 #endif
743 
744  if( last_step .and. idiag%id_divg>0 ) then
745  used = send_data(idiag%id_divg, dp1, fv_time)
746  if(flagstruct%fv_debug) call prt_mxm('divg', dp1, is, ie, js, je, 0, npz, 1.,gridstruct%area_64, domain)
747  endif
748  endif
749 
750  if ( npz > 4 ) then
751 !------------------------------------------------------------------------
752 ! Perform vertical remapping from Lagrangian control-volume to
753 ! the Eulerian coordinate as specified by the routine set_eta.
754 ! Note that this finite-volume dycore is otherwise independent of the vertical
755 ! Eulerian coordinate.
756 !------------------------------------------------------------------------
757 
758  do iq=1,nq
759  kord_tracer(iq) = flagstruct%kord_tr
760  if ( iq==cld_amt ) kord_tracer(iq) = 9 ! monotonic
761  enddo
762 
763  do_omega = hydrostatic .and. last_step
764  call timing_on('Remapping')
765 #ifdef AVEC_TIMERS
766  call avec_timer_start(6)
767 #endif
768 
769  call lagrangian_to_eulerian(last_step, consv_te, ps, pe, delp, &
770  pkz, pk, mdt, bdt, npz, is,ie,js,je, isd,ied,jsd,jed, &
771  nq, nwat, sphum, q_con, u, v, w, delz, pt, q, phis, &
772  zvir, cp_air, akap, cappa, flagstruct%kord_mt, flagstruct%kord_wz, &
773  kord_tracer, flagstruct%kord_tm, peln, te_2d, &
774  ng, ua, va, omga, dp1, ws, fill, reproduce_sum, &
775  idiag%id_mdt>0, dtdt_m, ptop, ak, bk, pfull, gridstruct, domain, &
776  flagstruct%do_sat_adj, hydrostatic, hybrid_z, do_omega, &
777  flagstruct%adiabatic, do_adiabatic_init)
778 
779 #ifdef AVEC_TIMERS
780  call avec_timer_stop(6)
781 #endif
782  call timing_off('Remapping')
783 #ifdef MOIST_CAPPA
784  if ( neststruct%nested .and. .not. last_step) then
785  call nested_grid_bc_apply_intt(cappa, &
786  0, 0, npx, npy, npz, bd, real(n_map+1), real(k_split), &
787  neststruct%cappa_bc, bctype=neststruct%nestbctype )
788  endif
789 
790  if ( flagstruct%regional .and. .not. last_step) then
791  reg_bc_update_time=current_time_in_seconds+(n_map+1)*mdt
792  call regional_boundary_update(cappa, 'cappa', &
793  isd, ied, jsd, jed, npz, &
794  is, ie, js, je, &
795  isd, ied, jsd, jed, &
796  reg_bc_update_time )
797  endif
798 #endif
799 
800  if( last_step ) then
801  if( .not. hydrostatic ) then
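! Diagnose omega (Pa/s) from w (m/s): omga = (delp/delz)*w; delz < 0, so rising
! motion (w > 0) gives omga < 0.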
802 !$OMP parallel do default(none) shared(is,ie,js,je,npz,omga,delp,delz,w)
803  do k=1,npz
804  do j=js,je
805  do i=is,ie
806  omga(i,j,k) = delp(i,j,k)/delz(i,j,k)*w(i,j,k)
807  enddo
808  enddo
809  enddo
810  endif
811 !--------------------------
812 ! Filter omega for physics:
813 !--------------------------
814  if(flagstruct%nf_omega>0) &
815  call del2_cubed(omga, 0.18*gridstruct%da_min, gridstruct, domain, npx, npy, npz, flagstruct%nf_omega, bd)
816  endif
817  end if
818 #endif
819  enddo ! n_map loop
820  call timing_off('FV_DYN_LOOP')
821  if ( idiag%id_mdt > 0 .and. (.not.do_adiabatic_init) ) then
822 ! Output temperature tendency due to inline moist physics:
823 #if defined(CCPP) && defined(__GFORTRAN__)
824 !$OMP parallel do default(none) shared(is,ie,js,je,npz,bdt)
825 #else
826 !$OMP parallel do default(none) shared(is,ie,js,je,npz,dtdt_m,bdt)
827 #endif
828  do k=1,npz
829  do j=js,je
830  do i=is,ie
831  dtdt_m(i,j,k) = dtdt_m(i,j,k) *( 86400.0 / bdt)
832  enddo
833  enddo
834  enddo
835 ! call prt_mxm('Fast DTDT (deg/Day)', dtdt_m, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain)
836  used = send_data(idiag%id_mdt, dtdt_m, fv_time)
837 #ifndef CCPP
838  deallocate ( dtdt_m )
839 #endif
840  endif
841 
842  if( nwat == 6 ) then
843  if (cld_amt > 0) then
844  call neg_adj3(is, ie, js, je, ng, npz, &
845  flagstruct%hydrostatic, &
846  peln, delz, &
847  pt, delp, q(isd,jsd,1,sphum), &
848  q(isd,jsd,1,liq_wat), &
849  q(isd,jsd,1,rainwat), &
850  q(isd,jsd,1,ice_wat), &
851  q(isd,jsd,1,snowwat), &
852  q(isd,jsd,1,graupel), &
853  q(isd,jsd,1,cld_amt), flagstruct%check_negative)
854  else
855  call neg_adj3(is, ie, js, je, ng, npz, &
856  flagstruct%hydrostatic, &
857  peln, delz, &
858  pt, delp, q(isd,jsd,1,sphum), &
859  q(isd,jsd,1,liq_wat), &
860  q(isd,jsd,1,rainwat), &
861  q(isd,jsd,1,ice_wat), &
862  q(isd,jsd,1,snowwat), &
863  q(isd,jsd,1,graupel), check_negative=flagstruct%check_negative)
864  endif
865  if ( flagstruct%fv_debug ) then
866  call prt_mxm('T_dyn_a3', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
867  call prt_mxm('SPHUM_dyn', q(isd,jsd,1,sphum ), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
868  call prt_mxm('liq_wat_dyn', q(isd,jsd,1,liq_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
869  call prt_mxm('rainwat_dyn', q(isd,jsd,1,rainwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
870  call prt_mxm('ice_wat_dyn', q(isd,jsd,1,ice_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
871  call prt_mxm('snowwat_dyn', q(isd,jsd,1,snowwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
872  call prt_mxm('graupel_dyn', q(isd,jsd,1,graupel), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
873  endif
874  endif
875 
876  if( nwat == 5 ) then
877  if (cld_amt > 0) then
878  call neg_adj2(is, ie, js, je, ng, npz, &
879  flagstruct%hydrostatic, &
880  peln, delz, &
881  pt, delp, q(isd,jsd,1,sphum), &
882  q(isd,jsd,1,liq_wat), &
883  q(isd,jsd,1,rainwat), &
884  q(isd,jsd,1,ice_wat), &
885  q(isd,jsd,1,snowwat), &
886  q(isd,jsd,1,cld_amt), flagstruct%check_negative)
887  else
888  call neg_adj2(is, ie, js, je, ng, npz, &
889  flagstruct%hydrostatic, &
890  peln, delz, &
891  pt, delp, q(isd,jsd,1,sphum), &
892  q(isd,jsd,1,liq_wat), &
893  q(isd,jsd,1,rainwat), &
894  q(isd,jsd,1,ice_wat), &
895  q(isd,jsd,1,snowwat), &
896  check_negative=flagstruct%check_negative)
897  endif
898  if ( flagstruct%fv_debug ) then
899  call prt_mxm('T_dyn_a3', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
900  call prt_mxm('SPHUM_dyn', q(isd,jsd,1,sphum ), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
901  call prt_mxm('liq_wat_dyn', q(isd,jsd,1,liq_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
902  call prt_mxm('rainwat_dyn', q(isd,jsd,1,rainwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
903  call prt_mxm('ice_wat_dyn', q(isd,jsd,1,ice_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
904  call prt_mxm('snowwat_dyn', q(isd,jsd,1,snowwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
905  endif
906  endif
907 
908  if( (flagstruct%consv_am.or.idiag%id_amdt>0.or.idiag%id_aam>0) .and. (.not.do_adiabatic_init) ) then
909  call compute_aam(npz, is, ie, js, je, isd, ied, jsd, jed, gridstruct, bd, &
910  ptop, ua, va, u, v, delp, te_2d, ps, m_fac)
911  if( idiag%id_aam>0 ) then
912  used = send_data(idiag%id_aam, te_2d, fv_time)
913  if ( prt_minmax ) then
914  gam = g_sum( domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0)
915  if( is_master() ) write(6,*) 'Total AAM =', gam
916  endif
917  endif
918  endif
919 
920  if( (flagstruct%consv_am.or.idiag%id_amdt>0) .and. (.not.do_adiabatic_init) ) then
921 #if defined(CCPP) && defined(__GFORTRAN__)
922 !$OMP parallel do default(none) shared(is,ie,js,je,teq,dt2,ps2,ps,idiag)
923 #else
924 !$OMP parallel do default(none) shared(is,ie,js,je,te_2d,teq,dt2,ps2,ps,idiag)
925 #endif
926  do j=js,je
927  do i=is,ie
 928 ! Note: the mountain torque computation also contains numerical error
929 ! The numerical error is mostly from the zonal gradient of the terrain (zxg)
930  te_2d(i,j) = te_2d(i,j)-teq(i,j) + dt2*(ps2(i,j)+ps(i,j))*idiag%zxg(i,j)
931  enddo
932  enddo
933  if( idiag%id_amdt>0 ) used = send_data(idiag%id_amdt, te_2d/bdt, fv_time)
934 
935  if ( flagstruct%consv_am .or. prt_minmax ) then
936  amdt = g_sum( domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.)
937  u0 = -radius*amdt/g_sum( domain, m_fac, is, ie, js, je, ng, gridstruct%area_64, 0,reproduce=.true.)
938  if(is_master() .and. prt_minmax) &
939  write(6,*) 'Dynamic AM tendency (Hadleys)=', amdt/(bdt*1.e18), 'del-u (per day)=', u0*86400./bdt
940  endif
941 
942  if( flagstruct%consv_am ) then
943 !$OMP parallel do default(none) shared(is,ie,js,je,m_fac,u0,gridstruct)
944  do j=js,je
945  do i=is,ie
946  m_fac(i,j) = u0*cos(gridstruct%agrid(i,j,2))
947  enddo
948  enddo
949 !$OMP parallel do default(none) shared(is,ie,js,je,npz,hydrostatic,pt,m_fac,ua,cp_air, &
950 !$OMP u,u0,gridstruct,v )
951  do k=1,npz
952  do j=js,je+1
953  do i=is,ie
954  u(i,j,k) = u(i,j,k) + u0*gridstruct%l2c_u(i,j)
955  enddo
956  enddo
957  do j=js,je
958  do i=is,ie+1
959  v(i,j,k) = v(i,j,k) + u0*gridstruct%l2c_v(i,j)
960  enddo
961  enddo
962  enddo
963  endif ! consv_am
964  endif
965 
966 911 call cubed_to_latlon(u, v, ua, va, gridstruct, &
967  npx, npy, npz, 1, gridstruct%grid_type, domain, gridstruct%nested, flagstruct%c2l_ord, bd)
968 
969 #ifdef MULTI_GASES
970  deallocate(kapad)
971 #endif
972 #ifndef CCPP
973  deallocate(dp1)
974  deallocate(cappa)
975 #endif
976 
977  if ( flagstruct%fv_debug ) then
978  call prt_mxm('UA', ua, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
979  call prt_mxm('VA', va, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
980  call prt_mxm('TA', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
981  if (.not. hydrostatic) call prt_mxm('W ', w, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
982  endif
983 
984  if ( flagstruct%range_warn ) then
985  call range_check('UA_dyn', ua, is, ie, js, je, ng, npz, gridstruct%agrid, &
986  -280., 280., bad_range)
 987  call range_check('VA_dyn', va, is, ie, js, je, ng, npz, gridstruct%agrid, &
988  -280., 280., bad_range)
989  call range_check('TA_dyn', pt, is, ie, js, je, ng, npz, gridstruct%agrid, &
990  150., 335., bad_range)
991  if ( .not. hydrostatic ) &
992  call range_check('W_dyn', w, is, ie, js, je, ng, npz, gridstruct%agrid, &
993  -50., 100., bad_range)
994  endif
995 
996 #ifdef CCPP
997  end associate ccpp_associate
998 #endif
999 
1000  end subroutine fv_dynamics
1001 
1002 #ifdef USE_RF_FAST
1003  subroutine rayleigh_fast(dt, npx, npy, npz, pfull, tau, u, v, w, &
1004  ks, dp, ptop, hydrostatic, rf_cutoff, bd)
1005 ! Simple "inline" version of the Rayleigh friction
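! Damps u, v (and w if non-hydrostatic) for levels with pfull < rf_cutoff, then
! restores the column-integrated momentum removed by spreading it over the top
! k_rf layers (the dmu/dmv terms below), so the sponge is approximately
! momentum-conserving.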
1006  real, intent(in):: dt
1007  real, intent(in):: tau
1008  real, intent(in):: ptop, rf_cutoff
1009  real, intent(in), dimension(npz):: pfull
1010  integer, intent(in):: npx, npy, npz, ks
1011  logical, intent(in):: hydrostatic
1012  type(fv_grid_bounds_type), intent(IN) :: bd
1013  real, intent(inout):: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1014  real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz)
1015  real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: )
1016  real, intent(in):: dp(npz)
1017 !
1018  real(kind=R_GRID):: rff(npz)
1019  real, parameter:: sday = 86400.
1020  real, dimension(bd%is:bd%ie+1):: dmv
1021  real, dimension(bd%is:bd%ie):: dmu
1022  real:: tau0, dm
1023  integer i, j, k
1024 
1025  integer :: is, ie, js, je
1026  integer :: isd, ied, jsd, jed
1027 
1028  is = bd%is
1029  ie = bd%ie
1030  js = bd%js
1031  je = bd%je
1032  isd = bd%isd
1033  ied = bd%ied
1034  jsd = bd%jsd
1035  jed = bd%jed
1036 
1037  if ( .not. rf_initialized ) then
1038  tau0 = tau * sday
1039  allocate( rf(npz) )
1040  rf(:) = 1.
1041 
1042  if( is_master() ) write(6,*) 'Fast Rayleigh friction E-folding time (days):'
1043  do k=1, npz
1044  if ( pfull(k) < rf_cutoff ) then
1045  rff(k) = dt/tau0*sin(0.5*pi*log(rf_cutoff/pfull(k))/log(rf_cutoff/ptop))**2
1046 ! Re-FACTOR rf
1047  if( is_master() ) write(6,*) k, 0.01*pfull(k), dt/(rff(k)*sday)
1048  kmax = k
1049  rff(k) = 1.d0 / (1.0d0+rff(k))
1050  rf(k) = rff(k)
1051  else
1052  exit
1053  endif
1054  enddo
1055  dm = 0.
1056  do k=1,ks
1057  if ( pfull(k) < 100.e2 ) then
1058  dm = dm + dp(k)
1059  k_rf = k
1060  else
1061  exit
1062  endif
1063  enddo
1064  if( is_master() ) write(6,*) 'k_rf=', k_rf, 0.01*pfull(k_rf), 'dm=', dm
1065  rf_initialized = .true.
1066  endif
1067 
1068 !$OMP parallel do default(none) shared(k_rf,is,ie,js,je,kmax,pfull,rf_cutoff,w,rf,dp,u,v,hydrostatic) &
1069 !$OMP private(dm, dmu, dmv)
1070  do j=js,je+1
1071 
1072  dm = 0.
1073  do k=1, k_rf
1074  dm = dm + dp(k)
1075  enddo
1076 
1077  dmu(:) = 0.
1078  dmv(:) = 0.
1079  do k=1,kmax
1080  do i=is,ie
1081  dmu(i) = dmu(i) + (1.-rf(k))*dp(k)*u(i,j,k)
1082  u(i,j,k) = rf(k)*u(i,j,k)
1083  enddo
1084  if ( j/=je+1 ) then
1085  do i=is,ie+1
1086  dmv(i) = dmv(i) + (1.-rf(k))*dp(k)*v(i,j,k)
1087  v(i,j,k) = rf(k)*v(i,j,k)
1088  enddo
1089  if ( .not. hydrostatic ) then
1090  do i=is,ie
1091  w(i,j,k) = rf(k)*w(i,j,k)
1092  enddo
1093  endif
1094  endif
1095  enddo
1096 
1097  do i=is,ie
1098  dmu(i) = dmu(i) / dm
1099  enddo
1100  if ( j/=je+1 ) then
1101  do i=is,ie+1
1102  dmv(i) = dmv(i) / dm
1103  enddo
1104  endif
1105 
1106  do k=1, k_rf
1107  do i=is,ie
1108  u(i,j,k) = u(i,j,k) + dmu(i)
1109  enddo
1110  if ( j/=je+1 ) then
1111  do i=is,ie+1
1112  v(i,j,k) = v(i,j,k) + dmv(i)
1113  enddo
1114  endif
1115  enddo
1116 
1117  enddo
1118 
1119  end subroutine rayleigh_fast
1120 #endif
1121 
1122 
1123 
1124  subroutine rayleigh_super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, &
1125  ua, va, delz, agrid, cp, rg, ptop, hydrostatic, &
1126  conserve, rf_cutoff, gridstruct, domain, bd)
1127  real, intent(in):: dt
1128  real, intent(in):: tau
1129  real, intent(in):: cp, rg, ptop, rf_cutoff
1130  real, intent(in), dimension(npz):: pm
1131  integer, intent(in):: npx, npy, npz, ks
1132  logical, intent(in):: hydrostatic
1133  logical, intent(in):: conserve
1134  type(fv_grid_bounds_type), intent(IN) :: bd
1135  real, intent(inout):: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1136  real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz)
1137  real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: )
1138  real, intent(inout):: pt(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
1139  real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) !
1140  real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) !
1141  real, intent(inout):: delz(bd%isd: ,bd%jsd: ,1: )
1142  real, intent(in) :: agrid(bd%isd:bd%ied, bd%jsd:bd%jed,2)
1143  real, intent(in) :: phis(bd%isd:bd%ied,bd%jsd:bd%jed)
1144  type(fv_grid_type), intent(IN) :: gridstruct
1145  type(domain2d), intent(INOUT) :: domain
1146 !
1147  real, allocatable :: u2f(:,:,:)
1148  real, parameter:: u0 = 60.
1149  real, parameter:: sday = 86400.
1150  real rcv, tau0
1151  integer i, j, k
1152 
1153  integer :: is, ie, js, je
1154  integer :: isd, ied, jsd, jed
1155 
1156  is = bd%is
1157  ie = bd%ie
1158  js = bd%js
1159  je = bd%je
1160  isd = bd%isd
1161  ied = bd%ied
1162  jsd = bd%jsd
1163  jed = bd%jed
1164 
1165  rcv = 1. / (cp - rg)
1166 
1167  if ( .not. rf_initialized ) then
1168 #ifdef HIWPP
1169  allocate ( u00(is:ie, js:je+1,npz) )
1170  allocate ( v00(is:ie+1,js:je ,npz) )
1171 !$OMP parallel do default(none) shared(is,ie,js,je,npz,u00,u,v00,v)
1172  do k=1,npz
1173  do j=js,je+1
1174  do i=is,ie
1175  u00(i,j,k) = u(i,j,k)
1176  enddo
1177  enddo
1178  do j=js,je
1179  do i=is,ie+1
1180  v00(i,j,k) = v(i,j,k)
1181  enddo
1182  enddo
1183  enddo
1184 #endif
1185 #ifdef SMALL_EARTH
1186  tau0 = abs( tau )
1187 #else
1188  tau0 = abs( tau * sday )
1189 #endif
1190  if( is_master() ) write(6,*) 'Rayleigh_Super tau=',tau
1191  allocate( rf(npz) )
1192  rf(:) = 0.
1193 
1194  do k=1, ks+1
1195  if( is_master() ) write(6,*) k, 0.01*pm(k)
1196  enddo
1197  if( is_master() ) write(6,*) 'Rayleigh friction E-folding time (days):'
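! rf(k) increases from 0 at p = rf_cutoff to dt/tau0 at the model top, following a
! sin**2 profile in log-pressure (or a squared-log "GSM" profile when tau < 0).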
1198  do k=1, npz
1199  if ( pm(k) < rf_cutoff ) then
1200  if ( tau < 0 ) then ! GSM formula
1201  rf(k) = dt/tau0*( log(rf_cutoff/pm(k)) )**2
1202  else
1203  rf(k) = dt/tau0*sin(0.5*pi*log(rf_cutoff/pm(k))/log(rf_cutoff/ptop))**2
1204  endif
1205  if( is_master() ) write(6,*) k, 0.01*pm(k), dt/(rf(k)*sday)
1206  kmax = k
1207  else
1208  exit
1209  endif
1210  enddo
1211  rf_initialized = .true.
1212  endif
1213 
1214  call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%nested)
1215 
1216  allocate( u2f(isd:ied,jsd:jed,kmax) )
1217 
1218 !$OMP parallel do default(none) shared(is,ie,js,je,kmax,pm,rf_cutoff,hydrostatic,ua,va,agrid, &
1219 !$OMP u2f,rf,w)
1220  do k=1,kmax
1221  if ( pm(k) < rf_cutoff ) then
1222  u2f(:,:,k) = 1. / (1.+rf(k))
1223  else
1224  u2f(:,:,k) = 1.
1225  endif
1226  enddo
1227  call timing_on('COMM_TOTAL')
1228  call mpp_update_domains(u2f, domain)
1229  call timing_off('COMM_TOTAL')
1230 
1231 !$OMP parallel do default(none) shared(is,ie,js,je,kmax,pm,rf_cutoff,w,rf,u,v, &
1232 #ifdef HIWPP
1233 !$OMP u00,v00, &
1234 #endif
1235 !$OMP conserve,hydrostatic,pt,ua,va,u2f,cp,rg,ptop,rcv)
1236  do k=1,kmax
1237  if ( pm(k) < rf_cutoff ) then
1238 #ifdef HIWPP
1239  if (.not. hydrostatic) then
1240  do j=js,je
1241  do i=is,ie
1242  w(i,j,k) = w(i,j,k)/(1.+rf(k))
1243  enddo
1244  enddo
1245  endif
1246  do j=js,je+1
1247  do i=is,ie
1248  u(i,j,k) = (u(i,j,k)+rf(k)*u00(i,j,k))/(1.+rf(k))
1249  enddo
1250  enddo
1251  do j=js,je
1252  do i=is,ie+1
1253  v(i,j,k) = (v(i,j,k)+rf(k)*v00(i,j,k))/(1.+rf(k))
1254  enddo
1255  enddo
1256 #else
1257 ! Add heat so as to conserve TE
1258  if ( conserve ) then
1259  if ( hydrostatic ) then
1260  do j=js,je
1261  do i=is,ie
1262  pt(i,j,k) = pt(i,j,k) + 0.5*(ua(i,j,k)**2+va(i,j,k)**2)*(1.-u2f(i,j,k)**2)/(cp-rg*ptop/pm(k))
1263  enddo
1264  enddo
1265  else
1266  do j=js,je
1267  do i=is,ie
1268  pt(i,j,k) = pt(i,j,k) + 0.5*(ua(i,j,k)**2+va(i,j,k)**2+w(i,j,k)**2)*(1.-u2f(i,j,k)**2)*rcv
1269  enddo
1270  enddo
1271  endif
1272  endif
1273 
1274  do j=js,je+1
1275  do i=is,ie
1276  u(i,j,k) = 0.5*(u2f(i,j-1,k)+u2f(i,j,k))*u(i,j,k)
1277  enddo
1278  enddo
1279  do j=js,je
1280  do i=is,ie+1
1281  v(i,j,k) = 0.5*(u2f(i-1,j,k)+u2f(i,j,k))*v(i,j,k)
1282  enddo
1283  enddo
1284  if ( .not. hydrostatic ) then
1285  do j=js,je
1286  do i=is,ie
1287  w(i,j,k) = u2f(i,j,k)*w(i,j,k)
1288  enddo
1289  enddo
1290  endif
1291 #endif
1292  endif
1293  enddo
1294 
1295  deallocate ( u2f )
1296 
1297  end subroutine rayleigh_super
1298 
1299 
1300  subroutine rayleigh_friction(dt, npx, npy, npz, ks, pm, tau, u, v, w, pt, &
1301  ua, va, delz, cp, rg, ptop, hydrostatic, conserve, &
1302  rf_cutoff, gridstruct, domain, bd)
1303  real, intent(in):: dt
1304  real, intent(in):: tau
1305  real, intent(in):: cp, rg, ptop, rf_cutoff
1306  real, intent(in), dimension(npz):: pm
1307  integer, intent(in):: npx, npy, npz, ks
1308  logical, intent(in):: hydrostatic
1309  logical, intent(in):: conserve
1310  type(fv_grid_bounds_type), intent(IN) :: bd
1311  real, intent(inout):: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1312  real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz)
1313  real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: )
1314  real, intent(inout):: pt(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
1315  real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) !
1316  real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) !
1317  real, intent(inout):: delz(bd%isd: ,bd%jsd: ,1: )
1318  type(fv_grid_type), intent(IN) :: gridstruct
1319  type(domain2d), intent(INOUT) :: domain
1320 ! local:
1321  real, allocatable :: u2f(:,:,:)
1322  real, parameter:: sday = 86400.
1323  real, parameter:: u000 = 4900.
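! u000 = 70.**2 (m/s)**2: the damping below scales as rf(k)*sqrt(u2f/u000), i.e.
! it strengthens with the local wind speed normalized by 70 m/s.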
1324  real rcv
1325  integer i, j, k
1326 
1327  integer :: is, ie, js, je
1328  integer :: isd, ied, jsd, jed
1329 
1330 
1331  is = bd%is
1332  ie = bd%ie
1333  js = bd%js
1334  je = bd%je
1335  isd = bd%isd
1336  ied = bd%ied
1337  jsd = bd%jsd
1338  jed = bd%jed
1339 
1340 
1341  rcv = 1. / (cp - rg)
1342 
1343  if ( .not. rf_initialized ) then
1344  allocate( rf(npz) )
1345  if( is_master() ) write(6,*) 'Rayleigh friction E-folding time (days):'
1346  do k=1, npz
1347  if ( pm(k) < rf_cutoff ) then
1348  rf(k) = dt/(tau*sday)*sin(0.5*pi*log(rf_cutoff/pm(k))/log(rf_cutoff/ptop))**2
1349  if( is_master() ) write(6,*) k, 0.01*pm(k), dt/(rf(k)*sday)
1350  kmax = k
1351  else
1352  exit
1353  endif
1354  enddo
1355  rf_initialized = .true.
1356  endif
1357 
1358  allocate( u2f(isd:ied,jsd:jed,kmax) )
1359 
1360  call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%nested)
1361 
1362 !$OMP parallel do default(none) shared(is,ie,js,je,kmax,u2f,hydrostatic,ua,va,w)
1363  do k=1,kmax
1364  if ( hydrostatic ) then
1365  do j=js,je
1366  do i=is,ie
1367  u2f(i,j,k) = ua(i,j,k)**2 + va(i,j,k)**2
1368  enddo
1369  enddo
1370  else
1371  do j=js,je
1372  do i=is,ie
1373  u2f(i,j,k) = ua(i,j,k)**2 + va(i,j,k)**2 + w(i,j,k)**2
1374  enddo
1375  enddo
1376  endif
1377  enddo
1378 
1379  call timing_on('COMM_TOTAL')
1380  call mpp_update_domains(u2f, domain)
1381  call timing_off('COMM_TOTAL')
1382 
1383 !$OMP parallel do default(none) shared(is,ie,js,je,kmax,conserve,hydrostatic,pt,u2f,cp,rg, &
1384 !$OMP ptop,pm,rf,delz,rcv,u,v,w)
1385  do k=1,kmax
1386 
1387  if ( conserve ) then
1388  if ( hydrostatic ) then
1389  do j=js,je
1390  do i=is,ie
1391  pt(i,j,k) = pt(i,j,k) + 0.5*u2f(i,j,k)/(cp-rg*ptop/pm(k)) &
1392  * ( 1. - 1./(1.+rf(k)*sqrt(u2f(i,j,k)/u000))**2 )
1393  enddo
1394  enddo
1395  else
1396  do j=js,je
1397  do i=is,ie
1398  delz(i,j,k) = delz(i,j,k) / pt(i,j,k)
1399  pt(i,j,k) = pt(i,j,k) + 0.5*u2f(i,j,k) * rcv &
1400  * ( 1. - 1./(1.+rf(k)*sqrt(u2f(i,j,k)/u000))**2 )
1401  delz(i,j,k) = delz(i,j,k) * pt(i,j,k)
1402  enddo
1403  enddo
1404  endif
1405  endif
1406 
1407  do j=js-1,je+1
1408  do i=is-1,ie+1
1409  u2f(i,j,k) = rf(k)*sqrt(u2f(i,j,k)/u000)
1410  enddo
1411  enddo
1412 
1413  do j=js,je+1
1414  do i=is,ie
1415  u(i,j,k) = u(i,j,k) / (1.+0.5*(u2f(i,j-1,k)+u2f(i,j,k)))
1416  enddo
1417  enddo
1418  do j=js,je
1419  do i=is,ie+1
1420  v(i,j,k) = v(i,j,k) / (1.+0.5*(u2f(i-1,j,k)+u2f(i,j,k)))
1421  enddo
1422  enddo
1423 
1424  if ( .not. hydrostatic ) then
1425  do j=js,je
1426  do i=is,ie
1427  w(i,j,k) = w(i,j,k) / (1.+u2f(i,j,k))
1428  enddo
1429  enddo
1430  endif
1431 
1432  enddo
1433 
1434  deallocate ( u2f )
1435 
1436  end subroutine rayleigh_friction
1437 
1439  subroutine compute_aam(npz, is, ie, js, je, isd, ied, jsd, jed, gridstruct, bd, &
1440  ptop, ua, va, u, v, delp, aam, ps, m_fac)
1441 ! Compute vertically (mass) integrated Atmospheric Angular Momentum
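! Per column: aam = sum_k (omega*r**2 + r*ua)*delp/grav with r = radius*cos(lat);
! m_fac = sum_k r**2*delp/grav is the moment factor used (e.g., for consv_am) to
! convert an AAM tendency into a solid-body wind correction.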
1442  integer, intent(in):: npz
1443  integer, intent(in):: is, ie, js, je
1444  integer, intent(in):: isd, ied, jsd, jed
1445  real, intent(in):: ptop
1446  real, intent(inout):: u(isd:ied ,jsd:jed+1,npz)
1447  real, intent(inout):: v(isd:ied+1,jsd:jed,npz)
1448  real, intent(inout):: delp(isd:ied,jsd:jed,npz)
1449  real, intent(inout), dimension(isd:ied,jsd:jed, npz):: ua, va
1450  real, intent(out):: aam(is:ie,js:je)
1451  real, intent(out):: m_fac(is:ie,js:je)
1452  real, intent(out):: ps(isd:ied,jsd:jed)
1453  type(fv_grid_bounds_type), intent(IN) :: bd
1454  type(fv_grid_type), intent(IN) :: gridstruct
1455 ! local:
1456  real, dimension(is:ie):: r1, r2, dm
1457  integer i, j, k
1458 
1459  call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%nested)
1460 
1461 !$OMP parallel do default(none) shared(is,ie,js,je,npz,gridstruct,aam,m_fac,ps,ptop,delp,agrav,ua) &
1462 !$OMP private(r1, r2, dm)
1463  do j=js,je
1464  do i=is,ie
1465  r1(i) = radius*cos(gridstruct%agrid(i,j,2))
1466  r2(i) = r1(i)*r1(i)
1467  aam(i,j) = 0.
1468  m_fac(i,j) = 0.
1469  ps(i,j) = ptop
1470  enddo
1471  do k=1,npz
1472  do i=is,ie
1473  dm(i) = delp(i,j,k)
1474  ps(i,j) = ps(i,j) + dm(i)
1475  dm(i) = dm(i)*agrav
1476  aam(i,j) = aam(i,j) + (r2(i)*omega + r1(i)*ua(i,j,k)) * dm(i)
1477  m_fac(i,j) = m_fac(i,j) + dm(i)*r2(i)
1478  enddo
1479  enddo
1480  enddo
1481 
1482  end subroutine compute_aam
1483 
1484 end module fv_dynamics_mod