FV3DYCORE  Version 2.0.0
fv_dynamics.F90
1 
2 !***********************************************************************
3 !* GNU Lesser General Public License
4 !*
5 !* This file is part of the FV3 dynamical core.
6 !*
7 !* The FV3 dynamical core is free software: you can redistribute it
8 !* and/or modify it under the terms of the
9 !* GNU Lesser General Public License as published by the
10 !* Free Software Foundation, either version 3 of the License, or
11 !* (at your option) any later version.
12 !*
13 !* The FV3 dynamical core is distributed in the hope that it will be
14 !* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
15 !* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16 !* See the GNU General Public License for more details.
17 !*
18 !* You should have received a copy of the GNU Lesser General Public
19 !* License along with the FV3 dynamical core.
20 !* If not, see <http://www.gnu.org/licenses/>.
21 !***********************************************************************
22 
25 
27 
28 ! Modules Included:
29 ! <table>
30 ! <tr>
31 ! <th>Module Name</th>
32 ! <th>Functions Included</th>
33 ! </tr>
34 ! <tr>
35 ! <td>boundary_mod</td>
36 ! <td>nested_grid_BC_apply_intT</td>
37 ! </tr>
38 ! <tr>
39 ! <td>constants_mod</td>
40 ! <td>grav, pi=>pi_8, radius, hlv, rdgas, omega, rvgas, cp_vapor</td>
41 ! </tr>
42 ! <tr>
43 ! <td>diag_manager_mod</td>
44 ! <td>send_data</td>
45 ! </tr>
46 ! <tr>
47 ! <td>dyn_core_mod</td>
48 ! <td>dyn_core, del2_cubed, init_ijk_mem</td>
49 ! </tr>
50 ! <tr>
51 ! <td>field_manager_mod</td>
52 ! <td>MODEL_ATMOS</td>
53 ! </tr>
54 ! <tr>
55 ! <td>fv_arrays_mod</td>
56 ! <td>fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type, fv_diag_type, fv_grid_bounds_type</td>
57 ! </tr>
58 ! <tr>
59 ! <td>fv_diagnostics_mod</td>
60 ! <td>fv_time, prt_mxm, range_check, prt_minmax</td>
61 ! </tr>
62 ! <tr>
63 ! <td>fv_fill_mod</td>
64 ! <td>fill2D</td>
65 ! </tr>
66 ! <tr>
67 ! <td>fv_grid_utils_mod</td>
68 ! <td>cubed_to_latlon, c2l_ord2, g_sum</td>
69 ! </tr>
70 ! <tr>
71 ! <td>fv_mapz_mod</td>
72 ! <td>compute_total_energy, Lagrangian_to_Eulerian, moist_cv, moist_cp</td>
73 ! </tr>
74 ! <tr>
75 ! <td>fv_mp_mod</td>
76 ! <td>is_master,group_halo_update_type, start_group_halo_update, complete_group_halo_update</td>
77 ! </tr>
78 ! <tr>
79 ! <td>fv_nesting_mod</td>
80 ! <td>setup_nested_grid_BCs</td>
81 ! </tr>
82 ! <tr>
83 ! <td>fv_nwp_nudge_mod</td>
84 ! <td>do_adiabatic_init</td>
85 ! </tr>
86 ! <tr>
87 ! <td>fv_restart_mod</td>
88 ! <td>d2a_setup, d2c_setup</td>
89 ! </tr>
90 ! <tr>
91 ! <td>fv_sg_mod</td>
92 ! <td>neg_adj3</td>
93 ! </tr>
94 ! <tr>
95 ! <td>fv_sg_mod</td>
96 ! <td>neg_adj2</td>
97 ! </tr>
98 ! <tr>
99 ! <td>fv_timing_mod</td>
100 ! <td>timing_on, timing_off</td>
101 ! </tr>
102 ! <tr>
103 ! <td>fv_tracer2d_mod</td>
104 ! <td>tracer_2d, tracer_2d_1L, tracer_2d_nested</td>
105 ! </tr>
106 ! <tr>
107 ! <td>mpp_mod</td>
108 ! <td>mpp_pe</td>
109 ! </tr>
110 ! <tr>
111 ! <td>mpp_domains_mod</td>
112 ! <td>DGRID_NE, CGRID_NE, mpp_update_domains, domain2D</td>
113 ! </tr>
118 ! <tr>
119 ! <td>tracer_manager_mod</td>
120 ! <td>get_tracer_index</td>
121 ! </tr>
122 ! </table>
123 
124  use constants_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, omega, rvgas, cp_vapor
129  use fv_fill_mod, only: fill2d
130  use fv_mp_mod, only: is_master
131  use fv_mp_mod, only: group_halo_update_type
132  use fv_mp_mod, only: start_group_halo_update, complete_group_halo_update
134  use diag_manager_mod, only: send_data
136  use mpp_domains_mod, only: dgrid_ne, cgrid_ne, mpp_update_domains, domain2d
137  use mpp_mod, only: mpp_pe
138  use field_manager_mod, only: model_atmos
139  use tracer_manager_mod, only: get_tracer_index
140  use fv_sg_mod, only: neg_adj3, neg_adj2
144  use fv_regional_mod, only: a_step, p_step, k_step
149 #ifdef MULTI_GASES
150  use multi_gases_mod, only: virq, virqd, vicpqd
151 #endif
152 
153 implicit none
154  logical :: rf_initialized = .false.
155  logical :: pt_initialized = .false.
156  logical :: bad_range = .false.
157  real, allocatable :: rf(:)
158  integer :: kmax=1
159  integer :: k_rf = 0
160 
161  real :: agrav
162 #ifdef HIWPP
163  real, allocatable:: u00(:,:,:), v00(:,:,:)
164 #endif
165 private
166 public :: fv_dynamics
167 
168 contains
169 
170 !-----------------------------------------------------------------------
171 ! fv_dynamics :: FV dynamical core driver
172 !-----------------------------------------------------------------------
173 
174  subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, &
175  reproduce_sum, kappa, cp_air, zvir, ptop, ks, ncnst, n_split, &
176  q_split, u, v, w, delz, hydrostatic, pt, delp, q, &
177  ps, pe, pk, peln, pkz, phis, q_con, omga, ua, va, uc, vc, &
178  ak, bk, mfx, mfy, cx, cy, ze0, hybrid_z, &
179  gridstruct, flagstruct, neststruct, idiag, bd, &
180  parent_grid, domain, diss_est, time_total)
182 #ifdef CCPP
183  use mpp_mod, only: fatal, mpp_error
184  use ccpp_data, only: ccpp_interstitial
185 #endif
186 
187  real, intent(IN) :: bdt
188  real, intent(IN) :: consv_te
189  real, intent(IN) :: kappa, cp_air
190  real, intent(IN) :: zvir, ptop
191  real, intent(IN), optional :: time_total
192 
193  integer, intent(IN) :: npx
194  integer, intent(IN) :: npy
195  integer, intent(IN) :: npz
196  integer, intent(IN) :: nq_tot
197  integer, intent(IN) :: ng
198  integer, intent(IN) :: ks
199  integer, intent(IN) :: ncnst
200  integer, intent(IN) :: n_split
201  integer, intent(IN) :: q_split
202  logical, intent(IN) :: fill
203  logical, intent(IN) :: reproduce_sum
204  logical, intent(IN) :: hydrostatic
205  logical, intent(IN) :: hybrid_z
206 
207  type(fv_grid_bounds_type), intent(IN) :: bd
208  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u
209  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v
210  real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1:)
211  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
212  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz)
213  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst)
214  real, intent(inout) :: delz(bd%is:,bd%js:,1:)
215  real, intent(inout) :: ze0(bd%is:, bd%js: ,1:)
216  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed,npz) :: diss_est ! diffusion estimate for SKEB
217 ! ze0 no longer used
218 
219 !-----------------------------------------------------------------------
220 ! Auxiliary pressure arrays:
221 ! The 5 vars below can be re-computed from delp and ptop.
222 !-----------------------------------------------------------------------
223 ! dyn_aux:
224  real, intent(inout) :: ps (bd%isd:bd%ied ,bd%jsd:bd%jed)
225  real, intent(inout) :: pe (bd%is-1:bd%ie+1, npz+1,bd%js-1:bd%je+1)
226  real, intent(inout) :: pk (bd%is:bd%ie,bd%js:bd%je, npz+1)
227  real, intent(inout) :: peln(bd%is:bd%ie,npz+1,bd%js:bd%je)
228  real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz)
229  real, intent(inout):: q_con(bd%isd:, bd%jsd:, 1:)
230 
231 !-----------------------------------------------------------------------
232 ! Others:
233 !-----------------------------------------------------------------------
234  real, intent(inout) :: phis(bd%isd:bd%ied,bd%jsd:bd%jed)
235  real, intent(inout) :: omga(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
236  real, intent(inout) :: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
237  real, intent(inout) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
238 
239  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz):: ua, va
240  real, intent(in), dimension(npz+1):: ak, bk
241 
242 ! Accumulated Mass flux arrays: the "Flux Capacitor"
243  real, intent(inout) :: mfx(bd%is:bd%ie+1, bd%js:bd%je, npz)
244  real, intent(inout) :: mfy(bd%is:bd%ie , bd%js:bd%je+1, npz)
245 ! Accumulated Courant number arrays
246  real, intent(inout) :: cx(bd%is:bd%ie+1, bd%jsd:bd%jed, npz)
247  real, intent(inout) :: cy(bd%isd:bd%ied ,bd%js:bd%je+1, npz)
248 
249  type(fv_grid_type), intent(inout), target :: gridstruct
250  type(fv_flags_type), intent(INOUT) :: flagstruct
251  type(fv_nest_type), intent(INOUT) :: neststruct
252  type(domain2d), intent(INOUT) :: domain
253  type(fv_atmos_type), pointer, intent(IN) :: parent_grid
254  type(fv_diag_type), intent(IN) :: idiag
255 
256 ! Local Arrays
257  real :: ws(bd%is:bd%ie,bd%js:bd%je)
258 #ifndef CCPP
259  real :: te_2d(bd%is:bd%ie,bd%js:bd%je)
260 #endif
261  real :: teq(bd%is:bd%ie,bd%js:bd%je)
262  real :: ps2(bd%isd:bd%ied,bd%jsd:bd%jed)
263  real :: m_fac(bd%is:bd%ie,bd%js:bd%je)
264  real :: pfull(npz)
265  real, dimension(bd%is:bd%ie):: cvm
266 #ifndef CCPP
267  real, allocatable :: dp1(:,:,:), dtdt_m(:,:,:), cappa(:,:,:)
268 #endif
269 #ifdef MULTI_GASES
270  real, allocatable :: kapad(:,:,:)
271 #endif
272  real:: akap, rdg, ph1, ph2, mdt, gam, amdt, u0
273  real:: recip_k_split,reg_bc_update_time
274  integer:: kord_tracer(ncnst)
275  integer :: i,j,k, n, iq, n_map, nq, nr, nwat, k_split
276  integer :: sphum, liq_wat = -999, ice_wat = -999 ! GFDL physics
277  integer :: rainwat = -999, snowwat = -999, graupel = -999, cld_amt = -999
278  integer :: theta_d = -999
279 #ifdef CCPP
280  logical used, do_omega
281 #else
282  logical used, last_step, do_omega
283 #endif
284 #ifdef MULTI_GASES
285  integer, parameter :: max_packs=13
286 #else
287  integer, parameter :: max_packs=12
288 #endif
289  type(group_halo_update_type), save :: i_pack(max_packs)
290  integer :: is, ie, js, je
291  integer :: isd, ied, jsd, jed
292  real :: dt2
293 #ifdef CCPP
294  integer :: ierr
295 #endif
296 
297 #ifdef CCPP
298  ccpp_associate: associate( cappa => ccpp_interstitial%cappa, &
299  dp1 => ccpp_interstitial%te0, &
300  dtdt_m => ccpp_interstitial%dtdt, &
301  last_step => ccpp_interstitial%last_step, &
302  te_2d => ccpp_interstitial%te0_2d )
303 #endif
304 
305  is = bd%is
306  ie = bd%ie
307  js = bd%js
308  je = bd%je
309  isd = bd%isd
310  ied = bd%ied
311  jsd = bd%jsd
312  jed = bd%jed
313 
314 
315 ! cv_air = cp_air - rdgas
316  agrav = 1. / grav
317  dt2 = 0.5*bdt
318  k_split = flagstruct%k_split
319  recip_k_split=1./real(k_split)
320  nwat = flagstruct%nwat
321  nq = nq_tot - flagstruct%dnats
322  nr = nq_tot - flagstruct%dnrts
323  rdg = -rdgas * agrav
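! nq excludes the dnats tracers that are not advected by the dynamics;
! nr excludes the dnrts tracers that are not remapped in the vertical.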
324 
325 #ifdef CCPP
326 
327  ! Reset all interstitial variables for CCPP version
328  ! of fast physics, and manually set runtime parameters
329  call ccpp_interstitial%reset()
330  if (flagstruct%do_sat_adj) then
331  ccpp_interstitial%out_dt = (idiag%id_mdt > 0)
332  end if
333 
334 #else
335  te_2d = 0.
336 
337  allocate ( dp1(isd:ied, jsd:jed, 1:npz) )
338  call init_ijk_mem(isd,ied, jsd,jed, npz, dp1, 0.)
339 
340 #ifdef MOIST_CAPPA
341  allocate ( cappa(isd:ied,jsd:jed,npz) )
342  call init_ijk_mem(isd,ied, jsd,jed, npz, cappa, 0.)
343 #else
344  allocate ( cappa(isd:isd,jsd:jsd,1) )
345  cappa = 0.
346 #endif
347 #endif
348 
349 #ifdef MULTI_GASES
350  allocate ( kapad(isd:ied, jsd:jed, npz) )
351  call init_ijk_mem(isd,ied, jsd,jed, npz, kapad, kappa)
352 #endif
353 
354  !We call this BEFORE converting pt to virtual potential temperature,
355  !since we interpolate on (regular) temperature rather than theta.
356  if (gridstruct%nested .or. any(neststruct%child_grids)) then
357  call timing_on('NEST_BCs')
358  call setup_nested_grid_bcs(npx, npy, npz, zvir, ncnst, &
359  u, v, w, pt, delp, delz, q, uc, vc, &
360 #ifdef USE_COND
361  q_con, &
362 #ifdef MOIST_CAPPA
363  cappa, &
364 #endif
365 #endif
366  neststruct%nested, flagstruct%inline_q, flagstruct%make_nh, ng, &
367  gridstruct, flagstruct, neststruct, &
368  neststruct%nest_timestep, neststruct%tracer_nest_timestep, &
369  domain, parent_grid, bd, nwat, ak, bk)
370 
371  call timing_off('NEST_BCs')
372  endif
373 
374  ! For the regional domain set values valid the beginning of the
375  ! current large timestep at the boundary points of the pertinent
376  ! prognostic arrays.
377 
378  if (flagstruct%regional) then
379  call timing_on('Regional_BCs')
380 
381  reg_bc_update_time=current_time_in_seconds
382  call set_regional_bcs &
383  (delp,delz,w,pt &
384 #ifdef USE_COND
385  ,q_con &
386 #endif
387 #ifdef MOIST_CAPPA
388  ,cappa &
389 #endif
390  ,q,u,v,uc,vc, bd, npz, reg_bc_update_time )
391 
392  call timing_off('Regional_BCs')
393  endif
394 
395  if ( flagstruct%no_dycore ) then
396  if ( nwat.eq.2 .and. (.not.hydrostatic) ) then
397  sphum = get_tracer_index(model_atmos, 'sphum')
398  endif
399  goto 911
400  endif
401 
402  if ( nwat==0 ) then
403  sphum = 1
404  cld_amt = -1 ! to cause trouble if (mis)used
405  else
406  sphum = get_tracer_index(model_atmos, 'sphum')
407  liq_wat = get_tracer_index(model_atmos, 'liq_wat')
408  ice_wat = get_tracer_index(model_atmos, 'ice_wat')
409  rainwat = get_tracer_index(model_atmos, 'rainwat')
410  snowwat = get_tracer_index(model_atmos, 'snowwat')
411  graupel = get_tracer_index(model_atmos, 'graupel')
412  cld_amt = get_tracer_index(model_atmos, 'cld_amt')
413  endif
414 
415  theta_d = get_tracer_index(model_atmos, 'theta_d')
416 
417 #ifdef SW_DYNAMICS
418  akap = 1.
419  pfull(1) = 0.5*flagstruct%p_ref
420 #else
421  akap = kappa
422 
423 !$OMP parallel do default(none) shared(npz,ak,bk,flagstruct,pfull) &
424 !$OMP private(ph1, ph2,k)
425  do k=1,npz
426  ph1 = ak(k ) + bk(k )*flagstruct%p_ref
427  ph2 = ak(k+1) + bk(k+1)*flagstruct%p_ref
428  pfull(k) = (ph2 - ph1) / log(ph2/ph1)
429  enddo
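! pfull(k) is the layer-mean reference pressure, taken as the logarithmic
! mean of the interface pressures,
!     pfull(k) = (ph2 - ph1) / log(ph2/ph1),
! which lies between the geometric and arithmetic means and reduces to the
! arithmetic mean in the thin-layer limit.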
430 
431  if ( hydrostatic ) then
432 #if defined(CCPP) && defined(__GFORTRAN__)
433 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,zvir,nwat,q,q_con,sphum,liq_wat, &
434 #else
435 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,zvir,nwat,q,q_con,sphum,liq_wat, &
436 #endif
437 !$OMP rainwat,ice_wat,snowwat,graupel) private(cvm,i,j,k)
438  do k=1,npz
439  do j=js,je
440 #ifdef USE_COND
441  call moist_cp(is,ie,isd,ied,jsd,jed, npz, j, k, nwat, sphum, liq_wat, rainwat, &
442  ice_wat, snowwat, graupel, q, q_con(is:ie,j,k), cvm)
443 #endif
444  do i=is,ie
445  dp1(i,j,k) = zvir*q(i,j,k,sphum)
446  enddo
447  enddo
448  enddo
449  else
450 #if defined(CCPP) && defined(__GFORTRAN__)
451 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,zvir,q,q_con,sphum,liq_wat, &
452 #else
453 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,zvir,q,q_con,sphum,liq_wat, &
454 #endif
455 !$OMP rainwat,ice_wat,snowwat,graupel,pkz,flagstruct, &
456 #ifdef MULTI_GASES
457 !$OMP kapad, &
458 #endif
459 #if defined(CCPP) && defined(__GFORTRAN__)
460 !$OMP kappa,rdg,delp,pt,delz,nwat) &
461 #else
462 !$OMP cappa,kappa,rdg,delp,pt,delz,nwat) &
463 #endif
464 !$OMP private(cvm,i,j,k)
465  do k=1,npz
466  if ( flagstruct%moist_phys ) then
467  do j=js,je
468 #ifdef MOIST_CAPPA
469  call moist_cv(is,ie,isd,ied,jsd,jed, npz, j, k, nwat, sphum, liq_wat, rainwat, &
470  ice_wat, snowwat, graupel, q, q_con(is:ie,j,k), cvm)
471 #endif
472  do i=is,ie
473 #ifdef MULTI_GASES
474  dp1(i,j,k) = virq(q(i,j,k,:))-1.
475  kapad(i,j,k)= kappa * (virqd(q(i,j,k,:))/vicpqd(q(i,j,k,:)))
476 #else
477  dp1(i,j,k) = zvir*q(i,j,k,sphum)
478 #endif
479 
480 #ifdef MOIST_CAPPA
481  cappa(i,j,k) = rdgas/(rdgas + cvm(i)/(1.+dp1(i,j,k)))
482  pkz(i,j,k) = exp(cappa(i,j,k)*log(rdg*delp(i,j,k)*pt(i,j,k)* &
483 #ifdef MULTI_GASES
484  (1.+dp1(i,j,k)) /delz(i,j,k)) )
485 #else
486  (1.+dp1(i,j,k))*(1.-q_con(i,j,k))/delz(i,j,k)) )
487 #endif
488 #else
489  pkz(i,j,k) = exp( kappa*log(rdg*delp(i,j,k)*pt(i,j,k)* &
490  (1.+dp1(i,j,k))/delz(i,j,k)) )
491 ! Using dry pressure for the definition of the virtual potential temperature
492 ! pkz(i,j,k) = exp( kappa*log(rdg*delp(i,j,k)*pt(i,j,k)* &
493 ! (1.-q(i,j,k,sphum))/delz(i,j,k)) )
494 #endif
495 
496  enddo
497  enddo
498  else
499  do j=js,je
500  do i=is,ie
501  dp1(i,j,k) = 0.
502 #ifdef MULTI_GASES
503  kapad(i,j,k)= kappa * (virqd(q(i,j,k,:))/vicpqd(q(i,j,k,:)))
504  pkz(i,j,k) = exp(kapad(i,j,k)*log(rdg*virqd(q(i,j,k,:))*delp(i,j,k)*pt(i,j,k)/delz(i,j,k)))
505 #else
506  pkz(i,j,k) = exp(kappa*log(rdg*delp(i,j,k)*pt(i,j,k)/delz(i,j,k)))
507 #endif
508  enddo
509  enddo
510  endif
511  enddo
512  endif
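! In the non-hydrostatic branch above, pkz = p**kappa is diagnosed from the
! ideal-gas law rather than from the hydrostatic pressure: with layer density
! rho = -delp/(grav*delz) (delz is a negative thickness) and rdg = -rdgas/grav,
!     p = rho*rdgas*Tv = rdg*delp*pt*(1+zvir*q)/delz,
! where pt is still the sensible temperature at this point. When MOIST_CAPPA
! is defined, kappa is replaced by the locally computed moist exponent cappa.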
513 
514  if ( flagstruct%fv_debug ) then
515 #ifdef MOIST_CAPPA
516  call prt_mxm('cappa', cappa, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
517 #endif
518  call prt_mxm('PS', ps, is, ie, js, je, ng, 1, 0.01, gridstruct%area_64, domain)
519  call prt_mxm('T_dyn_b', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
520  if ( .not. hydrostatic) call prt_mxm('delz', delz, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain)
521  call prt_mxm('delp_b ', delp, is, ie, js, je, ng, npz, 0.01, gridstruct%area_64, domain)
522  call prt_mxm('pk_b', pk, is, ie, js, je, 0, npz+1, 1.,gridstruct%area_64, domain)
523  call prt_mxm('pkz_b', pkz,is, ie, js, je, 0, npz, 1.,gridstruct%area_64, domain)
524  endif
525 
526 !---------------------
527 ! Compute Total Energy
528 !---------------------
529  if ( consv_te > 0. .and. (.not.do_adiabatic_init) ) then
530  call compute_total_energy(is, ie, js, je, isd, ied, jsd, jed, npz, &
531  u, v, w, delz, pt, delp, q, dp1, pe, peln, phis, &
532  gridstruct%rsin2, gridstruct%cosa_s, &
533  zvir, cp_air, rdgas, hlv, te_2d, ua, va, teq, &
534  flagstruct%moist_phys, nwat, sphum, liq_wat, rainwat, &
535  ice_wat, snowwat, graupel, hydrostatic, idiag%id_te)
536  if( idiag%id_te>0 ) then
537  used = send_data(idiag%id_te, teq, fv_time)
538 ! te_den=1.E-9*g_sum(teq, is, ie, js, je, ng, area, 0)/(grav*4.*pi*radius**2)
539 ! if(is_master()) write(*,*) 'Total Energy Density (Giga J/m**2)=',te_den
540  endif
541  endif
542 
543  if( (flagstruct%consv_am.or.idiag%id_amdt>0) .and. (.not.do_adiabatic_init) ) then
544  call compute_aam(npz, is, ie, js, je, isd, ied, jsd, jed, gridstruct, bd, &
545  ptop, ua, va, u, v, delp, teq, ps2, m_fac)
546  endif
547 
548  if( .not.flagstruct%RF_fast .and. flagstruct%tau .ne. 0. ) then
549  if ( gridstruct%grid_type<4 ) then
550 ! if ( flagstruct%RF_fast ) then
551 ! call Ray_fast(abs(dt), npx, npy, npz, pfull, flagstruct%tau, u, v, w, &
552 ! dp_ref, ptop, hydrostatic, flagstruct%rf_cutoff, bd)
553 ! else
554  call rayleigh_super(abs(bdt), npx, npy, npz, ks, pfull, phis, flagstruct%tau, u, v, w, pt, &
555  ua, va, delz, gridstruct%agrid, cp_air, rdgas, ptop, hydrostatic, &
556  .not. gridstruct%bounded_domain, flagstruct%rf_cutoff, gridstruct, domain, bd)
557 ! endif
558  else
559  call rayleigh_friction(abs(bdt), npx, npy, npz, ks, pfull, flagstruct%tau, u, v, w, pt, &
560  ua, va, delz, cp_air, rdgas, ptop, hydrostatic, .true., flagstruct%rf_cutoff, gridstruct, domain, bd)
561  endif
562  endif
563 
564 #endif
565 
566 #ifndef SW_DYNAMICS
567 ! Convert pt to virtual potential temperature on the first timestep
568  if ( flagstruct%adiabatic .and. flagstruct%kord_tm>0 ) then
569  if ( .not.pt_initialized )then
570 !$OMP parallel do default(none) shared(theta_d,is,ie,js,je,npz,pt,pkz,q)
571  do k=1,npz
572  do j=js,je
573  do i=is,ie
574  pt(i,j,k) = pt(i,j,k)/pkz(i,j,k)
575  enddo
576  enddo
577  if ( theta_d>0 ) then
578  do j=js,je
579  do i=is,ie
580  q(i,j,k,theta_d) = pt(i,j,k)
581  enddo
582  enddo
583  endif
584  enddo
585  pt_initialized = .true.
586  endif
587  else
588 #if defined(CCPP) && defined(__GFORTRAN__)
589 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pt,pkz,q_con)
590 #else
591 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pt,dp1,pkz,q_con)
592 #endif
593  do k=1,npz
594  do j=js,je
595  do i=is,ie
596 
597 #ifdef MULTI_GASES
598  pt(i,j,k) = pt(i,j,k)*(1.+dp1(i,j,k))/pkz(i,j,k)
599 #else
600 #ifdef USE_COND
601  pt(i,j,k) = pt(i,j,k)*(1.+dp1(i,j,k))*(1.-q_con(i,j,k))/pkz(i,j,k)
602 #else
603  pt(i,j,k) = pt(i,j,k)*(1.+dp1(i,j,k))/pkz(i,j,k)
604 #endif
605 #endif
606  enddo
607  enddo
608  enddo
609  endif
610 #endif
611 
612  last_step = .false.
613  mdt = bdt / real(k_split)
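! The large (physics) time step bdt is divided into k_split vertical-remapping
! sub-cycles of length mdt; within each sub-cycle dyn_core further splits mdt
! into n_split acoustic (Lagrangian) time steps.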
614 
615 #ifndef CCPP
616  if ( idiag%id_mdt > 0 .and. (.not. do_adiabatic_init) ) then
617  allocate ( dtdt_m(is:ie,js:je,npz) )
618 !$OMP parallel do default(none) shared(is,ie,js,je,npz,dtdt_m)
619  do k=1,npz
620  do j=js,je
621  do i=is,ie
622  dtdt_m(i,j,k) = 0.
623  enddo
624  enddo
625  enddo
626  endif
627 #endif
628 
629  call timing_on('FV_DYN_LOOP')
630  do n_map=1, k_split ! first level of time-split
631  k_step = n_map
632  call timing_on('COMM_TOTAL')
633 #ifdef USE_COND
634  call start_group_halo_update(i_pack(11), q_con, domain)
635 #ifdef MOIST_CAPPA
636  call start_group_halo_update(i_pack(12), cappa, domain)
637 #endif
638 #endif
639 #ifdef MULTI_GASES
640  call start_group_halo_update(i_pack(13), kapad, domain)
641 #endif
642  call start_group_halo_update(i_pack(1), delp, domain, complete=.false.)
643  call start_group_halo_update(i_pack(1), pt, domain, complete=.true.)
644 #ifndef ROT3
645  call start_group_halo_update(i_pack(8), u, v, domain, gridtype=dgrid_ne)
646 #endif
647  call timing_off('COMM_TOTAL')
648 #if defined(CCPP) && defined(__GFORTRAN__)
649 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,delp)
650 #else
651 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,dp1,delp)
652 #endif
653  do k=1,npz
654  do j=jsd,jed
655  do i=isd,ied
656  dp1(i,j,k) = delp(i,j,k)
657  enddo
658  enddo
659  enddo
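! Within each sub-cycle dp1 is re-used to hold delp at the start of the
! acoustic steps; the tracer transport below uses it as the pre-update air
! mass, together with the accumulated mass fluxes and Courant numbers.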
660 
661  if ( n_map==k_split ) last_step = .true.
662 
663 #ifdef USE_COND
664  call timing_on('COMM_TOTAL')
665  call complete_group_halo_update(i_pack(11), domain)
666 #ifdef MOIST_CAPPA
667  call complete_group_halo_update(i_pack(12), domain)
668 #endif
669  call timing_off('COMM_TOTAL')
670 #endif
671 #ifdef MULTI_GASES
672  call complete_group_halo_update(i_pack(13), domain)
673 #endif
674 
675  call timing_on('DYN_CORE')
676  call dyn_core(npx, npy, npz, ng, sphum, nq, mdt, n_map, n_split, zvir, cp_air, akap, cappa, &
677 #ifdef MULTI_GASES
678  kapad, &
679 #endif
680  grav, hydrostatic, &
681  u, v, w, delz, pt, q, delp, pe, pk, phis, ws, omga, ptop, pfull, ua, va, &
682  uc, vc, mfx, mfy, cx, cy, pkz, peln, q_con, ak, bk, ks, &
683  gridstruct, flagstruct, neststruct, idiag, bd, &
684  domain, n_map==1, i_pack, last_step, diss_est,time_total)
685  call timing_off('DYN_CORE')
686 
687 
688 #ifdef SW_DYNAMICS
689 !!$OMP parallel do default(none) shared(is,ie,js,je,ps,delp,agrav)
690  do j=js,je
691  do i=is,ie
692  ps(i,j) = delp(i,j,1) * agrav
693  enddo
694  enddo
695 #else
696  if( .not. flagstruct%inline_q .and. nq /= 0 ) then
697 !--------------------------------------------------------
698 ! Perform large-time-step scalar transport using the accumulated CFL and
699 ! mass fluxes
700  call timing_on('tracer_2d')
701  !!! CLEANUP: merge these two calls?
702  if (gridstruct%bounded_domain) then
703  call tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, nq, &
704  flagstruct%hord_tr, q_split, mdt, idiag%id_divg, i_pack(10), &
705  flagstruct%nord_tr, flagstruct%trdm2, &
706  k_split, neststruct, parent_grid, n_map, flagstruct%lim_fac)
707  else
708  if ( flagstruct%z_tracer ) then
709  call tracer_2d_1l(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, nq, &
710  flagstruct%hord_tr, q_split, mdt, idiag%id_divg, i_pack(10), &
711  flagstruct%nord_tr, flagstruct%trdm2, flagstruct%lim_fac)
712  else
713  call tracer_2d(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, nq, &
714  flagstruct%hord_tr, q_split, mdt, idiag%id_divg, i_pack(10), &
715  flagstruct%nord_tr, flagstruct%trdm2, flagstruct%lim_fac)
716  endif
717  endif
718  call timing_off('tracer_2d')
719 
720 #ifdef FILL2D
721  if ( flagstruct%hord_tr<8 .and. flagstruct%moist_phys ) then
722  call timing_on('Fill2D')
723  if ( liq_wat > 0 ) &
724  call fill2d(is, ie, js, je, ng, npz, q(isd,jsd,1,liq_wat), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy)
725  if ( rainwat > 0 ) &
726  call fill2d(is, ie, js, je, ng, npz, q(isd,jsd,1,rainwat), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy)
727  if ( ice_wat > 0 ) &
728  call fill2d(is, ie, js, je, ng, npz, q(isd,jsd,1,ice_wat), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy)
729  if ( snowwat > 0 ) &
730  call fill2d(is, ie, js, je, ng, npz, q(isd,jsd,1,snowwat), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy)
731  if ( graupel > 0 ) &
732  call fill2d(is, ie, js, je, ng, npz, q(isd,jsd,1,graupel), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy)
733  call timing_off('Fill2D')
734  endif
735 #endif
736 
737  if( last_step .and. idiag%id_divg>0 ) then
738  used = send_data(idiag%id_divg, dp1, fv_time)
739  if(flagstruct%fv_debug) call prt_mxm('divg', dp1, is, ie, js, je, 0, npz, 1.,gridstruct%area_64, domain)
740  endif
741  endif
742 
743  if ( npz > 4 ) then
744 !------------------------------------------------------------------------
745 ! Perform vertical remapping from Lagrangian control-volume to
746 ! the Eulerian coordinate as specified by the routine set_eta.
747 ! Note that this finite-volume dycore is otherwise independent of the vertical
748 ! Eulerian coordinate.
749 !------------------------------------------------------------------------
750 
751  do iq=1,nr
752  kord_tracer(iq) = flagstruct%kord_tr
753  if ( iq==cld_amt ) kord_tracer(iq) = 9 ! monotonic
754  enddo
755 
756  do_omega = hydrostatic .and. last_step
757  call timing_on('Remapping')
758 #ifdef AVEC_TIMERS
759  call avec_timer_start(6)
760 #endif
761 
762  call lagrangian_to_eulerian(last_step, consv_te, ps, pe, delp, &
763  pkz, pk, mdt, bdt, npx, npy, npz, is,ie,js,je, isd,ied,jsd,jed, &
764  nr, nwat, sphum, q_con, u, v, w, delz, pt, q, phis, &
765  zvir, cp_air, akap, cappa, flagstruct%kord_mt, flagstruct%kord_wz, &
766  kord_tracer, flagstruct%kord_tm, peln, te_2d, &
767  ng, ua, va, omga, dp1, ws, fill, reproduce_sum, &
768  idiag%id_mdt>0, dtdt_m, ptop, ak, bk, pfull, gridstruct, domain, &
769  flagstruct%do_sat_adj, hydrostatic, hybrid_z, do_omega, &
770  flagstruct%adiabatic, do_adiabatic_init, &
771  flagstruct%c2l_ord, bd, flagstruct%fv_debug, &
772  flagstruct%moist_phys)
773 
774  if ( flagstruct%fv_debug ) then
775  if (is_master()) write(*,'(A, I3, A1, I3)') 'finished k_split ', n_map, '/', k_split
776  call prt_mxm('T_dyn_a3', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
777  if (sphum > 0) call prt_mxm('SPHUM_dyn', q(isd,jsd,1,sphum ), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
778  if (liq_wat > 0) call prt_mxm('liq_wat_dyn', q(isd,jsd,1,liq_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
779  if (rainwat > 0) call prt_mxm('rainwat_dyn', q(isd,jsd,1,rainwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
780  if (ice_wat > 0) call prt_mxm('ice_wat_dyn', q(isd,jsd,1,ice_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
781  if (snowwat > 0) call prt_mxm('snowwat_dyn', q(isd,jsd,1,snowwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
782  if (graupel > 0) call prt_mxm('graupel_dyn', q(isd,jsd,1,graupel), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
783  endif
784 #ifdef AVEC_TIMERS
785  call avec_timer_stop(6)
786 #endif
787  call timing_off('Remapping')
788 #ifdef MOIST_CAPPA
789  if ( neststruct%nested .and. .not. last_step) then
790  call nested_grid_bc_apply_intt(cappa, &
791  0, 0, npx, npy, npz, bd, real(n_map+1), real(k_split), &
792  neststruct%cappa_BC, bctype=neststruct%nestbctype )
793  endif
794  if ( flagstruct%regional .and. .not. last_step) then
795  reg_bc_update_time=current_time_in_seconds+(n_map+1)*mdt
796  call regional_boundary_update(cappa, 'cappa', &
797  isd, ied, jsd, jed, npz, &
798  is, ie, js, je, &
799  isd, ied, jsd, jed, &
800  reg_bc_update_time )
801  endif
802 #endif
803 
804  if( last_step ) then
805  if( .not. hydrostatic ) then
806 !$OMP parallel do default(none) shared(is,ie,js,je,npz,omga,delp,delz,w)
807  do k=1,npz
808  do j=js,je
809  do i=is,ie
810  omga(i,j,k) = delp(i,j,k)/delz(i,j,k)*w(i,j,k)
811  enddo
812  enddo
813  enddo
814  endif
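! In the non-hydrostatic case omga (Pa/s) is diagnosed from the vertical
! velocity w (m/s) as omga = (delp/delz)*w; since the layer mass gives
! delp = -rho*grav*delz, this is the familiar omega ~ -rho*g*w conversion.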
815 !--------------------------
816 ! Filter omega for physics:
817 !--------------------------
818  if(flagstruct%nf_omega>0) &
819  call del2_cubed(omga, 0.18*gridstruct%da_min, gridstruct, domain, npx, npy, npz, flagstruct%nf_omega, bd)
820  endif
821  end if
822 #endif
823  enddo ! n_map loop
824  call timing_off('FV_DYN_LOOP')
825  if ( idiag%id_mdt > 0 .and. (.not.do_adiabatic_init) ) then
826 ! Output temperature tendency due to inline moist physics:
827 #if defined(CCPP) && defined(__GFORTRAN__)
828 !$OMP parallel do default(none) shared(is,ie,js,je,npz,bdt)
829 #else
830 !$OMP parallel do default(none) shared(is,ie,js,je,npz,dtdt_m,bdt)
831 #endif
832  do k=1,npz
833  do j=js,je
834  do i=is,ie
835  dtdt_m(i,j,k) = dtdt_m(i,j,k) / bdt * 86400.
836  enddo
837  enddo
838  enddo
839 ! call prt_mxm('Fast DTDT (deg/Day)', dtdt_m, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain)
840  used = send_data(idiag%id_mdt, dtdt_m, fv_time)
841 #ifndef CCPP
842  deallocate ( dtdt_m )
843 #endif
844  endif
845 
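! Remove negative water-species values left by advection and remapping:
! neg_adj3 is used for the 6-category microphysics set
! (vapor/liquid/rain/ice/snow/graupel) and neg_adj2 for the 5-category set;
! the cloud-fraction tracer is adjusted only when cld_amt is present.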
846  if( nwat == 6 ) then
847  if (cld_amt > 0) then
848  call neg_adj3(is, ie, js, je, ng, npz, &
849  flagstruct%hydrostatic, &
850  peln, delz, &
851  pt, delp, q(isd,jsd,1,sphum), &
852  q(isd,jsd,1,liq_wat), &
853  q(isd,jsd,1,rainwat), &
854  q(isd,jsd,1,ice_wat), &
855  q(isd,jsd,1,snowwat), &
856  q(isd,jsd,1,graupel), &
857  q(isd,jsd,1,cld_amt), flagstruct%check_negative)
858  else
859  call neg_adj3(is, ie, js, je, ng, npz, &
860  flagstruct%hydrostatic, &
861  peln, delz, &
862  pt, delp, q(isd,jsd,1,sphum), &
863  q(isd,jsd,1,liq_wat), &
864  q(isd,jsd,1,rainwat), &
865  q(isd,jsd,1,ice_wat), &
866  q(isd,jsd,1,snowwat), &
867  q(isd,jsd,1,graupel), check_negative=flagstruct%check_negative)
868  endif
869  if ( flagstruct%fv_debug ) then
870  call prt_mxm('T_dyn_a3', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
871  call prt_mxm('SPHUM_dyn', q(isd,jsd,1,sphum ), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
872  call prt_mxm('liq_wat_dyn', q(isd,jsd,1,liq_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
873  call prt_mxm('rainwat_dyn', q(isd,jsd,1,rainwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
874  call prt_mxm('ice_wat_dyn', q(isd,jsd,1,ice_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
875  call prt_mxm('snowwat_dyn', q(isd,jsd,1,snowwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
876  call prt_mxm('graupel_dyn', q(isd,jsd,1,graupel), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
877  endif
878  endif
879 
880  if( nwat == 5 ) then
881  if (cld_amt > 0) then
882  call neg_adj2(is, ie, js, je, ng, npz, &
883  flagstruct%hydrostatic, &
884  peln, delz, &
885  pt, delp, q(isd,jsd,1,sphum), &
886  q(isd,jsd,1,liq_wat), &
887  q(isd,jsd,1,rainwat), &
888  q(isd,jsd,1,ice_wat), &
889  q(isd,jsd,1,snowwat), &
890  q(isd,jsd,1,cld_amt), flagstruct%check_negative)
891  else
892  call neg_adj2(is, ie, js, je, ng, npz, &
893  flagstruct%hydrostatic, &
894  peln, delz, &
895  pt, delp, q(isd,jsd,1,sphum), &
896  q(isd,jsd,1,liq_wat), &
897  q(isd,jsd,1,rainwat), &
898  q(isd,jsd,1,ice_wat), &
899  q(isd,jsd,1,snowwat), &
900  check_negative=flagstruct%check_negative)
901  endif
902  if ( flagstruct%fv_debug ) then
903  call prt_mxm('T_dyn_a3', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
904  call prt_mxm('SPHUM_dyn', q(isd,jsd,1,sphum ), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
905  call prt_mxm('liq_wat_dyn', q(isd,jsd,1,liq_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
906  call prt_mxm('rainwat_dyn', q(isd,jsd,1,rainwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
907  call prt_mxm('ice_wat_dyn', q(isd,jsd,1,ice_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
908  call prt_mxm('snowwat_dyn', q(isd,jsd,1,snowwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain)
909  endif
910  endif
911 
912  if( (flagstruct%consv_am.or.idiag%id_amdt>0.or.idiag%id_aam>0) .and. (.not.do_adiabatic_init) ) then
913  call compute_aam(npz, is, ie, js, je, isd, ied, jsd, jed, gridstruct, bd, &
914  ptop, ua, va, u, v, delp, te_2d, ps, m_fac)
915  if( idiag%id_aam>0 ) then
916  used = send_data(idiag%id_aam, te_2d, fv_time)
917  if ( prt_minmax ) then
918  gam = g_sum( domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0)
919  if( is_master() ) write(6,*) 'Total AAM =', gam
920  endif
921  endif
922  endif
923 
924  if( (flagstruct%consv_am.or.idiag%id_amdt>0) .and. (.not.do_adiabatic_init) ) then
925 #if defined(CCPP) && defined(__GFORTRAN__)
926 !$OMP parallel do default(none) shared(is,ie,js,je,teq,dt2,ps2,ps,idiag)
927 #else
928 !$OMP parallel do default(none) shared(is,ie,js,je,te_2d,teq,dt2,ps2,ps,idiag)
929 #endif
930  do j=js,je
931  do i=is,ie
932 ! Note: the mountain torque computation also contains numerical error
933 ! The numerical error is mostly from the zonal gradient of the terrain (zxg)
934  te_2d(i,j) = te_2d(i,j)-teq(i,j) + dt2*(ps2(i,j)+ps(i,j))*idiag%zxg(i,j)
935  enddo
936  enddo
937  if( idiag%id_amdt>0 ) used = send_data(idiag%id_amdt, te_2d/bdt, fv_time)
938 
939  if ( flagstruct%consv_am .or. prt_minmax ) then
940  amdt = g_sum( domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.)
941  u0 = -radius*amdt/g_sum( domain, m_fac, is, ie, js, je, ng, gridstruct%area_64, 0,reproduce=.true.)
942  if(is_master() .and. prt_minmax) &
943  write(6,*) 'Dynamic AM tendency (Hadleys)=', amdt/(bdt*1.e18), 'del-u (per day)=', u0*86400./bdt
944  endif
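! When consv_am is set, the diagnosed AAM change is returned to the flow as a
! zonal wind correction of amplitude u0 = -radius*amdt/g_sum(m_fac), applied
! to the D-grid winds below through the projection factors l2c_u and l2c_v.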
945 
946  if( flagstruct%consv_am ) then
947 !$OMP parallel do default(none) shared(is,ie,js,je,m_fac,u0,gridstruct)
948  do j=js,je
949  do i=is,ie
950  m_fac(i,j) = u0*cos(gridstruct%agrid(i,j,2))
951  enddo
952  enddo
953 !$OMP parallel do default(none) shared(is,ie,js,je,npz,hydrostatic,pt,m_fac,ua,cp_air, &
954 !$OMP u,u0,gridstruct,v )
955  do k=1,npz
956  do j=js,je+1
957  do i=is,ie
958  u(i,j,k) = u(i,j,k) + u0*gridstruct%l2c_u(i,j)
959  enddo
960  enddo
961  do j=js,je
962  do i=is,ie+1
963  v(i,j,k) = v(i,j,k) + u0*gridstruct%l2c_v(i,j)
964  enddo
965  enddo
966  enddo
967  endif ! consv_am
968  endif
969 
970 911 call cubed_to_latlon(u, v, ua, va, gridstruct, &
971  npx, npy, npz, 1, gridstruct%grid_type, domain, gridstruct%bounded_domain, flagstruct%c2l_ord, bd)
972 
973 #ifdef MULTI_GASES
974  deallocate(kapad)
975 #endif
976 #ifndef CCPP
977  deallocate(dp1)
978  deallocate(cappa)
979 #endif
980 
981  if ( flagstruct%fv_debug ) then
982  call prt_mxm('UA', ua, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
983  call prt_mxm('VA', va, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
984  call prt_mxm('TA', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
985  if (.not. hydrostatic) call prt_mxm('W ', w, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
986  endif
987 
988  if ( flagstruct%range_warn ) then
989  call range_check('UA_dyn', ua, is, ie, js, je, ng, npz, gridstruct%agrid, &
990  -280., 280., bad_range, fv_time)
991  call range_check('VA_dyn', va, is, ie, js, je, ng, npz, gridstruct%agrid, &
992  -280., 280., bad_range, fv_time)
993  call range_check('TA_dyn', pt, is, ie, js, je, ng, npz, gridstruct%agrid, &
994  150., 335., bad_range, fv_time)
995  if ( .not. hydrostatic ) &
996  call range_check('W_dyn', w, is, ie, js, je, ng, npz, gridstruct%agrid, &
997  -50., 100., bad_range, fv_time)
998  endif
999 
1000 #ifdef CCPP
1001  end associate ccpp_associate
1002 #endif
1003 
1004  end subroutine fv_dynamics
1005 
1006 #ifdef USE_RF_FAST
1007  subroutine rayleigh_fast(dt, npx, npy, npz, pfull, tau, u, v, w, &
1008  ks, dp, ptop, hydrostatic, rf_cutoff, bd)
1009 ! Simple "inline" version of the Rayleigh friction
1010  real, intent(in):: dt
1011  real, intent(in):: tau
1012  real, intent(in):: ptop, rf_cutoff
1013  real, intent(in), dimension(npz):: pfull
1014  integer, intent(in):: npx, npy, npz, ks
1015  logical, intent(in):: hydrostatic
1016  type(fv_grid_bounds_type), intent(IN) :: bd
1017  real, intent(inout):: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1018  real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz)
1019  real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: )
1020  real, intent(in):: dp(npz)
1021 !
1022  real(kind=R_GRID):: rff(npz)
1023  real, parameter:: sday = 86400.
1024  real, dimension(bd%is:bd%ie+1):: dmv
1025  real, dimension(bd%is:bd%ie):: dmu
1026  real:: tau0, dm
1027  integer i, j, k
1028 
1029  integer :: is, ie, js, je
1030  integer :: isd, ied, jsd, jed
1031 
1032  is = bd%is
1033  ie = bd%ie
1034  js = bd%js
1035  je = bd%je
1036  isd = bd%isd
1037  ied = bd%ied
1038  jsd = bd%jsd
1039  jed = bd%jed
1040 
1041  if ( .not. rf_initialized ) then
1042  tau0 = tau * sday
1043  allocate( rf(npz) )
1044  rf(:) = 1.
1045 
1046  if( is_master() ) write(6,*) 'Fast Rayleigh friction E-folding time (days):'
1047  do k=1, npz
1048  if ( pfull(k) < rf_cutoff ) then
1049  rff(k) = dt/tau0*sin(0.5*pi*log(rf_cutoff/pfull(k))/log(rf_cutoff/ptop))**2
1050 ! Re-FACTOR rf
1051  if( is_master() ) write(6,*) k, 0.01*pfull(k), dt/(rff(k)*sday)
1052  kmax = k
1053  rff(k) = 1.d0 / (1.0d0+rff(k))
1054  rf(k) = rff(k)
1055  else
1056  exit
1057  endif
1058  enddo
1059  dm = 0.
1060  do k=1,ks
1061  if ( pfull(k) < 100.e2 ) then
1062  dm = dm + dp(k)
1063  k_rf = k
1064  else
1065  exit
1066  endif
1067  enddo
1068  if( is_master() ) write(6,*) 'k_rf=', k_rf, 0.01*pfull(k_rf), 'dm=', dm
1069  rf_initialized = .true.
1070  endif
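! The loop below damps u, v (and w) in the sponge layers (k <= kmax) by the
! factor rf(k) and, to approximately conserve the column momentum, deposits
! the mass-weighted momentum removed (dmu, dmv) uniformly back into the top
! k_rf layers (those above ~100 hPa, of total mass dm).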
1071 
1072 !$OMP parallel do default(none) shared(k_rf,is,ie,js,je,kmax,pfull,rf_cutoff,w,rf,dp,u,v,hydrostatic) &
1073 !$OMP private(dm, dmu, dmv)
1074  do j=js,je+1
1075 
1076  dm = 0.
1077  do k=1, k_rf
1078  dm = dm + dp(k)
1079  enddo
1080 
1081  dmu(:) = 0.
1082  dmv(:) = 0.
1083  do k=1,kmax
1084  do i=is,ie
1085  dmu(i) = dmu(i) + (1.-rf(k))*dp(k)*u(i,j,k)
1086  u(i,j,k) = rf(k)*u(i,j,k)
1087  enddo
1088  if ( j/=je+1 ) then
1089  do i=is,ie+1
1090  dmv(i) = dmv(i) + (1.-rf(k))*dp(k)*v(i,j,k)
1091  v(i,j,k) = rf(k)*v(i,j,k)
1092  enddo
1093  if ( .not. hydrostatic ) then
1094  do i=is,ie
1095  w(i,j,k) = rf(k)*w(i,j,k)
1096  enddo
1097  endif
1098  endif
1099  enddo
1100 
1101  do i=is,ie
1102  dmu(i) = dmu(i) / dm
1103  enddo
1104  if ( j/=je+1 ) then
1105  do i=is,ie+1
1106  dmv(i) = dmv(i) / dm
1107  enddo
1108  endif
1109 
1110  do k=1, k_rf
1111  do i=is,ie
1112  u(i,j,k) = u(i,j,k) + dmu(i)
1113  enddo
1114  if ( j/=je+1 ) then
1115  do i=is,ie+1
1116  v(i,j,k) = v(i,j,k) + dmv(i)
1117  enddo
1118  endif
1119  enddo
1120 
1121  enddo
1122 
1123  end subroutine rayleigh_fast
1124 #endif
1125 
1126 
1127 
1128  subroutine rayleigh_super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, &
1129  ua, va, delz, agrid, cp, rg, ptop, hydrostatic, &
1130  conserve, rf_cutoff, gridstruct, domain, bd)
1131  real, intent(in):: dt
1132  real, intent(in):: tau
1133  real, intent(in):: cp, rg, ptop, rf_cutoff
1134  real, intent(in), dimension(npz):: pm
1135  integer, intent(in):: npx, npy, npz, ks
1136  logical, intent(in):: hydrostatic
1137  logical, intent(in):: conserve
1138  type(fv_grid_bounds_type), intent(IN) :: bd
1139  real, intent(inout):: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1140  real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz)
1141  real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: )
1142  real, intent(inout):: pt(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
1143  real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) !
1144  real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) !
1145  real, intent(inout):: delz(bd%isd: ,bd%jsd: ,1: )
1146  real, intent(in) :: agrid(bd%isd:bd%ied, bd%jsd:bd%jed,2)
1147  real, intent(in) :: phis(bd%isd:bd%ied,bd%jsd:bd%jed)
1148  type(fv_grid_type), intent(IN) :: gridstruct
1149  type(domain2d), intent(INOUT) :: domain
1150 !
1151  real, allocatable :: u2f(:,:,:)
1152  real, parameter:: u0 = 60.
1153  real, parameter:: sday = 86400.
1154  real rcv, tau0
1155  integer i, j, k
1156 
1157  integer :: is, ie, js, je
1158  integer :: isd, ied, jsd, jed
1159 
1160  is = bd%is
1161  ie = bd%ie
1162  js = bd%js
1163  je = bd%je
1164  isd = bd%isd
1165  ied = bd%ied
1166  jsd = bd%jsd
1167  jed = bd%jed
1168 
1169  rcv = 1. / (cp - rg)
1170 
1171  if ( .not. rf_initialized ) then
1172 #ifdef HIWPP
1173  allocate ( u00(is:ie, js:je+1,npz) )
1174  allocate ( v00(is:ie+1,js:je ,npz) )
1175 !$OMP parallel do default(none) shared(is,ie,js,je,npz,u00,u,v00,v)
1176  do k=1,npz
1177  do j=js,je+1
1178  do i=is,ie
1179  u00(i,j,k) = u(i,j,k)
1180  enddo
1181  enddo
1182  do j=js,je
1183  do i=is,ie+1
1184  v00(i,j,k) = v(i,j,k)
1185  enddo
1186  enddo
1187  enddo
1188 #endif
1189 #ifdef SMALL_EARTH
1190  tau0 = abs( tau )
1191 #else
1192  tau0 = abs( tau * sday )
1193 #endif
1194  if( is_master() ) write(6,*) 'Rayleigh_Super tau=',tau
1195  allocate( rf(npz) )
1196  rf(:) = 0.
1197 
1198  do k=1, ks+1
1199  if( is_master() ) write(6,*) k, 0.01*pm(k)
1200  enddo
1201  if( is_master() ) write(6,*) 'Rayleigh friction E-folding time (days):'
1202  do k=1, npz
1203  if ( pm(k) < rf_cutoff ) then
1204  if ( tau < 0 ) then ! GSM formula
1205  rf(k) = dt/tau0*( log(rf_cutoff/pm(k)) )**2
1206  else
1207  rf(k) = dt/tau0*sin(0.5*pi*log(rf_cutoff/pm(k))/log(rf_cutoff/ptop))**2
1208  endif
1209  if( is_master() ) write(6,*) k, 0.01*pm(k), dt/(rf(k)*sday)
1210  kmax = k
1211  else
1212  exit
1213  endif
1214  enddo
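! rf(k) is a non-dimensional damping coefficient: zero at p = rf_cutoff and
! increasing toward the model top, following either the GSM (log**2) form for
! tau < 0 or the sin**2 profile otherwise, scaled by dt/tau0. The fields are
! subsequently relaxed by the factor 1/(1+rf) (toward the initial state
! u00, v00 when HIWPP is defined).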
1215  rf_initialized = .true.
1216  endif
1217 
1218  call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%bounded_domain)
1219 
1220  allocate( u2f(isd:ied,jsd:jed,kmax) )
1221 
1222 !$OMP parallel do default(none) shared(is,ie,js,je,kmax,pm,rf_cutoff,hydrostatic,ua,va,agrid, &
1223 !$OMP u2f,rf,w)
1224  do k=1,kmax
1225  if ( pm(k) < rf_cutoff ) then
1226  u2f(:,:,k) = 1. / (1.+rf(k))
1227  else
1228  u2f(:,:,k) = 1.
1229  endif
1230  enddo
1231  call timing_on('COMM_TOTAL')
1232  call mpp_update_domains(u2f, domain)
1233  call timing_off('COMM_TOTAL')
1234 
1235 !$OMP parallel do default(none) shared(is,ie,js,je,kmax,pm,rf_cutoff,w,rf,u,v, &
1236 #ifdef HIWPP
1237 !$OMP u00,v00, &
1238 #endif
1239 !$OMP conserve,hydrostatic,pt,ua,va,u2f,cp,rg,ptop,rcv)
1240  do k=1,kmax
1241  if ( pm(k) < rf_cutoff ) then
1242 #ifdef HIWPP
1243  if (.not. hydrostatic) then
1244  do j=js,je
1245  do i=is,ie
1246  w(i,j,k) = w(i,j,k)/(1.+rf(k))
1247  enddo
1248  enddo
1249  endif
1250  do j=js,je+1
1251  do i=is,ie
1252  u(i,j,k) = (u(i,j,k)+rf(k)*u00(i,j,k))/(1.+rf(k))
1253  enddo
1254  enddo
1255  do j=js,je
1256  do i=is,ie+1
1257  v(i,j,k) = (v(i,j,k)+rf(k)*v00(i,j,k))/(1.+rf(k))
1258  enddo
1259  enddo
1260 #else
1261 ! Add heat so as to conserve TE
1262  if ( conserve ) then
1263  if ( hydrostatic ) then
1264  do j=js,je
1265  do i=is,ie
1266  pt(i,j,k) = pt(i,j,k) + 0.5*(ua(i,j,k)**2+va(i,j,k)**2)*(1.-u2f(i,j,k)**2)/(cp-rg*ptop/pm(k))
1267  enddo
1268  enddo
1269  else
1270  do j=js,je
1271  do i=is,ie
1272  pt(i,j,k) = pt(i,j,k) + 0.5*(ua(i,j,k)**2+va(i,j,k)**2+w(i,j,k)**2)*(1.-u2f(i,j,k)**2)*rcv
1273  enddo
1274  enddo
1275  endif
1276  endif
1277 
1278  do j=js,je+1
1279  do i=is,ie
1280  u(i,j,k) = 0.5*(u2f(i,j-1,k)+u2f(i,j,k))*u(i,j,k)
1281  enddo
1282  enddo
1283  do j=js,je
1284  do i=is,ie+1
1285  v(i,j,k) = 0.5*(u2f(i-1,j,k)+u2f(i,j,k))*v(i,j,k)
1286  enddo
1287  enddo
1288  if ( .not. hydrostatic ) then
1289  do j=js,je
1290  do i=is,ie
1291  w(i,j,k) = u2f(i,j,k)*w(i,j,k)
1292  enddo
1293  enddo
1294  endif
1295 #endif
1296  endif
1297  enddo
1298 
1299  deallocate ( u2f )
1300 
1301  end subroutine rayleigh_super
1302 
1303 
1304  subroutine rayleigh_friction(dt, npx, npy, npz, ks, pm, tau, u, v, w, pt, &
1305  ua, va, delz, cp, rg, ptop, hydrostatic, conserve, &
1306  rf_cutoff, gridstruct, domain, bd)
1307  real, intent(in):: dt
1308  real, intent(in):: tau
1309  real, intent(in):: cp, rg, ptop, rf_cutoff
1310  real, intent(in), dimension(npz):: pm
1311  integer, intent(in):: npx, npy, npz, ks
1312  logical, intent(in):: hydrostatic
1313  logical, intent(in):: conserve
1314  type(fv_grid_bounds_type), intent(IN) :: bd
1315  real, intent(inout):: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1316  real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz)
1317  real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: )
1318  real, intent(inout):: pt(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
1319  real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) !
1320  real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) !
1321  real, intent(inout):: delz(bd%isd: ,bd%jsd: ,1: )
1322  type(fv_grid_type), intent(IN) :: gridstruct
1323  type(domain2d), intent(INOUT) :: domain
1324 ! local:
1325  real, allocatable :: u2f(:,:,:)
1326  real, parameter:: sday = 86400.
1327  real, parameter:: u000 = 4900.
1328  real rcv
1329  integer i, j, k
1330 
1331  integer :: is, ie, js, je
1332  integer :: isd, ied, jsd, jed
1333 
1334 
1335  is = bd%is
1336  ie = bd%ie
1337  js = bd%js
1338  je = bd%je
1339  isd = bd%isd
1340  ied = bd%ied
1341  jsd = bd%jsd
1342  jed = bd%jed
1343 
1344 
1345  rcv = 1. / (cp - rg)
1346 
1347  if ( .not. rf_initialized ) then
1348  allocate( rf(npz) )
1349  if( is_master() ) write(6,*) 'Rayleigh friction E-folding time (days):'
1350  do k=1, npz
1351  if ( pm(k) < rf_cutoff ) then
1352  rf(k) = dt/(tau*sday)*sin(0.5*pi*log(rf_cutoff/pm(k))/log(rf_cutoff/ptop))**2
1353  if( is_master() ) write(6,*) k, 0.01*pm(k), dt/(rf(k)*sday)
1354  kmax = k
1355  else
1356  exit
1357  endif
1358  enddo
1359  rf_initialized = .true.
1360  endif
1361 
1362  allocate( u2f(isd:ied,jsd:jed,kmax) )
1363 
1364  call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%bounded_domain)
1365 
1366 !$OMP parallel do default(none) shared(is,ie,js,je,kmax,u2f,hydrostatic,ua,va,w)
1367  do k=1,kmax
1368  if ( hydrostatic ) then
1369  do j=js,je
1370  do i=is,ie
1371  u2f(i,j,k) = ua(i,j,k)**2 + va(i,j,k)**2
1372  enddo
1373  enddo
1374  else
1375  do j=js,je
1376  do i=is,ie
1377  u2f(i,j,k) = ua(i,j,k)**2 + va(i,j,k)**2 + w(i,j,k)**2
1378  enddo
1379  enddo
1380  endif
1381  enddo
1382 
1383  call timing_on('COMM_TOTAL')
1384  call mpp_update_domains(u2f, domain)
1385  call timing_off('COMM_TOTAL')
1386 
1387 !$OMP parallel do default(none) shared(is,ie,js,je,kmax,conserve,hydrostatic,pt,u2f,cp,rg, &
1388 !$OMP ptop,pm,rf,delz,rcv,u,v,w)
1389  do k=1,kmax
1390 
1391  if ( conserve ) then
1392  if ( hydrostatic ) then
1393  do j=js,je
1394  do i=is,ie
1395  pt(i,j,k) = pt(i,j,k) + 0.5*u2f(i,j,k)/(cp-rg*ptop/pm(k)) &
1396  * ( 1. - 1./(1.+rf(k)*sqrt(u2f(i,j,k)/u000))**2 )
1397  enddo
1398  enddo
1399  else
1400  do j=js,je
1401  do i=is,ie
1402  delz(i,j,k) = delz(i,j,k) / pt(i,j,k)
1403  pt(i,j,k) = pt(i,j,k) + 0.5*u2f(i,j,k) * rcv &
1404  * ( 1. - 1./(1.+rf(k)*sqrt(u2f(i,j,k)/u000))**2 )
1405  delz(i,j,k) = delz(i,j,k) * pt(i,j,k)
1406  enddo
1407  enddo
1408  endif
1409  endif
1410 
1411  do j=js-1,je+1
1412  do i=is-1,ie+1
1413  u2f(i,j,k) = rf(k)*sqrt(u2f(i,j,k)/u000)
1414  enddo
1415  enddo
1416 
1417  do j=js,je+1
1418  do i=is,ie
1419  u(i,j,k) = u(i,j,k) / (1.+0.5*(u2f(i,j-1,k)+u2f(i,j,k)))
1420  enddo
1421  enddo
1422  do j=js,je
1423  do i=is,ie+1
1424  v(i,j,k) = v(i,j,k) / (1.+0.5*(u2f(i-1,j,k)+u2f(i,j,k)))
1425  enddo
1426  enddo
1427 
1428  if ( .not. hydrostatic ) then
1429  do j=js,je
1430  do i=is,ie
1431  w(i,j,k) = w(i,j,k) / (1.+u2f(i,j,k))
1432  enddo
1433  enddo
1434  endif
1435 
1436  enddo
1437 
1438  deallocate ( u2f )
1439 
1440  end subroutine rayleigh_friction
1441 
1443  subroutine compute_aam(npz, is, ie, js, je, isd, ied, jsd, jed, gridstruct, bd, &
1444  ptop, ua, va, u, v, delp, aam, ps, m_fac)
1445 ! Compute vertically (mass) integrated Atmospheric Angular Momentum
1446  integer, intent(in):: npz
1447  integer, intent(in):: is, ie, js, je
1448  integer, intent(in):: isd, ied, jsd, jed
1449  real, intent(in):: ptop
1450  real, intent(inout):: u(isd:ied ,jsd:jed+1,npz)
1451  real, intent(inout):: v(isd:ied+1,jsd:jed,npz)
1452  real, intent(inout):: delp(isd:ied,jsd:jed,npz)
1453  real, intent(inout), dimension(isd:ied,jsd:jed, npz):: ua, va
1454  real, intent(out):: aam(is:ie,js:je)
1455  real, intent(out):: m_fac(is:ie,js:je)
1456  real, intent(out):: ps(isd:ied,jsd:jed)
1457  type(fv_grid_bounds_type), intent(IN) :: bd
1458  type(fv_grid_type), intent(IN) :: gridstruct
1459 ! local:
1460  real, dimension(is:ie):: r1, r2, dm
1461  integer i, j, k
1462 
1463  call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%bounded_domain)
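! With r = radius*cos(lat), the column-integrated axial angular momentum per
! unit area accumulated below is
!     aam = sum_k ( omega*r**2 + r*ua ) * delp/grav,
! and m_fac = sum_k r**2 * delp/grav is the moment factor used by fv_dynamics
! to convert an AAM tendency into a wind correction.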
1464 
1465 !$OMP parallel do default(none) shared(is,ie,js,je,npz,gridstruct,aam,m_fac,ps,ptop,delp,agrav,ua) &
1466 !$OMP private(r1, r2, dm)
1467  do j=js,je
1468  do i=is,ie
1469  r1(i) = radius*cos(gridstruct%agrid(i,j,2))
1470  r2(i) = r1(i)*r1(i)
1471  aam(i,j) = 0.
1472  m_fac(i,j) = 0.
1473  ps(i,j) = ptop
1474  enddo
1475  do k=1,npz
1476  do i=is,ie
1477  dm(i) = delp(i,j,k)
1478  ps(i,j) = ps(i,j) + dm(i)
1479  dm(i) = dm(i)*agrav
1480  aam(i,j) = aam(i,j) + (r2(i)*omega + r1(i)*ua(i,j,k)) * dm(i)
1481  m_fac(i,j) = m_fac(i,j) + dm(i)*r2(i)
1482  enddo
1483  enddo
1484  enddo
1485 
1486  end subroutine compute_aam
1487 
1488 end module fv_dynamics_mod