FV3DYCORE  Version 1.1.0
fv_update_phys.F90
1 
2 !***********************************************************************
3 !* GNU Lesser General Public License
4 !*
5 !* This file is part of the FV3 dynamical core.
6 !*
7 !* The FV3 dynamical core is free software: you can redistribute it
8 !* and/or modify it under the terms of the
9 !* GNU Lesser General Public License as published by the
10 !* Free Software Foundation, either version 3 of the License, or
11 !* (at your option) any later version.
12 !*
13 !* The FV3 dynamical core is distributed in the hope that it will be
14 !* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
15 !* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16 !* See the GNU General Public License for more details.
17 !*
18 !* You should have received a copy of the GNU Lesser General Public
19 !* License along with the FV3 dynamical core.
20 !* If not, see <http://www.gnu.org/licenses/>.
21 !***********************************************************************
22 
25 
26 module fv_update_phys_mod
27 
28 ! Modules Included:
29 ! <table>
30 ! <tr>
31 ! <th>Module Name</th>
32 ! <th>Functions Included</th>
33 ! </tr>
34 ! <tr>
35 ! <td>boundary_mod</td>
36 ! <td>nested_grid_BC, extrapolation_BC</td>
37 ! </tr>
38 ! <tr>
39 ! <td>constants_mod</td>
40 ! <td>kappa, rdgas, rvgas, grav, cp_air, cp_vapor, pi=>pi_8, radius</td>
41 ! </tr>
42 ! <tr>
43 ! <td>field_manager_mod</td>
44 ! <td>MODEL_ATMOS</td>
45 ! </tr>
46 ! <tr>
47 ! <td>fv_arrays_mod</td>
48 ! <td>fv_flags_type, fv_grid_type, fv_nest_type, fv_grid_bounds_type, R_GRID</td>
49 ! </tr>
50 ! <tr>
51 ! <td>fv_diagnostics_mod</td>
52 ! <td>prt_maxmin</td>
53 ! </tr>
54 ! <tr>
55 ! <td>fv_eta_mod</td>
56 ! <td>get_eta_level</td>
57 ! </tr>
58 ! <tr>
59 ! <td>fv_mapz_mod</td>
60 ! <td>moist_cv, moist_cp</td>
61 ! </tr>
62 ! <tr>
63 ! <td>fv_mp_mod</td>
64 ! <td>start_group_halo_update, complete_group_halo_update, group_halo_update_type</td>
65 ! </tr>
66 ! <tr>
67 ! <td>fv_timing_mod</td>
68 ! <td>timing_on, timing_off</td>
69 ! </tr>
70 ! <tr>
71 ! <td>mpp_mod</td>
72 ! <td>FATAL, mpp_error, NOTE, WARNING</td>
73 ! </tr>
74 ! <tr>
75 ! <td>mpp_domains_mod</td>
76 ! <td>mpp_update_domains, domain2d</td>
77 ! </tr>
78 ! <tr>
79 ! <td>mpp_parameter_mod</td>
80 ! <td>AGRID_PARAM=>AGRID</td>
81 ! </tr>
82 ! <tr>
83 ! <td>time_manager_mod</td>
84 ! <td>time_type</td>
85 ! </tr>
86 ! <tr>
87 ! <td>tracer_manager_mod</td>
88 ! <td>get_tracer_index, adjust_mass, get_tracer_names</td>
89 ! </tr>
90 ! </table>
91 
92  use constants_mod, only: kappa, rdgas, rvgas, grav, cp_air, cp_vapor, pi=>pi_8, radius
93  use field_manager_mod, only: model_atmos
94  use mpp_domains_mod, only: mpp_update_domains, domain2d
95  use mpp_parameter_mod, only: agrid_param=>agrid
96  use mpp_mod, only: fatal, mpp_error
97  use mpp_mod, only: mpp_error, note, warning
98  use time_manager_mod, only: time_type
99  use tracer_manager_mod, only: get_tracer_index, adjust_mass, get_tracer_names
100  use fv_mp_mod, only: start_group_halo_update, complete_group_halo_update
101  use fv_mp_mod, only: group_halo_update_type
102  use fv_arrays_mod, only: fv_flags_type, fv_grid_type, fv_nest_type, fv_grid_bounds_type, R_GRID
103  use boundary_mod, only: nested_grid_bc
104  use boundary_mod, only: extrapolation_bc
105  use fv_eta_mod, only: get_eta_level
106  use fv_timing_mod, only: timing_on, timing_off
107  use fv_diagnostics_mod, only: prt_maxmin
108  use fv_mapz_mod, only: moist_cv, moist_cp
109 #if defined (ATMOS_NUDGE)
110  use atmos_nudge_mod, only: get_atmos_nudge, do_ps
111 #elif defined (CLIMATE_NUDGE)
112  use fv_climate_nudge_mod, only: fv_climate_nudge, do_ps
113 #elif defined (ADA_NUDGE)
114  use fv_ada_nudge_mod, only: fv_ada_nudge
115 #else
116  use fv_nwp_nudge_mod, only: fv_nwp_nudge
117 #endif
118  use fv_grid_utils_mod, only: cubed_to_latlon
119 
120 #ifdef MULTI_GASES
121  use multi_gases_mod, only: virq, virqd, vicpqd, vicvqd, num_gas
122 #endif
123 
124  implicit none
125 
126  public :: fv_update_phys, del2_phys
127 #ifdef ROT3
128  public :: update_dwinds_phys
129 #endif
130 
131  real,parameter:: con_cp = cp_air
132 
133  contains
134 
135  subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, &
136  u, v, w, delp, pt, q, qdiag, ua, va, ps, pe, peln, pk, pkz, &
137  ak, bk, phis, u_srf, v_srf, ts, delz, hydrostatic, &
138  u_dt, v_dt, t_dt, moist_phys, Time, nudge, &
139  gridstruct, lona, lata, npx, npy, npz, flagstruct, &
140  neststruct, bd, domain, ptop, q_dt)
141  real, intent(in) :: dt, ptop
142  integer, intent(in):: is, ie, js, je, ng
143  integer, intent(in):: isd, ied, jsd, jed
144  integer, intent(in):: nq ! tracers modified by physics
145  ! ncnst is the total number of tracers
146  logical, intent(in):: moist_phys
147  logical, intent(in):: hydrostatic
148  logical, intent(in):: nudge
149 
150  type(time_type), intent(in) :: time
151 
152  real, intent(in), dimension(npz+1):: ak, bk
153  real, intent(in) :: phis(isd:ied,jsd:jed)
154  real, intent(inout):: delz(isd:,jsd:,1:)
155 
156 ! optional arguments for atmospheric nudging
157  real, intent(in), dimension(isd:ied,jsd:jed), optional :: &
158  lona, lata
159 
160 ! Winds on lat-lon grid:
161  real, intent(inout), dimension(isd:ied,jsd:jed,npz):: ua, va
162  real, intent(inout), dimension(isd: ,jsd: ,1: ):: w
163 
164 ! Tendencies from Physics:
165  real, intent(inout), dimension(isd:ied,jsd:jed,npz):: u_dt, v_dt
166  real, intent(inout):: t_dt(is:ie,js:je,npz)
167  real, intent(inout), optional :: q_dt(is:ie,js:je,npz,nq)
168 
169 ! Saved Bottom winds for GFDL Physics Interface
170  real, intent(out), dimension(is:ie,js:je):: u_srf, v_srf, ts
171 
172  type(fv_flags_type) :: flagstruct
173  type(fv_grid_bounds_type), intent(IN) :: bd
174  type(domain2d), intent(INOUT) :: domain
175 
176  real, intent(inout):: u(isd:ied ,jsd:jed+1,npz)
177  real, intent(inout):: v(isd:ied+1,jsd:jed ,npz)
178  real, intent(inout), dimension(isd:ied,jsd:jed,npz):: pt, delp
179  real, intent(inout):: q(isd:ied,jsd:jed,npz,nq)
180  real, intent(inout):: qdiag(isd:ied,jsd:jed,npz,nq+1:flagstruct%ncnst)
181 
182 !-----------------------------------------------------------------------
183 ! Auxiliary pressure arrays:
184 ! The 5 vars below can be re-computed from delp and ptop.
185 !-----------------------------------------------------------------------
186 ! dyn_aux:
187  real, intent(inout):: ps (isd:ied ,jsd:jed)
188  real, intent(inout):: pe (is-1:ie+1, npz+1,js-1:je+1)
189  real, intent(inout):: pk (is:ie,js:je , npz+1)
190  real, intent(inout):: peln(is:ie,npz+1,js:je)
191  real, intent(inout):: pkz (is:ie,js:je,npz)
192  real, parameter:: tice = 273.16
193 
194  type(fv_grid_type) :: gridstruct
195  type(fv_nest_type) :: neststruct
196 
197  integer, intent(IN) :: npx, npy, npz
198 
199 !***********
200 ! HALOE data (stratospheric H2O climatology used by the idealized CH4 chemistry below)
201 !***********
202  real, parameter:: q1_h2o = 2.2e-6
203  real, parameter:: q7_h2o = 3.8e-6
204  real, parameter:: q100_h2o = 3.8e-6
205  real, parameter:: q1000_h2o = 3.1e-6
206  real, parameter:: q2000_h2o = 2.8e-6
207  real, parameter:: q3000_h2o = 3.0e-6
208 
209 ! Local arrays:
210  real ps_dt(is:ie,js:je)
211  real cvm(is:ie), qc(is:ie)
212  real phalf(npz+1), pfull(npz)
213 
214 #ifdef MULTI_GASES
215  integer :: nn, nm
216 #endif
217 
218  type(group_halo_update_type), save :: i_pack(2)
219  integer i, j, k, m, n, nwat
220  integer sphum, liq_wat, ice_wat, cld_amt ! GFDL AM physics
221  integer rainwat, snowwat, graupel ! GFDL Cloud Microphysics
222  integer w_diff ! w-tracer for PBL diffusion
223  real:: qstar, dbk, rdg, zvir, p_fac, cv_air, gama_dt
224 
225  real, dimension(1,1,1) :: parent_u_dt, parent_v_dt ! dummy variables for nesting
226 
227 !f1p
228 !account for change in air molecular weight because of H2O change
229  logical, dimension(nq) :: conv_vmr_mmr
230  real :: adj_vmr(is:ie,js:je,npz)
231  character(len=32) :: tracer_units, tracer_name
232 
233  cv_air = cp_air - rdgas ! = rdgas * (7/2-1) = 2.5*rdgas=717.68
234 
235  rdg = -rdgas / grav
236 
237  nwat = flagstruct%nwat
238 
239  if ( moist_phys .or. nwat/=0 ) then
240  zvir = rvgas/rdgas - 1.
241  else
242  zvir = 0.
243  endif
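! zvir = rvgas/rdgas - 1 (about 0.608 for rvgas ~461.5 and rdgas ~287.05 J/kg/K) is the
! usual virtual-temperature factor; an illustrative relation is Tv = T*(1 + zvir*q_sphum).
! Setting zvir = 0 simply disables the moisture correction for dry runs
! (moist_phys false and nwat = 0).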
244 
245 !f1p
246  conv_vmr_mmr(1:nq) = .false.
247  if (flagstruct%adj_mass_vmr) then
248  do m=1,nq
249  call get_tracer_names (model_atmos, m, name = tracer_name, &
250  units = tracer_units)
251  if ( trim(tracer_units) .eq. 'vmr' ) then
252  conv_vmr_mmr(m) = .true.
253  else
254  conv_vmr_mmr(m) = .false.
255  end if
256  end do
257  end if
258 
259  sphum = get_tracer_index(model_atmos, 'sphum')
260  liq_wat = get_tracer_index(model_atmos, 'liq_wat')
261  ice_wat = get_tracer_index(model_atmos, 'ice_wat')
262  rainwat = get_tracer_index(model_atmos, 'rainwat')
263  snowwat = get_tracer_index(model_atmos, 'snowwat')
264  graupel = get_tracer_index(model_atmos, 'graupel')
265  cld_amt = get_tracer_index(model_atmos, 'cld_amt')
266 
267  if ( .not. hydrostatic ) then
268  w_diff = get_tracer_index(model_atmos, 'w_diff')
269 ! if ( w_diff<8 ) call mpp_error(FATAL,'W_tracer index error')
270  else
271  w_diff = 0
272  endif
273 
274  if ( .not. hydrostatic .and. .not. flagstruct%phys_hydrostatic .and. nwat == 0 ) then
275  gama_dt = dt*cp_air/cv_air
276  else
277  gama_dt = -1.e24
278  endif
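! gama_dt converts a constant-pressure heating rate into a constant-volume temperature
! increment in one step: dT = t_dt * dt * (cp_air/cv_air).  As a rough illustration,
! with cp_air/cv_air ~ 1.4, a 1 K/day heating over an 1800 s step warms pt by ~0.029 K
! instead of ~0.021 K.  The sentinel value -1.e24 marks the branches in which gama_dt
! must never be used.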
279 
280  if ( flagstruct%fv_debug ) then
281  call prt_maxmin('delp_b_update', delp, is, ie, js, je, ng, npz, 0.01)
282  if (present(q_dt)) then
283  do m=1,nq
284  call prt_maxmin('q_dt', q_dt(is,js,1,m), is, ie, js, je, 0, npz, 1.)
285  enddo
286  endif
287  call prt_maxmin('u_dt', u_dt, is, ie, js, je, ng, npz, 1.)
288  call prt_maxmin('v_dt', v_dt, is, ie, js, je, ng, npz, 1.)
289  call prt_maxmin('T_dt', t_dt, is, ie, js, je, 0, npz, 1.)
290  endif
291 
292  call get_eta_level(npz, 1.0e5, pfull, phalf, ak, bk)
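! In the usual FV3 hybrid sigma-p convention, the reference interface pressures are
! phalf(k) = ak(k) + bk(k)*1.e5 Pa, and pfull(k) is the corresponding log-pressure layer
! mean.  These reference values (in Pa) are used below to select the stratospheric H2O
! relaxation regime and as reference pressures for the nudging options, not for the
! actual dynamics.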
293 
294 #ifdef MULTI_GASES
295  nm = max( nwat,num_gas)
296  nn = max(flagstruct%nwat,num_gas)
297 #endif
298 
299 !$OMP parallel do default(none) &
300 !$OMP shared(is,ie,js,je,npz,flagstruct,pfull,q_dt,sphum,q,qdiag, &
301 !$OMP nq,w_diff,dt,nwat,liq_wat,rainwat,ice_wat,snowwat, &
302 !$OMP graupel,delp,cld_amt,hydrostatic,pt,t_dt,delz,adj_vmr,&
303 !$OMP gama_dt,cv_air,ua,u_dt,va,v_dt,isd,ied,jsd,jed, &
304 #ifdef MULTI_GASES
305 !$OMP nn, nm, &
306 #endif
307 !$OMP conv_vmr_mmr) &
308 !$OMP private(cvm, qc, qstar, ps_dt, p_fac)
309  do k=1, npz
310 
311  if (present(q_dt)) then
312 
313  if (flagstruct%tau_h2o<0.0 .and. pfull(k) < 100.e2 ) then
314 ! Wipe the stratosphere clean:
315 ! This should only be used for initialization from a bad model state
316  p_fac = -flagstruct%tau_h2o*86400.
317  do j=js,je
318  do i=is,ie
319  q_dt(i,j,k,sphum) = q_dt(i,j,k,sphum) + (3.e-6-q(i,j,k,sphum))/p_fac
320  enddo
321  enddo
322  elseif ( flagstruct%tau_h2o>0.0 .and. pfull(k) < 3000. ) then
323 ! Do idealized CH4 chemistry
324 
325  if ( pfull(k) < 1. ) then
326  qstar = q1_h2o
327  p_fac = 0.2 * flagstruct%tau_h2o*86400.
328  elseif ( pfull(k) < 7. .and. pfull(k) >= 1. ) then
329  qstar = q1_h2o + (q7_h2o-q1_h2o)*log(pfull(k)/1.)/log(7.)
330  p_fac = 0.3 * flagstruct%tau_h2o*86400.
331  elseif ( pfull(k) < 100. .and. pfull(k) >= 7. ) then
332  qstar = q7_h2o + (q100_h2o-q7_h2o)*log(pfull(k)/7.)/log(100./7.)
333  p_fac = 0.4 * flagstruct%tau_h2o*86400.
334  elseif ( pfull(k) < 1000. .and. pfull(k) >= 100. ) then
335  qstar = q100_h2o + (q1000_h2o-q100_h2o)*log(pfull(k)/1.e2)/log(10.)
336  p_fac = 0.5 * flagstruct%tau_h2o*86400.
337  elseif ( pfull(k) < 2000. .and. pfull(k) >= 1000. ) then
338  qstar = q1000_h2o + (q2000_h2o-q1000_h2o)*log(pfull(k)/1.e3)/log(2.)
339  p_fac = 0.75 * flagstruct%tau_h2o*86400.
340  else
341  qstar = q3000_h2o
342  p_fac = flagstruct%tau_h2o*86400.
343  endif
344 
345  do j=js,je
346  do i=is,ie
347  q_dt(i,j,k,sphum) = q_dt(i,j,k,sphum) + (qstar-q(i,j,k,sphum))/p_fac
348  enddo
349  enddo
350  endif
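! The block above relaxes stratospheric water vapor toward a HALOE-based profile qstar
! with an e-folding time p_fac (seconds):  q_dt = q_dt + (qstar - q)/p_fac.
! As a rough illustration, assuming tau_h2o = 5 (days) and pfull ~ 5 Pa (0.05 hPa),
! p_fac = 0.3*5*86400 ~ 1.3e5 s, so a 1.e-6 kg/kg departure from qstar decays at about
! 7.7e-12 kg/kg per second.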
351 
352 !----------------
353 ! Update tracers:
354 !----------------
355  do m=1,nq
356  if( m /= w_diff ) then
357  do j=js,je
358  do i=is,ie
359  q(i,j,k,m) = q(i,j,k,m) + dt*q_dt(i,j,k,m)
360  enddo
361  enddo
362  endif
363  enddo
364 
365 !--------------------------------------------------------
366 ! Adjust total air mass due to changes in water substance
367 !--------------------------------------------------------
368  do j=js,je
369  do i=is,ie
370 #ifdef MULTI_GASES
371  ps_dt(i,j) = 1. + dt*sum(q_dt(i,j,k,1:nm))
372 #else
373  ps_dt(i,j) = 1. + dt*sum(q_dt(i,j,k,1:nwat))
374 #endif
375  delp(i,j,k) = delp(i,j,k) * ps_dt(i,j)
376  if (flagstruct%adj_mass_vmr) then
377 #ifdef MULTI_GASES
378  adj_vmr(i,j,k) = &
379  (ps_dt(i,j) - sum(q(i,j,k,1:nn))) / &
380  (1.d0 - sum(q(i,j,k,1:nn)))
381 #else
382  adj_vmr(i,j,k) = &
383  (ps_dt(i,j) - sum(q(i,j,k,1:flagstruct%nwat))) / &
384  (1.d0 - sum(q(i,j,k,1:flagstruct%nwat)))
385 #endif
386  end if
387  enddo
388  enddo
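! ps_dt above is the (dimensionless) fractional change in layer air mass implied by the
! water-substance tendencies, and delp is rescaled by the same factor so the moist air
! mass stays consistent with the updated water loading.  Illustrative numbers: if the
! summed water tendency is 1.e-6 kg/kg/s and dt = 1800 s, delp grows by a factor of
! 1 + 1800*1.e-6 = 1.0018.  The tracer mixing ratios are renormalized by the same factor
! in the block that follows.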
389 
390 !-----------------------------------------
391 ! Adjust mass mixing ratio of all tracers
392 !-----------------------------------------
393  if ( nwat /=0 ) then
394  do m=1,flagstruct%ncnst
395 !-- query the field_table to determine whether this tracer needs mass adjustment
396  if( m /= cld_amt .and. m /= w_diff .and. adjust_mass(model_atmos,m)) then
397  if (m <= nq) then
398  q(is:ie,js:je,k,m) = q(is:ie,js:je,k,m) / ps_dt(is:ie,js:je)
399  if (conv_vmr_mmr(m)) &
400  q(is:ie,js:je,k,m) = q(is:ie,js:je,k,m) * adj_vmr(is:ie,js:je,k)
401  else
402  qdiag(is:ie,js:je,k,m) = qdiag(is:ie,js:je,k,m) / ps_dt(is:ie,js:je)
403  endif
404  endif
405  enddo
406  endif
407 
408  endif ! present(q_dt)
409 
410  if ( hydrostatic ) then
411  do j=js,je
412  call moist_cp(is,ie,isd,ied,jsd,jed, npz, j, k, nwat, sphum, liq_wat, rainwat, &
413  ice_wat, snowwat, graupel, q, qc, cvm, pt(is:ie,j,k) )
414  do i=is,ie
415  pt(i,j,k) = pt(i,j,k) + t_dt(i,j,k)*dt*con_cp/cvm(i)
416  enddo
417  enddo
418  else
419  if ( flagstruct%phys_hydrostatic ) then
420 ! Constant pressure
421  do j=js,je
422  call moist_cp(is,ie,isd,ied,jsd,jed, npz, j, k, nwat, sphum, liq_wat, rainwat, &
423  ice_wat, snowwat, graupel, q, qc, cvm, pt(is:ie,j,k) )
424  do i=is,ie
425  delz(i,j,k) = delz(i,j,k) / pt(i,j,k)
426  pt(i,j,k) = pt(i,j,k) + t_dt(i,j,k)*dt*con_cp/cvm(i)
427  delz(i,j,k) = delz(i,j,k) * pt(i,j,k)
428  enddo
429  enddo
430  else
431  !NOTE: this branch is only valid with no moist physics or with GFDL microphysics
432  if (nwat == 0) then
433  do j=js,je
434  do i=is,ie
435  pt(i,j,k) = pt(i,j,k) + t_dt(i,j,k)*gama_dt
436  enddo
437  enddo
438  else
439  do j=js,je
440  call moist_cv(is,ie,isd,ied,jsd,jed, npz, j, k, nwat, sphum, liq_wat, rainwat, &
441  ice_wat, snowwat, graupel, q, qc, cvm, pt(is:ie,j,k))
442  do i=is,ie
443  pt(i,j,k) = pt(i,j,k) + t_dt(i,j,k)*dt*con_cp/cvm(i)
444  enddo
445  enddo
446  endif
447  endif
448  endif
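! In all branches above, t_dt is treated as a cp_air-based heating rate from the physics;
! it is rescaled by cp_air over the moist heat capacity returned by moist_cp or moist_cv
! (stored in the cvm array), i.e. pt = pt + t_dt*dt*con_cp/cvm(i).  In the
! phys_hydrostatic branch, delz is additionally rescaled in proportion to pt, so the
! layer thickness follows the temperature change at (hydrostatically) constant pressure.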
449 
450 #ifndef GFS_PHYS
451  do j=js,je
452  do i=is,ie
453  ua(i,j,k) = ua(i,j,k) + dt*u_dt(i,j,k)
454  va(i,j,k) = va(i,j,k) + dt*v_dt(i,j,k)
455  enddo
456  enddo
457 #endif
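! Note: when GFS_PHYS is defined, ua and va are not incremented here; they are instead
! recomputed from the updated D-grid winds by the cubed_to_latlon call near the end of
! this routine.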
458 
459  enddo ! k-loop
460 
461 ! [delp, (ua, va), pt, q] updated. Perform nudging if requested
462 !------- nudging of atmospheric variables toward specified data --------
463 
464  ps_dt(:,:) = 0.
465 
466  if ( nudge ) then
467 #if defined (ATMOS_NUDGE)
468 !--------------------------------------------
469 ! All fields will be updated; tendencies added
470 !--------------------------------------------
471  call get_atmos_nudge ( time, dt, is, ie, js, je, &
472  npz, ng, ps(is:ie,js:je), ua(is:ie, js:je,:), &
473  va(is:ie,js:je,:), pt(is:ie,js:je,:), &
474  q(is:ie,js:je,:,:), ps_dt(is:ie,js:je), u_dt(is:ie,js:je,:), &
475  v_dt(is:ie,js:je,:), t_dt(is:ie,js:je,:), &
476  q_dt(is:ie,js:je,:,:) )
477 
478 !--------------
479 ! Update delp
480 !--------------
481  if (do_ps) then
482 !$omp parallel do default(none) shared(is,ie,js,je,npz,bk,delp,ps_dt) private(dbk)
483  do k=1,npz
484  dbk = dt * (bk(k+1) - bk(k))
485  do j=js,je
486  do i=is,ie
487  delp(i,j,k) = delp(i,j,k) + dbk*ps_dt(i,j)
488  enddo
489  enddo
490  enddo
491  endif
492 #elif defined (CLIMATE_NUDGE)
493 !--------------------------------------------
494 ! All fields will be updated; tendencies added
495 !--------------------------------------------
496  call fv_climate_nudge ( time, dt, is, ie, js, je, npz, pfull, &
497  lona(is:ie,js:je), lata(is:ie,js:je), phis(is:ie,js:je), &
498  ptop, ak, bk, &
499  ps(is:ie,js:je), ua(is:ie,js:je,:), va(is:ie,js:je,:), &
500  pt(is:ie,js:je,:), q(is:ie,js:je,:,sphum:sphum), &
501  ps_dt(is:ie,js:je), u_dt(is:ie,js:je,:), &
502  v_dt(is:ie,js:je,:), t_dt(is:ie,js:je,:), &
503  q_dt(is:ie,js:je,:,sphum:sphum) )
504 
505 !--------------
506 ! Update delp
507 !--------------
508  if (do_ps) then
509 !$omp parallel do default(none) shared(is,ie,js,je,npz,dt,bk,delp,ps_dt) private(dbk)
510  do k=1,npz
511  dbk = dt * (bk(k+1) - bk(k))
512  do j=js,je
513  do i=is,ie
514  delp(i,j,k) = delp(i,j,k) + dbk*ps_dt(i,j)
515  enddo
516  enddo
517  enddo
518  endif
519 #elif defined (ADA_NUDGE)
520 ! All fields will be updated except winds; wind tendencies added
521 !$omp parallel do default(shared)
522  do j=js,je
523  do k=2,npz+1
524  do i=is,ie
525  pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1)
526  enddo
527  enddo
528  do i=is,ie
529  ps(i,j) = pe(i,npz+1,j)
530  enddo
531  enddo
532  call fv_ada_nudge ( time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, &
533  zvir, ptop, ak, bk, ts, ps, delp, ua, va, pt, &
534  nwat, q, phis, gridstruct, bd, domain )
535 #else
536 ! All fields will be updated except winds; wind tendencies added
537 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,ps)
538  do j=js,je
539  do k=2,npz+1
540  do i=is,ie
541  pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1)
542  enddo
543  enddo
544  do i=is,ie
545  ps(i,j) = pe(i,npz+1,j)
546  enddo
547  enddo
548  call fv_nwp_nudge ( time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, &
549  zvir, ptop, ak, bk, ts, ps, delp, ua, va, pt, &
550  nwat, q, phis, gridstruct, bd, domain )
551 #endif
552  endif ! end nudging
553 
554  if ( .not.flagstruct%dwind_2d ) then
555 
556  call timing_on('COMM_TOTAL')
557  if ( gridstruct%square_domain ) then
558  call start_group_halo_update(i_pack(1), u_dt, domain, whalo=1, ehalo=1, shalo=1, nhalo=1, complete=.false.)
559  call start_group_halo_update(i_pack(1), v_dt, domain, whalo=1, ehalo=1, shalo=1, nhalo=1, complete=.true.)
560  else
561  call start_group_halo_update(i_pack(1), u_dt, domain, complete=.false.)
562  call start_group_halo_update(i_pack(1), v_dt, domain, complete=.true.)
563  endif
564  call timing_off('COMM_TOTAL')
565  endif
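! The halo exchange started here fills ghost cells for u_dt and v_dt (a single row when
! square_domain is set, the full halo otherwise).  The A-to-D grid interpolation in
! update_dwinds_phys needs tendencies from the neighboring cell across each D-grid edge,
! so this exchange must complete before that call.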
566 
567 !----------------------------------------
568 ! Update pe, peln, pkz, and surface winds
569 !----------------------------------------
570  if ( flagstruct%fv_debug ) then
571  call prt_maxmin('PS_b_update', ps, is, ie, js, je, ng, 1, 0.01)
572  call prt_maxmin('delp_a_update', delp, is, ie, js, je, ng, npz, 0.01)
573  endif
574 
575 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,peln,pk,ps,u_srf,v_srf, &
576 #ifdef MULTI_GASES
577 !$OMP q, &
578 #endif
579 !$OMP ua,va,pkz,hydrostatic)
580  do j=js,je
581  do k=2,npz+1
582  do i=is,ie
583  pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1)
584  peln(i,k,j) = log( pe(i,k,j) )
585  pk(i,j,k) = exp( kappa*peln(i,k,j) )
586  enddo
587  enddo
588 
589  do i=is,ie
590  ps(i,j) = pe(i,npz+1,j)
591  u_srf(i,j) = ua(i,j,npz)
592  v_srf(i,j) = va(i,j,npz)
593  enddo
594 
595  if ( hydrostatic ) then
596  do k=1,npz
597  do i=is,ie
598  pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j)))
599 #ifdef MULTI_GASES
600  pkz(i,j,k) = exp( virqd(q(i,j,k,:))/vicpqd(q(i,j,k,:)) * log( pkz(i,j,k) ) )
601 #endif
602  enddo
603  enddo
604  endif
605  enddo ! j-loop
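! With delp updated by the physics, the pressure diagnostics are rebuilt from the top
! down:  pe(k) = pe(k-1) + delp(k-1),  peln = log(pe),  pk = pe**kappa, and in the
! hydrostatic case the layer-mean quantity
!   pkz = (pk(k+1)-pk(k)) / (kappa*(peln(k+1)-peln(k))),
! which is the finite-volume layer mean of p**kappa.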
606 
607  call timing_on(' Update_dwinds')
608  if ( flagstruct%dwind_2d ) then
609  call update2d_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, &
610  npx,npy,npz,domain)
611  else
612 
613  ! dwind_2d does not appear to be used anywhere, so nesting is handled only for the dwind_2d == .false. case.
614 
615  call timing_on('COMM_TOTAL')
616 
617  call complete_group_halo_update(i_pack(1), domain)
618 
619  if (size(neststruct%child_grids) > 1) then
620  if (gridstruct%nested) then
621  call nested_grid_bc(u_dt, parent_u_dt, neststruct%nest_domain, neststruct%ind_h, neststruct%wt_h, 0, 0, &
622  npx, npy, npz, bd, 1, npx-1, 1, npy-1)
623  call nested_grid_bc(v_dt, parent_v_dt, neststruct%nest_domain, neststruct%ind_h, neststruct%wt_h, 0, 0, &
624  npx, npy, npz, bd, 1, npx-1, 1, npy-1)
625  endif
626  do n=1,size(neststruct%child_grids)
627  if (neststruct%child_grids(n)) then
628  call nested_grid_bc(u_dt, neststruct%nest_domain_all(n), 0, 0)
629  call nested_grid_bc(v_dt, neststruct%nest_domain_all(n), 0, 0)
630  endif
631  enddo
632  endif
633 
634  call timing_off('COMM_TOTAL')
635 !
636 ! For the regional grid, values of u_dt and v_dt must be set at the edges.
637 ! Note from Lucas: The physics only operates on the compute domain.
638 ! One snag is that in fv_update_phys.F90 u_dt and v_dt from the physics need to be interpolated to the D grid,
639 ! which requires BCs for u_dt and v_dt. For the nested grid I can simply get the BCs from the coarse grid, but
640 ! in your case I would recommend just setting the boundary conditions to 0 or to constant values (i.e., the value
641 ! of the cell closest to the boundary).
642  if (gridstruct%regional) then
643  if (is == 1) then
644  do k=1,npz
645  do j = js,je
646  u_dt(is-1,j,k) = u_dt(is,j,k)
647  v_dt(is-1,j,k) = v_dt(is,j,k)
648  enddo
649  enddo
650  endif
651  if (ie == npx) then
652  do k=1,npz
653  do j = js,je
654  u_dt(ie+1,j,k) = u_dt(ie,j,k)
655  v_dt(ie+1,j,k) = v_dt(ie,j,k)
656  enddo
657  enddo
658  endif
659  if (js == 1) then
660  do k=1,npz
661  do i = is,ie
662  u_dt(i,js-1,k) = u_dt(i,js,k)
663  v_dt(i,js-1,k) = v_dt(i,js,k)
664  enddo
665  enddo
666  endif
667  if (je == npy) then
668  do k=1,npz
669  do i = is,ie
670  u_dt(i,je+1,k) = u_dt(i,je,k)
671  v_dt(i,je+1,k) = v_dt(i,je,k)
672  enddo
673  enddo
674  endif
675 !
676 ! corners
677 !
678  do k=1,npz
679  if (is == 1 .and. js == 1) then
680  u_dt(is-1,js-1,k) = u_dt(is,js,k)
681  v_dt(is-1,js-1,k) = v_dt(is,js,k)
682  elseif (is == 1 .and. je == npy) then
683  u_dt(is-1,je+1,k) = u_dt(is,je,k)
684  v_dt(is-1,je+1,k) = v_dt(is,je,k)
685  elseif (ie == npx .and. js == 1) then
686  u_dt(ie+1,js-1,k) = u_dt(ie,js,k) ! nearest interior cell at the SE corner
687  v_dt(ie+1,js-1,k) = v_dt(ie,js,k)
688  elseif (ie == npx .and. je == npy) then
689  u_dt(ie+1,je+1,k) = u_dt(ie,je,k)
690  v_dt(ie+1,je+1,k) = v_dt(ie,je,k)
691  endif
692  enddo
693  endif !regional
694 !
695  call update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain)
696  endif
697  call timing_off(' Update_dwinds')
698 #ifdef GFS_PHYS
699  call cubed_to_latlon(u, v, ua, va, gridstruct, &
700  npx, npy, npz, 1, gridstruct%grid_type, domain, gridstruct%nested, flagstruct%c2l_ord, bd)
701 #endif
702 
703  if ( flagstruct%fv_debug ) then
704  call prt_maxmin('PS_a_update', ps, is, ie, js, je, ng, 1, 0.01)
705  endif
706 
707  end subroutine fv_update_phys
708 
710  subroutine del2_phys(qdt, delp, gridstruct, cd, npx, npy, km, is, ie, js, je, &
711  isd, ied, jsd, jed, ngc, domain)
712 ! This routine is for filtering the physics tendency
713  integer, intent(in):: npx, npy, km
714  integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed, ngc
715  real, intent(in):: cd
716  real, intent(in ):: delp(isd:ied,jsd:jed,km)
717  real, intent(inout):: qdt(is-ngc:ie+ngc,js-ngc:je+ngc,km)
718  type(fv_grid_type), intent(IN), target :: gridstruct
719  type(domain2d), intent(INOUT) :: domain
720 
721  real, pointer, dimension(:,:) :: rarea, dx, dy, sina_u, sina_v, rdxc, rdyc
722  real, pointer, dimension(:,:,:) :: sin_sg
723 !
724  real :: q(isd:ied,jsd:jed,km)
725  real :: fx(is:ie+1,js:je), fy(is:ie,js:je+1)
726  real :: mask(is:ie+1,js:je+1)
727  real :: f1(is:ie+1), f2(js:je+1)
728  real :: damp
729  integer i,j,k
730 
731  rarea => gridstruct%rarea
732  dx => gridstruct%dx
733  dy => gridstruct%dy
734  sina_u => gridstruct%sina_u
735  sina_v => gridstruct%sina_v
736  rdxc => gridstruct%rdxc
737  rdyc => gridstruct%rdyc
738  sin_sg => gridstruct%sin_sg
739 
740 ! Base damping coefficient: cd scaled by the minimum cell area; it is further weighted by the corner mask below.
741  damp = 0.25 * cd * gridstruct%da_min
742 
743 ! Mask defined at corners
744 
745 !$OMP parallel do default(none) shared(is,ie,f1,npx)
746  do i=is,ie+1
747  f1(i) = (1. - sin(real(i-1)/real(npx-1)*pi))**2
748  enddo
749 
750 !$OMP parallel do default(none) shared(is,ie,js,je,f1,f2,npy,mask,damp)
751  do j=js,je+1
752  f2(j) = (1. - sin(real(j-1)/real(npy-1)*pi))**2
753  do i=is,ie+1
754  mask(i,j) = damp * (f1(i) + f2(j))
755  enddo
756  enddo
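! mask = damp*(f1(i)+f2(j)) varies smoothly from ~0 near the tile center (where both
! sine factors reach 1) to 2*damp at the four tile corners (where both sines vanish),
! so the del-2 filter below acts mostly near the cube corners, where the grid is most
! distorted.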
757 
758 ! The mass-weighted tendency from the physics is filtered
759 
760 !$OMP parallel do default(none) shared(is,ie,js,je,km,q,qdt,delp)
761  do k=1,km
762  do j=js,je
763  do i=is,ie
764  q(i,j,k) = qdt(i,j,k)*delp(i,j,k)
765  enddo
766  enddo
767  enddo
768  call timing_on('COMM_TOTAL')
769  call mpp_update_domains(q, domain, complete=.true.)
770  call timing_off('COMM_TOTAL')
771 
772 !$OMP parallel do default(none) shared(is,ie,js,je,km,mask,dy,sina_u,q,rdxc,gridstruct, &
773 !$OMP sin_sg,npx,dx,npy,rdyc,sina_v,qdt,rarea,delp) &
774 !$OMP private(fx, fy)
775  do k=1,km
776  do j=js,je
777  do i=is,ie+1
778  fx(i,j) = &
779  (mask(i,j)+mask(i,j+1))*dy(i,j)*sina_u(i,j)* &
780  (q(i-1,j,k)-q(i,j,k))*rdxc(i,j)
781  enddo
782  if (is == 1 .and. .not. gridstruct%nested) fx(is,j) = &
783  (mask(is,j)+mask(is,j+1))*dy(is,j)*(q(is-1,j,k)-q(is,j,k))*rdxc(is,j)* &
784  0.5*(sin_sg(1,j,1) + sin_sg(0,j,3))
785  if (ie+1==npx .and. .not. gridstruct%nested) fx(ie+1,j) = &
786  (mask(ie+1,j)+mask(ie+1,j+1))*dy(ie+1,j)*(q(ie,j,k)-q(ie+1,j,k))*rdxc(ie+1,j)* &
787  0.5*(sin_sg(npx,j,1) + sin_sg(npx-1,j,3))
788  enddo
789  do j=js,je+1
790  if ((j == 1 .OR. j == npy) .and. .not. gridstruct%nested) then
791  do i=is,ie
792  fy(i,j) = (mask(i,j)+mask(i+1,j))*dx(i,j)*&
793  (q(i,j-1,k)-q(i,j,k))*rdyc(i,j) &
794  *0.5*(sin_sg(i,j,2) + sin_sg(i,j-1,4) )
795  enddo
796  else
797  do i=is,ie
798  fy(i,j) = (mask(i,j)+mask(i+1,j))*dx(i,j)*sina_v(i,j)*&
799  (q(i,j-1,k)-q(i,j,k))*rdyc(i,j)
800  enddo
801  end if
802  enddo
803  do j=js,je
804  do i=is,ie
805  qdt(i,j,k) = qdt(i,j,k) + rarea(i,j)*(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))/delp(i,j,k)
806  enddo
807  enddo
808  enddo
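! The filtered tendency is recovered by dividing the flux divergence by delp:
!   qdt = qdt + rarea*(fx(i)-fx(i+1)+fy(j)-fy(j+1))/delp,
! i.e. a single application of a mask-weighted Laplacian damping of the mass-weighted
! tendency, whose overall strength is set by cd (through damp).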
809 
810  end subroutine del2_phys
811 
814  subroutine update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain)
816 ! Purpose: Transform wind tendencies on the A grid to the D grid for the final update
817 
818  integer, intent(in) :: is, ie, js, je
819  integer, intent(in) :: isd, ied, jsd, jed
820  integer, intent(IN) :: npx,npy, npz
821  real, intent(in) :: dt
822  real, intent(inout) :: u(isd:ied, jsd:jed+1,npz)
823  real, intent(inout) :: v(isd:ied+1,jsd:jed ,npz)
824  real, intent(inout), dimension(isd:ied,jsd:jed,npz):: u_dt, v_dt
825  type(fv_grid_type), intent(IN), target :: gridstruct
826  type(domain2d), intent(INOUT) :: domain
827 
828 ! local:
829  real v3(is-1:ie+1,js-1:je+1,3)
830  real ue(is-1:ie+1,js:je+1,3)
831  real ve(is:ie+1,js-1:je+1,3)
832  real, dimension(is:ie) :: ut1, ut2, ut3
833  real, dimension(js:je) :: vt1, vt2, vt3
834  real dt5, gratio
835  integer i, j, k, m, im2, jm2
836 
837  real(kind=R_GRID), pointer, dimension(:,:,:) :: vlon, vlat
838  real(kind=R_GRID), pointer, dimension(:,:,:,:) :: es, ew
839  real(kind=R_GRID), pointer, dimension(:) :: edge_vect_w, edge_vect_e, edge_vect_s, edge_vect_n
840 
841  es => gridstruct%es
842  ew => gridstruct%ew
843  vlon => gridstruct%vlon
844  vlat => gridstruct%vlat
845 
846  edge_vect_w => gridstruct%edge_vect_w
847  edge_vect_e => gridstruct%edge_vect_e
848  edge_vect_s => gridstruct%edge_vect_s
849  edge_vect_n => gridstruct%edge_vect_n
850 
851  dt5 = 0.5 * dt
852  im2 = (npx-1)/2
853  jm2 = (npy-1)/2
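! Outline of the A-to-D transformation used below: the A-grid tendencies are first turned
! into 3D Cartesian vectors (v3 = u_dt*vlon + v_dt*vlat), averaged to the cell edges
! (ue, ve), corrected along tile edges with the one-sided edge_vect_* weights, and finally
! projected onto the D-grid unit vectors:  u = u + dt5*(ue . es),  v = v + dt5*(ve . ew).
! dt5 = dt/2 because each edge value is the sum (not the mean) of its two neighbors.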
854 
855 !$OMP parallel do default(none) shared(is,ie,js,je,npz,gridstruct,u,dt5,u_dt,v,v_dt, &
856 !$OMP vlon,vlat,jm2,edge_vect_w,npx,edge_vect_e,im2, &
857 !$OMP edge_vect_s,npy,edge_vect_n,es,ew) &
858 !$OMP private(ut1, ut2, ut3, vt1, vt2, vt3, ue, ve, v3)
859  do k=1, npz
860 
861  if ( gridstruct%grid_type > 3 ) then ! Local & one tile configurations
862 
863  do j=js,je+1
864  do i=is,ie
865  u(i,j,k) = u(i,j,k) + dt5*(u_dt(i,j-1,k) + u_dt(i,j,k))
866  enddo
867  enddo
868  do j=js,je
869  do i=is,ie+1
870  v(i,j,k) = v(i,j,k) + dt5*(v_dt(i-1,j,k) + v_dt(i,j,k))
871  enddo
872  enddo
873 
874  else
875 ! Compute 3D wind tendency on A grid
876  do j=js-1,je+1
877  do i=is-1,ie+1
878  v3(i,j,1) = u_dt(i,j,k)*vlon(i,j,1) + v_dt(i,j,k)*vlat(i,j,1)
879  v3(i,j,2) = u_dt(i,j,k)*vlon(i,j,2) + v_dt(i,j,k)*vlat(i,j,2)
880  v3(i,j,3) = u_dt(i,j,k)*vlon(i,j,3) + v_dt(i,j,k)*vlat(i,j,3)
881  enddo
882  enddo
883 
884 ! Interpolate to cell edges
885  do j=js,je+1
886  do i=is-1,ie+1
887  ue(i,j,1) = v3(i,j-1,1) + v3(i,j,1)
888  ue(i,j,2) = v3(i,j-1,2) + v3(i,j,2)
889  ue(i,j,3) = v3(i,j-1,3) + v3(i,j,3)
890  enddo
891  enddo
892 
893  do j=js-1,je+1
894  do i=is,ie+1
895  ve(i,j,1) = v3(i-1,j,1) + v3(i,j,1)
896  ve(i,j,2) = v3(i-1,j,2) + v3(i,j,2)
897  ve(i,j,3) = v3(i-1,j,3) + v3(i,j,3)
898  enddo
899  enddo
900 
901 ! --- E-W edges (for v-wind):
902  if ( is==1 .and. .not. (gridstruct%nested .or. gridstruct%regional)) then
903  i = 1
904  do j=js,je
905  if ( j>jm2 ) then
906  vt1(j) = edge_vect_w(j)*ve(i,j-1,1) + (1.-edge_vect_w(j))*ve(i,j,1)
907  vt2(j) = edge_vect_w(j)*ve(i,j-1,2) + (1.-edge_vect_w(j))*ve(i,j,2)
908  vt3(j) = edge_vect_w(j)*ve(i,j-1,3) + (1.-edge_vect_w(j))*ve(i,j,3)
909  else
910  vt1(j) = edge_vect_w(j)*ve(i,j+1,1) + (1.-edge_vect_w(j))*ve(i,j,1)
911  vt2(j) = edge_vect_w(j)*ve(i,j+1,2) + (1.-edge_vect_w(j))*ve(i,j,2)
912  vt3(j) = edge_vect_w(j)*ve(i,j+1,3) + (1.-edge_vect_w(j))*ve(i,j,3)
913  endif
914  enddo
915  do j=js,je
916  ve(i,j,1) = vt1(j)
917  ve(i,j,2) = vt2(j)
918  ve(i,j,3) = vt3(j)
919  enddo
920  endif
921  if ( (ie+1)==npx .and. .not. (gridstruct%nested .or. gridstruct%regional)) then
922  i = npx
923  do j=js,je
924  if ( j>jm2 ) then
925  vt1(j) = edge_vect_e(j)*ve(i,j-1,1) + (1.-edge_vect_e(j))*ve(i,j,1)
926  vt2(j) = edge_vect_e(j)*ve(i,j-1,2) + (1.-edge_vect_e(j))*ve(i,j,2)
927  vt3(j) = edge_vect_e(j)*ve(i,j-1,3) + (1.-edge_vect_e(j))*ve(i,j,3)
928  else
929  vt1(j) = edge_vect_e(j)*ve(i,j+1,1) + (1.-edge_vect_e(j))*ve(i,j,1)
930  vt2(j) = edge_vect_e(j)*ve(i,j+1,2) + (1.-edge_vect_e(j))*ve(i,j,2)
931  vt3(j) = edge_vect_e(j)*ve(i,j+1,3) + (1.-edge_vect_e(j))*ve(i,j,3)
932  endif
933  enddo
934  do j=js,je
935  ve(i,j,1) = vt1(j)
936  ve(i,j,2) = vt2(j)
937  ve(i,j,3) = vt3(j)
938  enddo
939  endif
940 ! N-S edges (for u-wind):
941  if ( js==1 .and. .not. (gridstruct%nested .or. gridstruct%regional)) then
942  j = 1
943  do i=is,ie
944  if ( i>im2 ) then
945  ut1(i) = edge_vect_s(i)*ue(i-1,j,1) + (1.-edge_vect_s(i))*ue(i,j,1)
946  ut2(i) = edge_vect_s(i)*ue(i-1,j,2) + (1.-edge_vect_s(i))*ue(i,j,2)
947  ut3(i) = edge_vect_s(i)*ue(i-1,j,3) + (1.-edge_vect_s(i))*ue(i,j,3)
948  else
949  ut1(i) = edge_vect_s(i)*ue(i+1,j,1) + (1.-edge_vect_s(i))*ue(i,j,1)
950  ut2(i) = edge_vect_s(i)*ue(i+1,j,2) + (1.-edge_vect_s(i))*ue(i,j,2)
951  ut3(i) = edge_vect_s(i)*ue(i+1,j,3) + (1.-edge_vect_s(i))*ue(i,j,3)
952  endif
953  enddo
954  do i=is,ie
955  ue(i,j,1) = ut1(i)
956  ue(i,j,2) = ut2(i)
957  ue(i,j,3) = ut3(i)
958  enddo
959  endif
960  if ( (je+1)==npy .and. .not. (gridstruct%nested .or. gridstruct%regional)) then
961  j = npy
962  do i=is,ie
963  if ( i>im2 ) then
964  ut1(i) = edge_vect_n(i)*ue(i-1,j,1) + (1.-edge_vect_n(i))*ue(i,j,1)
965  ut2(i) = edge_vect_n(i)*ue(i-1,j,2) + (1.-edge_vect_n(i))*ue(i,j,2)
966  ut3(i) = edge_vect_n(i)*ue(i-1,j,3) + (1.-edge_vect_n(i))*ue(i,j,3)
967  else
968  ut1(i) = edge_vect_n(i)*ue(i+1,j,1) + (1.-edge_vect_n(i))*ue(i,j,1)
969  ut2(i) = edge_vect_n(i)*ue(i+1,j,2) + (1.-edge_vect_n(i))*ue(i,j,2)
970  ut3(i) = edge_vect_n(i)*ue(i+1,j,3) + (1.-edge_vect_n(i))*ue(i,j,3)
971  endif
972  enddo
973  do i=is,ie
974  ue(i,j,1) = ut1(i)
975  ue(i,j,2) = ut2(i)
976  ue(i,j,3) = ut3(i)
977  enddo
978  endif
979  do j=js,je+1
980  do i=is,ie
981  u(i,j,k) = u(i,j,k) + dt5*( ue(i,j,1)*es(1,i,j,1) + &
982  ue(i,j,2)*es(2,i,j,1) + &
983  ue(i,j,3)*es(3,i,j,1) )
984  enddo
985  enddo
986  do j=js,je
987  do i=is,ie+1
988  v(i,j,k) = v(i,j,k) + dt5*( ve(i,j,1)*ew(1,i,j,2) + &
989  ve(i,j,2)*ew(2,i,j,2) + &
990  ve(i,j,3)*ew(3,i,j,2) )
991  enddo
992  enddo
993 ! Update:
994  endif ! end grid_type
995 
996  enddo ! k-loop
997 
998  end subroutine update_dwinds_phys
999 
1002  subroutine update2d_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain)
1004 ! Purpose: Transform wind tendencies on the A grid to the D grid for the final update
1005 
1006  integer, intent(in):: is, ie, js, je
1007  integer, intent(in):: isd, ied, jsd, jed
1008  real, intent(in):: dt
1009  real, intent(inout):: u(isd:ied, jsd:jed+1,npz)
1010  real, intent(inout):: v(isd:ied+1,jsd:jed ,npz)
1011  real, intent(inout), dimension(isd:ied,jsd:jed,npz):: u_dt, v_dt
1012  type(fv_grid_type), intent(IN), target :: gridstruct
1013  integer, intent(IN) :: npx,npy, npz
1014  type(domain2d), intent(INOUT) :: domain
1015 
1016 ! local:
1017  real ut(isd:ied,jsd:jed)
1018  real:: dt5, gratio
1019  integer i, j, k
1020 
1021  real(kind=R_GRID), pointer, dimension(:,:,:) :: vlon, vlat
1022  real(kind=R_GRID), pointer, dimension(:,:,:,:) :: es, ew
1023  real(kind=R_GRID), pointer, dimension(:) :: edge_vect_w, edge_vect_e, edge_vect_s, edge_vect_n
1024  real, pointer, dimension(:,:) :: z11, z12, z21, z22, dya, dxa
1025 
1026  es => gridstruct%es
1027  ew => gridstruct%ew
1028  vlon => gridstruct%vlon
1029  vlat => gridstruct%vlat
1030 
1031  edge_vect_w => gridstruct%edge_vect_w
1032  edge_vect_e => gridstruct%edge_vect_e
1033  edge_vect_s => gridstruct%edge_vect_s
1034  edge_vect_n => gridstruct%edge_vect_n
1035 
1036  z11 => gridstruct%z11
1037  z21 => gridstruct%z21
1038  z12 => gridstruct%z12
1039  z22 => gridstruct%z22
1040 
1041  dxa => gridstruct%dxa
1042  dya => gridstruct%dya
1043 
1044 ! Transform wind tendency on A grid to local "co-variant" components:
1045 
1046 !$OMP parallel do default(none) shared(is,ie,js,je,npz,z11,u_dt,z12,v_dt,z21,z22) &
1047 !$OMP private(ut)
1048  do k=1,npz
1049  do j=js,je
1050  do i=is,ie
1051  ut(i,j) = z11(i,j)*u_dt(i,j,k) + z12(i,j)*v_dt(i,j,k)
1052  v_dt(i,j,k) = z21(i,j)*u_dt(i,j,k) + z22(i,j)*v_dt(i,j,k)
1053  u_dt(i,j,k) = ut(i,j)
1054  enddo
1055  enddo
1056  enddo
1057 ! (u_dt,v_dt) are now on local coordinate system
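! z11, z12, z21, z22 form the 2x2 transformation from the (lon,lat) A-grid tendencies to
! the local covariant components used by the D-grid winds:
!   u_dt_local = z11*u_dt + z12*v_dt,   v_dt_local = z21*u_dt + z22*v_dt.
! The halo update that follows is needed because the D-grid averaging below reaches one
! or more cells outside the compute domain.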
1058  call timing_on('COMM_TOTAL')
1059  call mpp_update_domains(u_dt, v_dt, domain, gridtype=agrid_param)
1060  call timing_off('COMM_TOTAL')
1061 
1062  dt5 = 0.5 * dt
1063 
1064 !$OMP parallel do default(none) shared(is,ie,js,je,npz,gridstruct,u,dt5,u_dt,v,v_dt, &
1065 !$OMP dya,npy,dxa,npx) &
1066 !$OMP private(gratio)
1067  do k=1, npz
1068 
1069  if ( gridstruct%grid_type > 3 .or. gridstruct%nested) then ! Local & one tile configurations
1070 
1071  do j=js,je+1
1072  do i=is,ie
1073  u(i,j,k) = u(i,j,k) + dt5*(u_dt(i,j-1,k) + u_dt(i,j,k))
1074  enddo
1075  enddo
1076  do j=js,je
1077  do i=is,ie+1
1078  v(i,j,k) = v(i,j,k) + dt5*(v_dt(i-1,j,k) + v_dt(i,j,k))
1079  enddo
1080  enddo
1081 
1082  else
1083 
1084 !--------
1085 ! u-wind
1086 !--------
1087 ! Edges:
1088  if ( js==1 ) then
1089  do i=is,ie
1090  gratio = dya(i,2) / dya(i,1)
1091  u(i,1,k) = u(i,1,k) + dt5*((2.+gratio)*(u_dt(i,0,k)+u_dt(i,1,k)) &
1092  -(u_dt(i,-1,k)+u_dt(i,2,k)))/(1.+gratio)
1093  enddo
1094  endif
1095 
1096 ! Interior
1097  do j=max(2,js),min(npy-1,je+1)
1098  do i=is,ie
1099  u(i,j,k) = u(i,j,k) + dt5*(u_dt(i,j-1,k)+u_dt(i,j,k))
1100  enddo
1101  enddo
1102 
1103  if ( (je+1)==npy ) then
1104  do i=is,ie
1105  gratio = dya(i,npy-2) / dya(i,npy-1)
1106  u(i,npy,k) = u(i,npy,k) + dt5*((2.+gratio)*(u_dt(i,npy-1,k)+u_dt(i,npy,k)) &
1107  -(u_dt(i,npy-2,k)+u_dt(i,npy+1,k)))/(1.+gratio)
1108  enddo
1109  endif
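! Along the tile edges the cells straddling the D-grid point do not have uniform widths,
! so a spacing-weighted one-sided form with gratio = dya(i,2)/dya(i,1) replaces the plain
! two-point average, e.g. at the south edge:
!   du = dt5*((2+gratio)*(u_dt(i,0)+u_dt(i,1)) - (u_dt(i,-1)+u_dt(i,2)))/(1+gratio),
! which reduces to the interior two-point value for a locally linear tendency on a
! uniform grid.  The v-wind edge treatment below mirrors this in the x direction.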
1110 
1111 !--------
1112 ! v-wind
1113 !--------
1114 ! West Edges:
1115  if ( is==1 ) then
1116  do j=js,je
1117  gratio = dxa(2,j) / dxa(1,j)
1118  v(1,j,k) = v(1,j,k) + dt5*((2.+gratio)*(v_dt(0,j,k)+v_dt(1,j,k)) &
1119  -(v_dt(-1,j,k)+v_dt(2,j,k)))/(1.+gratio)
1120  enddo
1121  endif
1122 
1123 ! Interior
1124  do j=js,je
1125  do i=max(2,is),min(npx-1,ie+1)
1126  v(i,j,k) = v(i,j,k) + dt5*(v_dt(i-1,j,k)+v_dt(i,j,k))
1127  enddo
1128  enddo
1129 
1130 ! East Edges:
1131  if ( (ie+1)==npx ) then
1132  do j=js,je
1133  gratio = dxa(npx-2,j) / dxa(npx-1,j)
1134  v(npx,j,k) = v(npx,j,k) + dt5*((2.+gratio)*(v_dt(npx-1,j,k)+v_dt(npx,j,k)) &
1135  -(v_dt(npx-2,j,k)+v_dt(npx+1,j,k)))/(1.+gratio)
1136  enddo
1137  endif
1138 
1139  endif ! end grid_type
1140 
1141  enddo ! k-loop
1142 
1143  end subroutine update2d_dwinds_phys
1144 
1145 end module fv_update_phys_mod