FV3DYCORE  Version 2.0.0
fv_tracer2d.F90
1 !***********************************************************************
2 !* GNU Lesser General Public License
3 !*
4 !* This file is part of the FV3 dynamical core.
5 !*
6 !* The FV3 dynamical core is free software: you can redistribute it
7 !* and/or modify it under the terms of the
8 !* GNU Lesser General Public License as published by the
9 !* Free Software Foundation, either version 3 of the License, or
10 !* (at your option) any later version.
11 !*
12 !* The FV3 dynamical core is distributed in the hope that it will be
13 !* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
14 !* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 !* See the GNU General Public License for more details.
16 !*
17 !* You should have received a copy of the GNU Lesser General Public
18 !* License along with the FV3 dynamical core.
19 !* If not, see <http://www.gnu.org/licenses/>.
20 !***********************************************************************
21 
22 !>@brief The module 'fv_tracer2d.F90' performs sub-cycled tracer advection.
24 
25 ! Modules Included:
26 ! <table>
27 ! <tr>
28 ! <th>Module Name</th>
29 ! <th>Functions Included</th>
30 ! </tr>
32 ! <tr>
33 ! <td>boundary_mod</td>
34 ! <td>nested_grid_BC_apply_intT</td>
35 ! </tr>
36 ! <tr>
37 ! <td>fv_arrays_mod</td>
38 ! <td>fv_grid_type, fv_nest_type, fv_atmos_type, fv_grid_bounds_type</td>
39 ! </tr>
40 ! <tr>
42 ! <td>fv_mp_mod</td>
43 ! <td>mp_reduce_max, ng, mp_gather, is_master, group_halo_update_type,
44 ! start_group_halo_update, complete_group_halo_update</td>
45 ! </tr>
46 ! <tr>
47 ! <td>fv_timing_mod</td>
48 ! <td>timing_on, timing_off</td>
49 ! </tr>
50 ! <tr>
51 ! <td>mpp_mod</td>
52 ! <td>mpp_error, FATAL, mpp_broadcast, mpp_send, mpp_recv, mpp_sum, mpp_max</td>
53 ! </tr>
54 ! <tr>
55 ! <td>mpp_domains_mod</td>
56 ! <td>mpp_update_domains, CGRID_NE, domain2d</td>
57 ! </tr>
58 ! <tr>
59 ! <td>tp_core_mod</td>
60 ! <td>fv_tp_2d, copy_corners</td>
61 ! </tr>
62 ! </table>
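!
! Illustrative calling sequence (a sketch; the surrounding driver code is not part of
! this file): the caller starts the grouped tracer halo exchange and passes its handle
! in as 'q_pack'; the routines below complete that exchange and re-start it between
! sub-cycles, e.g.
!
!   call start_group_halo_update(q_pack, q, domain)
!   call tracer_2d(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, &
!                  nq, hord, q_split, dt, id_divg, q_pack, nord_tr, trdm, lim_fac)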
63 
64 module fv_tracer2d_mod
65  use tp_core_mod, only: fv_tp_2d, copy_corners
66  use fv_mp_mod, only: mp_reduce_max
67  use fv_mp_mod, only: mp_gather, is_master
68  use fv_mp_mod, only: group_halo_update_type
69  use fv_mp_mod, only: start_group_halo_update, complete_group_halo_update
70  use mpp_domains_mod, only: mpp_update_domains, cgrid_ne, domain2d
71  use fv_timing_mod, only: timing_on, timing_off
72  use boundary_mod, only: nested_grid_BC_apply_intT
73  use fv_regional_mod, only: regional_boundary_update
74  use fv_regional_mod, only: current_time_in_seconds
75  use fv_arrays_mod, only: fv_grid_type, fv_nest_type, fv_atmos_type, fv_grid_bounds_type
76  use mpp_mod, only: mpp_error, fatal, mpp_broadcast, mpp_send, mpp_recv, mpp_sum, mpp_max
77 
78 implicit none
79 private
80 
81 public :: tracer_2d, tracer_2d_1L, tracer_2d_nested
82 
83 real, allocatable, dimension(:,:,:) :: nest_fx_west_accum, nest_fx_east_accum, nest_fx_south_accum, nest_fx_north_accum
84 
85 contains
86 
87 !>@brief The subroutine 'tracer_2d_1L' performs 2-D horizontal-to-lagrangian transport.
92 subroutine tracer_2d_1l(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, &
93  nq, hord, q_split, dt, id_divg, q_pack, nord_tr, trdm, lim_fac)
94 
95  type(fv_grid_bounds_type), intent(IN) :: bd
96  integer, intent(IN) :: npx
97  integer, intent(IN) :: npy
98  integer, intent(IN) :: npz
99  integer, intent(IN) :: nq
100  integer, intent(IN) :: hord, nord_tr
101  integer, intent(IN) :: q_split
102  integer, intent(IN) :: id_divg
103  real , intent(IN) :: dt, trdm
104  real , intent(IN) :: lim_fac
105  type(group_halo_update_type), intent(inout) :: q_pack
106  real , intent(INOUT) :: q(bd%isd:bd%ied,bd%jsd:bd%jed,npz,nq)
107  real , intent(INOUT) :: dp1(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
108  real , intent(INOUT) :: mfx(bd%is:bd%ie+1,bd%js:bd%je, npz)
109  real , intent(INOUT) :: mfy(bd%is:bd%ie ,bd%js:bd%je+1,npz)
110  real , intent(INOUT) :: cx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz)
111  real , intent(INOUT) :: cy(bd%isd:bd%ied,bd%js :bd%je +1,npz)
112  type(fv_grid_type), intent(IN), target :: gridstruct
113  type(domain2d), intent(INOUT) :: domain
114 
115 ! Local Arrays
116  real :: qn2(bd%isd:bd%ied,bd%jsd:bd%jed,nq)
117  real :: dp2(bd%is:bd%ie,bd%js:bd%je)
118  real :: fx(bd%is:bd%ie+1,bd%js:bd%je )
119  real :: fy(bd%is:bd%ie , bd%js:bd%je+1)
120  real :: ra_x(bd%is:bd%ie,bd%jsd:bd%jed)
121  real :: ra_y(bd%isd:bd%ied,bd%js:bd%je)
122  real :: xfx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz)
123  real :: yfx(bd%isd:bd%ied,bd%js: bd%je+1, npz)
124  real :: cmax(npz)
125  real :: frac
126  integer :: nsplt
127  integer :: i,j,k,it,iq
128 
129  real, pointer, dimension(:,:) :: area, rarea
130  real, pointer, dimension(:,:,:) :: sin_sg
131  real, pointer, dimension(:,:) :: dxa, dya, dx, dy
132 
133  integer :: is, ie, js, je
134  integer :: isd, ied, jsd, jed
135 
136  is = bd%is
137  ie = bd%ie
138  js = bd%js
139  je = bd%je
140  isd = bd%isd
141  ied = bd%ied
142  jsd = bd%jsd
143  jed = bd%jed
144 
145  area => gridstruct%area
146  rarea => gridstruct%rarea
147 
148  sin_sg => gridstruct%sin_sg
149  dxa => gridstruct%dxa
150  dya => gridstruct%dya
151  dx => gridstruct%dx
152  dy => gridstruct%dy
153 
154 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,dxa,dy, &
155 !$OMP sin_sg,cy,yfx,dya,dx,cmax)
156  do k=1,npz
157  do j=jsd,jed
158  do i=is,ie+1
159  if (cx(i,j,k) > 0.) then
160  xfx(i,j,k) = cx(i,j,k)*dxa(i-1,j)*dy(i,j)*sin_sg(i-1,j,3)
161  else
162  xfx(i,j,k) = cx(i,j,k)*dxa(i, j)*dy(i,j)*sin_sg(i, j,1)
163  endif
164  enddo
165  enddo
166  do j=js,je+1
167  do i=isd,ied
168  if (cy(i,j,k) > 0.) then
169  yfx(i,j,k) = cy(i,j,k)*dya(i,j-1)*dx(i,j)*sin_sg(i,j-1,4)
170  else
171  yfx(i,j,k) = cy(i,j,k)*dya(i,j )*dx(i,j)*sin_sg(i,j, 2)
172  endif
173  enddo
174  enddo
175 
176  cmax(k) = 0.
177  if ( k < npz/6 ) then
178  do j=js,je
179  do i=is,ie
180  cmax(k) = max( cmax(k), abs(cx(i,j,k)), abs(cy(i,j,k)) )
181  enddo
182  enddo
183  else
184  do j=js,je
185  do i=is,ie
186  cmax(k) = max( cmax(k), max(abs(cx(i,j,k)),abs(cy(i,j,k)))+1.-sin_sg(i,j,5) )
187  enddo
188  enddo
189  endif
190  enddo ! k-loop
191 
192  call mp_reduce_max(cmax,npz)
193 
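! Each layer k is advanced with nsplt = int(1. + cmax(k)) sub-cycles so that the
! per-substep Courant number stays below one; the Courant numbers and mass fluxes
! of that layer are rescaled by 1/nsplt below.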
194 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx, &
195 !$OMP cy,yfx,mfx,mfy,cmax) &
196 !$OMP private(nsplt, frac)
197  do k=1,npz
198 
199  nsplt = int(1. + cmax(k))
200  if ( nsplt > 1 ) then
201  frac = 1. / real(nsplt)
202  do j=jsd,jed
203  do i=is,ie+1
204  cx(i,j,k) = cx(i,j,k) * frac
205  xfx(i,j,k) = xfx(i,j,k) * frac
206  enddo
207  enddo
208  do j=js,je
209  do i=is,ie+1
210  mfx(i,j,k) = mfx(i,j,k) * frac
211  enddo
212  enddo
213  do j=js,je+1
214  do i=isd,ied
215  cy(i,j,k) = cy(i,j,k) * frac
216  yfx(i,j,k) = yfx(i,j,k) * frac
217  enddo
218  enddo
219  do j=js,je+1
220  do i=is,ie
221  mfy(i,j,k) = mfy(i,j,k) * frac
222  enddo
223  enddo
224  endif
225 
226  enddo
227  call timing_on('COMM_TOTAL')
228  call timing_on('COMM_TRACER')
229  call complete_group_halo_update(q_pack, domain)
230  call timing_off('COMM_TRACER')
231  call timing_off('COMM_TOTAL')
232 
233 ! Begin k-independent tracer transport; this loop cannot be OpenMP-parallelized because of the mpp_update call.
234  do k=1,npz
235 
236 !$OMP parallel do default(none) shared(k,is,ie,js,je,isd,ied,jsd,jed,xfx,area,yfx,ra_x,ra_y)
237  do j=jsd,jed
238  do i=is,ie
239  ra_x(i,j) = area(i,j) + xfx(i,j,k) - xfx(i+1,j,k)
240  enddo
241  if ( j>=js .and. j<=je ) then
242  do i=isd,ied
243  ra_y(i,j) = area(i,j) + yfx(i,j,k) - yfx(i,j+1,k)
244  enddo
245  endif
246  enddo
247 
248  nsplt = int(1. + cmax(k))
249  do it=1,nsplt
250 
251 !$OMP parallel do default(none) shared(k,is,ie,js,je,rarea,mfx,mfy,dp1,dp2)
252  do j=js,je
253  do i=is,ie
254  dp2(i,j) = dp1(i,j,k) + (mfx(i,j,k)-mfx(i+1,j,k)+mfy(i,j,k)-mfy(i,j+1,k))*rarea(i,j)
255  enddo
256  enddo
257 
258 !$OMP parallel do default(none) shared(k,nsplt,it,is,ie,js,je,isd,ied,jsd,jed,npx,npy,cx,xfx,hord,trdm, &
259 !$OMP nord_tr,nq,gridstruct,bd,cy,yfx,mfx,mfy,qn2,q,ra_x,ra_y,dp1,dp2,rarea,lim_fac) &
260 !$OMP private(fx,fy)
261  do iq=1,nq
262  if ( nsplt /= 1 ) then
263  if ( it==1 ) then
264  do j=jsd,jed
265  do i=isd,ied
266  qn2(i,j,iq) = q(i,j,k,iq)
267  enddo
268  enddo
269  endif
270  call fv_tp_2d(qn2(isd,jsd,iq), cx(is,jsd,k), cy(isd,js,k), &
271  npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), &
272  gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k))
273  if ( it < nsplt ) then ! not last call
274  do j=js,je
275  do i=is,ie
276  qn2(i,j,iq) = (qn2(i,j,iq)*dp1(i,j,k)+(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*rarea(i,j))/dp2(i,j)
277  enddo
278  enddo
279  else
280  do j=js,je
281  do i=is,ie
282  q(i,j,k,iq) = (qn2(i,j,iq)*dp1(i,j,k)+(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*rarea(i,j))/dp2(i,j)
283  enddo
284  enddo
285  endif
286  else
287  call fv_tp_2d(q(isd,jsd,k,iq), cx(is,jsd,k), cy(isd,js,k), &
288  npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), &
289  gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k))
290  do j=js,je
291  do i=is,ie
292  q(i,j,k,iq) = (q(i,j,k,iq)*dp1(i,j,k)+(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*rarea(i,j))/dp2(i,j)
293  enddo
294  enddo
295  endif
296  enddo ! tracer-loop
297 
298  if ( it < nsplt ) then ! not last call
299  do j=js,je
300  do i=is,ie
301  dp1(i,j,k) = dp2(i,j)
302  enddo
303  enddo
304  call timing_on('COMM_TOTAL')
305  call timing_on('COMM_TRACER')
306  call mpp_update_domains(qn2, domain)
307  call timing_off('COMM_TRACER')
308  call timing_off('COMM_TOTAL')
309  endif
310  enddo ! time-split loop
311  enddo ! k-loop
312 
313 end subroutine tracer_2d_1l
314 
315 !>@brief The subroutine 'tracer_2d' is the standard routine for sub-cycled tracer advection.
316 subroutine tracer_2d(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, &
317  nq, hord, q_split, dt, id_divg, q_pack, nord_tr, trdm, lim_fac)
319  type(fv_grid_bounds_type), intent(IN) :: bd
320  integer, intent(IN) :: npx
321  integer, intent(IN) :: npy
322  integer, intent(IN) :: npz
323  integer, intent(IN) :: nq
324  integer, intent(IN) :: hord, nord_tr
325  integer, intent(IN) :: q_split
326  integer, intent(IN) :: id_divg
327  real , intent(IN) :: dt, trdm
328  real , intent(IN) :: lim_fac
329  type(group_halo_update_type), intent(inout) :: q_pack
330  real , intent(INOUT) :: q(bd%isd:bd%ied,bd%jsd:bd%jed,npz,nq)
331  real , intent(INOUT) :: dp1(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
332  real , intent(INOUT) :: mfx(bd%is:bd%ie+1,bd%js:bd%je, npz)
333  real , intent(INOUT) :: mfy(bd%is:bd%ie ,bd%js:bd%je+1,npz)
334  real , intent(INOUT) :: cx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz)
335  real , intent(INOUT) :: cy(bd%isd:bd%ied,bd%js :bd%je +1,npz)
336  type(fv_grid_type), intent(IN), target :: gridstruct
337  type(domain2d), intent(INOUT) :: domain
338 
339 ! Local Arrays
340  real :: dp2(bd%is:bd%ie,bd%js:bd%je)
341  real :: fx(bd%is:bd%ie+1,bd%js:bd%je )
342  real :: fy(bd%is:bd%ie , bd%js:bd%je+1)
343  real :: ra_x(bd%is:bd%ie,bd%jsd:bd%jed)
344  real :: ra_y(bd%isd:bd%ied,bd%js:bd%je)
345  real :: xfx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz)
346  real :: yfx(bd%isd:bd%ied,bd%js: bd%je+1, npz)
347  real :: cmax(npz)
348  real :: c_global
349  real :: frac, rdt
350  integer :: ksplt(npz)
351  integer :: nsplt
352  integer :: i,j,k,it,iq
353 
354  real, pointer, dimension(:,:) :: area, rarea
355  real, pointer, dimension(:,:,:) :: sin_sg
356  real, pointer, dimension(:,:) :: dxa, dya, dx, dy
357 
358  integer :: is, ie, js, je
359  integer :: isd, ied, jsd, jed
360 
361  is = bd%is
362  ie = bd%ie
363  js = bd%js
364  je = bd%je
365  isd = bd%isd
366  ied = bd%ied
367  jsd = bd%jsd
368  jed = bd%jed
369 
370  area => gridstruct%area
371  rarea => gridstruct%rarea
372 
373  sin_sg => gridstruct%sin_sg
374  dxa => gridstruct%dxa
375  dya => gridstruct%dya
376  dx => gridstruct%dx
377  dy => gridstruct%dy
378 
379 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,dxa,dy, &
380 !$OMP sin_sg,cy,yfx,dya,dx,cmax,q_split,ksplt)
381  do k=1,npz
382  do j=jsd,jed
383  do i=is,ie+1
384  if (cx(i,j,k) > 0.) then
385  xfx(i,j,k) = cx(i,j,k)*dxa(i-1,j)*dy(i,j)*sin_sg(i-1,j,3)
386  else
387  xfx(i,j,k) = cx(i,j,k)*dxa(i,j)*dy(i,j)*sin_sg(i,j,1)
388  endif
389  enddo
390  enddo
391  do j=js,je+1
392  do i=isd,ied
393  if (cy(i,j,k) > 0.) then
394  yfx(i,j,k) = cy(i,j,k)*dya(i,j-1)*dx(i,j)*sin_sg(i,j-1,4)
395  else
396  yfx(i,j,k) = cy(i,j,k)*dya(i,j)*dx(i,j)*sin_sg(i,j,2)
397  endif
398  enddo
399  enddo
400 
401  if ( q_split == 0 ) then
402  cmax(k) = 0.
403  if ( k < npz/6 ) then
404  do j=js,je
405  do i=is,ie
406  cmax(k) = max( cmax(k), abs(cx(i,j,k)), abs(cy(i,j,k)) )
407  enddo
408  enddo
409  else
410  do j=js,je
411  do i=is,ie
412  cmax(k) = max( cmax(k), max(abs(cx(i,j,k)),abs(cy(i,j,k)))+1.-sin_sg(i,j,5) )
413  enddo
414  enddo
415  endif
416  endif
417  ksplt(k) = 1
418 
419  enddo
420 
421 !--------------------------------------------------------------------------------
422 
423 ! Determine global nsplt:
424  if ( q_split == 0 ) then
425  call mp_reduce_max(cmax,npz)
426 ! Find the global maximum Courant number and define nsplt to scale cx, cy, mfx, mfy.
427  c_global = cmax(1)
428  if ( npz /= 1 ) then ! if NOT shallow water test case
429  do k=2,npz
430  c_global = max(cmax(k), c_global)
431  enddo
432  endif
433  nsplt = int(1. + c_global)
434  if ( is_master() .and. nsplt > 4 ) write(*,*) 'Tracer_2d_split=', nsplt, c_global
435  else
436  nsplt = q_split
437  endif
438 
439 !--------------------------------------------------------------------------------
440 
441  if( nsplt /= 1 ) then
442 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,mfx,cy,yfx,mfy,cmax,nsplt,ksplt) &
443 !$OMP private( frac )
444  do k=1,npz
445 
446 #ifdef GLOBAL_CFL
447  ksplt(k) = nsplt
448 #else
449  ksplt(k) = int(1. + cmax(k))
450 #endif
451  frac = 1. / real(ksplt(k))
452 
453  do j=jsd,jed
454  do i=is,ie+1
455  cx(i,j,k) = cx(i,j,k) * frac
456  xfx(i,j,k) = xfx(i,j,k) * frac
457  enddo
458  enddo
459  do j=js,je
460  do i=is,ie+1
461  mfx(i,j,k) = mfx(i,j,k) * frac
462  enddo
463  enddo
464 
465  do j=js,je+1
466  do i=isd,ied
467  cy(i,j,k) = cy(i,j,k) * frac
468  yfx(i,j,k) = yfx(i,j,k) * frac
469  enddo
470  enddo
471  do j=js,je+1
472  do i=is,ie
473  mfy(i,j,k) = mfy(i,j,k) * frac
474  enddo
475  enddo
476 
477  enddo
478  endif
479 
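! Outer time-split loop: all layers share the same number of outer iterations (nsplt),
! but a given layer k is advanced only while it <= ksplt(k), its own sub-cycle count.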
480  do it=1,nsplt
481  call timing_on('COMM_TOTAL')
482  call timing_on('COMM_TRACER')
483  call complete_group_halo_update(q_pack, domain)
484  call timing_off('COMM_TRACER')
485  call timing_off('COMM_TOTAL')
486 
487 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,mfx,mfy,rarea,nq,ksplt,&
488 !$OMP area,xfx,yfx,q,cx,cy,npx,npy,hord,gridstruct,bd,it,nsplt,nord_tr,trdm,lim_fac) &
489 !$OMP private(dp2, ra_x, ra_y, fx, fy)
490  do k=1,npz
491 
492  if ( it .le. ksplt(k) ) then
493 
494  do j=js,je
495  do i=is,ie
496  dp2(i,j) = dp1(i,j,k) + (mfx(i,j,k)-mfx(i+1,j,k)+mfy(i,j,k)-mfy(i,j+1,k))*rarea(i,j)
497  enddo
498  enddo
499 
500  do j=jsd,jed
501  do i=is,ie
502  ra_x(i,j) = area(i,j) + xfx(i,j,k) - xfx(i+1,j,k)
503  enddo
504  enddo
505  do j=js,je
506  do i=isd,ied
507  ra_y(i,j) = area(i,j) + yfx(i,j,k) - yfx(i,j+1,k)
508  enddo
509  enddo
510 
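! Finite-volume update for each tracer: the new mixing ratio is the old tracer mass
! (q*dp1) plus the convergence of the horizontal fluxes fx, fy, divided by the
! updated pressure thickness dp2.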
511  do iq=1,nq
512  if ( it==1 .and. trdm>1.e-4 ) then
513  call fv_tp_2d(q(isd,jsd,k,iq), cx(is,jsd,k), cy(isd,js,k), &
514  npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), &
515  gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k), &
516  mass=dp1(isd,jsd,k), nord=nord_tr, damp_c=trdm)
517  else
518  call fv_tp_2d(q(isd,jsd,k,iq), cx(is,jsd,k), cy(isd,js,k), &
519  npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), &
520  gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k))
521  endif
522  do j=js,je
523  do i=is,ie
524  q(i,j,k,iq) = ( q(i,j,k,iq)*dp1(i,j,k) + &
525  (fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*rarea(i,j) )/dp2(i,j)
526  enddo
527  enddo
528  enddo
529 
530  if ( it /= nsplt ) then
531  do j=js,je
532  do i=is,ie
533  dp1(i,j,k) = dp2(i,j)
534  enddo
535  enddo
536  endif
537 
538  endif ! ksplt
539 
540  enddo ! npz
541 
542  if ( it /= nsplt ) then
543  call timing_on('COMM_TOTAL')
544  call timing_on('COMM_TRACER')
545  call start_group_halo_update(q_pack, q, domain)
546  call timing_off('COMM_TRACER')
547  call timing_off('COMM_TOTAL')
548  endif
549 
550  enddo ! nsplt
551 
552 
553 end subroutine tracer_2d
554 
555 
556 subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, &
557  nq, hord, q_split, dt, id_divg, q_pack, nord_tr, trdm, &
558  k_split, neststruct, parent_grid, n_map, lim_fac)
560  type(fv_grid_bounds_type), intent(IN) :: bd
561  integer, intent(IN) :: npx
562  integer, intent(IN) :: npy
563  integer, intent(IN) :: npz
564  integer, intent(IN) :: nq
565  integer, intent(IN) :: hord, nord_tr
566  integer, intent(IN) :: q_split, k_split, n_map
567  integer, intent(IN) :: id_divg
568  real , intent(IN) :: dt, trdm
569  real , intent(IN) :: lim_fac
570  type(group_halo_update_type), intent(inout) :: q_pack
571  real , intent(INOUT) :: q(bd%isd:bd%ied,bd%jsd:bd%jed,npz,nq)
572  real , intent(INOUT) :: dp1(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
573  real , intent(INOUT) :: mfx(bd%is:bd%ie+1,bd%js:bd%je, npz)
574  real , intent(INOUT) :: mfy(bd%is:bd%ie ,bd%js:bd%je+1,npz)
575  real , intent(INOUT) :: cx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz)
576  real , intent(INOUT) :: cy(bd%isd:bd%ied,bd%js :bd%je +1,npz)
577  type(fv_grid_type), intent(IN), target :: gridstruct
578  type(fv_nest_type), intent(INOUT) :: neststruct
579  type(fv_atmos_type), pointer, intent(IN) :: parent_grid
580  type(domain2d), intent(INOUT) :: domain
581 
582 ! Local Arrays
583  real :: dp2(bd%is:bd%ie,bd%js:bd%je)
584  real :: fx(bd%is:bd%ie+1,bd%js:bd%je )
585  real :: fy(bd%is:bd%ie , bd%js:bd%je+1)
586  real :: ra_x(bd%is:bd%ie,bd%jsd:bd%jed)
587  real :: ra_y(bd%isd:bd%ied,bd%js:bd%je)
588  real :: xfx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz)
589  real :: yfx(bd%isd:bd%ied,bd%js: bd%je+1, npz)
590  real :: cmax(npz)
591  real :: cmax_t
592  real :: c_global
593  real :: frac, rdt
594  real :: recip_nsplt, reg_bc_update_time
595  integer :: nsplt, nsplt_parent, msg_split_steps = 1
596  integer :: i,j,k,it,iq
597 
598  real, pointer, dimension(:,:) :: area, rarea
599  real, pointer, dimension(:,:,:) :: sin_sg
600  real, pointer, dimension(:,:) :: dxa, dya, dx, dy
601 
602  integer :: is, ie, js, je
603  integer :: isd, ied, jsd, jed
604 
605  is = bd%is
606  ie = bd%ie
607  js = bd%js
608  je = bd%je
609  isd = bd%isd
610  ied = bd%ied
611  jsd = bd%jsd
612  jed = bd%jed
613 
614  area => gridstruct%area
615  rarea => gridstruct%rarea
616 
617  sin_sg => gridstruct%sin_sg
618  dxa => gridstruct%dxa
619  dya => gridstruct%dya
620  dx => gridstruct%dx
621  dy => gridstruct%dy
622 
623 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,dxa,dy, &
624 !$OMP sin_sg,cy,yfx,dya,dx)
625  do k=1,npz
626  do j=jsd,jed
627  do i=is,ie+1
628  if (cx(i,j,k) > 0.) then
629  xfx(i,j,k) = cx(i,j,k)*dxa(i-1,j)*dy(i,j)*sin_sg(i-1,j,3)
630  else
631  xfx(i,j,k) = cx(i,j,k)*dxa(i,j)*dy(i,j)*sin_sg(i,j,1)
632  endif
633  enddo
634  enddo
635  do j=js,je+1
636  do i=isd,ied
637  if (cy(i,j,k) > 0.) then
638  yfx(i,j,k) = cy(i,j,k)*dya(i,j-1)*dx(i,j)*sin_sg(i,j-1,4)
639  else
640  yfx(i,j,k) = cy(i,j,k)*dya(i,j)*dx(i,j)*sin_sg(i,j,2)
641  endif
642  enddo
643  enddo
644  enddo
645 
646 !--------------------------------------------------------------------------------
647  if ( q_split == 0 ) then
648 ! Determine nsplt
649 
650 !$OMP parallel do default(none) shared(is,ie,js,je,npz,cmax,cx,cy,sin_sg) &
651 !$OMP private(cmax_t )
652  do k=1,npz
653  cmax(k) = 0.
654  if ( k < 4 ) then
655 ! Top layers: C < max( abs(c_x), abs(c_y) )
656  do j=js,je
657  do i=is,ie
658  cmax_t = max( abs(cx(i,j,k)), abs(cy(i,j,k)) )
659  cmax(k) = max( cmax_t, cmax(k) )
660  enddo
661  enddo
662  else
663  do j=js,je
664  do i=is,ie
665  cmax_t = max(abs(cx(i,j,k)), abs(cy(i,j,k))) + 1.-sin_sg(i,j,5)
666  cmax(k) = max( cmax_t, cmax(k) )
667  enddo
668  enddo
669  endif
670  enddo
671  call mp_reduce_max(cmax,npz)
672 
673 ! Find the global maximum Courant number and define nsplt to scale cx, cy, mfx, mfy.
674  c_global = cmax(1)
675  if ( npz /= 1 ) then ! if NOT shallow water test case
676  do k=2,npz
677  c_global = max(cmax(k), c_global)
678  enddo
679  endif
680  nsplt = int(1. + c_global)
681  if ( is_master() .and. nsplt > 3 ) write(*,*) 'Tracer_2d_split=', nsplt, c_global
682  else
683  nsplt = q_split
684  if (gridstruct%nested .and. neststruct%nestbctype > 1) msg_split_steps = max(q_split/parent_grid%flagstruct%q_split,1)
685  endif
686 
687 !--------------------------------------------------------------------------------
688 
689  frac = 1. / real(nsplt)
690  recip_nsplt = 1. / real(nsplt)
691 
692  if( nsplt /= 1 ) then
693 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,frac,xfx,mfx,cy,yfx,mfy)
694  do k=1,npz
695  do j=jsd,jed
696  do i=is,ie+1
697  cx(i,j,k) = cx(i,j,k) * frac
698  xfx(i,j,k) = xfx(i,j,k) * frac
699  enddo
700  enddo
701  do j=js,je
702  do i=is,ie+1
703  mfx(i,j,k) = mfx(i,j,k) * frac
704  enddo
705  enddo
706 
707  do j=js,je+1
708  do i=isd,ied
709  cy(i,j,k) = cy(i,j,k) * frac
710  yfx(i,j,k) = yfx(i,j,k) * frac
711  enddo
712  enddo
713 
714  do j=js,je+1
715  do i=is,ie
716  mfy(i,j,k) = mfy(i,j,k) * frac
717  enddo
718  enddo
719  enddo
720  endif
721 
722 
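! Each sub-cycle first completes the tracer halo exchange and then, for nested or
! regional domains, refreshes the tracer lateral boundary conditions before the
! transport step.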
723  do it=1,nsplt
724  if ( gridstruct%nested ) then
725  neststruct%tracer_nest_timestep = neststruct%tracer_nest_timestep + 1
726  end if
727  call timing_on('COMM_TOTAL')
728  call timing_on('COMM_TRACER')
729  call complete_group_halo_update(q_pack, domain)
730  call timing_off('COMM_TRACER')
731  call timing_off('COMM_TOTAL')
732 
733  if (gridstruct%nested) then
734  do iq=1,nq
735  call nested_grid_bc_apply_intt(q(isd:ied,jsd:jed,:,iq), &
736  0, 0, npx, npy, npz, bd, &
737  real(neststruct%tracer_nest_timestep)+real(nsplt*k_split), real(nsplt*k_split), &
738  neststruct%q_BC(iq), bctype=neststruct%nestbctype )
739  enddo
740  endif
741 
742  if (gridstruct%regional) then
743  reg_bc_update_time=current_time_in_seconds+ (real(n_map-1) + real(it-1)*recip_nsplt)*dt
744  do iq=1,nq
745  call regional_boundary_update(q(:,:,:,iq), 'q', &
746  isd, ied, jsd, jed, npz, &
747  is, ie, js, je, &
748  isd, ied, jsd, jed, &
749  reg_bc_update_time, &
750  iq )
751  enddo
752  call timing_on('COMM_TOTAL')
753  call mpp_update_domains(q, domain, complete=.true.)
754  call timing_off('COMM_TOTAL')
755  endif
756 
757 
758 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,mfx,mfy,rarea,nq, &
759 !$OMP area,xfx,yfx,q,cx,cy,npx,npy,hord,gridstruct,bd,it,nsplt,nord_tr,trdm,lim_fac) &
760 !$OMP private(dp2, ra_x, ra_y, fx, fy)
761  do k=1,npz
762 
763  do j=js,je
764  do i=is,ie
765  dp2(i,j) = dp1(i,j,k) + (mfx(i,j,k)-mfx(i+1,j,k)+mfy(i,j,k)-mfy(i,j+1,k))*rarea(i,j)
766  enddo
767  enddo
768 
769  do j=jsd,jed
770  do i=is,ie
771  ra_x(i,j) = area(i,j) + xfx(i,j,k) - xfx(i+1,j,k)
772  enddo
773  enddo
774  do j=js,je
775  do i=isd,ied
776  ra_y(i,j) = area(i,j) + yfx(i,j,k) - yfx(i,j+1,k)
777  enddo
778  enddo
779 
780  do iq=1,nq
781  if ( it==1 .and. trdm>1.e-4 ) then
782  call fv_tp_2d(q(isd,jsd,k,iq), cx(is,jsd,k), cy(isd,js,k), &
783  npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), &
784  gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k), &
785  mass=dp1(isd,jsd,k), nord=nord_tr, damp_c=trdm)
786  else
787  call fv_tp_2d(q(isd,jsd,k,iq), cx(is,jsd,k), cy(isd,js,k), &
788  npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), &
789  gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k))
790  endif
791  do j=js,je
792  do i=is,ie
793  q(i,j,k,iq) = ( q(i,j,k,iq)*dp1(i,j,k) + &
794  (fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*rarea(i,j) )/dp2(i,j)
795  enddo
796  enddo
797  enddo
798  enddo ! npz
799 
800  if ( it /= nsplt ) then
801  call timing_on('COMM_TOTAL')
802  call timing_on('COMM_TRACER')
803  call start_group_halo_update(q_pack, q, domain)
804  call timing_off('COMM_TRACER')
805  call timing_off('COMM_TOTAL')
806  endif
807 
808  enddo ! nsplt
809 
810  if ( id_divg > 0 ) then
811  rdt = 1./(frac*dt)
812 
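! When id_divg > 0, dp1 is recycled as a diagnostic: the divergence of the advective
! fluxes xfx, yfx per unit area, normalized by the sub-cycle time step frac*dt.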
813 !$OMP parallel do default(none) shared(is,ie,js,je,npz,dp1,xfx,yfx,rarea,rdt)
814  do k=1,npz
815  do j=js,je
816  do i=is,ie
817  dp1(i,j,k) = (xfx(i+1,j,k)-xfx(i,j,k) + yfx(i,j+1,k)-yfx(i,j,k))*rarea(i,j)*rdt
818  enddo
819  enddo
820  enddo
821  endif
822 
823  end subroutine tracer_2d_nested
824 
825 end module fv_tracer2d_mod