FV3DYCORE Version 1.0.0
fv_mp_mod.F90
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the FV3 dynamical core.
!*
!* The FV3 dynamical core is free software: you can redistribute it
!* and/or modify it under the terms of the
!* GNU Lesser General Public License as published by the
!* Free Software Foundation, either version 3 of the License, or
!* (at your option) any later version.
!*
!* The FV3 dynamical core is distributed in the hope that it will be
!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
!* See the GNU General Public License for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with the FV3 dynamical core.
!* If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************

 module fv_mp_mod

! <table>
! <tr>
! <th>Module Name</th>
! <th>Functions Included</th>
! </tr>
! <tr>
! <td>ensemble_manager_mod</td>
! <td>get_ensemble_id</td>
! </tr>
! <tr>
! <td>fms_mod</td>
! <td>fms_init, fms_end</td>
! </tr>
! <tr>
! <td>fms_io_mod</td>
! <td>set_domain</td>
! </tr>
! <tr>
! <td>fv_arrays_mod</td>
! <td>fv_atmos_type</td>
! </tr>
! <tr>
! <td>mpp_mod</td>
! <td>FATAL, MPP_DEBUG, NOTE, MPP_CLOCK_SYNC, MPP_CLOCK_DETAILED, WARNING,
! mpp_pe, mpp_npes, mpp_node, mpp_root_pe, mpp_error, mpp_set_warn_level,
! mpp_declare_pelist, mpp_set_current_pelist, mpp_sync, mpp_clock_begin,
! mpp_clock_end, mpp_clock_id, mpp_chksum, stdout, stderr, mpp_broadcast,
! mpp_send, mpp_recv, mpp_sync_self, EVENT_RECV, mpp_gather, mpp_get_current_pelist</td>
! </tr>
! <tr>
! <td>mpp_domains_mod</td>
! <td>GLOBAL_DATA_DOMAIN, BITWISE_EXACT_SUM, BGRID_NE, FOLD_NORTH_EDGE, CGRID_NE,
! MPP_DOMAIN_TIME, CYCLIC_GLOBAL_DOMAIN, NUPDATE, EUPDATE, XUPDATE, YUPDATE, SCALAR_PAIR,
! domain1D, domain2D, DomainCommunicator2D, mpp_get_ntile_count, mpp_get_compute_domain,
! mpp_get_data_domain, mpp_global_field, mpp_global_sum, mpp_global_max, mpp_global_min,
! mpp_domains_init, mpp_domains_exit, mpp_broadcast_domain, mpp_check_field,
! mpp_define_layout, mpp_get_neighbor_pe, mpp_define_mosaic, mpp_define_io_domain,
! NORTH, NORTH_EAST, EAST, SOUTH_EAST, SOUTH, SOUTH_WEST, WEST, NORTH_WEST,
! mpp_start_group_update, mpp_complete_group_update, mpp_create_group_update,
! mpp_reset_group_update_field, group_halo_update_type => mpp_group_update_type,
! mpp_define_domains, mpp_define_nest_domains, nest_domain_type,
! mpp_get_C2F_index, mpp_update_nest_fine, mpp_get_F2C_index, mpp_update_nest_coarse,
! mpp_get_domain_shift</td>
! </tr>
! </table>

#if defined(SPMD)
! !USES:
 use fms_mod, only : fms_init, fms_end
 use mpp_mod, only : fatal, mpp_debug, note, mpp_clock_sync, mpp_clock_detailed, warning
 use mpp_mod, only : mpp_pe, mpp_npes, mpp_node, mpp_root_pe, mpp_error, mpp_set_warn_level
 use mpp_mod, only : mpp_declare_pelist, mpp_set_current_pelist, mpp_sync
 use mpp_mod, only : mpp_clock_begin, mpp_clock_end, mpp_clock_id
 use mpp_mod, only : mpp_chksum, stdout, stderr, mpp_broadcast
 use mpp_mod, only : mpp_send, mpp_recv, mpp_sync_self, event_recv, mpp_gather
 use mpp_domains_mod, only : global_data_domain, bitwise_exact_sum, bgrid_ne, fold_north_edge, cgrid_ne
 use mpp_domains_mod, only : mpp_domain_time, cyclic_global_domain, nupdate, eupdate, xupdate, yupdate, scalar_pair
 use mpp_domains_mod, only : domain1d, domain2d, domaincommunicator2d, mpp_get_ntile_count
 use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain
 use mpp_domains_mod, only : mpp_global_field, mpp_global_sum, mpp_global_max, mpp_global_min
 use mpp_domains_mod, only : mpp_domains_init, mpp_domains_exit, mpp_broadcast_domain
 use mpp_domains_mod, only : mpp_check_field, mpp_define_layout
 use mpp_domains_mod, only : mpp_get_neighbor_pe, mpp_define_mosaic, mpp_define_io_domain
 use mpp_domains_mod, only : north, north_east, east, south_east
 use mpp_domains_mod, only : south, south_west, west, north_west
 use mpp_domains_mod, only : mpp_start_group_update, mpp_complete_group_update
 use mpp_domains_mod, only : mpp_group_update_initialized, mpp_do_group_update
 use mpp_domains_mod, only : mpp_create_group_update, mpp_reset_group_update_field
 use mpp_domains_mod, only : group_halo_update_type => mpp_group_update_type
 use mpp_parameter_mod, only : wupdate, eupdate, supdate, nupdate, xupdate, yupdate
 use fv_arrays_mod, only: fv_atmos_type
 use fms_io_mod, only: set_domain
 use mpp_mod, only : mpp_get_current_pelist
 use mpp_domains_mod, only : mpp_define_domains
 use mpp_domains_mod, only : mpp_define_nest_domains, nest_domain_type
 use mpp_domains_mod, only : mpp_get_c2f_index, mpp_update_nest_fine
 use mpp_domains_mod, only : mpp_get_f2c_index, mpp_update_nest_coarse
 use mpp_domains_mod, only : mpp_get_domain_shift
 use ensemble_manager_mod, only : get_ensemble_id

 implicit none
 private

 integer, parameter:: ng = 3 ! Number of ghost zones required

#include "mpif.h"
 integer, parameter :: XDir=1
 integer, parameter :: YDir=2
 integer :: commglobal, ierror, npes

 !need tile as a module variable so that some of the mp_ routines below will work
 integer::tile

 integer, allocatable, dimension(:) :: npes_tile, tile1, tile2
 integer, allocatable, dimension(:) :: istart1, iend1, jstart1, jend1
 integer, allocatable, dimension(:) :: istart2, iend2, jstart2, jend2
 integer, allocatable, dimension(:,:) :: layout2D, global_indices
 integer :: numthreads, gid, masterproc

 logical :: master

 type(nest_domain_type), allocatable, dimension(:) :: nest_domain
 integer :: this_pe_grid = 0
 integer, EXTERNAL :: omp_get_thread_num, omp_get_num_threads

 integer :: npes_this_grid

 !! CLEANUP: these are currently here for convenience
 !! Right now calling switch_current_atm sets these to the value on the "current" grid
 !! (as well as changing the "current" domain)
 integer :: is, ie, js, je
 integer :: isd, ied, jsd, jed
 integer :: isc, iec, jsc, jec

#ifdef CCPP
 public commglobal
#endif
 public mp_start, mp_assign_gid, mp_barrier, mp_stop!, npes
 public domain_decomp, mp_bcst, mp_reduce_max, mp_reduce_sum, mp_gather
 public mp_reduce_min
 public fill_corners, xdir, ydir
 public switch_current_domain, switch_current_atm, broadcast_domains
 public is_master, setup_master
 !The following variables are declared public by this module for convenience;
 !they will need to be switched when domains are switched
!!! CLEANUP: ng is a PARAMETER and is OK to be shared by a use statement
 public is, ie, js, je, isd, ied, jsd, jed, isc, iec, jsc, jec, ng
 public start_group_halo_update, complete_group_halo_update
 public group_halo_update_type

 interface start_group_halo_update
   module procedure start_var_group_update_2d
   module procedure start_var_group_update_3d
   module procedure start_var_group_update_4d
   module procedure start_vector_group_update_2d
   module procedure start_vector_group_update_3d
 end interface start_group_halo_update
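
! A hedged usage sketch (not part of this file): the start/complete pair
! lets halo communication overlap with interior computation. The group,
! field, and domain names below are illustrative only.
!
!   type(group_halo_update_type) :: q_pack
!   call start_group_halo_update(q_pack, pt, domain, complete=.true.)
!   ! ... compute on the interior while halos are in flight ...
!   call complete_group_halo_update(q_pack, domain)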

 INTERFACE fill_corners
   MODULE PROCEDURE fill_corners_2d_r4
   MODULE PROCEDURE fill_corners_2d_r8
   MODULE PROCEDURE fill_corners_xy_2d_r4
   MODULE PROCEDURE fill_corners_xy_2d_r8
   MODULE PROCEDURE fill_corners_xy_3d_r4
   MODULE PROCEDURE fill_corners_xy_3d_r8
 END INTERFACE
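
! Hedged example of the fill_corners generic (array name assumed): the
! generic resolves on rank/kind and the FILL/grid flags, so for a B-grid
! scalar one might fill corner halos along the x sweep with
!   call fill_corners(q, npx, npy, XDir, BGRID=.true.)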

 INTERFACE fill_corners_agrid
   MODULE PROCEDURE fill_corners_agrid_r4
   MODULE PROCEDURE fill_corners_agrid_r8
 END INTERFACE

 INTERFACE fill_corners_cgrid
   MODULE PROCEDURE fill_corners_cgrid_r4
   MODULE PROCEDURE fill_corners_cgrid_r8
 END INTERFACE

 INTERFACE fill_corners_dgrid
   MODULE PROCEDURE fill_corners_dgrid_r4
   MODULE PROCEDURE fill_corners_dgrid_r8
 END INTERFACE

 INTERFACE mp_bcst
   MODULE PROCEDURE mp_bcst_i
   MODULE PROCEDURE mp_bcst_r4
   MODULE PROCEDURE mp_bcst_r8
   MODULE PROCEDURE mp_bcst_1d_r4
   MODULE PROCEDURE mp_bcst_1d_r8
   MODULE PROCEDURE mp_bcst_2d_r4
   MODULE PROCEDURE mp_bcst_2d_r8
   MODULE PROCEDURE mp_bcst_3d_r4
   MODULE PROCEDURE mp_bcst_3d_r8
   MODULE PROCEDURE mp_bcst_4d_r4
   MODULE PROCEDURE mp_bcst_4d_r8
   MODULE PROCEDURE mp_bcst_1d_i
   MODULE PROCEDURE mp_bcst_2d_i
   MODULE PROCEDURE mp_bcst_3d_i
   MODULE PROCEDURE mp_bcst_4d_i
 END INTERFACE

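! Hedged sketch: mp_bcst broadcasts its argument from masterproc to all
! PEs in commglobal, with the generic resolving on type and rank. The
! variable name below is illustrative.
!
!   integer :: nsteps
!   if (is_master()) nsteps = 10
!   call mp_bcst(nsteps)
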
 INTERFACE mp_reduce_min
   MODULE PROCEDURE mp_reduce_min_r4
   MODULE PROCEDURE mp_reduce_min_r8
 END INTERFACE

 INTERFACE mp_reduce_max
   MODULE PROCEDURE mp_reduce_max_r4_1d
   MODULE PROCEDURE mp_reduce_max_r4
   MODULE PROCEDURE mp_reduce_max_r8_1d
   MODULE PROCEDURE mp_reduce_max_r8
   MODULE PROCEDURE mp_reduce_max_i
 END INTERFACE


 INTERFACE mp_reduce_sum
   MODULE PROCEDURE mp_reduce_sum_r4
   MODULE PROCEDURE mp_reduce_sum_r4_1d
   MODULE PROCEDURE mp_reduce_sum_r4_1darr
   MODULE PROCEDURE mp_reduce_sum_r4_2darr
   MODULE PROCEDURE mp_reduce_sum_r8
   MODULE PROCEDURE mp_reduce_sum_r8_1d
   MODULE PROCEDURE mp_reduce_sum_r8_1darr
   MODULE PROCEDURE mp_reduce_sum_r8_2darr
 END INTERFACE
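
! Hedged sketch of the in-place reduction generics: each call replaces
! its argument with the global result over commglobal (variable names
! below are illustrative):
!
!   real :: qmin, qmax, qsum
!   qmin = minval(q(is:ie,js:je)); call mp_reduce_min(qmin)
!   qmax = maxval(q(is:ie,js:je)); call mp_reduce_max(qmax)
!   qsum = sum(q(is:ie,js:je));    call mp_reduce_sum(qsum)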

 ! WARNING only works with one level (ldim == 1)
 INTERFACE mp_gather
   MODULE PROCEDURE mp_gather_4d_r4
   MODULE PROCEDURE mp_gather_3d_r4
   MODULE PROCEDURE mp_gather_3d_r8
 END INTERFACE

 integer :: halo_update_type = 1

contains

 subroutine mp_assign_gid

   gid = mpp_pe()
   npes = mpp_npes()

 end subroutine mp_assign_gid

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
 subroutine mp_start(commID, halo_update_type_in)
   integer, intent(in), optional :: commID
   integer, intent(in), optional :: halo_update_type_in

   integer :: ios
   integer :: unit

   masterproc = mpp_root_pe()
   commglobal = mpi_comm_world
   if( PRESENT(commid) )then
     commglobal = commid
   end if
   !Guard the optional argument: referencing an absent optional is invalid
   if( PRESENT(halo_update_type_in) )then
     halo_update_type = halo_update_type_in
   end if

   numthreads = 1
!$OMP PARALLEL
!$OMP MASTER
!$ numthreads = omp_get_num_threads()
!$OMP END MASTER
!$OMP END PARALLEL

   if ( mpp_pe()==mpp_root_pe() ) then
     master = .true.
     unit = stdout()
     write(unit,*) 'Starting PEs : ', mpp_npes() !Should be for current pelist
     write(unit,*) 'Starting Threads : ', numthreads
   else
     master = .false.
   endif

   if (mpp_npes() > 1) call mpi_barrier(commglobal, ierror)

 end subroutine mp_start
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------
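
! Typical startup/shutdown sequence, as a hedged sketch (the surrounding
! driver code is assumed, not shown in this file):
!
!   call fms_init()
!   call mp_assign_gid()
!   call mp_start()      ! commglobal defaults to MPI_COMM_WORLD
!   ! ... model run ...
!   call mp_stop()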

 logical function is_master()

   is_master = master

 end function is_master

 subroutine setup_master(pelist_local)

   integer, intent(IN) :: pelist_local(:)

   if (any(gid == pelist_local)) then

     masterproc = pelist_local(1)
     master = (gid == masterproc)

   endif

 end subroutine setup_master
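
! Hedged sketch: after switching to a different pelist (e.g. for a nested
! grid), the local master can be re-established; the pelist variable here
! is assumed to hold the new PE list:
!
!   call mpp_set_current_pelist(pelist)
!   call setup_master(pelist)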

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
 subroutine mp_barrier()

   call mpi_barrier(commglobal, ierror)

 end subroutine mp_barrier
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
 subroutine mp_stop()

   call mpi_barrier(commglobal, ierror)
   if (gid==masterproc) print*, 'Stopping PEs : ', npes
   call fms_end()
   ! call MPI_FINALIZE (ierror)

 end subroutine mp_stop
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
 subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout)

   integer, intent(IN) :: npx,npy,grid_type
   integer, intent(INOUT) :: nregions
   logical, intent(IN):: nested
   type(fv_atmos_type), intent(INOUT), target :: Atm
   integer, intent(INOUT) :: layout(2), io_layout(2)

   integer, allocatable :: pe_start(:), pe_end(:)

   integer :: nx,ny,n,num_alloc
   character(len=32) :: type = "unknown"
   logical :: is_symmetry
   logical :: debug=.false.
   integer, allocatable :: tile_id(:)

   integer i
   integer :: npes_x, npes_y

   integer, pointer :: pelist(:), grid_number, num_contact, npes_per_tile
   logical, pointer :: square_domain
   type(domain2D), pointer :: domain, domain_for_coupler

   nx = npx-1
   ny = npy-1

   !! Init pointers
   pelist => atm%pelist
   grid_number => atm%grid_number
   num_contact => atm%num_contact
   domain => atm%domain
   domain_for_coupler => atm%domain_for_coupler
   npes_per_tile => atm%npes_per_tile

   npes_x = layout(1)
   npes_y = layout(2)


   call mpp_domains_init(mpp_domain_time)

   select case(nregions)
   case ( 1 ) ! Single-region domains: one cubed-sphere face (possibly nested), lat-lon, or Cartesian

     select case (grid_type)
     case (0,1,2) !Gnomonic grid, single face or nested
       if (nested) then
         type = "Cubed-sphere nested grid"
       else
         type = "Cubed-sphere, single face"
       end if
       nregions = 1
       num_contact = 0
       npes_per_tile = npes_x*npes_y !/nregions !Set up for concurrency
       is_symmetry = .true.
       call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )

       if ( npes_x == 0 ) then
         npes_x = layout(1)
       endif
       if ( npes_y == 0 ) then
         npes_y = layout(2)
       endif

       if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) atm%gridstruct%square_domain = .true.

       if ( (npx/npes_x < ng) .or. (npy/npes_y < ng) ) then
         write(*,310) npes_x, npes_y, npx/npes_x, npy/npes_y
         call mp_stop
         call exit(1)
       endif

       layout = (/npes_x,npes_y/)
     case (3) ! Lat-Lon "cyclic"
       type="Lat-Lon: cyclic"
       nregions = 4
       num_contact = 8
       if( mod(npes,nregions) .NE. 0 ) then
         call mpp_error(note,'domain_decomp: for a Cyclic mosaic, npes should be a multiple of nregions. ' // &
              'No domain is defined for the Cyclic mosaic. ' )
         return
       end if
       npes_per_tile = npes/nregions
       call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )
       layout = (/1,npes_per_tile/) ! force decomp only in Lat-Direction
     case (4) ! Cartesian, double periodic
       type="Cartesian: double periodic"
       nregions = 1
       num_contact = 2
       npes_per_tile = npes/nregions
       if(npes_x*npes_y == npes_per_tile) then
         layout = (/npes_x,npes_y/)
       else
         call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )
       endif
     case (5) ! latlon patch
       type="Lat-Lon: patch"
       nregions = 1
       num_contact = 0
       npes_per_tile = npes/nregions
       call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )
     case (6) ! latlon strip
       type="Lat-Lon: strip"
       nregions = 1
       num_contact = 1
       npes_per_tile = npes/nregions
       call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )
     case (7) ! Cartesian, channel
       type="Cartesian: channel"
       nregions = 1
       num_contact = 1
       npes_per_tile = npes/nregions
       call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )
     end select

   case ( 6 ) ! Cubed-Sphere
     type="Cubic: cubed-sphere"
     if (nested) then
       call mpp_error(fatal, 'For a nested grid with grid_type < 3 nregions_domain must equal 1.')
     endif
     nregions = 6
     num_contact = 12
     !--- the cubed-sphere grid always has six tiles, so npes should be a multiple of 6
     npes_per_tile = npes_x*npes_y
     call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )

     if ( npes_x == 0 ) then
       npes_x = layout(1)
     endif
     if ( npes_y == 0 ) then
       npes_y = layout(2)
     endif

     if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) atm%gridstruct%square_domain = .true.

     if ( (npx/npes_x < ng) .or. (npy/npes_y < ng) ) then
       write(*,310) npes_x, npes_y, npx/npes_x, npy/npes_y
310    format('Invalid layout, NPES_X: ',i4.4,' NPES_Y: ',i4.4,' ncells_X: ',i4.4,' ncells_Y: ',i4.4)
       call mp_stop
       call exit(1)
     endif

     layout = (/npes_x,npes_y/)
   case default
     call mpp_error(fatal, 'domain_decomp: unsupported grid configuration: '//type)
   end select

   allocate(layout2d(2,nregions), global_indices(4,nregions), npes_tile(nregions) )
   allocate(pe_start(nregions),pe_end(nregions))
   npes_tile = npes_per_tile
   do n = 1, nregions
     global_indices(:,n) = (/1,npx-1,1,npy-1/)
     layout2d(:,n) = layout
     pe_start(n) = pelist(1) + (n-1)*layout(1)*layout(2)
     pe_end(n) = pe_start(n) + layout(1)*layout(2) -1
   end do
   num_alloc=max(1,num_contact)
   allocate(tile1(num_alloc), tile2(num_alloc) )
   allocate(istart1(num_alloc), iend1(num_alloc), jstart1(num_alloc), jend1(num_alloc) )
   allocate(istart2(num_alloc), iend2(num_alloc), jstart2(num_alloc), jend2(num_alloc) )

   is_symmetry = .true.
   select case(nregions)
   case ( 1 )

     select case (grid_type)
     case (0,1,2) !Gnomonic grid, single face or nested
       !No contacts, don't need to do anything
     case (3) ! Lat-Lon "cyclic"
       !--- Contact line 1, between tile 1 (EAST) and tile 2 (WEST)
       tile1(1) = 1; tile2(1) = 2
       istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1; jend1(1) = ny
       istart2(1) = 1; iend2(1) = 1; jstart2(1) = 1; jend2(1) = ny
       !--- Contact line 2, between tile 1 (SOUTH) and tile 3 (NORTH) --- cyclic
       tile1(2) = 1; tile2(2) = 3
       istart1(2) = 1; iend1(2) = nx; jstart1(2) = 1; jend1(2) = 1
       istart2(2) = 1; iend2(2) = nx; jstart2(2) = ny; jend2(2) = ny
       !--- Contact line 3, between tile 1 (WEST) and tile 2 (EAST) --- cyclic
       tile1(3) = 1; tile2(3) = 2
       istart1(3) = 1; iend1(3) = 1; jstart1(3) = 1; jend1(3) = ny
       istart2(3) = nx; iend2(3) = nx; jstart2(3) = 1; jend2(3) = ny
       !--- Contact line 4, between tile 1 (NORTH) and tile 3 (SOUTH)
       tile1(4) = 1; tile2(4) = 3
       istart1(4) = 1; iend1(4) = nx; jstart1(4) = ny; jend1(4) = ny
       istart2(4) = 1; iend2(4) = nx; jstart2(4) = 1; jend2(4) = 1
       !--- Contact line 5, between tile 2 (SOUTH) and tile 4 (NORTH) --- cyclic
       tile1(5) = 2; tile2(5) = 4
       istart1(5) = 1; iend1(5) = nx; jstart1(5) = 1; jend1(5) = 1
       istart2(5) = 1; iend2(5) = nx; jstart2(5) = ny; jend2(5) = ny
       !--- Contact line 6, between tile 2 (NORTH) and tile 4 (SOUTH)
       tile1(6) = 2; tile2(6) = 4
       istart1(6) = 1; iend1(6) = nx; jstart1(6) = ny; jend1(6) = ny
       istart2(6) = 1; iend2(6) = nx; jstart2(6) = 1; jend2(6) = 1
       !--- Contact line 7, between tile 3 (EAST) and tile 4 (WEST)
       tile1(7) = 3; tile2(7) = 4
       istart1(7) = nx; iend1(7) = nx; jstart1(7) = 1; jend1(7) = ny
       istart2(7) = 1; iend2(7) = 1; jstart2(7) = 1; jend2(7) = ny
       !--- Contact line 8, between tile 3 (WEST) and tile 4 (EAST) --- cyclic
       tile1(8) = 3; tile2(8) = 4
       istart1(8) = 1; iend1(8) = 1; jstart1(8) = 1; jend1(8) = ny
       istart2(8) = nx; iend2(8) = nx; jstart2(8) = 1; jend2(8) = ny
       is_symmetry = .false.
     case (4) ! Cartesian, double periodic
       !--- Contact line 1, between tile 1 (EAST) and tile 1 (WEST)
       tile1(1) = 1; tile2(1) = 1
       istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1; jend1(1) = ny
       istart2(1) = 1; iend2(1) = 1; jstart2(1) = 1; jend2(1) = ny
       !--- Contact line 2, between tile 1 (SOUTH) and tile 1 (NORTH) --- cyclic
       tile1(2) = 1; tile2(2) = 1
       istart1(2) = 1; iend1(2) = nx; jstart1(2) = 1; jend1(2) = 1
       istart2(2) = 1; iend2(2) = nx; jstart2(2) = ny; jend2(2) = ny
     case (5) ! latlon patch

     case (6) !latlon strip
       !--- Contact line 1, between tile 1 (EAST) and tile 1 (WEST)
       tile1(1) = 1; tile2(1) = 1
       istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1; jend1(1) = ny
       istart2(1) = 1; iend2(1) = 1; jstart2(1) = 1; jend2(1) = ny
     case (7) ! Cartesian, channel
       !--- Contact line 1, between tile 1 (EAST) and tile 1 (WEST)
       tile1(1) = 1; tile2(1) = 1
       istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1; jend1(1) = ny
       istart2(1) = 1; iend2(1) = 1; jstart2(1) = 1; jend2(1) = ny
     end select

   case ( 6 ) ! Cubed-Sphere
     !--- Contact line 1, between tile 1 (EAST) and tile 2 (WEST)
     tile1(1) = 1; tile2(1) = 2
     istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1; jend1(1) = ny
     istart2(1) = 1; iend2(1) = 1; jstart2(1) = 1; jend2(1) = ny
     !--- Contact line 2, between tile 1 (NORTH) and tile 3 (WEST)
     tile1(2) = 1; tile2(2) = 3
     istart1(2) = 1; iend1(2) = nx; jstart1(2) = ny; jend1(2) = ny
     istart2(2) = 1; iend2(2) = 1; jstart2(2) = ny; jend2(2) = 1
     !--- Contact line 3, between tile 1 (WEST) and tile 5 (NORTH)
     tile1(3) = 1; tile2(3) = 5
     istart1(3) = 1; iend1(3) = 1; jstart1(3) = 1; jend1(3) = ny
     istart2(3) = nx; iend2(3) = 1; jstart2(3) = ny; jend2(3) = ny
     !--- Contact line 4, between tile 1 (SOUTH) and tile 6 (NORTH)
     tile1(4) = 1; tile2(4) = 6
     istart1(4) = 1; iend1(4) = nx; jstart1(4) = 1; jend1(4) = 1
     istart2(4) = 1; iend2(4) = nx; jstart2(4) = ny; jend2(4) = ny
     !--- Contact line 5, between tile 2 (NORTH) and tile 3 (SOUTH)
     tile1(5) = 2; tile2(5) = 3
     istart1(5) = 1; iend1(5) = nx; jstart1(5) = ny; jend1(5) = ny
     istart2(5) = 1; iend2(5) = nx; jstart2(5) = 1; jend2(5) = 1
     !--- Contact line 6, between tile 2 (EAST) and tile 4 (SOUTH)
     tile1(6) = 2; tile2(6) = 4
     istart1(6) = nx; iend1(6) = nx; jstart1(6) = 1; jend1(6) = ny
     istart2(6) = nx; iend2(6) = 1; jstart2(6) = 1; jend2(6) = 1
     !--- Contact line 7, between tile 2 (SOUTH) and tile 6 (EAST)
     tile1(7) = 2; tile2(7) = 6
     istart1(7) = 1; iend1(7) = nx; jstart1(7) = 1; jend1(7) = 1
     istart2(7) = nx; iend2(7) = nx; jstart2(7) = ny; jend2(7) = 1
     !--- Contact line 8, between tile 3 (EAST) and tile 4 (WEST)
     tile1(8) = 3; tile2(8) = 4
     istart1(8) = nx; iend1(8) = nx; jstart1(8) = 1; jend1(8) = ny
     istart2(8) = 1; iend2(8) = 1; jstart2(8) = 1; jend2(8) = ny
     !--- Contact line 9, between tile 3 (NORTH) and tile 5 (WEST)
     tile1(9) = 3; tile2(9) = 5
     istart1(9) = 1; iend1(9) = nx; jstart1(9) = ny; jend1(9) = ny
     istart2(9) = 1; iend2(9) = 1; jstart2(9) = ny; jend2(9) = 1
     !--- Contact line 10, between tile 4 (NORTH) and tile 5 (SOUTH)
     tile1(10) = 4; tile2(10) = 5
     istart1(10) = 1; iend1(10) = nx; jstart1(10) = ny; jend1(10) = ny
     istart2(10) = 1; iend2(10) = nx; jstart2(10) = 1; jend2(10) = 1
     !--- Contact line 11, between tile 4 (EAST) and tile 6 (SOUTH)
     tile1(11) = 4; tile2(11) = 6
     istart1(11) = nx; iend1(11) = nx; jstart1(11) = 1; jend1(11) = ny
     istart2(11) = nx; iend2(11) = 1; jstart2(11) = 1; jend2(11) = 1
     !--- Contact line 12, between tile 5 (EAST) and tile 6 (WEST)
     tile1(12) = 5; tile2(12) = 6
     istart1(12) = nx; iend1(12) = nx; jstart1(12) = 1; jend1(12) = ny
     istart2(12) = 1; iend2(12) = 1; jstart2(12) = 1; jend2(12) = ny
   end select

   if ( any(pelist == gid) ) then
     allocate(tile_id(nregions))
     if( nested ) then
       if( nregions .NE. 1 ) then
         call mpp_error(fatal, 'domain_decomp: nregions should be 1 for a nested region, contact developer')
       endif
       tile_id(1) = 7 ! currently we assume the nested tile is nested in one face of the cubed-sphere grid;
                      ! we need a more general way to deal with nested-grid tile ids
     else
       do n = 1, nregions
         tile_id(n) = n
       enddo
     endif
     call mpp_define_mosaic(global_indices, layout2d, domain, nregions, num_contact, tile1, tile2, &
                            istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, &
                            pe_start=pe_start, pe_end=pe_end, symmetry=is_symmetry, &
                            shalo = ng, nhalo = ng, whalo = ng, ehalo = ng, tile_id=tile_id, name = type)
     call mpp_define_mosaic(global_indices, layout2d, domain_for_coupler, nregions, num_contact, tile1, tile2, &
                            istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, &
                            pe_start=pe_start, pe_end=pe_end, symmetry=is_symmetry, &
                            shalo = 1, nhalo = 1, whalo = 1, ehalo = 1, tile_id=tile_id, name = type)
     deallocate(tile_id)
     call mpp_define_io_domain(domain, io_layout)
     call mpp_define_io_domain(domain_for_coupler, io_layout)

   endif

   deallocate(pe_start,pe_end)
   deallocate(layout2d, global_indices, npes_tile)
   deallocate(tile1, tile2)
   deallocate(istart1, iend1, jstart1, jend1)
   deallocate(istart2, iend2, jstart2, jend2)

   !--- find the tile number
   atm%tile = (gid-pelist(1))/npes_per_tile+1
   if (any(pelist == gid)) then
     npes_this_grid = npes_per_tile*nregions
     tile = atm%tile
     call mpp_get_compute_domain( domain, is, ie, js, je )
     call mpp_get_data_domain ( domain, isd, ied, jsd, jed )

     atm%bd%is = is
     atm%bd%js = js
     atm%bd%ie = ie
     atm%bd%je = je

     atm%bd%isd = isd
     atm%bd%jsd = jsd
     atm%bd%ied = ied
     atm%bd%jed = jed

     atm%bd%isc = is
     atm%bd%jsc = js
     atm%bd%iec = ie
     atm%bd%jec = je

     if (debug .and. nregions==1) then
       tile=1
       write(*,200) tile, is, ie, js, je
       ! call mp_stop
       ! stop
     endif
200  format(i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ')
   else

     atm%bd%is = 0
     atm%bd%js = 0
     atm%bd%ie = -1
     atm%bd%je = -1

     atm%bd%isd = 0
     atm%bd%jsd = 0
     atm%bd%ied = -1
     atm%bd%jed = -1

     atm%bd%isc = 0
     atm%bd%jsc = 0
     atm%bd%iec = -1
     atm%bd%jec = -1

   endif

 end subroutine domain_decomp
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------
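
! Hedged usage sketch: decomposing a C48 cubed-sphere grid (npx=npy=49)
! over 6 tiles with a 2x3 layout per tile. The values and the state of
! Atm%pelist are illustrative only:
!
!   nregions = 6; layout = (/2,3/); io_layout = (/1,1/)
!   call domain_decomp(49, 49, nregions, 0, .false., Atm, layout, io_layout)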

subroutine start_var_group_update_2d(group, array, domain, flags, position, whalo, ehalo, shalo, nhalo, complete)
  type(group_halo_update_type), intent(inout) :: group
  real, dimension(:,:), intent(inout) :: array
  type(domain2D), intent(inout) :: domain
  integer, optional, intent(in) :: flags
  integer, optional, intent(in) :: position
  integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo
  logical, optional, intent(in) :: complete
  real :: d_type
  logical :: is_complete
! Arguments:
! (inout) group - The data type that stores information for the group update.
!                 This data will be used in do_group_pass.
! (inout) array - The array whose halo points are being exchanged.
! (in) domain - Contains domain information.
! (in) flags - An optional integer indicating which directions the
!              data should be sent.
! (in) position - An optional argument indicating the position. This
!                 may be CORNER, but is CENTER by default.
! (in) complete - An optional argument indicating whether the halo updates
!                 should be initiated immediately or wait for a second
!                 pass_..._start call. Omitting complete is the same as
!                 setting complete to .true.

  if (mpp_group_update_initialized(group)) then
    call mpp_reset_group_update_field(group,array)
  else
    call mpp_create_group_update(group, array, domain, flags=flags, position=position, &
                                 whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo)
  endif

  is_complete = .true.
  if(present(complete)) is_complete = complete
  if(is_complete .and. halo_update_type == 1) then
    call mpp_start_group_update(group, domain, d_type)
  endif

end subroutine start_var_group_update_2d


subroutine start_var_group_update_3d(group, array, domain, flags, position, whalo, ehalo, shalo, nhalo, complete)
  type(group_halo_update_type), intent(inout) :: group
  real, dimension(:,:,:), intent(inout) :: array
  type(domain2D), intent(inout) :: domain
  integer, optional, intent(in) :: flags
  integer, optional, intent(in) :: position
  integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo
  logical, optional, intent(in) :: complete
  real :: d_type
  logical :: is_complete

! Arguments:
! (inout) group - The data type that stores information for the group update.
!                 This data will be used in do_group_pass.
! (inout) array - The array whose halo points are being exchanged.
! (in) domain - Contains domain information.
! (in) flags - An optional integer indicating which directions the
!              data should be sent.
! (in) position - An optional argument indicating the position. This
!                 may be CORNER, but is CENTER by default.
! (in) complete - An optional argument indicating whether the halo updates
!                 should be initiated immediately or wait for a second
!                 pass_..._start call. Omitting complete is the same as
!                 setting complete to .true.

  if (mpp_group_update_initialized(group)) then
    call mpp_reset_group_update_field(group,array)
  else
    call mpp_create_group_update(group, array, domain, flags=flags, position=position, &
                                 whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo)
  endif

  is_complete = .true.
  if(present(complete)) is_complete = complete
  if(is_complete .and. halo_update_type == 1 ) then
    call mpp_start_group_update(group, domain, d_type)
  endif

end subroutine start_var_group_update_3d

subroutine start_var_group_update_4d(group, array, domain, flags, position, whalo, ehalo, shalo, nhalo, complete)
  type(group_halo_update_type), intent(inout) :: group
  real, dimension(:,:,:,:), intent(inout) :: array
  type(domain2D), intent(inout) :: domain
  integer, optional, intent(in) :: flags
  integer, optional, intent(in) :: position
  integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo
  logical, optional, intent(in) :: complete
  real :: d_type
  logical :: is_complete

! Arguments:
! (inout) group - The data type that stores information for the group update.
!                 This data will be used in do_group_pass.
! (inout) array - The array whose halo points are being exchanged.
! (in) domain - Contains domain information.
! (in) flags - An optional integer indicating which directions the
!              data should be sent.
! (in) position - An optional argument indicating the position. This
!                 may be CORNER, but is CENTER by default.
! (in) complete - An optional argument indicating whether the halo updates
!                 should be initiated immediately or wait for a second
!                 pass_..._start call. Omitting complete is the same as
!                 setting complete to .true.

  integer :: dirflag

  if (mpp_group_update_initialized(group)) then
    call mpp_reset_group_update_field(group,array)
  else
    call mpp_create_group_update(group, array, domain, flags=flags, position=position, &
                                 whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo)
  endif

  is_complete = .true.
  if(present(complete)) is_complete = complete
  if(is_complete .and. halo_update_type == 1 ) then
    call mpp_start_group_update(group, domain, d_type)
  endif

end subroutine start_var_group_update_4d



subroutine start_vector_group_update_2d(group, u_cmpt, v_cmpt, domain, flags, gridtype, whalo, ehalo, shalo, nhalo, complete)
  type(group_halo_update_type), intent(inout) :: group
  real, dimension(:,:), intent(inout) :: u_cmpt, v_cmpt
  type(domain2d), intent(inout) :: domain
  integer, optional, intent(in) :: flags
  integer, optional, intent(in) :: gridtype
  integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo
  logical, optional, intent(in) :: complete
  real :: d_type
  logical :: is_complete

! Arguments:
! (inout) group - The data type that stores information for the group update.
!                 This data will be used in do_group_pass.
! (inout) u_cmpt - The nominal zonal (u) component of the vector pair whose
!                  halo points are being exchanged.
! (inout) v_cmpt - The nominal meridional (v) component of the vector pair
!                  whose halo points are being exchanged.
! (in) domain - Contains domain decomposition information.
! (in) flags - An optional integer indicating which directions the
!              data should be sent.
! (in) gridtype - An optional flag, which may be one of A_GRID, BGRID_NE,
!                 CGRID_NE or DGRID_NE, indicating where the two components of the
!                 vector are discretized.
! (in) complete - An optional argument indicating whether the halo updates
!                 should be initiated immediately or wait for a second
!                 pass_..._start call. Omitting complete is the same as
!                 setting complete to .true.

  if (mpp_group_update_initialized(group)) then
    call mpp_reset_group_update_field(group,u_cmpt, v_cmpt)
  else
    call mpp_create_group_update(group, u_cmpt, v_cmpt, domain, &
                                 flags=flags, gridtype=gridtype, &
                                 whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo)
  endif

  is_complete = .true.
  if(present(complete)) is_complete = complete
  if(is_complete .and. halo_update_type == 1 ) then
    call mpp_start_group_update(group, domain, d_type)
  endif

end subroutine start_vector_group_update_2d

subroutine start_vector_group_update_3d(group, u_cmpt, v_cmpt, domain, flags, gridtype, whalo, ehalo, shalo, nhalo, complete)
  type(group_halo_update_type), intent(inout) :: group
  real, dimension(:,:,:), intent(inout) :: u_cmpt, v_cmpt !! The nominal zonal (u) and meridional (v)
                                                          !! components of the vector pair whose
                                                          !! halo points are being exchanged.
  type(domain2d), intent(inout) :: domain
  integer, optional, intent(in) :: flags
  integer, optional, intent(in) :: gridtype
  integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo
  logical, optional, intent(in) :: complete
  real :: d_type
  logical :: is_complete

! Arguments:
! (inout) group - The data type that stores information for the group update.
!                 This data will be used in do_group_pass.
! (inout) u_cmpt - The nominal zonal (u) component of the vector pair whose
!                  halo points are being exchanged.
! (inout) v_cmpt - The nominal meridional (v) component of the vector pair
!                  whose halo points are being exchanged.
! (in) domain - Contains domain decomposition information.
! (in) flags - An optional integer indicating which directions the
!              data should be sent.
! (in) gridtype - An optional flag, which may be one of A_GRID, BGRID_NE,
!                 CGRID_NE or DGRID_NE, indicating where the two components of the
!                 vector are discretized.
! (in) complete - An optional argument indicating whether the halo updates
!                 should be initiated immediately or wait for a second
!                 pass_..._start call. Omitting complete is the same as
!                 setting complete to .true.

  if (mpp_group_update_initialized(group)) then
    call mpp_reset_group_update_field(group,u_cmpt, v_cmpt)
  else
    call mpp_create_group_update(group, u_cmpt, v_cmpt, domain, &
                                 flags=flags, gridtype=gridtype, &
                                 whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo)
  endif

  is_complete = .true.
  if(present(complete)) is_complete = complete
  if(is_complete .and. halo_update_type == 1) then
    call mpp_start_group_update(group, domain, d_type)
  endif

end subroutine start_vector_group_update_3d

940 
941 subroutine complete_group_halo_update(group, domain)
942  type(group_halo_update_type), intent(inout) :: group
943  type(domain2d), intent(inout) :: domain
944  real :: d_type
945 
946 ! Arguments:
947 ! (inout) group - The data type that store information for group update.
948 ! (in) domain - Contains domain decomposition information.
949 
950  if( halo_update_type == 1 ) then
951  call mpp_complete_group_update(group, domain, d_type)
952  else
953  call mpp_do_group_update(group, domain, d_type)
954  endif
955 
956 end subroutine complete_group_halo_update
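
! Hedged sketch of a C-grid vector exchange through this API (the field
! and group names are illustrative; CGRID_NE is imported above):
!
!   call start_group_halo_update(v_pack, uc, vc, domain, gridtype=CGRID_NE)
!   ! ... interior work ...
!   call complete_group_halo_update(v_pack, domain)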




subroutine broadcast_domains(Atm)

  type(fv_atmos_type), intent(INOUT) :: Atm(:)

  integer :: n, i1, i2, j1, j2, i
  integer :: ens_root_pe, ensemble_id

  !The idea is that each process needs to be part of a proper pelist;
  !the pelist on which a domain is currently defined covers ONLY the PEs that have the domain.

  ! This is needed to set the proper pelist for the ensemble. The pelist
  ! needs to include the non-nested+nested tiles for the ensemble.
  ensemble_id = get_ensemble_id()
  ens_root_pe = (ensemble_id-1)*npes

  !Pelist needs to be set to ALL ensemble PEs for broadcast_domain to work
  call mpp_set_current_pelist((/ (i,i=ens_root_pe,npes-1+ens_root_pe) /))
  do n=1,size(atm)
    call mpp_broadcast_domain(atm(n)%domain)
    call mpp_broadcast_domain(atm(n)%domain_for_coupler)
  end do

end subroutine broadcast_domains
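
! Worked example of the pelist arithmetic above (illustrative numbers):
! with npes = 24 PEs per ensemble member, member 2 has ens_root_pe = 24,
! so the broadcast pelist is (/ (i, i=24,47) /).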

subroutine switch_current_domain(new_domain,new_domain_for_coupler)

  type(domain2D), intent(in), target :: new_domain, new_domain_for_coupler
  logical, parameter :: debug = .false.

  !--- find the tile number
  !tile = mpp_pe()/npes_per_tile+1
  !ntiles = mpp_get_ntile_count(new_domain)
  call mpp_get_compute_domain( new_domain, is, ie, js, je )
  isc = is ; jsc = js
  iec = ie ; jec = je
  call mpp_get_data_domain ( new_domain, isd, ied, jsd, jed )
! if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) square_domain = .true.

! if (debug .AND. (gid==masterproc)) write(*,200) tile, is, ie, js, je
!200 format('New domain: ', i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ')

  call set_domain(new_domain)


end subroutine switch_current_domain

subroutine switch_current_atm(new_Atm, switch_domain)

  type(fv_atmos_type), intent(IN), target :: new_Atm
  logical, intent(IN), optional :: switch_domain
  logical, parameter :: debug = .false.
  logical :: swD

  if (debug .AND. (gid==masterproc)) print*, 'SWITCHING ATM STRUCTURES', new_atm%grid_number
  if (present(switch_domain)) then
    swd = switch_domain
  else
    swd = .true.
  end if
  if (swd) call switch_current_domain(new_atm%domain, new_atm%domain_for_coupler)

!!$ if (debug .AND. (gid==masterproc)) WRITE(*,'(A, 6I5)') 'NEW GRID DIMENSIONS: ', &
!!$ isd, ied, jsd, jed, new_Atm%npx, new_Atm%npy

end subroutine switch_current_atm

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
!
 subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID)
   real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: q
   integer, intent(IN):: npx,npy
   integer, intent(IN):: FILL ! <X-Dir or Y-Dir
   logical, OPTIONAL, intent(IN) :: AGRID, BGRID
   integer :: i,j

   if (present(bgrid)) then
     if (bgrid) then
       select case (fill)
       case (xdir)
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner
           enddo
         enddo
       case (ydir)
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i+1 ,1-j ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-j ,npy+i) = q(i+1 ,npy+j ) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx+j,1-i ) = q(npx-i,1-j ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx+j,npy+i) = q(npx-i,npy+j ) !NE Corner
           enddo
         enddo
       case default
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner
           enddo
         enddo
       end select
     endif
   elseif (present(agrid)) then
     if (agrid) then
       select case (fill)
       case (xdir)
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-i ,npy-1+j) = q(1-j ,npy-1-i+1) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx-1+i,1-j ) = q(npx-1+j,i ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+i,npy-1+j) = q(npx-1+j,npy-1-i+1) !NE Corner
           enddo
         enddo
       case (ydir)
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner
           enddo
         enddo
       case default
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner
           enddo
         enddo
       end select
     endif
   endif

 end subroutine fill_corners_2d_r4
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------
!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
!
 subroutine fill_corners_2d_r8(q, npx, npy, FILL, AGRID, BGRID)
   real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: q
   integer, intent(IN):: npx,npy
   integer, intent(IN):: FILL ! <X-Dir or Y-Dir
   logical, OPTIONAL, intent(IN) :: AGRID, BGRID
   integer :: i,j

   if (present(bgrid)) then
     if (bgrid) then
       select case (fill)
       case (xdir)
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner
           enddo
         enddo
       case (ydir)
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i+1 ,1-j ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-j ,npy+i) = q(i+1 ,npy+j ) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx+j,1-i ) = q(npx-i,1-j ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx+j,npy+i) = q(npx-i,npy+j ) !NE Corner
           enddo
         enddo
       case default
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner
           enddo
         enddo
       end select
     endif
   elseif (present(agrid)) then
     if (agrid) then
       select case (fill)
       case (xdir)
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-i ,npy-1+j) = q(1-j ,npy-1-i+1) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx-1+i,1-j ) = q(npx-1+j,i ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+i,npy-1+j) = q(npx-1+j,npy-1-i+1) !NE Corner
           enddo
         enddo
       case (ydir)
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner
           enddo
         enddo
       case default
         do j=1,ng
           do i=1,ng
             if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner
             if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner
             if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner
             if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner
           enddo
         enddo
       end select
     endif
   endif

 end subroutine fill_corners_2d_r8
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
! fill_corners_xy_2d_r8
 subroutine fill_corners_xy_2d_r8(x, y, npx, npy, DGRID, AGRID, CGRID, VECTOR)
   real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: x
   real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: y
   integer, intent(IN):: npx,npy
   logical, OPTIONAL, intent(IN) :: DGRID, AGRID, CGRID, VECTOR
   integer :: i,j

   real(kind=8) :: mySign

   mysign = 1.0
   if (present(vector)) then
     if (vector) mysign = -1.0
   endif

   if (present(dgrid)) then
     call fill_corners_dgrid(x, y, npx, npy, mysign)
   elseif (present(cgrid)) then
     call fill_corners_cgrid(x, y, npx, npy, mysign)
   elseif (present(agrid)) then
     call fill_corners_agrid(x, y, npx, npy, mysign)
   else
     call fill_corners_agrid(x, y, npx, npy, mysign)
   endif

 end subroutine fill_corners_xy_2d_r8
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
! fill_corners_xy_2d_r4
 subroutine fill_corners_xy_2d_r4(x, y, npx, npy, DGRID, AGRID, CGRID, VECTOR)
   real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: x
   real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: y
   integer, intent(IN):: npx,npy
   logical, OPTIONAL, intent(IN) :: DGRID, AGRID, CGRID, VECTOR
   integer :: i,j

   real(kind=4) :: mySign

   mysign = 1.0
   if (present(vector)) then
     if (vector) mysign = -1.0
   endif

   if (present(dgrid)) then
     call fill_corners_dgrid(x, y, npx, npy, mysign)
   elseif (present(cgrid)) then
     call fill_corners_cgrid(x, y, npx, npy, mysign)
   elseif (present(agrid)) then
     call fill_corners_agrid(x, y, npx, npy, mysign)
   else
     call fill_corners_agrid(x, y, npx, npy, mysign)
   endif

 end subroutine fill_corners_xy_2d_r4
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
! fill_corners_xy_3d_r8
 subroutine fill_corners_xy_3d_r8(x, y, npx, npy, npz, DGRID, AGRID, CGRID, VECTOR)
   real(kind=8), DIMENSION(isd:,jsd:,:), intent(INOUT):: x
   real(kind=8), DIMENSION(isd:,jsd:,:), intent(INOUT):: y
   integer, intent(IN):: npx,npy,npz
   logical, OPTIONAL, intent(IN) :: DGRID, AGRID, CGRID, VECTOR
   integer :: i,j,k

   real(kind=8) :: mySign

   mysign = 1.0
   if (present(vector)) then
     if (vector) mysign = -1.0
   endif

   if (present(dgrid)) then
     do k=1,npz
       call fill_corners_dgrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
     enddo
   elseif (present(cgrid)) then
     do k=1,npz
       call fill_corners_cgrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
     enddo
   elseif (present(agrid)) then
     do k=1,npz
       call fill_corners_agrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
     enddo
   else
     do k=1,npz
       call fill_corners_agrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
     enddo
   endif

 end subroutine fill_corners_xy_3d_r8
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
! fill_corners_xy_3d_r4
 subroutine fill_corners_xy_3d_r4(x, y, npx, npy, npz, DGRID, AGRID, CGRID, VECTOR)
   real(kind=4), DIMENSION(isd:,jsd:,:), intent(INOUT):: x
   real(kind=4), DIMENSION(isd:,jsd:,:), intent(INOUT):: y
   integer, intent(IN):: npx,npy,npz
   logical, OPTIONAL, intent(IN) :: DGRID, AGRID, CGRID, VECTOR
   integer :: i,j,k

   real(kind=4) :: mySign

   mysign = 1.0
   if (present(vector)) then
     if (vector) mysign = -1.0
   endif

   if (present(dgrid)) then
     do k=1,npz
       call fill_corners_dgrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
     enddo
   elseif (present(cgrid)) then
     do k=1,npz
       call fill_corners_cgrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
     enddo
   elseif (present(agrid)) then
     do k=1,npz
       call fill_corners_agrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
     enddo
   else
     do k=1,npz
       call fill_corners_agrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
     enddo
   endif

 end subroutine fill_corners_xy_3d_r4
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
 subroutine fill_corners_dgrid_r8(x, y, npx, npy, mySign)
   real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: x
   real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: y
   integer, intent(IN):: npx,npy
   real(kind=8), intent(IN) :: mySign
   integer :: i,j

   do j=1,ng
     do i=1,ng
       ! if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j+1 ,1-i ) !SW Corner
       ! if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = mySign*y(j+1 ,npy-1+i) !NW Corner
       ! if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = mySign*y(npx-j,1-i ) !SE Corner
       ! if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = y(npx-j,npy-1+i) !NE Corner
       if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = mysign*y(1-j ,i ) !SW Corner
       if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = y(1-j ,npy-i) !NW Corner
       if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = y(npx+j,i ) !SE Corner
       if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = mysign*y(npx+j,npy-i) !NE Corner
     enddo
   enddo
   do j=1,ng
     do i=1,ng
       ! if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i+1 ) !SW Corner
       ! if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = mySign*x(1-j ,npy-i) !NW Corner
       ! if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = mySign*x(npx-1+j,i+1 ) !SE Corner
       ! if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = x(npx-1+j,npy-i) !NE Corner
       if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = mysign*x(j ,1-i ) !SW Corner
       if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = x(j ,npy+i) !NW Corner
       if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = x(npx-j ,1-i ) !SE Corner
       if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = mysign*x(npx-j ,npy+i) !NE Corner
     enddo
   enddo

 end subroutine fill_corners_dgrid_r8
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
 subroutine fill_corners_dgrid_r4(x, y, npx, npy, mySign)
   real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: x
   real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: y
   integer, intent(IN):: npx,npy
   real(kind=4), intent(IN) :: mySign
   integer :: i,j

   do j=1,ng
     do i=1,ng
       ! if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j+1 ,1-i ) !SW Corner
       ! if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = mySign*y(j+1 ,npy-1+i) !NW Corner
       ! if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = mySign*y(npx-j,1-i ) !SE Corner
       ! if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = y(npx-j,npy-1+i) !NE Corner
       if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = mysign*y(1-j ,i ) !SW Corner
       if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = y(1-j ,npy-i) !NW Corner
       if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = y(npx+j,i ) !SE Corner
       if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = mysign*y(npx+j,npy-i) !NE Corner
     enddo
   enddo
   do j=1,ng
     do i=1,ng
       ! if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i+1 ) !SW Corner
       ! if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = mySign*x(1-j ,npy-i) !NW Corner
       ! if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = mySign*x(npx-1+j,i+1 ) !SE Corner
       ! if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = x(npx-1+j,npy-i) !NE Corner
       if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = mysign*x(j ,1-i ) !SW Corner
       if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = x(j ,npy+i) !NW Corner
       if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = x(npx-j ,1-i ) !SE Corner
       if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = mysign*x(npx-j ,npy+i) !NE Corner
     enddo
   enddo

 end subroutine fill_corners_dgrid_r4
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
 subroutine fill_corners_cgrid_r4(x, y, npx, npy, mySign)
   real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: x
   real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: y
   integer, intent(IN):: npx,npy
   real(kind=4), intent(IN) :: mySign
   integer :: i,j

   do j=1,ng
     do i=1,ng
       if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j ,1-i ) !SW Corner
       if ((is == 1) .and. (je+1==npy)) x(1-i ,npy-1+j) = mysign*y(j ,npy+i) !NW Corner
       if ((ie+1==npx) .and. (js == 1)) x(npx+i ,1-j ) = mysign*y(npx-j ,1-i ) !SE Corner
       if ((ie+1==npx) .and. (je+1==npy)) x(npx+i ,npy-1+j) = y(npx-j ,npy+i) !NE Corner
     enddo
   enddo
   do j=1,ng
     do i=1,ng
       if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i ) !SW Corner
       if ((is == 1) .and. (je+1==npy)) y(1-i ,npy+j) = mysign*x(1-j ,npy-i) !NW Corner
       if ((ie+1==npx) .and. (js == 1)) y(npx-1+i,1-j ) = mysign*x(npx+j,i ) !SE Corner
       if ((ie+1==npx) .and. (je+1==npy)) y(npx-1+i,npy+j) = x(npx+j,npy-i) !NE Corner
     enddo
   enddo

 end subroutine fill_corners_cgrid_r4
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
 subroutine fill_corners_cgrid_r8(x, y, npx, npy, mySign)
   real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: x
   real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: y
   integer, intent(IN):: npx,npy
   real(kind=8), intent(IN) :: mySign
   integer :: i,j

   do j=1,ng
     do i=1,ng
       if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j ,1-i ) !SW Corner
       if ((is == 1) .and. (je+1==npy)) x(1-i ,npy-1+j) = mysign*y(j ,npy+i) !NW Corner
       if ((ie+1==npx) .and. (js == 1)) x(npx+i ,1-j ) = mysign*y(npx-j ,1-i ) !SE Corner
       if ((ie+1==npx) .and. (je+1==npy)) x(npx+i ,npy-1+j) = y(npx-j ,npy+i) !NE Corner
     enddo
   enddo
   do j=1,ng
     do i=1,ng
       if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i ) !SW Corner
       if ((is == 1) .and. (je+1==npy)) y(1-i ,npy+j) = mysign*x(1-j ,npy-i) !NW Corner
       if ((ie+1==npx) .and. (js == 1)) y(npx-1+i,1-j ) = mysign*x(npx+j,i ) !SE Corner
       if ((ie+1==npx) .and. (je+1==npy)) y(npx-1+i,npy+j) = x(npx+j,npy-i) !NE Corner
     enddo
   enddo

 end subroutine fill_corners_cgrid_r8
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
 subroutine fill_corners_agrid_r4(x, y, npx, npy, mySign)
   real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: x
   real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: y
   integer, intent(IN):: npx,npy
   real(kind=4), intent(IN) :: mySign
   integer :: i,j

   do j=1,ng
     do i=1,ng
       if ((is== 1) .and. (js== 1)) x(1-i ,1-j ) = mysign*y(1-j ,i ) !SW Corner
       if ((is== 1) .and. (je==npy-1)) x(1-i ,npy-1+j) = y(1-j ,npy-1-i+1) !NW Corner
       if ((ie==npx-1) .and. (js== 1)) x(npx-1+i,1-j ) = y(npx-1+j,i ) !SE Corner
       if ((ie==npx-1) .and. (je==npy-1)) x(npx-1+i,npy-1+j) = mysign*y(npx-1+j,npy-1-i+1) !NE Corner
     enddo
   enddo
   do j=1,ng
     do i=1,ng
       if ((is== 1) .and. (js== 1)) y(1-j ,1-i ) = mysign*x(i ,1-j ) !SW Corner
       if ((is== 1) .and. (je==npy-1)) y(1-j ,npy-1+i) = x(i ,npy-1+j) !NW Corner
       if ((ie==npx-1) .and. (js== 1)) y(npx-1+j,1-i ) = x(npx-1-i+1,1-j ) !SE Corner
       if ((ie==npx-1) .and. (je==npy-1)) y(npx-1+j,npy-1+i) = mysign*x(npx-1-i+1,npy-1+j) !NE Corner
1500  enddo
1501  enddo
1502 
1503  end subroutine fill_corners_agrid_r4
1504 !
1505 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
1506 !-------------------------------------------------------------------------------
1507 
1508 !-------------------------------------------------------------------------------
1509 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
1510 !
1511  subroutine fill_corners_agrid_r8(x, y, npx, npy, mySign)
1512  real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: x
1513  real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: y
1514  integer, intent(IN):: npx,npy
1515  real(kind=8), intent(IN) :: mySign
1516  integer :: i,j
1517 
1518  do j=1,ng
1519  do i=1,ng
1520  if ((is== 1) .and. (js== 1)) x(1-i ,1-j ) = mysign*y(1-j ,i ) !SW Corner
1521  if ((is== 1) .and. (je==npy-1)) x(1-i ,npy-1+j) = y(1-j ,npy-1-i+1) !NW Corner
1522  if ((ie==npx-1) .and. (js== 1)) x(npx-1+i,1-j ) = y(npx-1+j,i ) !SE Corner
1523  if ((ie==npx-1) .and. (je==npy-1)) x(npx-1+i,npy-1+j) = mysign*y(npx-1+j,npy-1-i+1) !NE Corner
1524  enddo
1525  enddo
1526  do j=1,ng
1527  do i=1,ng
1528  if ((is== 1) .and. (js== 1)) y(1-j ,1-i ) = mysign*x(i ,1-j ) !SW Corner
1529  if ((is== 1) .and. (je==npy-1)) y(1-j ,npy-1+i) = x(i ,npy-1+j) !NW Corner
1530  if ((ie==npx-1) .and. (js== 1)) y(npx-1+j,1-i ) = x(npx-1-i+1,1-j ) !SE Corner
1531  if ((ie==npx-1) .and. (je==npy-1)) y(npx-1+j,npy-1+i) = mysign*x(npx-1-i+1,npy-1+j) !NE Corner
1532  enddo
1533  enddo
1534 
1535  end subroutine fill_corners_agrid_r8
1536 !
1537 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
1538 !-------------------------------------------------------------------------------
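!
! Note on the A-grid fillers above: both components live at cell centres,
! so the corner mapping reduces to an index transpose plus the mySign
! factor.  A minimal, self-contained sketch of the SW corner for ng = 2
! follows; the toy arrays and seeded halo strips are hypothetical, and in
! the model the strips y(1-j,i) and x(i,1-j) have already been filled by
! the regular edge halo exchange.
!
  program agrid_corner_sketch
  implicit none
  integer, parameter :: nx = 4, ny = 4, ng = 2
  real :: x(1-ng:nx+ng, 1-ng:ny+ng), y(1-ng:nx+ng, 1-ng:ny+ng)
  real, parameter :: mysign = -1.0
  integer :: i, j
  x = 0.0 ;  y = 0.0
  do j = 1, ng
     do i = 1, ng
        y(1-j,i) = real(10*i + j)       ! west halo strip of y
        x(i,1-j) = real(10*j + i)       ! south halo strip of x
     enddo
  enddo
  do j = 1, ng
     do i = 1, ng
        x(1-i,1-j) = mysign*y(1-j,i)    ! first corner loop above
        y(1-j,1-i) = mysign*x(i,1-j)    ! second corner loop above
     enddo
  enddo
  print *, x(0,0), y(0,0)               ! prints -11.0 -11.0
  end program agrid_corner_sketch
!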
1539 
1540 !!$!-------------------------------------------------------------------------------
1541 !!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
1542 !!$!
1543 !!$! mp_corner_comm :: Point-based MPI communication routine for Cubed-Sphere
1544 !!$! ghosted corner point on B-Grid
1545 !!$! this routine sends 24 16-byte messages
1546 !!$!
1547 !!$ subroutine mp_corner_comm(q, npx, npy, tile)
1548 !!$ integer, intent(IN) :: npx,npy, tile
1549 !!$ real , intent(INOUT):: q(isd:ied+1,jsd:jed+1)
1550 !!$
1551 !!$ integer, parameter :: ntiles = 6
1552 !!$
1553 !!$ real :: qsend(24)
1554 !!$ real :: send_tag, recv_tag
1555 !!$ integer :: sqest(24), rqest(24)
1556 !!$ integer :: Stats(24*MPI_STATUS_SIZE)
1557 !!$ integer :: nsend, nrecv, nread
1558 !!$ integer :: dest_gid, src_gid
1559 !!$ integer :: n
1560 !!$
1561 !!$ qsend = 1.e25
1562 !!$ nsend=0
1563 !!$ nrecv=0
1564 !!$
1565 !!$ if ( mod(tile,2) == 0 ) then
1566 !!$! Even Face LL and UR pairs 6 2-way
1567 !!$ if ( (is==1) .and. (js==1) ) then
1568 !!$ nsend=nsend+1
1569 !!$ qsend(nsend) = q(is,js+1)
1570 !!$ send_tag = 300+tile
1571 !!$ dest_gid = (tile-2)*npes_x*npes_y - 1
1572 !!$ if (dest_gid < 0) dest_gid=npes+dest_gid
1573 !!$ recv_tag = 100+(tile-2)
1574 !!$ if (tile==2) recv_tag = 100+(ntiles)
1575 !!$ src_gid = (tile-3)*npes_x*npes_y
1576 !!$ src_gid = src_gid + npes_x*(npes_y-1) + npes_x - 1
1577 !!$ if (src_gid < 0) src_gid=npes+src_gid
1578 !!$ if (npes>6) then
1579 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1580 !!$ dest_gid, send_tag, &
1581 !!$ q(is-1,js), 1, MPI_DOUBLE_PRECISION, &
1582 !!$ src_gid, recv_tag, &
1583 !!$ commglobal, Stats, ierror )
1584 !!$ nsend=nsend-1
1585 !!$ else
1586 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1587 !!$ send_tag, commglobal, sqest(nsend), ierror )
1588 !!$ nrecv=nrecv+1
1589 !!$ call MPI_IRECV( q(is-1,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1590 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1591 !!$ endif
1592 !!$ endif
1593 !!$ if ( (ie==npx-1) .and. (je==npy-1) ) then
1594 !!$ nsend=nsend+1
1595 !!$ qsend(nsend) = q(ie,je+1)
1596 !!$ send_tag = 100+tile
1597 !!$ dest_gid = (tile+1)*npes_x*npes_y
1598 !!$ if (dest_gid+1 > npes) dest_gid=dest_gid-npes
1599 !!$ recv_tag = 300+(tile+2)
1600 !!$ if (tile==6) recv_tag = 300+2
1601 !!$ src_gid = (tile+1)*npes_x*npes_y
1602 !!$ if (src_gid+1 > npes) src_gid=src_gid-npes
1603 !!$ if (npes>6) then
1604 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1605 !!$ dest_gid, send_tag, &
1606 !!$ q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, &
1607 !!$ src_gid, recv_tag, &
1608 !!$ commglobal, Stats, ierror )
1609 !!$ nsend=nsend-1
1610 !!$ else
1611 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1612 !!$ send_tag, commglobal, sqest(nsend), ierror )
1613 !!$ nrecv=nrecv+1
1614 !!$ call MPI_IRECV( q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1615 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1616 !!$ endif
1617 !!$ endif
1618 !!$! wait for comm to complete
1619 !!$ if (npes==6) then
1620 !!$ if (nsend>0) then
1621 !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror)
1622 !!$
1623 !!$
1624 !!$
1625 !!$ endif
1626 !!$ if (nrecv>0) then
1627 !!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror)
1628 !!$
1629 !!$
1630 !!$
1631 !!$ endif
1632 !!$ nsend=0 ; nrecv=0
1633 !!$ endif
1634 !!$
1635 !!$! Even Face LR 1 pair ; 1 1-way
1636 !!$ if ( (tile==2) .and. (ie==npx-1) .and. (js==1) ) then
1637 !!$ nsend=nsend+1
1638 !!$ qsend(nsend) = q(ie,js)
1639 !!$ send_tag = 200+tile
1640 !!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x-1
1641 !!$ recv_tag = 200+(tile+2)
1642 !!$ src_gid = dest_gid
1643 !!$ if (npes>6) then
1644 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1645 !!$ dest_gid, send_tag, &
1646 !!$ q(ie+2,js), 1, MPI_DOUBLE_PRECISION, &
1647 !!$ src_gid, recv_tag, &
1648 !!$ commglobal, Stats, ierror )
1649 !!$ nsend=nsend-1
1650 !!$ else
1651 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1652 !!$ send_tag, commglobal, sqest(nsend), ierror )
1653 !!$ nrecv=nrecv+1
1654 !!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1655 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1656 !!$ endif
1657 !!$ endif
1658 !!$ if ( (tile==4) .and. (ie==npx-1) .and. (js==1) ) then
1659 !!$ nsend=nsend+1
1660 !!$ qsend(nsend) = q(ie+1,js+1)
1661 !!$ send_tag = 200+tile
1662 !!$ dest_gid = (tile-3)*npes_x*npes_y + npes_x-1
1663 !!$ recv_tag = 200+(tile-2)
1664 !!$ src_gid = dest_gid
1665 !!$ if (npes>6) then
1666 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1667 !!$ dest_gid, send_tag, &
1668 !!$ q(ie+2,js), 1, MPI_DOUBLE_PRECISION, &
1669 !!$ src_gid, recv_tag, &
1670 !!$ commglobal, Stats, ierror )
1671 !!$ nsend=nsend-1
1672 !!$ else
1673 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1674 !!$ send_tag, commglobal, sqest(nsend), ierror )
1675 !!$ nrecv=nrecv+1
1676 !!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1677 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1678 !!$ endif
1679 !!$ nsend=nsend+1
1680 !!$ qsend(nsend) = q(ie,js)
1681 !!$ send_tag = 200+tile
1682 !!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x-1
1683 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1684 !!$ send_tag, commglobal, sqest(nsend), ierror )
1685 !!$ endif
1686 !!$ if ( (tile==6) .and. (ie==npx-1) .and. (js==1) ) then
1687 !!$ recv_tag = 200+(tile-2)
1688 !!$ src_gid = (tile-3)*npes_x*npes_y + npes_x-1
1689 !!$ nrecv=nrecv+1
1690 !!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1691 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1692 !!$ endif
1693 !!$
1694 !!$! wait for comm to complete
1695 !!$ if (npes==6) then
1696 !!$ if (nsend>0) then
1697 !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror)
1698 !!$
1699 !!$
1700 !!$
1701 !!$ endif
1702 !!$ if (nrecv>0) then
1703 !!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror)
1704 !!$
1705 !!$
1706 !!$
1707 !!$ endif
1708 !!$ nsend=0 ; nrecv=0
1709 !!$ endif
1710 !!$
1711 !!$! Send to Odd face LR 3 1-way
1712 !!$ if ( (is==1) .and. (js==1) ) then
1713 !!$ nsend=nsend+1
1714 !!$ qsend(nsend) = q(is+1,js)
1715 !!$ send_tag = 200+tile
1716 !!$ dest_gid = (tile-2)*npes_x*npes_y + npes_x-1
1717 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1718 !!$ send_tag, commglobal, sqest(nsend), ierror )
1719 !!$ endif
1720 !!$
1721 !!$! Receive Even Face UL 3 1-way
1722 !!$ if ( (is==1) .and. (je==npy-1) ) then
1723 !!$ recv_tag = 400+(tile-1)
1724 !!$ src_gid = (tile-2)*npes_x*npes_y + npes_x*(npes_y-1) + npes_x-1
1725 !!$ nrecv=nrecv+1
1726 !!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1727 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1728 !!$ endif
1729 !!$
1730 !!$ else
1731 !!$
1732 !!$! Odd Face LL and UR pairs 6 2-way
1733 !!$ if ( (is==1) .and. (js==1) ) then
1734 !!$ nsend=nsend+1
1735 !!$ qsend(nsend) = q(is+1,js)
1736 !!$ send_tag = 300+tile
1737 !!$ dest_gid = (tile-2)*npes_x*npes_y - 1
1738 !!$ if (dest_gid < 0) dest_gid=npes+dest_gid
1739 !!$ recv_tag = 100+(tile-2)
1740 !!$ if (tile==1) recv_tag = 100+(ntiles-tile)
1741 !!$ src_gid = (tile-3)*npes_x*npes_y
1742 !!$ src_gid = src_gid + npes_x*(npes_y-1) + npes_x - 1
1743 !!$ if (src_gid < 0) src_gid=npes+src_gid
1744 !!$ if (npes>6) then
1745 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1746 !!$ dest_gid, send_tag, &
1747 !!$ q(is-1,js), 1, MPI_DOUBLE_PRECISION, &
1748 !!$ src_gid, recv_tag, &
1749 !!$ commglobal, Stats, ierror )
1750 !!$ nsend=nsend-1
1751 !!$ else
1752 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1753 !!$ send_tag, commglobal, sqest(nsend), ierror )
1754 !!$ nrecv=nrecv+1
1755 !!$ call MPI_IRECV( q(is-1,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1756 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1757 !!$ endif
1758 !!$ endif
1759 !!$ if ( (ie==npx-1) .and. (je==npy-1) ) then
1760 !!$ nsend=nsend+1
1761 !!$ qsend(nsend) = q(ie+1,je)
1762 !!$ send_tag = 100+tile
1763 !!$ dest_gid = (tile+1)*npes_x*npes_y
1764 !!$ if (dest_gid+1 > npes) dest_gid=dest_gid-npes
1765 !!$ recv_tag = 300+(tile+2)
1766 !!$ if (tile==5) recv_tag = 300+1
1767 !!$ src_gid = (tile+1)*npes_x*npes_y
1768 !!$ if (src_gid+1 > npes) src_gid=src_gid-npes
1769 !!$ if (npes>6) then
1770 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1771 !!$ dest_gid, send_tag, &
1772 !!$ q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, &
1773 !!$ src_gid, recv_tag, &
1774 !!$ commglobal, Stats, ierror )
1775 !!$ nsend=nsend-1
1776 !!$ else
1777 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1778 !!$ send_tag, commglobal, sqest(nsend), ierror )
1779 !!$ nrecv=nrecv+1
1780 !!$ call MPI_IRECV( q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1781 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1782 !!$ endif
1783 !!$ endif
1784 !!$! wait for comm to complete
1785 !!$ if (npes==6) then
1786 !!$ if (nsend>0) then
1787 !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror)
1788 !!$
1789 !!$
1790 !!$
1791 !!$ endif
1792 !!$ if (nrecv>0) then
1793 !!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror)
1794 !!$
1795 !!$
1796 !!$
1797 !!$ endif
1798 !!$ nsend=0 ; nrecv=0
1799 !!$ endif
1800 !!$
1801 !!$! Odd Face UL 1 pair ; 1 1-way
1802 !!$ if ( (tile==1) .and. (is==1) .and. (je==npy-1) ) then
1803 !!$ nsend=nsend+1
1804 !!$ qsend(nsend) = q(is,je)
1805 !!$ send_tag = 400+tile
1806 !!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x*(npes_y-1)
1807 !!$ recv_tag = 400+(tile+2)
1808 !!$ src_gid = dest_gid
1809 !!$ if (npes>6) then
1810 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1811 !!$ dest_gid, send_tag, &
1812 !!$ q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, &
1813 !!$ src_gid, recv_tag, &
1814 !!$ commglobal, Stats, ierror )
1815 !!$ nsend=nsend-1
1816 !!$ else
1817 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1818 !!$ send_tag, commglobal, sqest(nsend), ierror )
1819 !!$ nrecv=nrecv+1
1820 !!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1821 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1822 !!$ endif
1823 !!$ endif
1824 !!$ if ( (tile==3) .and. (is==1) .and. (je==npy-1) ) then
1825 !!$ nsend=nsend+1
1826 !!$ qsend(nsend) = q(is+1,je+1)
1827 !!$ send_tag = 400+tile
1828 !!$ dest_gid = npes_x*(npes_y-1)
1829 !!$ recv_tag = 400+(tile-2)
1830 !!$ src_gid = dest_gid
1831 !!$ if (npes>6) then
1832 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1833 !!$ dest_gid, send_tag, &
1834 !!$ q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, &
1835 !!$ src_gid, recv_tag, &
1836 !!$ commglobal, Stats, ierror )
1837 !!$ nsend=nsend-1
1838 !!$ else
1839 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1840 !!$ send_tag, commglobal, sqest(nsend), ierror )
1841 !!$ nrecv=nrecv+1
1842 !!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1843 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1844 !!$ endif
1845 !!$ nsend=nsend+1
1846 !!$ qsend(nsend) = q(is,je)
1847 !!$ send_tag = 400+tile
1848 !!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x*(npes_y-1)
1849 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1850 !!$ send_tag, commglobal, sqest(nsend), ierror )
1851 !!$ endif
1852 !!$ if ( (tile==5) .and. (is==1) .and. (je==npy-1) ) then
1853 !!$ recv_tag = 400+(tile-2)
1854 !!$ src_gid = (tile-3)*npes_x*npes_y + npes_x*(npes_y-1)
1855 !!$ nrecv=nrecv+1
1856 !!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1857 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1858 !!$ endif
1859 !!$
1860 !!$! wait for comm to complete
1861 !!$ if (npes==6) then
1862 !!$ if (nsend>0) then
1863 !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror)
1864 !!$
1865 !!$
1866 !!$
1867 !!$ endif
1868 !!$ if (nrecv>0) then
1869 !!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror)
1870 !!$
1871 !!$
1872 !!$
1873 !!$ endif
1874 !!$ nsend=0 ; nrecv=0
1875 !!$ endif
1876 !!$
1877 !!$! Send to Even face UL 3 1-way
1878 !!$ if ( (ie==npx-1) .and. (je==npy-1) ) then
1879 !!$ nsend=nsend+1
1880 !!$ qsend(nsend) = q(ie,je+1)
1881 !!$ send_tag = 400+tile
1882 !!$ dest_gid = tile*npes_x*npes_y + npes_x*(npes_y-1)
1883 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1884 !!$ send_tag, commglobal, sqest(nsend), ierror )
1885 !!$ endif
1886 !!$
1887 !!$! Receive Odd Face LR 3 1-way
1888 !!$ if ( (ie==npx-1) .and. (js==1) ) then
1889 !!$ recv_tag = 200+(tile+1)
1890 !!$ src_gid = (tile-1)*npes_x*npes_y + npes_x*npes_y
1891 !!$ nrecv=nrecv+1
1892 !!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1893 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1894 !!$ endif
1895 !!$
1896 !!$ endif
1897 !!$
1898 !!$! wait for comm to complete
1899 !!$ if (nsend>0) then
1900 !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror)
1901 !!$
1902 !!$
1903 !!$
1904 !!$ endif
1905 !!$ if (nrecv>0) then
1906 !!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror)
1907 !!$
1908 !!$
1909 !!$
1910 !!$ endif
1911 !!$
1912 !!$ end subroutine mp_corner_comm
1913 !!$!
1914 !!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
1915 !!$!-------------------------------------------------------------------------------
1916 
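!
! The disabled mp_corner_comm above exchanged single ghosted B-grid corner
! values with MPI_SENDRECV (npes>6) or matched MPI_ISEND/MPI_IRECV pairs
! completed by MPI_WAITALL (npes==6).  A minimal, self-contained sketch of
! that nonblocking pattern, assuming only an MPI library; the rank pairing
! rule here is hypothetical (run on an even number of ranks):
!
  program corner_comm_sketch
  use mpi
  implicit none
  integer :: rank, npes, peer, ierror
  integer :: reqs(2), stats(MPI_STATUS_SIZE,2)
  double precision :: qsend, qrecv
  call MPI_INIT(ierror)
  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierror)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, npes, ierror)
  peer = ieor(rank, 1)                 ! pair ranks 0-1, 2-3, ...
  qsend = real(rank, kind=8)
  if (peer < npes) then
     call MPI_ISEND(qsend, 1, MPI_DOUBLE_PRECISION, peer, 100, &
                    MPI_COMM_WORLD, reqs(1), ierror)
     call MPI_IRECV(qrecv, 1, MPI_DOUBLE_PRECISION, peer, 100, &
                    MPI_COMM_WORLD, reqs(2), ierror)
     call MPI_WAITALL(2, reqs, stats, ierror)  ! complete send and recv
     print *, 'rank', rank, 'received', qrecv
  endif
  call MPI_FINALIZE(ierror)
  end program corner_comm_sketch
!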
1917 !-------------------------------------------------------------------------------
1918 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
1919 ! mp_gather_4d_r4 :: Call SPMD Gather
1920  subroutine mp_gather_4d_r4(q, i1,i2, j1,j2, idim, jdim, kdim, ldim)
1921  integer, intent(IN) :: i1,i2, j1,j2
1922  integer, intent(IN) :: idim, jdim, kdim, ldim
1923  real(kind=4), intent(INOUT):: q(idim,jdim,kdim,ldim)
1924  integer :: i,j,k,l,n,icnt
1925  integer :: Lsize, Lsize_buf(1)
1926  integer :: Gsize
1927  integer :: LsizeS(npes_this_grid), Ldispl(npes_this_grid), cnts(npes_this_grid)
1928  integer :: Ldims(5), Gdims(5*npes_this_grid)
1929  real(kind=4), allocatable, dimension(:) :: larr, garr
1930 
1931  ldims(1) = i1
1932  ldims(2) = i2
1933  ldims(3) = j1
1934  ldims(4) = j2
1935  ldims(5) = tile
1936  do l=1,npes_this_grid
1937  cnts(l) = 5
1938  ldispl(l) = 5*(l-1)
1939  enddo
1940  call mpp_gather(ldims, gdims)
1941 ! call MPI_GATHERV(Ldims, 5, MPI_INTEGER, Gdims, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror)
1942 
1943  lsize = ( (i2 - i1 + 1) * (j2 - j1 + 1) ) * kdim
1944  do l=1,npes_this_grid
1945  cnts(l) = 1
1946  ldispl(l) = l-1
1947  enddo
1948  lsizes(:)=1
1949  lsize_buf(1) = lsize
1950  call mpp_gather(lsize_buf, lsizes)
1951 ! call MPI_GATHERV(Lsize, 1, MPI_INTEGER, LsizeS, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror)
1952 
1953  allocate ( larr(lsize) )
1954  icnt = 1
1955  do k=1,kdim
1956  do j=j1,j2
1957  do i=i1,i2
1958  larr(icnt) = q(i,j,k,tile)
1959  icnt=icnt+1
1960  enddo
1961  enddo
1962  enddo
1963  ldispl(1) = 0
1964 ! call mp_bcst(LsizeS(1))
1965  call mpp_broadcast(lsizes, npes_this_grid, masterproc)
1966  gsize = lsizes(1)
1967  do l=2,npes_this_grid
1968 ! call mp_bcst(LsizeS(l))
1969  ldispl(l) = ldispl(l-1) + lsizes(l-1)
1970  gsize = gsize + lsizes(l)
1971  enddo
1972  allocate ( garr(gsize) )
1973 
1974  call mpp_gather(larr, lsize, garr, lsizes)
1975 ! call MPI_GATHERV(larr, Lsize, MPI_REAL, garr, LsizeS, Ldispl, MPI_REAL, masterproc, commglobal, ierror)
1976 
1977  if (gid==masterproc) then
1978  do n=2,npes_this_grid
1979  icnt=1
1980  do l=gdims( (n-1)*5 + 5 ), gdims( (n-1)*5 + 5 ) ! single pass: l is the sending rank's tile
1981  do k=1,kdim
1982  do j=gdims( (n-1)*5 + 3 ), gdims( (n-1)*5 + 4 )
1983  do i=gdims( (n-1)*5 + 1 ), gdims( (n-1)*5 + 2 )
1984  q(i,j,k,l) = garr(ldispl(n)+icnt)
1985  icnt=icnt+1
1986  enddo
1987  enddo
1988  enddo
1989  enddo
1990  enddo
1991  endif
1992  deallocate( larr )
1993  deallocate( garr )
1994 
1995  end subroutine mp_gather_4d_r4
1996 !
1997 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
1998 !-------------------------------------------------------------------------------
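!
! The gather above runs in three steps: every rank contributes its
! subdomain extents (Ldims), then the length of its packed buffer, and
! finally the packed data; the root converts the gathered lengths into
! receive offsets by a prefix sum before unpacking.  A self-contained
! sketch of that offset bookkeeping, with hypothetical per-rank sizes:
!
  program gather_displ_sketch
  implicit none
  integer, parameter :: npes = 4
  integer :: lsizes(npes), ldispl(npes), gsize, l
  lsizes = (/ 6, 6, 4, 4 /)     ! packed (i2-i1+1)*(j2-j1+1) per rank
  ldispl(1) = 0
  gsize = lsizes(1)
  do l = 2, npes                ! same prefix sum as the loop above
     ldispl(l) = ldispl(l-1) + lsizes(l-1)
     gsize = gsize + lsizes(l)
  enddo
  print *, 'displacements:', ldispl   ! 0 6 12 16
  print *, 'total length :', gsize    ! 20
  end program gather_displ_sketch
!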
1999 
2000 
2001 !-------------------------------------------------------------------------------
2002 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2003 !
2004 ! mp_gather_3d_r4 :: Call SPMD Gather
2005 !
2006  subroutine mp_gather_3d_r4(q, i1,i2, j1,j2, idim, jdim, ldim)
2007  integer, intent(IN) :: i1,i2, j1,j2
2008  integer, intent(IN) :: idim, jdim, ldim
2009  real(kind=4), intent(INOUT):: q(idim,jdim,ldim)
2010  integer :: i,j,l,n,icnt
2011  integer :: Lsize, Lsize_buf(1)
2012  integer :: Gsize
2013  integer :: LsizeS(npes_this_grid), Ldispl(npes_this_grid), cnts(npes_this_grid)
2014  integer :: Ldims(5), Gdims(5*npes_this_grid)
2015  real(kind=4), allocatable, dimension(:) :: larr, garr
2016 
2017  ldims(1) = i1
2018  ldims(2) = i2
2019  ldims(3) = j1
2020  ldims(4) = j2
2021  ldims(5) = tile
2022  do l=1,npes_this_grid
2023  cnts(l) = 5
2024  ldispl(l) = 5*(l-1)
2025  enddo
2026 ! call MPI_GATHERV(Ldims, 5, MPI_INTEGER, Gdims, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror)
2027  call mpp_gather(ldims, gdims)
2028 
2029  lsize = ( (i2 - i1 + 1) * (j2 - j1 + 1) )
2030  do l=1,npes_this_grid
2031  cnts(l) = 1
2032  ldispl(l) = l-1
2033  enddo
2034  lsizes(:)=1
2035  lsize_buf(1) = lsize
2036  call mpp_gather(lsize_buf, lsizes)
2037 ! call MPI_GATHERV(Lsize, 1, MPI_INTEGER, LsizeS, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror)
2038 
2039  allocate ( larr(lsize) )
2040  icnt = 1
2041  do j=j1,j2
2042  do i=i1,i2
2043  larr(icnt) = q(i,j,tile)
2044  icnt=icnt+1
2045  enddo
2046  enddo
2047  ldispl(1) = 0
2048 ! call mp_bcst(LsizeS(1))
2049  call mpp_broadcast(lsizes, npes_this_grid, masterproc)
2050  gsize = lsizes(1)
2051  do l=2,npes_this_grid
2052 ! call mp_bcst(LsizeS(l))
2053  ldispl(l) = ldispl(l-1) + lsizes(l-1)
2054  gsize = gsize + lsizes(l)
2055  enddo
2056  allocate ( garr(gsize) )
2057  call mpp_gather(larr, lsize, garr, lsizes)
2058 ! call MPI_GATHERV(larr, Lsize, MPI_REAL, garr, LsizeS, Ldispl, MPI_REAL, masterproc, commglobal, ierror)
2059  if (gid==masterproc) then
2060  do n=2,npes_this_grid
2061  icnt=1
2062  do l=gdims( (n-1)*5 + 5 ), gdims( (n-1)*5 + 5 ) ! single pass: l is the sending rank's tile
2063  do j=gdims( (n-1)*5 + 3 ), gdims( (n-1)*5 + 4 )
2064  do i=gdims( (n-1)*5 + 1 ), gdims( (n-1)*5 + 2 )
2065  q(i,j,l) = garr(ldispl(n)+icnt)
2066  icnt=icnt+1
2067  enddo
2068  enddo
2069  enddo
2070  enddo
2071  endif
2072  deallocate( larr )
2073  deallocate( garr )
2074 
2075  end subroutine mp_gather_3d_r4
2076 !
2077 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2078 !-------------------------------------------------------------------------------
2079 
2080 !-------------------------------------------------------------------------------
2081 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2082 !
2083 ! mp_gather_3d_r8 :: Call SPMD Gather
2084 !
2085  subroutine mp_gather_3d_r8(q, i1,i2, j1,j2, idim, jdim, ldim)
2086  integer, intent(IN) :: i1,i2, j1,j2
2087  integer, intent(IN) :: idim, jdim, ldim
2088  real(kind=8), intent(INOUT):: q(idim,jdim,ldim)
2089  integer :: i,j,l,n,icnt
2090  integer :: Lsize, Lsize_buf(1)
2091  integer :: Gsize
2092  integer :: LsizeS(npes_this_grid), Ldispl(npes_this_grid), cnts(npes_this_grid)
2093  integer :: Ldims(5), Gdims(5*npes_this_grid)
2094  real(kind=8), allocatable, dimension(:) :: larr, garr
2095 
2096  ldims(1) = i1
2097  ldims(2) = i2
2098  ldims(3) = j1
2099  ldims(4) = j2
2100  ldims(5) = tile
2101  do l=1,npes_this_grid
2102  cnts(l) = 5
2103  ldispl(l) = 5*(l-1)
2104  enddo
2105 ! call MPI_GATHER(Ldims, 5, MPI_INTEGER, Gdims, cnts, MPI_INTEGER, masterproc, commglobal, ierror)
2106  call mpp_gather(ldims, gdims)
2107  lsize = ( (i2 - i1 + 1) * (j2 - j1 + 1) )
2108  do l=1,npes_this_grid
2109  cnts(l) = 1
2110  ldispl(l) = l-1
2111  enddo
2112  lsizes(:)=0
2113 
2114 ! call MPI_GATHERV(Lsize, 1, MPI_INTEGER, LsizeS, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror)
2115  lsize_buf(1) = lsize
2116  call mpp_gather(lsize_buf, lsizes)
2117 
2118  allocate ( larr(lsize) )
2119  icnt = 1
2120  do j=j1,j2
2121  do i=i1,i2
2122  larr(icnt) = q(i,j,tile)
2123  icnt=icnt+1
2124  enddo
2125  enddo
2126  ldispl(1) = 0
2127  call mpp_broadcast(lsizes, npes_this_grid, masterproc)
2128 ! call mp_bcst(LsizeS(1))
2129  gsize = lsizes(1)
2130  do l=2,npes_this_grid
2131 ! call mp_bcst(LsizeS(l))
2132  ldispl(l) = ldispl(l-1) + lsizes(l-1)
2133  gsize = gsize + lsizes(l)
2134  enddo
2135 
2136  allocate ( garr(gsize) )
2137  call mpp_gather(larr, lsize, garr, lsizes)
2138 ! call MPI_GATHERV(larr, Lsize, MPI_DOUBLE_PRECISION, garr, LsizeS, Ldispl, MPI_DOUBLE_PRECISION, masterproc, commglobal, ierror)
2139  if (gid==masterproc) then
2140  do n=2,npes_this_grid
2141  icnt=1
2142  do l=gdims( (n-1)*5 + 5 ), gdims( (n-1)*5 + 5 ) ! single pass: l is the sending rank's tile
2143  do j=gdims( (n-1)*5 + 3 ), gdims( (n-1)*5 + 4 )
2144  do i=gdims( (n-1)*5 + 1 ), gdims( (n-1)*5 + 2 )
2145  q(i,j,l) = garr(ldispl(n)+icnt)
2146  icnt=icnt+1
2147  enddo
2148  enddo
2149  enddo
2150  enddo
2151  endif
2152  deallocate( larr )
2153  deallocate( garr )
2154 
2155  end subroutine mp_gather_3d_r8
2156 !
2157 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2158 !-------------------------------------------------------------------------------
2159 
2160 !-------------------------------------------------------------------------------
2161 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2162 !
2163 ! mp_bcst_i :: Call SPMD broadcast
2164 !
2165  subroutine mp_bcst_i(q)
2166  integer, intent(INOUT) :: q
2167 
2168  call mpi_bcast(q, 1, mpi_integer, masterproc, commglobal, ierror)
2169 
2170  end subroutine mp_bcst_i
2171 !
2172 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2173 !-------------------------------------------------------------------------------
2174 
2175 !-------------------------------------------------------------------------------
2176 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2177 !
2178 ! mp_bcst_r4 :: Call SPMD broadcast
2179 !
2180  subroutine mp_bcst_r4(q)
2181  real(kind=4), intent(INOUT) :: q
2182 
2183  call mpi_bcast(q, 1, mpi_real, masterproc, commglobal, ierror)
2184 
2185  end subroutine mp_bcst_r4
2186 !
2187 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2188 !-------------------------------------------------------------------------------
2189 
2190 !-------------------------------------------------------------------------------
2191 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2192 !
2193 ! mp_bcst_r8 :: Call SPMD broadcast
2194 !
2195  subroutine mp_bcst_r8(q)
2196  real(kind=8), intent(INOUT) :: q
2197 
2198  call mpi_bcast(q, 1, mpi_double_precision, masterproc, commglobal, ierror)
2199 
2200  end subroutine mp_bcst_r8
2201 !
2202 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2203 !-------------------------------------------------------------------------------
2204 
2205 !-------------------------------------------------------------------------------
2206 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2207 !
2208 ! mp_bcst_1d_r4 :: Call SPMD broadcast
2209 !
2210  subroutine mp_bcst_1d_r4(q, idim)
2211  integer, intent(IN) :: idim
2212  real(kind=4), intent(INOUT) :: q(idim)
2213 
2214  call mpi_bcast(q, idim, mpi_real, masterproc, commglobal, ierror)
2215 
2216  end subroutine mp_bcst_1d_r4
2217 !
2218 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2219 !-------------------------------------------------------------------------------
2220 
2221 !-------------------------------------------------------------------------------
2222 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2223 !
2224 ! mp_bcst_1d_r8 :: Call SPMD broadcast
2225 !
2226  subroutine mp_bcst_1d_r8(q, idim)
2227  integer, intent(IN) :: idim
2228  real(kind=8), intent(INOUT) :: q(idim)
2229 
2230  call mpi_bcast(q, idim, mpi_double_precision, masterproc, commglobal, ierror)
2231 
2232  end subroutine mp_bcst_1d_r8
2233 !
2234 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2235 !-------------------------------------------------------------------------------
2236 
2237 !-------------------------------------------------------------------------------
2238 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2239 !
2240 ! mp_bcst_2d_r4 :: Call SPMD broadcast
2241 !
2242  subroutine mp_bcst_2d_r4(q, idim, jdim)
2243  integer, intent(IN) :: idim, jdim
2244  real(kind=4), intent(INOUT) :: q(idim,jdim)
2245 
2246  call mpi_bcast(q, idim*jdim, mpi_real, masterproc, commglobal, ierror)
2247 
2248  end subroutine mp_bcst_2d_r4
2249 !
2250 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2251 !-------------------------------------------------------------------------------
2252 
2253 !-------------------------------------------------------------------------------
2254 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2255 !
2256 ! mp_bcst_2d_r8 :: Call SPMD broadcast
2257 !
2258  subroutine mp_bcst_2d_r8(q, idim, jdim)
2259  integer, intent(IN) :: idim, jdim
2260  real(kind=8), intent(INOUT) :: q(idim,jdim)
2261 
2262  call mpi_bcast(q, idim*jdim, mpi_double_precision, masterproc, commglobal, ierror)
2263 
2264  end subroutine mp_bcst_2d_r8
2265 !
2266 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2267 !-------------------------------------------------------------------------------
2268 
2269 !-------------------------------------------------------------------------------
2270 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2271 !
2272 ! mp_bcst_3d_r4 :: Call SPMD broadcast
2273 !
2274  subroutine mp_bcst_3d_r4(q, idim, jdim, kdim)
2275  integer, intent(IN) :: idim, jdim, kdim
2276  real(kind=4), intent(INOUT) :: q(idim,jdim,kdim)
2277 
2278  call mpi_bcast(q, idim*jdim*kdim, mpi_real, masterproc, commglobal, ierror)
2279 
2280  end subroutine mp_bcst_3d_r4
2281 !
2282 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2283 !-------------------------------------------------------------------------------
2284 
2285 !-------------------------------------------------------------------------------
2286 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2287 !
2288 ! mp_bcst_3d_r8 :: Call SPMD broadcast
2289 !
2290  subroutine mp_bcst_3d_r8(q, idim, jdim, kdim)
2291  integer, intent(IN) :: idim, jdim, kdim
2292  real(kind=8), intent(INOUT) :: q(idim,jdim,kdim)
2293 
2294  call mpi_bcast(q, idim*jdim*kdim, mpi_double_precision, masterproc, commglobal, ierror)
2295 
2296  end subroutine mp_bcst_3d_r8
2297 !
2298 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2299 !-------------------------------------------------------------------------------
2300 
2301 !-------------------------------------------------------------------------------
2302 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2303 !
2304 ! mp_bcst_4d_r4 :: Call SPMD broadcast
2305 !
2306  subroutine mp_bcst_4d_r4(q, idim, jdim, kdim, ldim)
2307  integer, intent(IN) :: idim, jdim, kdim, ldim
2308  real(kind=4), intent(INOUT) :: q(idim,jdim,kdim,ldim)
2309 
2310  call mpi_bcast(q, idim*jdim*kdim*ldim, mpi_real, masterproc, commglobal, ierror)
2311 
2312  end subroutine mp_bcst_4d_r4
2313 !
2314 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2315 !-------------------------------------------------------------------------------
2316 
2317 !-------------------------------------------------------------------------------
2318 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2319 !
2320 ! mp_bcst_4d_r8 :: Call SPMD broadcast
2321 !
2322  subroutine mp_bcst_4d_r8(q, idim, jdim, kdim, ldim)
2323  integer, intent(IN) :: idim, jdim, kdim, ldim
2324  real(kind=8), intent(INOUT) :: q(idim,jdim,kdim,ldim)
2325 
2326  call mpi_bcast(q, idim*jdim*kdim*ldim, mpi_double_precision, masterproc, commglobal, ierror)
2327 
2328  end subroutine mp_bcst_4d_r8
2329 !
2330 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2331 !-------------------------------------------------------------------------------
2332 
2333 !-------------------------------------------------------------------------------
2334 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2335 !
2336 ! mp_bcst_3d_i :: Call SPMD broadcast
2337 !
2338  subroutine mp_bcst_3d_i(q, idim, jdim, kdim)
2339  integer, intent(IN) :: idim, jdim, kdim
2340  integer, intent(INOUT) :: q(idim,jdim,kdim)
2341 
2342  call mpi_bcast(q, idim*jdim*kdim, mpi_integer, masterproc, commglobal, ierror)
2343 
2344  end subroutine mp_bcst_3d_i
2345 !
2346 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2347 !-------------------------------------------------------------------------------
2348 
2349 !-------------------------------------------------------------------------------
2350 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2351 !
2352 ! mp_bcst_1d_i :: Call SPMD broadcast
2353 !
2354  subroutine mp_bcst_1d_i(q, idim)
2355  integer, intent(IN) :: idim
2356  integer, intent(INOUT) :: q(idim)
2357 
2358  call mpi_bcast(q, idim, mpi_integer, masterproc, commglobal, ierror)
2359 
2360  end subroutine mp_bcst_1d_i
2361 !
2362 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2363 !-------------------------------------------------------------------------------
2364 
2365 !-------------------------------------------------------------------------------
2366 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2367 !
2368 ! mp_bcst_2d_i :: Call SPMD broadcast
2369 !
2370  subroutine mp_bcst_2d_i(q, idim, jdim)
2371  integer, intent(IN) :: idim, jdim
2372  integer, intent(INOUT) :: q(idim,jdim)
2373 
2374  call mpi_bcast(q, idim*jdim, mpi_integer, masterproc, commglobal, ierror)
2375 
2376  end subroutine mp_bcst_2d_i
2377 !
2378 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2379 !-------------------------------------------------------------------------------
2380 !-------------------------------------------------------------------------------
2381 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2382 !
2383 ! mp_bcst_4d_i :: Call SPMD broadcast
2384 !
2385  subroutine mp_bcst_4d_i(q, idim, jdim, kdim, ldim)
2386  integer, intent(IN) :: idim, jdim, kdim, ldim
2387  integer, intent(INOUT) :: q(idim,jdim,kdim,ldim)
2388 
2389  call mpi_bcast(q, idim*jdim*kdim*ldim, mpi_integer, masterproc, commglobal, ierror)
2390 
2391  end subroutine mp_bcst_4d_i
2392 !
2393 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2394 !-------------------------------------------------------------------------------
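!
! The mp_bcst_* wrappers above differ only in buffer rank and MPI datatype;
! each replicates masterproc's value to every rank of commglobal.  A
! minimal, self-contained sketch of the underlying call, assuming only an
! MPI library (root rank 0 and the value 63 are arbitrary):
!
  program bcast_sketch
  use mpi
  implicit none
  integer :: rank, ierror, npz
  call MPI_INIT(ierror)
  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierror)
  npz = -1
  if (rank == 0) npz = 63       ! only the root holds the value
  call MPI_BCAST(npz, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierror)
  print *, 'rank', rank, 'npz =', npz   ! every rank now prints 63
  call MPI_FINALIZE(ierror)
  end program bcast_sketch
!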
2395 
2396 
2397 !-------------------------------------------------------------------------------
2398 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2399 !
2400 ! mp_reduce_max_r4_1d :: Call SPMD REDUCE_MAX
2401 !
2402  subroutine mp_reduce_max_r4_1d(mymax,npts)
2403  integer, intent(IN) :: npts
2404  real(kind=4), intent(INOUT) :: mymax(npts)
2405 
2406  real(kind=4) :: gmax(npts)
2407 
2408  call mpi_allreduce( mymax, gmax, npts, mpi_real, mpi_max, &
2409  commglobal, ierror )
2410 
2411  mymax = gmax
2412 
2413  end subroutine mp_reduce_max_r4_1d
2414 !
2415 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2416 !-------------------------------------------------------------------------------
2417 
2418 
2419 !-------------------------------------------------------------------------------
2420 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2421 !
2422 ! mp_reduce_max_r8_1d :: Call SPMD REDUCE_MAX
2423 !
2424  subroutine mp_reduce_max_r8_1d(mymax,npts)
2425  integer, intent(IN) :: npts
2426  real(kind=8), intent(INOUT) :: mymax(npts)
2427 
2428  real(kind=8) :: gmax(npts)
2429 
2430  call mpi_allreduce( mymax, gmax, npts, mpi_double_precision, mpi_max, &
2431  commglobal, ierror )
2432 
2433  mymax = gmax
2434 
2435  end subroutine mp_reduce_max_r8_1d
2436 !
2437 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2438 !-------------------------------------------------------------------------------
2439 
2440 
2441 !-------------------------------------------------------------------------------
2442 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2443 !
2444 ! mp_reduce_max_r4 :: Call SPMD REDUCE_MAX
2445 !
2446  subroutine mp_reduce_max_r4(mymax)
2447  real(kind=4), intent(INOUT) :: mymax
2448 
2449  real(kind=4) :: gmax
2450 
2451  call mpi_allreduce( mymax, gmax, 1, mpi_real, mpi_max, &
2452  commglobal, ierror )
2453 
2454  mymax = gmax
2455 
2456  end subroutine mp_reduce_max_r4
2457 
2458 !-------------------------------------------------------------------------------
2459 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2460 !
2461 ! mp_reduce_max_r8 :: Call SPMD REDUCE_MAX
2462 !
2463  subroutine mp_reduce_max_r8(mymax)
2464  real(kind=8), intent(INOUT) :: mymax
2465 
2466  real(kind=8) :: gmax
2467 
2468  call mpi_allreduce( mymax, gmax, 1, mpi_double_precision, mpi_max, &
2469  commglobal, ierror )
2470 
2471  mymax = gmax
2472 
2473  end subroutine mp_reduce_max_r8
2474 
2475  subroutine mp_reduce_min_r4(mymin)
2476  real(kind=4), intent(INOUT) :: mymin
2477 
2478  real(kind=4) :: gmin
2479 
2480  call mpi_allreduce( mymin, gmin, 1, mpi_real, mpi_min, &
2481  commglobal, ierror )
2482 
2483  mymin = gmin
2484 
2485  end subroutine mp_reduce_min_r4
2486 
2487  subroutine mp_reduce_min_r8(mymin)
2488  real(kind=8), intent(INOUT) :: mymin
2489 
2490  real(kind=8) :: gmin
2491 
2492  call mpi_allreduce( mymin, gmin, 1, mpi_double_precision, mpi_min, &
2493  commglobal, ierror )
2494 
2495  mymin = gmin
2496 
2497  end subroutine mp_reduce_min_r8
2498 !
2499 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2500 !-------------------------------------------------------------------------------
2501 
2502 !-------------------------------------------------------------------------------
2503 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2504 !
2505 ! mp_reduce_max_i :: Call SPMD REDUCE_MAX
2506 !
2507  subroutine mp_reduce_max_i(mymax)
2508  integer, intent(INOUT) :: mymax
2509 
2510  integer :: gmax
2511 
2512  call mpi_allreduce( mymax, gmax, 1, mpi_integer, mpi_max, &
2513  commglobal, ierror )
2514 
2515  mymax = gmax
2516 
2517  end subroutine mp_reduce_max_i
2518 !
2519 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2520 !-------------------------------------------------------------------------------
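!
! Each mp_reduce_max/min routine above reduces into a temporary and copies
! the result back, so the argument is updated in place on every rank.  A
! minimal, self-contained sketch of that shape; the per-rank value is a
! hypothetical stand-in for, say, a local Courant number:
!
  program allreduce_max_sketch
  use mpi
  implicit none
  integer :: rank, ierror
  real(kind=8) :: mymax, gmax
  call MPI_INIT(ierror)
  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierror)
  mymax = real(rank, kind=8)          ! hypothetical local maximum
  call MPI_ALLREDUCE(mymax, gmax, 1, MPI_DOUBLE_PRECISION, MPI_MAX, &
                     MPI_COMM_WORLD, ierror)
  mymax = gmax                        ! in-place update, as above
  print *, 'rank', rank, 'global max =', mymax
  call MPI_FINALIZE(ierror)
  end program allreduce_max_sketch
!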
2521 
2522 !-------------------------------------------------------------------------------
2523 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2524 !
2525 ! mp_reduce_sum_r4 :: Call SPMD REDUCE_SUM
2526 !
2527  subroutine mp_reduce_sum_r4(mysum)
2528  real(kind=4), intent(INOUT) :: mysum
2529 
2530  real(kind=4) :: gsum
2531 
2532  call mpi_allreduce( mysum, gsum, 1, mpi_real, mpi_sum, &
2533  commglobal, ierror )
2534 
2535  mysum = gsum
2536 
2537  end subroutine mp_reduce_sum_r4
2538 !
2539 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2540 !-------------------------------------------------------------------------------
2541 
2542 !-------------------------------------------------------------------------------
2543 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2544 !
2545 ! mp_reduce_sum_r8 :: Call SPMD REDUCE_SUM
2546 !
2547  subroutine mp_reduce_sum_r8(mysum)
2548  real(kind=8), intent(INOUT) :: mysum
2549 
2550  real(kind=8) :: gsum
2551 
2552  call mpi_allreduce( mysum, gsum, 1, mpi_double_precision, mpi_sum, &
2553  commglobal, ierror )
2554 
2555  mysum = gsum
2556 
2557  end subroutine mp_reduce_sum_r8
2558 !
2559 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2560 !-------------------------------------------------------------------------------
2561 
2562 
2563 !-------------------------------------------------------------------------------
2564 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
2565 ! !
2566 !
2567 ! mp_reduce_sum_r4_1darr :: Call SPMD REDUCE_SUM
2568 !
2569  subroutine mp_reduce_sum_r4_1darr(mysum, npts)
2570  integer, intent(in) :: npts
2571  real(kind=4), intent(inout) :: mysum(npts)
2572  real(kind=4) :: gsum(npts)
2573 
2574  gsum = 0.0
2575  call mpi_allreduce( mysum, gsum, npts, mpi_real, mpi_sum, &
2576  commglobal, ierror )
2577 
2578  mysum = gsum
2579 
2580  end subroutine mp_reduce_sum_r4_1darr
2581 !
2582 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2583 ! !
2584 !-------------------------------------------------------------------------------
2585 
2586 !-------------------------------------------------------------------------------
2587 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
2588 ! !
2589 !
2590 ! mp_reduce_sum_r4_2darr :: Call SPMD REDUCE_SUM
2591 !
2592  subroutine mp_reduce_sum_r4_2darr(mysum, npts1,npts2)
2593  integer, intent(in) :: npts1,npts2
2594  real(kind=4), intent(inout) :: mysum(npts1,npts2)
2595  real(kind=4) :: gsum(npts1,npts2)
2596 
2597  gsum = 0.0
2598  call mpi_allreduce( mysum, gsum, npts1*npts2, mpi_real, mpi_sum, &
2599  commglobal, ierror )
2600 
2601  mysum = gsum
2602 
2603  end subroutine mp_reduce_sum_r4_2darr
2604 !
2605 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2606 ! !
2607 !-------------------------------------------------------------------------------
2608 
2609 !-------------------------------------------------------------------------------
2610 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2611 !
2612 ! mp_reduce_sum_r4_1d :: Call SPMD REDUCE_SUM
2613 !
2614  subroutine mp_reduce_sum_r4_1d(mysum, sum1d, npts)
2615  integer, intent(in) :: npts
2616  real(kind=4), intent(in) :: sum1d(npts)
2617  real(kind=4), intent(INOUT) :: mysum
2618 
2619  real(kind=4) :: gsum
2620  integer :: i
2621 
2622  mysum = 0.0
2623  do i=1,npts
2624  mysum = mysum + sum1d(i)
2625  enddo
2626 
2627  call mpi_allreduce( mysum, gsum, 1, mpi_real, mpi_sum, &
2628  commglobal, ierror )
2629 
2630  mysum = gsum
2631 
2632  end subroutine mp_reduce_sum_r4_1d
2633 !
2634 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2635 !-------------------------------------------------------------------------------
2636 
2637 !-------------------------------------------------------------------------------
2638 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2639 !
2640 ! mp_reduce_sum_r8_1d :: Call SPMD REDUCE_SUM
2641 !
2642  subroutine mp_reduce_sum_r8_1d(mysum, sum1d, npts)
2643  integer, intent(in) :: npts
2644  real(kind=8), intent(in) :: sum1d(npts)
2645  real(kind=8), intent(INOUT) :: mysum
2646 
2647  real(kind=8) :: gsum
2648  integer :: i
2649 
2650  mysum = 0.0
2651  do i=1,npts
2652  mysum = mysum + sum1d(i)
2653  enddo
2654 
2655  call mpi_allreduce( mysum, gsum, 1, mpi_double_precision, mpi_sum, &
2656  commglobal, ierror )
2657 
2658  mysum = gsum
2659 
2660  end subroutine mp_reduce_sum_r8_1d
2661 !
2662 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2663 !-------------------------------------------------------------------------------
2664 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
2665 ! !
2666 !
2667 ! mp_reduce_sum_r8_1darr :: Call SPMD REDUCE_SUM
2668 !
2669  subroutine mp_reduce_sum_r8_1darr(mysum, npts)
2670  integer, intent(in) :: npts
2671  real(kind=8), intent(inout) :: mysum(npts)
2672  real(kind=8) :: gsum(npts)
2673 
2674  gsum = 0.0
2675 
2676  call mpi_allreduce( mysum, gsum, npts, mpi_double_precision, &
2677  mpi_sum, &
2678  commglobal, ierror )
2679 
2680  mysum = gsum
2681 
2682  end subroutine mp_reduce_sum_r8_1darr
2683 !
2684 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2685 ! !
2686 
2687 !-------------------------------------------------------------------------------
2688 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
2689 ! !
2690 !
2691 ! mp_reduce_sum_r8_2darr :: Call SPMD REDUCE_SUM
2692 !
2693  subroutine mp_reduce_sum_r8_2darr(mysum, npts1,npts2)
2694  integer, intent(in) :: npts1,npts2
2695  real(kind=8), intent(inout) :: mysum(npts1,npts2)
2696  real(kind=8) :: gsum(npts1,npts2)
2697 
2698  gsum = 0.0
2699 
2700  call mpi_allreduce( mysum, gsum, npts1*npts2, &
2701  mpi_double_precision, mpi_sum, &
2702  commglobal, ierror )
2703 
2704  mysum = gsum
2705 
2706  end subroutine mp_reduce_sum_r8_2darr
2707 !
2708 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2709 ! !
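!
! The mp_reduce_sum_*_1d routines above collapse the local contributions
! to a scalar before the allreduce, so the message stays one word long
! regardless of npts.  A minimal, self-contained sketch of that pattern
! (three contributions of 1.0 per rank are hypothetical):
!
  program reduce_sum_sketch
  use mpi
  implicit none
  integer :: rank, i, ierror
  real(kind=8) :: mysum, gsum, sum1d(3)
  call MPI_INIT(ierror)
  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierror)
  sum1d = 1.0d0
  mysum = 0.0d0                 ! local partial sum first, as above
  do i = 1, size(sum1d)
     mysum = mysum + sum1d(i)
  enddo
  call MPI_ALLREDUCE(mysum, gsum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, &
                     MPI_COMM_WORLD, ierror)
  print *, 'rank', rank, 'global sum =', gsum   ! 3*npes on every rank
  call MPI_FINALIZE(ierror)
  end program reduce_sum_sketch
!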
2710 
2711 #else
2712  implicit none
2713  private
2714  integer :: masterproc = 0
2715  integer :: gid = 0
2716  integer, parameter:: ng = 3 ! Number of ghost zones required
2717  public gid, masterproc, ng
2718 #endif
2719 
2720  end module fv_mp_mod
2721 !-------------------------------------------------------------------------------
2722 
2723 