WAVEWATCH III  beta 0.0.1
yowexchangemodule Module Reference

Has only the ghost nodes assigned to a neighbor domain. More...

Data Types

type  t_neighbordomain
 Holds some data belonging to a neighbor domain. More...
 

Functions/Subroutines

subroutine creatempitype (this)
 
subroutine, public initnbrdomains (nConnD)
 
subroutine, public creatempitypes ()
 
subroutine, public pdlib_exchange1dreal (U)
 exchange values in U. More...
 
subroutine, public pdlib_exchange2dreal (U)
 
subroutine, public setdimsize (second)
 set the size of the second and third dimension for exchange More...
 
subroutine, public finalizeexchangemodule ()
 
subroutine, public pdlib_exchange2dreal_zero (U)
 

Variables

type(t_neighbordomain), dimension(:), allocatable, public neighbordomains
 Knows, for every neighbor domain, which nodes we must send to or receive from; neighbor domains are numbered from 1 to nConnDomains. More...
 
integer, public nconndomains = 0
 Number of neighbor domains. More...
 
integer, public n2nddim = 1
 number of the second dimension for exchange More...
 
integer, public nnthdim = 1
 number of the second dimension for exchange (nth only for wave model) More...
 

Detailed Description

Has only the ghost nodes assigned to a neighbor domain.

Function/Subroutine Documentation

◆ creatempitype()

subroutine yowexchangemodule::creatempitype ( class(t_neighbordomain), intent(inout)  this)

Definition at line 151 of file yowexchangeModule.F90.

151  use yowerr
152  use mpi
153  use yownodepool, only: ghostgl, np, ipgl
154  use yowdatapool, only: rtype, itype
155  implicit none
156  class(t_neighborDomain), intent(inout) :: this
157 
158  integer :: ierr
159  integer :: dsplSend(this%numNodesToSend)
160  integer :: dsplRecv(this%numNodesToReceive)
161 
162  ! MPI datatypes for size(U) == npa+1 U(0:npa)
163  dsplsend = ipgl(this%nodesToSend)
164  dsplrecv = ghostgl(this%nodesToReceive) + np
165 
166  ! p1D real
167  call mpi_type_create_indexed_block(this%numNodesToSend, 1, dsplsend, rtype, this%p1DRsendType_zero, ierr)
168  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
169  call mpi_type_commit(this%p1DRsendType_zero,ierr)
170  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
171 
172  call mpi_type_create_indexed_block(this%numNodesToReceive, 1, dsplrecv, rtype, this%p1DRrecvType_zero, ierr)
173  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
174  call mpi_type_commit(this%p1DRrecvType_zero,ierr)
175  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
176 
177  ! MPI datatypes for size(U) == npa U(1:npa)
178  dsplsend(:) = dsplsend(:) - 1 ! C count from 0; FORTRAN count from 1
179  dsplrecv(:) = dsplrecv(:) - 1
180 
181  ! p1D real
182  call mpi_type_create_indexed_block(this%numNodesToSend, 1, dsplsend, rtype, this%p1DRsendType,ierr)
183  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
184  call mpi_type_commit(this%p1DRsendType,ierr)
185  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
186 
187  call mpi_type_create_indexed_block(this%numNodesToReceive, 1, dsplrecv, rtype, this%p1DRrecvType,ierr)
188  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
189  call mpi_type_commit(this%p1DRrecvType,ierr)
190  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
191 
192  ! MPI datatypes for size(U) == npa+1 U(0:npa)
193  ! p2D real second dim is n2ndDim long
194  dsplsend = (ipgl(this%nodesToSend)) * n2nddim
195  dsplrecv = (ghostgl(this%nodesToReceive) + np) * n2nddim
196  call mpi_type_create_indexed_block(this%numNodesToSend, n2nddim, dsplsend, rtype, this%p2DRsendType_zero,ierr)
197  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
198  call mpi_type_commit(this%p2DRsendType_zero,ierr)
199  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
200 
201  call mpi_type_create_indexed_block(this%numNodesToReceive, n2nddim, dsplrecv, rtype, this%p2DRrecvType_zero,ierr)
202  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
203  call mpi_type_commit(this%p2DRrecvType_zero,ierr)
204  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
205 
206  ! MPI datatypes for size(U) == npa U(1:npa)
207  ! p2D real second dim is n2ndDim long
208  dsplsend = (ipgl(this%nodesToSend)-1) * n2nddim
209  dsplrecv = (ghostgl(this%nodesToReceive) + np -1) * n2nddim
210  call mpi_type_create_indexed_block(this%numNodesToSend, n2nddim, dsplsend, rtype, this%p2DRsendType1,ierr)
211  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
212  call mpi_type_commit(this%p2DRsendType1,ierr)
213  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
214 
215  call mpi_type_create_indexed_block(this%numNodesToReceive, n2nddim, dsplrecv, rtype, this%p2DRrecvType1,ierr)
216  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
217  call mpi_type_commit(this%p2DRrecvType1,ierr)
218  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
219 
220 

References yownodepool::ghostgl, yownodepool::ipgl, yowdatapool::itype, n2nddim, yownodepool::np, yowerr::parallel_abort(), and yowdatapool::rtype.

◆ creatempitypes()

subroutine, public yowexchangemodule::creatempitypes

Definition at line 236 of file yowexchangeModule.F90.

236  implicit none
237  integer :: i
238 
239  do i=1, nconndomains
240  call neighbordomains(i)%createMPIType()
241  end do

References nconndomains, and neighbordomains.

Referenced by yowpdlibmain::exchangeghostids().

◆ finalizeexchangemodule()

subroutine, public yowexchangemodule::finalizeexchangemodule

Definition at line 381 of file yowexchangeModule.F90.

381  implicit none
382  integer :: i
383 
384  if(allocated(neighbordomains)) then
385  do i=1, size(neighbordomains)
386  call neighbordomains(i)%finalize()
387  end do
388  deallocate(neighbordomains)
389  endif

References yowdatapool::comm, yowdatapool::myrank, nconndomains, neighbordomains, yownodepool::npa, yowerr::parallel_abort(), and yowdatapool::rkind.

Referenced by yowpdlibmain::finalizepd(), and initnbrdomains().

◆ initnbrdomains()

subroutine, public yowexchangemodule::initnbrdomains ( integer, intent(in)  nConnD)

Definition at line 224 of file yowexchangeModule.F90.

224  use yowerr
225  implicit none
226  integer, intent(in) :: nConnD
227  integer :: stat
228 
229  call finalizeexchangemodule()
230  nconndomains = nconnd
231  allocate(neighbordomains(nconndomains), stat=stat)
232  if(stat/=0) CALL abort('neighborDomains allocation failure')

References yowerr::abort(), finalizeexchangemodule(), nconndomains, and neighbordomains.

Referenced by yowpdlibmain::findconndomains().

◆ pdlib_exchange1dreal()

subroutine, public yowexchangemodule::pdlib_exchange1dreal ( real(kind=rkind), dimension(:), intent(inout)  U)

exchange values in U.

Parameters
[in,out]U array with values to exchange, np+ng long. Sends values from U(1:np) to other threads. Receives values from other threads and updates U(np+1:np+ng).
Note
MPI recv tag: 10000 + MPI rank
MPI send tag: 10000 + neighbor MPI rank

Definition at line 251 of file yowexchangeModule.F90.

251  use yowdatapool, only: comm, myrank, rkind
252  use yownodepool, only: t_node, nodes_global, np, ng, ghosts, npa
253  use yowerr
254  use mpi
255  implicit none
256  real(kind=rkind), intent(inout) :: u(:)
257 
258  integer :: i, ierr, tag
259  integer :: sendRqst(nConnDomains), recvRqst(nConnDomains)
260  integer :: recvStat(MPI_STATUS_SIZE, nConnDomains), sendStat(MPI_STATUS_SIZE, nConnDomains)
261  character(len=140) :: errmsg
262 
263  if(size(u) /= npa) then
264  WRITE(errmsg, *) 'size(U)=', size(u), ' but npa=', npa
265  CALL abort(errmsg)
266  endif
267 
268  ! post receives
269  do i=1, nconndomains
270  tag = 10000 + myrank
271  call mpi_irecv(u, 1, neighbordomains(i)%p1DRrecvType, &
272  neighbordomains(i)%domainID-1, tag, comm, &
273  recvrqst(i), ierr)
274  if(ierr/=mpi_success) then
275  CALL parallel_abort("MPI_IRecv", ierr)
276  endif
277  enddo
278 
279  ! post sends
280  do i=1, nconndomains
281  tag = 10000 + (neighbordomains(i)%domainID-1)
282  call mpi_isend(u, 1, neighbordomains(i)%p1DRsendType, &
283  neighbordomains(i)%domainID-1, tag, comm, &
284  sendrqst(i), ierr);
285  if(ierr/=mpi_success) then
286  CALL parallel_abort("MPI_ISend", ierr)
287  endif
288  end do
289 
290  ! Wait for completion
291  call mpi_waitall(nconndomains, recvrqst, recvstat,ierr)
292  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
293  call mpi_waitall(nconndomains, sendrqst, sendstat,ierr)
294  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)

References yowerr::abort(), yowdatapool::comm, yownodepool::ghosts(), yowdatapool::myrank, nconndomains, neighbordomains, yownodepool::ng, yownodepool::nodes_global, yownodepool::np, yownodepool::npa, yowerr::parallel_abort(), and yowdatapool::rkind.

Referenced by yowpdlibmain::computetria_ien_si_ccon(), w3wavset::differentiate_xydir_mapsta(), w3wavset::differentiate_xydir_native(), pdlib_w3profsmd::pdlib_w3xypfsfct2(), pdlib_w3profsmd::pdlib_w3xypfsn2(), pdlib_w3profsmd::pdlib_w3xypfspsi2(), pdlib_w3profsmd::set_ug_iobp_pdlib_init(), w3wavset::trig_wave_setup_apply_fct(), w3wavset::trig_wave_setup_apply_precond(), w3wavset::trig_wave_setup_computation(), and w3triamd::ug_gradients().

◆ pdlib_exchange2dreal()

subroutine, public yowexchangemodule::pdlib_exchange2dreal ( real(kind=rkind), dimension(:,:), intent(inout)  U)

Definition at line 303 of file yowexchangeModule.F90.

303  use yowdatapool, only: comm, myrank, rkind
304  use yownodepool, only: t_node, nodes_global, np, ng, ghosts, npa
305  use yowerr
306  use mpi
307  USE w3odatmd, only : iaproc
308  implicit none
309  real(kind=rkind), intent(inout) :: u(:,:)
310 
311  integer :: i, ierr, tag
312  integer :: sendRqst(nConnDomains), recvRqst(nConnDomains)
313  integer :: recvStat(MPI_STATUS_SIZE, nConnDomains), sendStat(MPI_STATUS_SIZE, nConnDomains)
314 
315 
316 #ifdef W3_DEBUGEXCH
317  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 3'
318  FLUSH(740+iaproc)
319 #endif
320 
321  ! post receives
322 #ifdef W3_DEBUGEXCH
323  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 4'
324  FLUSH(740+iaproc)
325 #endif
326  do i=1, nconndomains
327  tag = 30000 + myrank
328  call mpi_irecv(u, 1, neighbordomains(i)%p2DRrecvType1, &
329  neighbordomains(i)%domainID-1, tag, comm, &
330  recvrqst(i), ierr)
331  if(ierr/=mpi_success) then
332  CALL parallel_abort("MPI_IRecv", ierr)
333  endif
334  enddo
335 #ifdef W3_DEBUGEXCH
336  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 5'
337  FLUSH(740+iaproc)
338 #endif
339 
340  ! post sends
341  do i=1, nconndomains
342  tag = 30000 + (neighbordomains(i)%domainID-1)
343  call mpi_isend(u, 1, neighbordomains(i)%p2DRsendType1, &
344  neighbordomains(i)%domainID-1, tag, comm, &
345  sendrqst(i), ierr)
346  if(ierr/=mpi_success) then
347  CALL parallel_abort("MPI_ISend", ierr)
348  endif
349  end do
350 #ifdef W3_DEBUGEXCH
351  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 6'
352  FLUSH(740+iaproc)
353 #endif
354 
355  ! Wait for completion
356  call mpi_waitall(nconndomains, recvrqst, recvstat,ierr)
357  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
358 #ifdef W3_DEBUGEXCH
359  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 11'
360  FLUSH(740+iaproc)
361 #endif
362  call mpi_waitall(nconndomains, sendrqst, sendstat,ierr)
363  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
364 #ifdef W3_DEBUGEXCH
365  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 12'
366  FLUSH(740+iaproc)
367 #endif

References yowdatapool::comm, yownodepool::ghosts(), w3odatmd::iaproc, yowdatapool::myrank, nconndomains, neighbordomains, yownodepool::ng, yownodepool::nodes_global, yownodepool::np, yownodepool::npa, yowerr::parallel_abort(), and yowdatapool::rkind.

Referenced by pdlib_w3profsmd::pdlib_explicit_block(), and pdlib_w3profsmd::pdlib_jacobi_gauss_seidel_block().

◆ pdlib_exchange2dreal_zero()

subroutine, public yowexchangemodule::pdlib_exchange2dreal_zero ( real(kind=rkind), dimension(n2nddim,0:npa), intent(inout)  U)
Note
MPI recv tag: 30001 + MPI rank
MPI send tag: 30001 + neighbor MPI rank

Definition at line 468 of file yowexchangeModule.F90.

468  use yowdatapool, only: comm, myrank, rkind
469  use yownodepool, only: npa
470  use yowerr
471  use mpi
472  implicit none
473  real(kind=rkind), intent(inout) :: u(n2nddim,0:npa)
474 
475  integer :: i, ierr, tag
476  integer :: sendRqst(nConnDomains), recvRqst(nConnDomains)
477  integer :: recvStat(MPI_STATUS_SIZE, nConnDomains), sendStat(MPI_STATUS_SIZE, nConnDomains)
478  character(len=200) errstr
479 
 480  ! It is impossible to add these range checks because assumed-shape arrays start from 1:npa+1 even if you allocate them from 0:npa
481  ! if(size(U,2) /= npa+1) then
482  ! write(errstr, *) "size(U,2) /= npa+1", size(U,2), "should be", npa+1
483  ! ABORT(errstr)
484  ! endif
485  !
486  ! if(ubound(U,2) /= npa) then
487  ! write(errstr, *) "ubound(U,2) /= npa", ubound(U,2), "should be", npa
488  ! ABORT(errstr)
489  ! endif
490  !
491  ! if(lbound(U,2) /= 0) then
492  ! write(errstr, *) "lbound(U,2) /= 0", lbound(U,2), "should be 0"
493  ! ABORT(errstr)
494  ! endif
495 
496  ! if((size(U,1) /= n2ndDim) ) then
497  ! write(errstr, *) "size(U,1) /= n2ndDim size(U,1)=", size(U,1), " n2ndDim=", n2ndDim
498  ! ABORT(errstr)
499  ! endif
500 
501  ! post receives
502  do i=1, nconndomains
503  tag = 30001 + myrank
504  call mpi_irecv(u, &
505  1, &
506  neighbordomains(i)%p2DRrecvType_zero, &
507  neighbordomains(i)%domainID-1, &
508  tag, &
509  comm, &
510  recvrqst(i), &
511  ierr)
512  if(ierr/=mpi_success) then
513  CALL parallel_abort("MPI_IRecv", ierr)
514  endif
515  enddo
516 
517  ! post sends
518  do i=1, nconndomains
519  tag = 30001 + (neighbordomains(i)%domainID-1)
520  call mpi_isend(u, &
521  1, &
522  neighbordomains(i)%p2DRsendType_zero, &
523  neighbordomains(i)%domainID-1, &
524  tag, &
525  comm, &
526  sendrqst(i), &
527  ierr);
528  if(ierr/=mpi_success) then
529  CALL parallel_abort("MPI_ISend", ierr)
530  endif
531  end do
532 
533 
534  ! Wait for completion
535  call mpi_waitall(nconndomains, recvrqst, recvstat,ierr)
536  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
537  call mpi_waitall(nconndomains, sendrqst, sendstat,ierr)
538  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)

References yowdatapool::comm, yowdatapool::myrank, n2nddim, nconndomains, neighbordomains, yownodepool::npa, yowerr::parallel_abort(), and yowdatapool::rkind.

Referenced by pdlib_w3profsmd::pdlib_explicit_block(), pdlib_w3profsmd::pdlib_jacobi_gauss_seidel_block(), wminiomd::wmioeg(), and wminiomd::wmiohg().

◆ setdimsize()

subroutine, public yowexchangemodule::setdimsize ( integer, intent(in)  second)

set the size of the second and third dimension for exchange

Note
the size of the first dimension is npa
call this before initPD()

Definition at line 375 of file yowexchangeModule.F90.

375  implicit none
376  integer, intent(in) :: second
377  n2nddim = second

References n2nddim.

Referenced by yowpdlibmain::initfromgriddim().

Variable Documentation

◆ n2nddim

integer, public yowexchangemodule::n2nddim = 1

number of the second dimension for exchange

Definition at line 108 of file yowexchangeModule.F90.

108  integer, public :: n2ndDim = 1

Referenced by creatempitype(), pdlib_exchange2dreal_zero(), and setdimsize().

◆ nconndomains

integer, public yowexchangemodule::nconndomains = 0

◆ neighbordomains

type(t_neighbordomain), dimension(:), allocatable, public yowexchangemodule::neighbordomains

Knows, for every neighbor domain, which nodes we must send to or receive from; neighbor domains are numbered from 1 to nConnDomains.

Definition at line 102 of file yowexchangeModule.F90.

102  type(t_neighborDomain), public, allocatable :: neighborDomains(:)

Referenced by creatempitypes(), yowpdlibmain::exchangeghostids(), finalizeexchangemodule(), yowpdlibmain::findconndomains(), initnbrdomains(), pdlib_exchange1dreal(), pdlib_exchange2dreal(), and pdlib_exchange2dreal_zero().

◆ nnthdim

integer, public yowexchangemodule::nnthdim = 1

number of the second dimension for exchange (nth only for wave model)

Definition at line 111 of file yowexchangeModule.F90.

111  integer, public :: nnthDim = 1
yownodepool::ghostgl
integer, dimension(:), allocatable, public ghostgl
Ghost global to local mapping np_global long.
Definition: yownodepool.F90:129
yowerr::parallel_abort
subroutine parallel_abort(string, error)
Definition: yowerr.F90:43
w3odatmd::iaproc
integer, pointer iaproc
Definition: w3odatmd.F90:457
yowerr
Has some subroutine to make a nice error message.
Definition: yowerr.F90:39
yownodepool::npa
integer, public npa
number of ghost + resident nodes this partition holds
Definition: yownodepool.F90:99
yownodepool::ipgl
integer, dimension(:), allocatable, public ipgl
Node global to local mapping np_global long.
Definition: yownodepool.F90:120
yownodepool
Has data that belong to nodes.
Definition: yownodepool.F90:39
yownodepool::ghosts
type(t_node) function, pointer, public ghosts(id)
return pointer to the (global) (ghost) node Ghost nodes are nodes in the global node array,...
Definition: yownodepool.F90:172
yownodepool::t_node
Holds the nodes data.
Definition: yownodepool.F90:48
w3odatmd
Definition: w3odatmd.F90:3
yownodepool::np
integer, public np
number of nodes, local
Definition: yownodepool.F90:93
yowdatapool
Has fancy data.
Definition: yowdatapool.F90:39
yowerr::abort
subroutine abort(string, line, file, errno)
print various error strings and exit.
Definition: yowerr.F90:93
yownodepool::nodes_global
type(t_node), dimension(:), allocatable, target, public nodes_global
all nodes with their data.
Definition: yownodepool.F90:103
yownodepool::ng
integer, public ng
number of ghost nodes this partition holds
Definition: yownodepool.F90:96