WAVEWATCH III  beta 0.0.1
yowexchangeModule.F90
Go to the documentation of this file.
1 !PDLIB Software License
2 !
3 !Software, as understood herein, shall be broadly interpreted as being inclusive of algorithms,
4 !source code, object code, data bases and related documentation, all of which shall be furnished
5 !free of charge to the Licensee. Corrections, upgrades or enhancements may be furnished and, if
6 !furnished, shall also be furnished to the Licensee without charge. NOAA, however, is not
7 !required to develop or furnish such corrections, upgrades or enhancements.
8 !Roland & Partner software, whether that initially furnished or corrections or upgrades,
9 !are furnished "as is". Roland & Partner furnishes its software without any warranty
10 !whatsoever and is not responsible for any direct, indirect or consequential damages
11 !that may be incurred by the Licensee. Warranties of merchantability, fitness for any
12 !particular purpose, title, and non-infringement, are specifically negated.
13 !The Licensee is not required to develop any software related to the licensed software.
14 !However, in the event that the Licensee does so, the Licensee is required to offer same
15 !to Roland & Partner for inclusion under the instant licensing terms with Roland & Partner
16 !licensed software along with documentation regarding its principles, use and its advantages.
17 !This includes changes to the wave model proper including numerical and physical approaches
18 !to wave modeling, and boundary layer parameterizations embedded in the wave model
19 !A Licensee may reproduce sufficient software to satisfy its needs.
20 !All copies shall bear the name of the software with any version number
21 !as well as replicas of any applied copyright notice, trademark notice,
22 !other notices and credit lines. Additionally, if the copies have been modified,
23 !e.g. with deletions or additions, this shall be so stated and identified.
24 !All of Licensee's employees who have a need to use the software may have access
25 !to the software but only after reading the instant license and stating, in writing,
26 !that they have read and understood the license and have agreed to its terms.
27 !Licensee is responsible for employing reasonable efforts to assure
28 !that only those of its employees that should have access to the software, in fact, have access.
29 !The Licensee may use the software for any purpose relating to sea state prediction.
30 !No disclosure of any portion of the software, whether by means of a media or verbally,
31 !may be made to any third party by the Licensee or the Licensee's employees
32 !The Licensee is responsible for compliance with any applicable export or
33 !import control laws of the United States, the European Union and Germany.
34 !
35 !© 2009 Roland&Partner, Georgenstr.32, 64297 Germany. All rights reserved.
36 !PDLIB is a trademark of Roland & Partner. No unauthorized use without permission.
37 !
40  use yowdatapool, only: rkind
41  use mpi, only: mpi_datatype_null
42  implicit none
43  private
47 
!> Holds the exchange information belonging to one neighbor domain:
!> which resident nodes to send to it, which ghost nodes to receive from
!> it, and the committed MPI indexed datatypes used for the halo exchange.
type, public :: t_neighbordomain

  !> id of the neighbor domain
  ! NOTE(review): the exchange routines pass domainid-1 as the MPI rank,
  ! so this appears to be a 1-based rank id — confirm against the partitioner.
  integer :: domainid = 0

  !> number of ghost nodes to receive from this neighbor domain
  integer :: numnodestoreceive = 0

  !> node ids of the ghost nodes to receive (numnodestoreceive long)
  integer, allocatable :: nodestoreceive(:)

  !> number of resident nodes to send to this neighbor domain
  integer :: numnodestosend = 0

  !> node ids of the resident nodes to send (numnodestosend long)
  integer, allocatable :: nodestosend(:)

  ! MPI datatypes for size(U) == npa+1 U(0:npa)

  integer :: p1drsendtype_zero = mpi_datatype_null
  integer :: p1drrecvtype_zero = mpi_datatype_null

  integer :: p2drsendtype_zero = mpi_datatype_null
  integer :: p2drrecvtype_zero = mpi_datatype_null

  ! MPI datatypes for size(U) == npa U(1:npa)
  integer :: p1drsendtype = mpi_datatype_null
  integer :: p1drrecvtype = mpi_datatype_null
  integer :: p2drsendtype1 = mpi_datatype_null
  integer :: p2drrecvtype1 = mpi_datatype_null
  ! NOTE(review): the *2 handles below are freed in finalize() but never
  ! created in creatempitype() in this file — confirm they are still needed.
  integer :: p2drsendtype2 = mpi_datatype_null
  integer :: p2drrecvtype2 = mpi_datatype_null

  contains
  ! procedure :: exchangeGhostIds
  ! final :: finalizeNeighborDomain
  procedure :: finalize       !< free MPI datatypes and node lists
  procedure :: creatempitype  !< build and commit the MPI indexed datatypes

end type t_neighbordomain
99 
!> Exchange data for every neighbor domain, valid from 1 to nconndomains
type(t_neighbordomain), public, allocatable :: neighbordomains(:)

!> Number of neighbor domains
integer, public :: nconndomains = 0

!> size of the second dimension for the 2D exchange routines
integer, public :: n2nddim = 1

!> number of the second dimension (nth only for wave model)
! NOTE(review): nnthdim is not referenced anywhere in this file.
integer, public :: nnthdim = 1
113 
114 contains
115 
116 
!> Free this neighbor domain's MPI datatypes and deallocate its node lists.
!> Safe to call on a partially initialized object: datatype handles that
!> were never created (still mpi_datatype_null) are skipped, because
!> calling MPI_Type_free on MPI_DATATYPE_NULL is erroneous per the MPI
!> standard. In particular p2DRsendType2/p2DRrecvType2 are never built by
!> creatempitype() and would otherwise abort here. MPI_Type_free resets
!> the handle to MPI_DATATYPE_NULL, so a double finalize is also safe.
subroutine finalize(this)
  use yowerr
  use mpi
  implicit none
  class(t_neighbordomain), intent(inout) :: this

  if(allocated(this%nodesToSend))    deallocate(this%nodesToSend)
  if(allocated(this%nodesToReceive)) deallocate(this%nodesToReceive)

  call free_type(this%p1DRsendType_zero)
  call free_type(this%p1DRrecvType_zero)
  call free_type(this%p2DRsendType_zero)
  call free_type(this%p2DRrecvType_zero)
  call free_type(this%p1DRsendType)
  call free_type(this%p1DRrecvType)
  call free_type(this%p2DRsendType1)
  call free_type(this%p2DRrecvType1)
  call free_type(this%p2DRsendType2)
  call free_type(this%p2DRrecvType2)

contains

  !> free one MPI datatype handle; no-op if it was never created
  subroutine free_type(dtype)
    integer, intent(inout) :: dtype
    integer :: ierr

    if(dtype == mpi_datatype_null) return
    call mpi_type_free(dtype, ierr)
    if(ierr /= mpi_success) CALL parallel_abort("freeMPItype", ierr)
  end subroutine free_type

end subroutine finalize
148 
149  ! create MPI indexed datatype for this neighborDomain
150  subroutine creatempitype(this)
151  use yowerr
152  use mpi
153  use yownodepool, only: ghostgl, np, ipgl
154  use yowdatapool, only: rtype, itype
155  implicit none
156  class(t_neighbordomain), intent(inout) :: this
157 
158  integer :: ierr
159  integer :: dsplSend(this%numNodesToSend)
160  integer :: dsplRecv(this%numNodesToReceive)
161 
162  ! MPI datatypes for size(U) == npa+1 U(0:npa)
163  dsplsend = ipgl(this%nodesToSend)
164  dsplrecv = ghostgl(this%nodesToReceive) + np
165 
166  ! p1D real
167  call mpi_type_create_indexed_block(this%numNodesToSend, 1, dsplsend, rtype, this%p1DRsendType_zero, ierr)
168  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
169  call mpi_type_commit(this%p1DRsendType_zero,ierr)
170  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
171 
172  call mpi_type_create_indexed_block(this%numNodesToReceive, 1, dsplrecv, rtype, this%p1DRrecvType_zero, ierr)
173  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
174  call mpi_type_commit(this%p1DRrecvType_zero,ierr)
175  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
176 
177  ! MPI datatypes for size(U) == npa U(1:npa)
178  dsplsend(:) = dsplsend(:) - 1 ! C count from 0; FORTRAN count from 1
179  dsplrecv(:) = dsplrecv(:) - 1
180 
181  ! p1D real
182  call mpi_type_create_indexed_block(this%numNodesToSend, 1, dsplsend, rtype, this%p1DRsendType,ierr)
183  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
184  call mpi_type_commit(this%p1DRsendType,ierr)
185  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
186 
187  call mpi_type_create_indexed_block(this%numNodesToReceive, 1, dsplrecv, rtype, this%p1DRrecvType,ierr)
188  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
189  call mpi_type_commit(this%p1DRrecvType,ierr)
190  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
191 
192  ! MPI datatypes for size(U) == npa+1 U(0:npa)
193  ! p2D real second dim is n2ndDim long
194  dsplsend = (ipgl(this%nodesToSend)) * n2nddim
195  dsplrecv = (ghostgl(this%nodesToReceive) + np) * n2nddim
196  call mpi_type_create_indexed_block(this%numNodesToSend, n2nddim, dsplsend, rtype, this%p2DRsendType_zero,ierr)
197  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
198  call mpi_type_commit(this%p2DRsendType_zero,ierr)
199  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
200 
201  call mpi_type_create_indexed_block(this%numNodesToReceive, n2nddim, dsplrecv, rtype, this%p2DRrecvType_zero,ierr)
202  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
203  call mpi_type_commit(this%p2DRrecvType_zero,ierr)
204  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
205 
206  ! MPI datatypes for size(U) == npa U(1:npa)
207  ! p2D real second dim is n2ndDim long
208  dsplsend = (ipgl(this%nodesToSend)-1) * n2nddim
209  dsplrecv = (ghostgl(this%nodesToReceive) + np -1) * n2nddim
210  call mpi_type_create_indexed_block(this%numNodesToSend, n2nddim, dsplsend, rtype, this%p2DRsendType1,ierr)
211  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
212  call mpi_type_commit(this%p2DRsendType1,ierr)
213  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
214 
215  call mpi_type_create_indexed_block(this%numNodesToReceive, n2nddim, dsplrecv, rtype, this%p2DRrecvType1,ierr)
216  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
217  call mpi_type_commit(this%p2DRrecvType1,ierr)
218  if(ierr /= mpi_success) CALL parallel_abort("createMPIType", ierr)
219 
220 
221  end subroutine creatempitype
222 
!> Allocate the module-level neighbordomains array for nConnD neighbor
!> domains and record the count in nconndomains.
subroutine initnbrdomains(nConnD)
  use yowerr
  implicit none
  integer, intent(in) :: nconnd
  integer :: alloc_stat

  nconndomains = nconnd
  allocate(neighbordomains(nconndomains), stat=alloc_stat)
  if(alloc_stat /= 0) then
    CALL abort('neighborDomains allocation failure')
  endif
end subroutine initnbrdomains
234 
!> Build the MPI exchange datatypes for every neighbor domain.
subroutine creatempitypes()
  implicit none
  integer :: idom

  do idom = 1, nconndomains
    call neighbordomains(idom)%createMPIType()
  end do
end subroutine creatempitypes
243 
!> exchange values in U.
!> Fills the ghost entries of U with values from the owning neighbor
!> domains using the pre-committed p1DR indexed datatypes; one irecv and
!> one isend per neighbor, then a waitall on both request sets.
!> @param U nodal array sized exactly npa (resident + ghost nodes)
subroutine pdlib_exchange1dreal(U)
  ! comm and myrank were referenced without being imported in this scope
  ! (the module-level use of yowdatapool brings in rkind only); import them
  ! explicitly, consistent with pdlib_exchange1dreal_zero below.
  use yowdatapool, only: comm, myrank
  use yownodepool, only: t_node, nodes_global, np, ng, ghosts, npa
  use yowerr
  use mpi
  implicit none
  real(kind=rkind), intent(inout) :: u(:)

  integer :: i, ierr, tag
  integer :: sendrqst(nconndomains), recvrqst(nconndomains)
  integer :: recvstat(mpi_status_size, nconndomains), sendstat(mpi_status_size, nconndomains)
  character(len=140) :: errmsg

  ! the p1DR datatypes were built for U(1:npa); any other size is a bug
  if(size(u) /= npa) then
    WRITE(errmsg, *) 'size(U)=', size(u), ' but npa=', npa
    CALL abort(errmsg)
  endif

  ! post receives; tag encodes the receiving rank so it matches the
  ! sender's tag (which encodes the destination rank)
  do i=1, nconndomains
    tag = 10000 + myrank
    call mpi_irecv(u, 1, neighbordomains(i)%p1DRrecvType, &
         neighbordomains(i)%domainID-1, tag, comm, &
         recvrqst(i), ierr)
    if(ierr/=mpi_success) then
      CALL parallel_abort("MPI_IRecv", ierr)
    endif
  enddo

  ! post sends
  do i=1, nconndomains
    tag = 10000 + (neighbordomains(i)%domainID-1)
    call mpi_isend(u, 1, neighbordomains(i)%p1DRsendType, &
         neighbordomains(i)%domainID-1, tag, comm, &
         sendrqst(i), ierr)
    if(ierr/=mpi_success) then
      CALL parallel_abort("MPI_ISend", ierr)
    endif
  end do

  ! Wait for completion
  call mpi_waitall(nconndomains, recvrqst, recvstat, ierr)
  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
  call mpi_waitall(nconndomains, sendrqst, sendstat, ierr)
  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
end subroutine pdlib_exchange1dreal
296 
297 
!> exchange values in the 2D array U (second dim n2ndDim per node).
!> Same halo exchange as pdlib_exchange1dreal but with the p2DR*1
!> datatypes, which carry n2nddim reals per node.
!> @param U nodal array with the node index in the second dimension
subroutine pdlib_exchange2dreal(U)
  ! comm and myrank were referenced without being imported in this scope
  ! (the module-level use of yowdatapool brings in rkind only); import them
  ! explicitly, consistent with pdlib_exchange2dreal_zero below.
  use yowdatapool, only: comm, myrank
  use yownodepool, only: t_node, nodes_global, np, ng, ghosts, npa
  use yowerr
  use mpi
  USE w3odatmd, only : iaproc
  implicit none
  real(kind=rkind), intent(inout) :: u(:,:)

  integer :: i, ierr, tag
  integer :: sendrqst(nconndomains), recvrqst(nconndomains)
  integer :: recvstat(mpi_status_size, nconndomains), sendstat(mpi_status_size, nconndomains)


#ifdef W3_DEBUGEXCH
  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 3'
  FLUSH(740+iaproc)
#endif

  ! post receives
#ifdef W3_DEBUGEXCH
  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 4'
  FLUSH(740+iaproc)
#endif
  do i=1, nconndomains
    tag = 30000 + myrank
    call mpi_irecv(u, 1, neighbordomains(i)%p2DRrecvType1, &
         neighbordomains(i)%domainID-1, tag, comm, &
         recvrqst(i), ierr)
    if(ierr/=mpi_success) then
      CALL parallel_abort("MPI_IRecv", ierr)
    endif
  enddo
#ifdef W3_DEBUGEXCH
  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 5'
  FLUSH(740+iaproc)
#endif

  ! post sends
  do i=1, nconndomains
    tag = 30000 + (neighbordomains(i)%domainID-1)
    call mpi_isend(u, 1, neighbordomains(i)%p2DRsendType1, &
         neighbordomains(i)%domainID-1, tag, comm, &
         sendrqst(i), ierr)
    if(ierr/=mpi_success) then
      CALL parallel_abort("MPI_ISend", ierr)
    endif
  end do
#ifdef W3_DEBUGEXCH
  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 6'
  FLUSH(740+iaproc)
#endif

  ! Wait for completion
  call mpi_waitall(nconndomains, recvrqst, recvstat, ierr)
  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
#ifdef W3_DEBUGEXCH
  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 11'
  FLUSH(740+iaproc)
#endif
  call mpi_waitall(nconndomains, sendrqst, sendstat, ierr)
  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
#ifdef W3_DEBUGEXCH
  WRITE(740+iaproc,*) 'PDLIB_exchange2Dreal, step 12'
  FLUSH(740+iaproc)
#endif
end subroutine pdlib_exchange2dreal
369 
370 
!> set the size of the second dimension for exchange.
!> Must be called before creatempitypes(), since the p2DR datatypes
!> bake n2nddim into their displacements.
subroutine setdimsize(second)
  implicit none
  integer, intent(in) :: second

  n2nddim = second
end subroutine setdimsize
379 
!> Finalize every neighbor domain and release the module-level array.
!> A no-op if initnbrdomains() was never called.
subroutine finalizeexchangemodule()
  implicit none
  integer :: idom

  if(.not. allocated(neighbordomains)) return

  do idom = 1, size(neighbordomains)
    call neighbordomains(idom)%finalize()
  end do
  deallocate(neighbordomains)
end subroutine finalizeexchangemodule
!> exchange values in U for arrays allocated U(0:npa).
!> Same halo exchange as pdlib_exchange1dreal, but with the "_zero"
!> datatypes whose displacements account for the leading 0 element.
!> No bound checks are done here: an assumed/explicit-shape dummy always
!> presents bounds starting at its declared lower bound, so the caller's
!> actual allocation (0:npa vs 1:npa+1) cannot be distinguished.
subroutine pdlib_exchange1dreal_zero(U)
  use yowdatapool, only: comm, myrank, rkind
  use yownodepool, only: npa
  use yowerr
  use mpi
  implicit none
  real(kind=rkind), intent(inout) :: u(0:npa)

  integer :: idom, ierr, tag
  integer :: sendrqst(nconndomains), recvrqst(nconndomains)
  integer :: recvstat(mpi_status_size, nconndomains), sendstat(mpi_status_size, nconndomains)

  ! post receives; tag encodes the receiving rank
  do idom = 1, nconndomains
    tag = 10001 + myrank
    call mpi_irecv(u, 1, neighbordomains(idom)%p1DRrecvType_zero, &
         neighbordomains(idom)%domainID-1, tag, comm, &
         recvrqst(idom), ierr)
    if(ierr /= mpi_success) then
      CALL parallel_abort("MPI_IRecv", ierr)
    endif
  enddo

  ! post sends; tag encodes the destination rank
  do idom = 1, nconndomains
    tag = 10001 + (neighbordomains(idom)%domainID-1)
    call mpi_isend(u, 1, neighbordomains(idom)%p1DRsendType_zero, &
         neighbordomains(idom)%domainID-1, tag, comm, &
         sendrqst(idom), ierr)
    if(ierr /= mpi_success) then
      CALL parallel_abort("MPI_ISend", ierr)
    endif
  end do

  ! Wait for completion
  call mpi_waitall(nconndomains, recvrqst, recvstat, ierr)
  if(ierr /= mpi_success) CALL parallel_abort("waitall", ierr)
  call mpi_waitall(nconndomains, sendrqst, sendstat, ierr)
  if(ierr /= mpi_success) CALL parallel_abort("waitall", ierr)
end subroutine pdlib_exchange1dreal_zero
!> exchange values in the 2D array U for arrays allocated U(n2nddim,0:npa).
!> Same halo exchange as pdlib_exchange2dreal, but with the p2DR*_zero
!> datatypes whose displacements account for the leading 0 node slot.
!> No bound/shape checks are done here: an explicit-shape dummy always
!> presents bounds starting at its declared lower bound, so the caller's
!> actual allocation cannot be distinguished.
subroutine pdlib_exchange2dreal_zero(U)
  use yownodepool, only: npa
  use yowerr
  use mpi
  implicit none
  real(kind=rkind), intent(inout) :: u(n2nddim,0:npa)

  integer :: idom, ierr, tag
  integer :: sendrqst(nconndomains), recvrqst(nconndomains)
  integer :: recvstat(mpi_status_size, nconndomains), sendstat(mpi_status_size, nconndomains)

  ! post receives; tag encodes the receiving rank
  do idom = 1, nconndomains
    tag = 30001 + myrank
    call mpi_irecv(u, 1, neighbordomains(idom)%p2DRrecvType_zero, &
         neighbordomains(idom)%domainID-1, tag, comm, &
         recvrqst(idom), ierr)
    if(ierr /= mpi_success) then
      CALL parallel_abort("MPI_IRecv", ierr)
    endif
  enddo

  ! post sends; tag encodes the destination rank
  do idom = 1, nconndomains
    tag = 30001 + (neighbordomains(idom)%domainID-1)
    call mpi_isend(u, 1, neighbordomains(idom)%p2DRsendType_zero, &
         neighbordomains(idom)%domainID-1, tag, comm, &
         sendrqst(idom), ierr)
    if(ierr /= mpi_success) then
      CALL parallel_abort("MPI_ISend", ierr)
    endif
  end do

  ! Wait for completion
  call mpi_waitall(nconndomains, recvrqst, recvstat, ierr)
  if(ierr /= mpi_success) CALL parallel_abort("waitall", ierr)
  call mpi_waitall(nconndomains, sendrqst, sendstat, ierr)
  if(ierr /= mpi_success) CALL parallel_abort("waitall", ierr)
end subroutine pdlib_exchange2dreal_zero
540 
541 
542 end module yowexchangemodule
yownodepool::ghostgl
integer, dimension(:), allocatable, public ghostgl
Ghost global to local mapping np_global long.
Definition: yownodepool.F90:129
yowerr::parallel_abort
subroutine parallel_abort(string, error)
Definition: yowerr.F90:43
yowexchangemodule::pdlib_exchange1dreal
subroutine, public pdlib_exchange1dreal(U)
exchange values in U.
Definition: yowexchangeModule.F90:251
yowexchangemodule::initnbrdomains
subroutine, public initnbrdomains(nConnD)
Definition: yowexchangeModule.F90:224
yowexchangemodule::pdlib_exchange2dreal
subroutine, public pdlib_exchange2dreal(U)
Definition: yowexchangeModule.F90:303
yowexchangemodule::nnthdim
integer, public nnthdim
number of the second dimension for exchange (nth only for wave model)
Definition: yowexchangeModule.F90:111
w3odatmd::iaproc
integer, pointer iaproc
Definition: w3odatmd.F90:457
yowexchangemodule::pdlib_exchange2dreal_zero
subroutine, public pdlib_exchange2dreal_zero(U)
Definition: yowexchangeModule.F90:468
yowerr
Has some subroutine to make a nice error message.
Definition: yowerr.F90:39
yownodepool::npa
integer, public npa
number of ghost + resident nodes this partition holds
Definition: yownodepool.F90:99
yowexchangemodule::creatempitype
subroutine creatempitype(this)
Definition: yowexchangeModule.F90:151
yowdatapool::rkind
integer, parameter rkind
double precision.
Definition: yowdatapool.F90:47
yowdatapool::myrank
integer, save myrank
The thread id.
Definition: yowdatapool.F90:62
yownodepool::ipgl
integer, dimension(:), allocatable, public ipgl
Node global to local mapping np_global long.
Definition: yownodepool.F90:120
yownodepool
Has data that belong to nodes.
Definition: yownodepool.F90:39
yownodepool::ghosts
type(t_node) function, pointer, public ghosts(id)
return pointer to the (global) (ghost) node Ghost nodes are nodes in the global node array,...
Definition: yownodepool.F90:172
yowdatapool::rtype
integer, save rtype
Definition: yowdatapool.F90:76
yownodepool::t_node
Holds the nodes data.
Definition: yownodepool.F90:48
w3odatmd
Definition: w3odatmd.F90:3
yowexchangemodule::neighbordomains
type(t_neighbordomain), dimension(:), allocatable, public neighbordomains
Knows, for each neighbor domain, which nodes this domain must send to or receive from it; valid entries run from 1 to nConnDomains.
Definition: yowexchangeModule.F90:102
yownodepool::np
integer, public np
number of nodes, local
Definition: yownodepool.F90:93
yowexchangemodule::creatempitypes
subroutine, public creatempitypes()
Definition: yowexchangeModule.F90:236
yowdatapool
Has fancy data.
Definition: yowdatapool.F90:39
yowexchangemodule
Has only the ghost nodes assigned to a neighbor domain.
Definition: yowexchangeModule.F90:39
yowexchangemodule::n2nddim
integer, public n2nddim
number of the second dimension for exchange
Definition: yowexchangeModule.F90:108
yowdatapool::comm
integer, save, public comm
MPI Communicator.
Definition: yowdatapool.F90:66
yowerr::abort
subroutine abort(string, line, file, errno)
print various error strings and exit.
Definition: yowerr.F90:93
yownodepool::nodes_global
type(t_node), dimension(:), allocatable, target, public nodes_global
all nodes with their data.
Definition: yownodepool.F90:103
yowdatapool::itype
integer, save itype
MPI Integer Type.
Definition: yowdatapool.F90:70
yowexchangemodule::setdimsize
subroutine, public setdimsize(second)
set the size of the second and third dimension for exchange
Definition: yowexchangeModule.F90:375
yowexchangemodule::nconndomains
integer, public nconndomains
Number of neighbor domains.
Definition: yowexchangeModule.F90:105
yowexchangemodule::finalizeexchangemodule
subroutine, public finalizeexchangemodule()
Definition: yowexchangeModule.F90:381
yownodepool::ng
integer, public ng
number of ghost nodes this partition holds
Definition: yownodepool.F90:96
yowexchangemodule::t_neighbordomain
Holds some data belong to a neighbor Domain.
Definition: yowexchangeModule.F90:49