WAVEWATCH III  beta 0.0.1
yowpdlibmain.F90
Go to the documentation of this file.
1 !PDLIB Software License
2 !
3 !Software, as understood herein, shall be broadly interpreted as being inclusive of algorithms,
4 !source code, object code, data bases and related documentation, all of which shall be furnished
5 !free of charge to the Licensee. Corrections, upgrades or enhancements may be furnished and, if
6 !furnished, shall also be furnished to the Licensee without charge. NOAA, however, is not
7 !required to develop or furnish such corrections, upgrades or enhancements.
8 !Roland & Partner software, whether that initially furnished or corrections or upgrades,
9 !are furnished "as is". Roland & Partner furnishes its software without any warranty
10 !whatsoever and is not responsible for any direct, indirect or consequential damages
11 !that may be incurred by the Licensee. Warranties of merchantability, fitness for any
12 !particular purpose, title, and non-infringement, are specifically negated.
13 !The Licensee is not required to develop any software related to the licensed software.
14 !However, in the event that the Licensee does so, the Licensee is required to offer same
15 !to Roland & Partner for inclusion under the instant licensing terms with Roland & Partner
16 !licensed software along with documentation regarding its principles, use and its advantages.
17 !This includes changes to the wave model proper including numerical and physical approaches
18 !to wave modeling, and boundary layer parameterizations embedded in the wave model
19 !A Licensee may reproduce sufficient software to satisfy its needs.
20 !All copies shall bear the name of the software with any version number
21 !as well as replicas of any applied copyright notice, trademark notice,
22 !other notices and credit lines. Additionally, if the copies have been modified,
23 !e.g. with deletions or additions, this shall be so stated and identified.
24 !All of Licensee's employees who have a need to use the software may have access
25 !to the software but only after reading the instant license and stating, in writing,
26 !that they have read and understood the license and have agreed to its terms.
27 !Licensee is responsible for employing reasonable efforts to assure
28 !that only those of its employees that should have access to the software, in fact, have access.
29 !The Licensee may use the software for any purpose relating to sea state prediction.
30 !No disclosure of any portion of the software, whether by means of a media or verbally,
31 !may be made to any third party by the Licensee or the Licensee's employees
32 !The Licensee is responsible for compliance with any applicable export or
33 !import control laws of the United States, the European Union and Germany.
34 !
35 !© 2009 Roland&Partner, Georgenstr.32, 64297 Germany. All rights reserved.
36 !PDLIB is a trademark of Roland & Partner. No unauthorized use without permission.
37 !
43  use yowerr
44  use yowdatapool, only : rkind
45  use w3servmd, only : print_memcheck
46 
47  !module default
48  implicit none
49 
50  private
51  public :: initfromgriddim, finalizepd
52 
53 contains
54 
  !> Main PDLIB entry point: build the distributed mesh from the global grid.
  !>
  !> Sets the spectral (second) dimension size, attaches MPI, stores the global
  !> mesh, pre-partitions the nodes evenly across tasks, partitions with
  !> ParMETIS/PT-Scotch, and then derives all post-partition data (ghost nodes,
  !> neighbor domains, exchanged ghost ids, rank module, CSR sparsity pattern).
  !> A memory checkpoint is written via print_memcheck after every stage.
  !>
  !> @param MNP        global number of mesh nodes
  !> @param MNE        global number of mesh elements
  !> @param INE_global element-to-node connectivity table (3 x MNE)
  !> @param secDim     size of the second (spectral) dimension
  !> @param MPIcomm    MPI communicator PDLIB should work on
  subroutine initfromgriddim(MNP, MNE, INE_global, secDim, MPIcomm)
    use yowelementpool, only: ne_global,ne
    use yowsidepool, only: ns, ns_global

    integer, intent(in) :: mnp, mne
    integer, intent(in) :: ine_global(3,mne)
    integer, intent(in) :: secdim
    integer, intent(in) :: mpicomm
    ! istat is currently unused in this routine; memunit is the per-rank
    ! memcheck file unit
    integer :: istat, memunit

    ! note: myrank=0 until after initMPI is called, so only rank=0 file
    ! contains the 'section 1' information
    memunit = 70000+myrank

    call setdimsize(secdim)
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 1')
#ifdef W3_DEBUGINIT
    print *, '1: MPIcomm=', mpicomm
#endif
    call initmpi(mpicomm)

    ! recompute the unit now that myrank holds this task's real rank
    memunit = 70000+myrank
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 2')
#ifdef W3_DEBUGINIT
    print *, '2: After initMPI'
#endif
    call assignmesh(mnp, mne)
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 3')
#ifdef W3_DEBUGINIT
    print *, '3: After assignMesh'
#endif
    call prepartition()
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 4')
#ifdef W3_DEBUGINIT
    ! NOTE(review): the debug step labels below repeat numbers ('3', '4');
    ! they do not track the SECTION counters
    print *, '3: After prePartition'
#endif
    call findconnnodes(ine_global)
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 5')
#ifdef W3_DEBUGINIT
    print *, '4: After findConnNodes'
#endif
    ! report the pre-partition state (global sizes, naive per-rank node counts)
    if(debugprepartition) then
      if(myrank == 0) then
        write(*,*) "pre-partition"
        write(*,*) "# Nodes: ", np_global
        write(*,*) "# Elements: ", ne_global
        write(*,*) "# Sides ", ns_global
        write(*,*) "np_perProcSum :", np_perprocsum
      end if
      write(*,*) "Thread", myrank, "# local nodes ", np
      write(*,*) "Thread", myrank, "# local sides" , ns
    endif

    ! call writeMesh()
#ifdef W3_DEBUGINIT
    print *, '4.1: After findConnNodes'
#endif
    ! CALL REAL_MPI_BARRIER_PDLIB(MPIcomm, "Before call to runParmetis")
    call runparmetis(mnp)
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 6')
    ! CALL REAL_MPI_BARRIER_PDLIB(MPIcomm, "After call to runParmetis")
#ifdef W3_DEBUGINIT
    print *, '5: After runParmetis'
#endif
    call postpartition
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 7')
#ifdef W3_DEBUGINIT
    print *, 'Before findGhostNodes'
#endif
    call findghostnodes
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 8')

    call findconndomains
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 9')

    call exchangeghostids
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 10')

    call postpartition2(ine_global)
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 11')

    call initrankmodule
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 12')

    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 13')

    call computeia_ja_posi_nnz
    call print_memcheck(memunit, 'memcheck_____:'//' WW3_PDLIB SECTION 14')

    ! report the final decomposition (per-rank elements, nodes, sides, ghosts)
    if(debugpostpartition) then
      if(myrank == 0) then
        write(*,*) "New data after partition"
        write(*,*) "# Nodes: ", np_global
        write(*,*) "# Elements: ", ne_global
        write(*,*) "# Sides ", ns_global
        write(*,*) "np_perProcSum :", np_perprocsum
      end if
      write(*,*) "Thread", myrank, "# local elements ", ne
      write(*,*) "Thread", myrank, "# local nodes ", np
      write(*,*) "Thread", myrank, "# local sides" , ns
      write(*,*) "Thread", myrank, "# of ghosts", ng
      write(*,*) "Thread", myrank, "# of neighbor domains", nconndomains
    endif
  end subroutine initfromgriddim
174 
175 
176 
  !> Hand-rolled synchronization point built from point-to-point messages.
  !>
  !> Rank 0 performs a recv/send handshake with every other rank in turn
  !> (tags 711/712), serializing the ranks for debugging purposes. The
  !> diagnostic prints are currently commented out.
  !>
  !> NOTE(review): this is weaker than MPI_Barrier — rank 1 is released as soon
  !> as its own handshake completes, possibly before higher ranks have arrived.
  !> Confirm callers only rely on the serialization, not on full barrier
  !> semantics.
  !>
  !> @param TheComm MPI communicator to synchronize on
  !> @param string  caller-supplied label; only referenced by the commented-out
  !>                diagnostic print at the end
  SUBROUTINE real_mpi_barrier_pdlib(TheComm, string)

    include "mpif.h"
    integer, intent(in) :: TheComm
    character(*), intent(in) :: string
    integer NbProc, eRank
    integer :: istatus(MPI_STATUS_SIZE)
    integer ierr, iField(1), iProc
    ! Print *, 'Start of REAL_MPI_BARRIER_PDLIB'
    CALL mpi_comm_rank(thecomm, erank, ierr)
    CALL mpi_comm_size(thecomm, nbproc, ierr)
    ! Print *, ' eRank=', eRank, ' NbProc=', NbProc
    ifield(1)=1
    IF (erank .eq. 0) THEN
      ! root: handshake with ranks 1..nbproc-1, one after the other
      DO iproc=2,nbproc
        ! Print *, ' Before MPI_RECV 1 iProc=', iProc
        CALL mpi_recv(ifield, 1, mpi_integer, iproc-1, 711, thecomm, istatus, ierr)
        ! Print *, ' Before MPI_SEND 1'
        CALL mpi_send(ifield, 1, mpi_integer, iproc-1, 712, thecomm, ierr)
      END DO
    ELSE
      ! non-root: announce arrival to rank 0, then wait for its reply
      ! Print *, ' Before MPI_SEND 2 eRank=', eRank
      CALL mpi_send(ifield, 1, mpi_integer, 0, 711, thecomm, ierr)
      ! Print *, ' Before MPI_RECV 2 eRank=', eRank
      CALL mpi_recv(ifield, 1, mpi_integer, 0, 712, thecomm, istatus, ierr)
    END IF
    ! Print *, 'Passing barrier string=', string
  END SUBROUTINE real_mpi_barrier_pdlib
205  !--------------------------------------------------------------------------
206  ! Init MPI
207  !--------------------------------------------------------------------------
208 
210  subroutine initmpi(MPIcomm)
211  use yowdatapool, only: comm, ntasks, myrank
212  use yowerr
213  use mpi
214 
215  integer, intent(in) :: MPIcomm
216  logical :: flag
217  integer :: ierr
218 #ifdef W3_DEBUGINIT
219  print *, '2: MPIcomm=', mpicomm
220 #endif
221  if(mpicomm == mpi_comm_null) then
222  CALL abort("A null communicator is not allowed")
223  endif
224 
225  comm = mpicomm
226  call mpi_initialized(flag, ierr)
227  if(ierr/=mpi_success) call parallel_abort(error=ierr)
228 
229  if(flag .eqv. .false.) then
230 #ifdef W3_DEBUGINIT
231  print *, 'Before MPI_INIT yowpdlibmain'
232 #endif
233  call mpi_init(ierr)
234 #ifdef W3_DEBUGINIT
235  print *, 'After MPI_INIT yowpdlibmain'
236 #endif
237  if(ierr/=mpi_success) call parallel_abort(error=ierr)
238  endif
239 
240  ! Get number of processors
241  call mpi_comm_size(comm, ntasks,ierr)
242  if(ierr/=mpi_success) call parallel_abort(error=ierr)
243 
244  ! Get rank
245  call mpi_comm_rank(comm, myrank,ierr)
246  if(ierr/=mpi_success) call parallel_abort(error=ierr)
247  end subroutine initmpi
248 
249 
257  subroutine assignmesh(MNP, MNE)
259  use yowelementpool, only: ne_global
260  use yowerr, only: parallel_abort
261 
262  integer, intent(in) :: MNP, MNE
263  !integer, intent(in) :: INE(3,MNE)
264  integer :: stat, i
265 
266  np_global=mnp
267  if(allocated(nodes_global)) deallocate(nodes_global)
268  allocate(nodes_global(np_global), stat=stat);
269  if(stat/=0) CALL abort('nodes_global() allocate failure')
270 
271  do i =1, np_global
272  nodes_global(i)%id_global = i
273  end do
274 
275  ne_global=mne
276 
277  !if(allocated(INE_global)) deallocate(INE_global)
278  !allocate(INE_global(3, ne_global), stat=stat);
279  !if(stat/=0) CALL ABORT('INE_global allocate failure')
280  !INE_global = INE
281  end subroutine assignmesh
282 
283 
284  !-------------------------------------------------------------------------
285  ! pre-partition: divide the mesh into nTasks parts.
286  !-------------------------------------------------------------------------
287 
292  subroutine prepartition
294  use yowerr, only: parallel_abort
296 
297  integer :: i, stat
298 
299  ! determine equal number of nodes in each processor (except for the last one).
300  ! and create a provisional node local to global mapping iplg
301 
302  ! start the arrays from 0, because the first thread id is 0
303  if(allocated(np_perproc)) deallocate(np_perproc)
304  allocate(np_perproc(0:ntasks-1), stat=stat)
305  if(stat/=0) call parallel_abort('np_perProc allocation failure')
306 
307  if(allocated(np_perprocsum)) deallocate(np_perprocsum)
308  allocate(np_perprocsum(0:ntasks), stat=stat)
309  if(stat/=0) call parallel_abort('np_perProcSum allocation failure')
310 
311  np_perprocsum = 0
312  np = np_global / ntasks
313 
314  do i = 0, ntasks-2
315  np_perproc(i) = np
316  np_perprocsum(i+1) = np_perprocsum(i) + np
317  end do
318 
321  np = np_perproc(myrank)
322 
323  ! create a provisional node local to global mapping iplg
324  ! this will override later with the data from parmetis
325  if(allocated(iplg)) deallocate(iplg)
326  allocate(iplg(np), stat=stat);
327  if(stat/=0) call parallel_abort(' iplg allocate failure')
328  do i = 1, np
329  iplg(i) = i + np_perprocsum(myrank)
330  end do
331  end subroutine prepartition
332 
333  !-------------------------------------------------------------------------
334  ! Create the connected Nodes array
335  !-------------------------------------------------------------------------
336 
  !> Build the per-node connected-node lists from the global connectivity.
  !>
  !> Two passes over all global elements: the first only counts connections per
  !> node (insertConnNode with no argument appears to just bump the counter —
  !> TODO confirm against yownodepool), the second inserts for each element
  !> corner its two neighboring corners. Finally the local and global side
  !> counts are accumulated from the per-node connection counts.
  !>
  !> @param INE_global global element connectivity (3 x ne_global)
  subroutine findconnnodes(INE_global)
    use yowelementpool, only: ne_global
    use yowsidepool, only: ns, ns_global

    integer, intent(in) :: INE_global(3,ne_global)
    integer :: i, j, stat
    type(t_node), pointer :: node
    integer JPREV, JNEXT

    ! Loop over all elements, look at their nodes:
    ! for node 1, insert nodes 2 and 3 into its connected-nodes list,
    ! then do the same for nodes 2 and 3.
    !
    ! The implementation is slightly different:
    ! loop over all elements once to get the # of connected nodes,
    ! allocate space, then loop a second time to insert the connected nodes.

    ! first loop: count only (two connections per element corner)
    do i = 1, ne_global
      do j = 1, 3
        node => nodes_global(ine_global(j,i))
        call node%insertConnNode()
        call node%insertConnNode()
      end do
    end do

    ! size the shared storage by the busiest node, then reset the counters
    ! so the second pass can fill from the beginning
    maxconnnodes = maxval(nodes_global(:)%nConnNodes)
    nodes_global(:)%nConnNodes = 0

    ! allocate space
    if(allocated(connnodes_data)) deallocate(connnodes_data)
    allocate(connnodes_data(np_global, maxconnnodes), stat = stat)
    if(stat/=0) call parallel_abort('connNodes allocation failure')

    ! second loop: insert, for each corner j, the next and previous corner
    ! of the same triangle (cyclic: 3 -> 1 and 1 -> 3)
    do i = 1, ne_global
      DO j=1,3
        IF (j .eq. 3) THEN
          jnext = 1
        ELSE
          jnext = j + 1
        END IF
        IF (j .eq. 1) THEN
          jprev = 3
        ELSE
          jprev = j - 1
        END IF
        node => nodes_global(ine_global(j,i))
        call node%insertConnNode(ine_global(jnext,i))
        call node%insertConnNode(ine_global(jprev,i))
      END DO

    end do

    ! count local sides; each edge contributes once per endpoint, so this
    ! counts every side from both ends — presumably intended (matches the CSR
    ! adjacency length used in runparmetis); confirm before changing
    ns = 0
    do i = 1, np
      node => nodes(i)
      ns = ns + node%nConnNodes
    end do

    ! NOTE(review): ns_global is accumulated without being reset here;
    ! assumes it is zero-initialized in yowsidepool and this routine runs
    ! once — TODO confirm
    do i = 1, np_global
      node => nodes_global(i)
      ns_global = ns_global + node%nConnNodes
    end do
  end subroutine findconnnodes
412 
413 
414  !------------------------------------------------------------------------
415  ! Collect all data for parmetis und partition the mesh
416  !------------------------------------------------------------------------
417 
  !> Partition the node graph across all tasks with ParMETIS (or PT-Scotch).
  !>
  !> Builds the distributed CSR adjacency (xadj/adjncy) from the per-node
  !> connectivity, assembles vertex coordinates and weight arrays, calls
  !> parmetis_v3_partgeomkway (or the Scotch compatibility wrapper), gathers
  !> the resulting part arrays from all tasks into a global node-to-domain
  !> mapping, and stores it in nodes_global(:)%domainID.
  !>
  !> @param MNP global number of nodes (currently unused in the body)
  subroutine runparmetis(MNP)
    use yowsidepool, only: ns
    use yowelementpool, only: ne, ne_global
    use w3gdatmd, only: xgrd, ygrd
    use mpi

    integer, intent(in) :: MNP

    ! Parmetis
    ! Node neighbor information
    integer :: wgtflag, numflag, ndims, nparts, edgecut, ncon
    integer, allocatable :: xadj(:), part(:), vwgt(:), adjwgt(:), vtxdist(:), options(:), adjncy(:), iweights(:)
    ! parmetis needs single precision
    real(4), allocatable :: xyz(:), tpwgts(:), ubvec(:)
    integer :: IP_glob, itmp
    integer :: ref
    ! NOTE(review): initializing at declaration gives lexist the implicit SAVE
    ! attribute, so it keeps its value across calls — harmless while this
    ! routine runs only once per execution, but worth confirming
    logical :: lexist = .false.

    ! Node to domain mapping.
    ! np_global long. Gives the domain number for each global node number.
    integer, allocatable :: node2domain(:)

    ! Misc
    integer :: i, j, stat, ierr
    type(t_node), pointer :: node, nodeNeighbor

    ! CALL REAL_MPI_BARRIER_PDLIB(comm, "runParmetis, step 1")
    ! Create xadj and adjncy arrays. They hold the node neighbors in CSR format.
    ! Here, the adjacency structure of a graph is represented by two arrays,
    ! xadj[n+1] and adjncy[m]; n vertices and m edges. Every edge is listed twice.
    allocate(adjncy(ns), stat=stat)
    if(stat/=0) call parallel_abort('adjncy allocation failure')
    allocate(xadj(np+1), stat=stat)
    if(stat/=0) call parallel_abort('xadj allocation failure')
    ! CALL REAL_MPI_BARRIER_PDLIB(comm, "runParmetis, step 2")

    xadj = 0
    xadj(1) = 1
    adjncy = 0
    do i=1,np
      node => nodes(i)
      xadj(i+1) = xadj(i) + node%nConnNodes
      if(debugparmetis) write(710+myrank,*) i, xadj(i), xadj(i+1)
      if(node%nConnNodes == 0) then
        write(*,*) "Thread", myrank,"global node has no conn nodes", node%id_global
      endif
      do j=1, node%nConnNodes
        ! adjncy stores the neighbors' global ids (1-based, matching numflag=1)
        nodeneighbor => node%connNodes(j)
        adjncy(j + xadj(i) - 1) = nodeneighbor%id_global
        if(debugparmetis) write(710+myrank,*) i, j, j + xadj(i) - 1, adjncy(j + xadj(i) - 1), nodeneighbor%id_global
      end do
    end do
    ! CALL REAL_MPI_BARRIER_PDLIB(comm, "runParmetis, step 3")

    ! Options for Parmetis
    allocate(options(3))
    options(1)=1 ! 0: default options; 1: user options
    if(debugparmetis) then
      options(2)=15
    else
      options(2)=0 ! Level of information returned: see defs.h in ParMETIS-Lib dir
    endif
    options(3)=15 ! Random number seed
    CALL real_mpi_barrier_pdlib(comm, "runParmetis, step 4")
    ! Fortran-style numbering that starts from 1
    numflag = 1
    ! # dimensions of the space in which the graph is embedded
    ndims = 2
    ! # parts the mesh is divided into. Usually nTasks
    nparts = ntasks
    ! # weights per node
    ncon = 1
    ! wgtflag: 0: none (vwgt and adjwgt are NULL); 1: edges (vwgt is NULL); 2: vertices (adjwgt is NULL); 3: both vertices & edges;

#ifdef WEIGHTS
    ! optional per-node weights read from 'weights.ww3' (one integer per line)
    wgtflag = 2
    INQUIRE ( file='weights.ww3', exist = lexist )
    IF (lexist) THEN
      OPEN(100001,file='weights.ww3',form='FORMATTED',status='unknown')
      allocate(iweights(np_global)); iweights = 0
      do i = 1, np_global
        read(100001,*) iweights(i)
      enddo
      CLOSE(100001)
    ELSE
      wgtflag = 0
    ENDIF
#else
    wgtflag = 0
#endif

    ! Create weights
    allocate(vwgt(np*ncon), stat=stat)
    if(stat/=0) call parallel_abort('vwgt allocation failure')

#ifdef WEIGHTS
    if (lexist) then
      do i = 1, np
        itmp = max(1,int(real((iweights(iplg(i))+100))))
        !vwgt(i) = max(1,int(real(iweights(iplg(i)))/maxval(iweights)) * 10)
        !vwgt(i) = max(1,iweights(iplg(i)))
        !vwgt(i) = max(1,itmp)
        vwgt(i) = itmp
      enddo
      ! NOTE(review): this assignment overwrites the weights computed in the
      ! loop just above, making it dead code — confirm whether the uniform
      ! weighting is intentional
      vwgt = 1
      deallocate(iweights)
    else
      vwgt = 1
    endif
#else
    vwgt = 1
#endif

    allocate(adjwgt(ns), stat=stat)
    if(stat/=0) call parallel_abort('adjwgt allocation failure')
    adjwgt = 1

    ! Vertex weight fraction
    allocate(tpwgts(ncon*ntasks),stat=stat)
    if(stat/=0) call parallel_abort('partition: tpwgts allocation failure')
    tpwgts=1.0/real(ntasks)

    ! Imbalance tolerance
    allocate(ubvec(ncon),stat=stat)
    if(stat/=0) call parallel_abort('partition: ubvec allocation failure')
    ubvec=1.01
    CALL real_mpi_barrier_pdlib(comm, "runParmetis, step 5")

    ! Vertex coordinates for the geometric part of PartGeomKway
    allocate(xyz(2*np),stat=stat)
    if(stat/=0) call parallel_abort('xyz: ubvec allocation failure')
    if(debugparmetis) write(710+myrank,*) 'np_global, ne_global, np, npa, ne'
    if(debugparmetis) write(710+myrank,*) np_global, ne_global, np, npa, ne
    do i = 1, np
      ip_glob = iplg(i)
      !AR: this is questionable ...
      xyz(2*(i-1)+1) = real(xgrd(1,ip_glob))
      xyz(2*(i-1)+2) = real(ygrd(1,ip_glob))
      if(debugparmetis) then
        write(710+myrank,*) i, np, xyz(2*(i-1)+1), xyz(2*(i-1)+2)
        call flush(710+myrank)
      endif
    end do
    CALL real_mpi_barrier_pdlib(comm, "runParmetis, step 6")

    ! Partition array returned from ParMeTiS
    allocate(part(np),stat=stat)
    if(stat/=0) call parallel_abort('part: ubvec allocation failure')

    ! ParMeTiS vertex distribution array (starts at 1)
    allocate(vtxdist(ntasks+1),stat=stat)
    if(stat/=0) call parallel_abort('partition: vtxdist allocation failure')

    call mpi_allgather(np_perprocsum(myrank)+1, 1, itype, vtxdist, 1, itype, comm, ierr)
    if(ierr/=mpi_success) call parallel_abort('partition: mpi_allgather',ierr)
    vtxdist(ntasks+1)=np_global+1
    ! CALL REAL_MPI_BARRIER_PDLIB(comm, "runParmetis, step 7")

    ! check vtxdist
    ! myrank starts from 0
    if((vtxdist(myrank+2) - vtxdist(myrank+1)) < 1) then
      write(*,*) "Thread", myrank, "has no nodes"
      write(*,*) "vtxdist", vtxdist
      CALL abort("Poor initial vertex distribution detected")
    endif

    ! My notes from manual:
    ! p: # of processors;
    ! n: total # of vertices (local) in graph sense;
    ! m: total # of neighboring vertices ("edges"); double counted between neighboring vertice u and v.
    ! ncon: # of weights for each vertex;
    ! int(in) vtxdist(p+1): Processor j stores vertices vtxdist(j):vtxdist(j+1)-1
    ! int (in) xadj(n+1), adjncy(m):
    !   locally, vertex j's neighboring vertices are adjncy(xadj(j):xadj(j+1)-1). adjncy points to global index;
    ! int(in) vwgt(ncon*n), adjwgt(m): weights at vertices and "edges". Format of adjwgt follows adjncy;
    ! int(in) wgtflag: 0: none (vwgt and adjwgt are NULL);
    !   1: edges (vwgt is NULL); 2: vertices (adjwgt is NULL); 3: both vertices & edges;
    ! int(in) numflag: 0: C-style numbering from 0; 1: FORTRAN style from 1;
    ! int(in) ndims: 2 or 3 (D);
    ! float(in) xyz(ndims*n): coordinate for vertex j is xyz(j*ndims:(j+1)*ndims-1);
    ! int(in) nparts: # of desired sub-domains (usually nTasks);
    ! float(in) tpwgts(ncon*nparts): =1/nparts if sub-domains are to be of same size for each vertex weight;
    ! float(in) ubvec(ncon): imbalance tolerance for each weight;
    ! int(in) options: additonal parameters for the routine (see above);
    ! int(out) edgecut: # of edges that are cut by the partitioning;
    ! int(out) part(): array size = # of local vertices. It stores indices of local vertices.

    ! write(1112+myrank,*) "Thread",myrank,"sum;vtxdist", sum(vtxdist), vtxdist
    ! write(1112+myrank,*) "Thread",myrank,"sum;xadj", sum(xadj), xadj
    ! write(1112+myrank,*) "Thread",myrank,"sum;adjncy", sum(adjncy), adjncy

    CALL real_mpi_barrier_pdlib(comm, "runParmetis, step 8")

    if(debugparmetis) then
      write(710+myrank,*) vtxdist, xadj, adjncy, &
        vwgt, & !vwgt - ignore weights
        adjwgt, & ! adjwgt - ignore weights
        wgtflag, &
        numflag,ndims,ncon,nparts,tpwgts,ubvec,options, &
        edgecut,part,comm
      call flush(710+myrank)
    endif

    !if(debugParmetis) write(710+myrank,*) "Run ParMETIS now..."
#ifdef W3_SCOTCH
    call scotch_parmetis_v3_partgeomkway(vtxdist, xadj, adjncy, &
      vwgt, & !vwgt - ignore weights
      adjwgt, & ! adjwgt - ignore weights
      wgtflag, &
      numflag,ndims,xyz,ncon,nparts,tpwgts,ubvec,options, &
      edgecut,part, comm,ref)
#endif

#ifdef W3_METIS
    call parmetis_v3_partgeomkway(vtxdist, xadj, adjncy, &
      vwgt, & !vwgt - ignore weights
      adjwgt, & ! adjwgt - ignore weights
      wgtflag, &
      numflag,ndims,xyz,ncon,nparts,tpwgts,ubvec,options, &
      edgecut,part, comm)
#endif

    CALL real_mpi_barrier_pdlib(comm, "runParmetis, step 9")

    ! single-task runs may come back 0-based despite numflag=1; shift to 1-based
    if(ntasks == 1) then
      ! write(*,*) myrank, "minval part", minval(part)
      if(minval(part) == 0) then
        part(:) = part(:) + 1
      endif
    endif

    ! write(*,*) myrank, "edge cuted", edgecut

    ! Collect the parmetis data from all threads
    ! and create a global node to domain number mapping
    allocate(node2domain(np_global),stat=stat)
    if(stat/=0) call parallel_abort(' node2domain allocation failure')
    !
    call mpi_allgatherv(part, np, itype, node2domain, np_perproc, np_perprocsum, itype, comm, ierr)
    if(ierr/=mpi_success) call parallel_abort('mpi_allgatherv ',ierr)
    !
    do i = 1, np_global
      node => nodes_global(i)
      node%domainID = node2domain(node%id_global)
    end do
    ! CALL REAL_MPI_BARRIER_PDLIB(comm, "runParmetis, step 10")

    ! write out partition info for katerfempresenter
    if(debugpartition) write(600,*) node2domain

    ! release all scratch arrays
    if(allocated(xadj)) deallocate(xadj)
    if(allocated(adjncy)) deallocate(adjncy)
    if(allocated(part)) deallocate(part)
    if(allocated(vwgt)) deallocate(vwgt)
    if(allocated(adjwgt)) deallocate(adjwgt)
    if(allocated(xyz)) deallocate(xyz)
    if(allocated(tpwgts)) deallocate(tpwgts)
    if(allocated(ubvec)) deallocate(ubvec)
    if(allocated(vtxdist)) deallocate(vtxdist)
    if(allocated(node2domain)) deallocate(node2domain)
  end subroutine runparmetis
701 
702  !------------------------------------------------------------------------
703  ! with the new data from parmetis, recalculate some variables
704  !------------------------------------------------------------------------
705 
706  ! Create for every boundary node a dummy node and for every dummy node two local elements
707  ! extend x,y,z, INE
708  ! rebuild NCONE, CONE, NCONN, CONN
709  ! alter nd, nde
710  !subroutine dummyNodes
711  ! use yowExchangeModule
712  ! use yowNodepool, only: npa, nb, nd, PDLIB_NCONN, PDLIB_CONN, boundaryNodes, nodes, x, y, z, outwardNormal, XY, dummy2boundary, BNDprevnext
713  ! use yowElementpool, only: ne, nde, INE, NCONE, CONE
714  ! implicit none
715  ! integer :: i, IP, IPprev, IPnext, newIP, newIE
716  ! integer, allocatable :: INEnew(:,:)
717  ! real(rkind) :: vec(2), newPoint(2), newLength
718  ! real(rkind), allocatable :: xNew(:), yNew(:), zNew(:)
719  !
720  ! nd = nb
721  !
722  ! allocate(dummy2boundary(nd))
723  !
724  ! nde = 2*nd
725  ! allocate(INEnew(3,ne+nde))
726  ! INEnew(:,1:ne) = INE(:,1:ne)
727  !
728  ! allocate(xNew(npa+nd), yNew(npa+nd), zNew(npa+nd))
729  ! xNew(1:npa) = x(1:npa)
730  ! yNew(1:npa) = y(1:npa)
731  ! zNew(1:npa) = z(1:npa)
732  !
733  ! nd = 0
734  ! nde = 0
735  ! do i=1, nb
736  ! nd = nd + 1
737  ! IP = boundaryNodes(i)
738  ! dummy2boundary(nd) = IP
739  !
740  ! call BNDprevnext(IP, IPprev, IPnext)
741  ! newLength = max(distanceTo(XY(IP), XY(IPprev)), distanceTo(XY(IP), XY(IPnext)))
742  ! newPoint = XY(IP) + outwardNormal(:,i) * newLength
743  ! newIP = npa+i
744  ! xNew(newIP) = newPoint(1)
745  ! yNew(newIP) = newPoint(2)
746  ! zNew(newIP) = z(IP)
747  ! NCONN(IP) = NCONN(IP) + 1
748  ! CONN(NCONN(IP),IP) = newIP
749  !
750  ! nde = nde +1
751  ! newIE = ne+nde
752  ! INEnew(1,newIE) = IP
753  ! INEnew(2,newIE) = IPprev
754  ! INEnew(3,newIE) = newIP
755  ! NCONE(IP) = NCONE(IP) +1
756  ! CONE(NCONE(IP),IP) = newIE
757  !
758  ! nde = nde +1
759  ! newIE = ne+nde
760  ! INEnew(1,newIE) = IP
761  ! INEnew(2,newIE) = newIP
762  ! INEnew(3,newIE) = IPnext
763  ! NCONE(IP) = NCONE(IP) +1
764  ! CONE(NCONE(IP),IP) = newIE
765  ! end do
766  !
767  ! call move_alloc(INEnew, INE)
768  ! call move_alloc(xNew, x)
769  ! call move_alloc(yNew, y)
770  ! call move_alloc(zNew, z)
771  !
  ! ! The number of connected nodes for ghost nodes is needed by findBoundaryNodes and dummyNodes.
773  ! call exchange(NCONN)
774  ! call exchange(NCONE)
775  !end subroutine
776 
  !> Recompute the node distribution from the ParMETIS domain assignment.
  !>
  !> Counts how many nodes each task now owns, assigns every global node its
  !> new local id, rebuilds the provisional local/global mappings iplg and ipgl
  !> (still without ghost nodes — findGhostNodes builds the final iplg), and
  !> recounts the local sides.
  subroutine postpartition
    use yowdatapool, only: myrank, ntasks
    use yowsidepool, only: ns

    integer :: i, j, stat
    type(t_node), pointer :: node

    ! determine how many nodes now belong to which thread
    ! and set the nodes' local ids
    ! (domainID is 1-based, np_perProc is 0-based, hence the -1)
    np_perproc = 0
    do i = 1, np_global
      np_perproc(nodes_global(i)%domainID-1) = np_perproc(nodes_global(i)%domainID-1)+1
      ! set the new local id
      nodes_global(i)%id = np_perproc(nodes_global(i)%domainID-1)
    end do

    ! prefix sums of the per-task counts
    ! NOTE(review): np_perprocsum(ntasks) is not refreshed here — presumably
    ! unused after this point; confirm before relying on it downstream
    np_perprocsum(0) = 0
    do i = 1, ntasks-1
      np_perprocsum(i) = np_perprocsum(i-1) + np_perproc(i-1)
    end do

    np = np_perproc(myrank)

    ! create the new node local to global mapping iplg. This is not the final one.
    ! findGhostNodes creates an iplg that includes the ghost nodes.
    if(allocated(iplg)) deallocate(iplg)
    allocate(iplg(np), stat=stat)
    if(stat/=0) call parallel_abort('iplg second allocation failure')
    iplg = 0

    if(allocated(ipgl)) deallocate(ipgl)
    allocate(ipgl(np_global), stat=stat)
    if(stat/=0) call parallel_abort('ipgl allocation failure')
    ipgl = 0

    ! resident nodes keep their relative global order; j is the running local id
    j = 1
    do i = 1, np_global
      node => nodes_global(i)
      if(node%domainID == myrank+1) then
        iplg(j) = i
        ipgl(i) = j
        j = j + 1
      endif
    end do

    ! recount local sides, because nodes may now belong to other domains
    ns = 0
    do i = 1, np
      node => nodes(i)
      ns = ns + node%nConnNodes
    end do
  end subroutine postpartition
836 
837 
838  !----------------------------------------------------------------------
839  ! find the ghost nodes of the local domain
840  !----------------------------------------------------------------------
841 
844  subroutine findghostnodes
846  use yowdatapool, only: myrank
848 
849  integer :: i, j, k, stat
850  type(t_node), pointer :: node, nodeNeighbor, nodeGhost
852  integer, save, allocatable :: ghostTemp(:)
853 #ifdef W3_DEBUGINIT
854  print *, 'Passing in findGhostNodes'
855 #endif
856 
857  ! iterate over all local nodes and look at their neighbors
858  ! has the neighbor another domain id, than it is a ghost
859 
860  ! implementation is some different
861  ! loop over all nodes to get the # of ghost nodes
862  ! allocate space
863  ! loop a second time to insert the ghost nodes
865 
866  ! first loop. find out how many ghost nodes we have (with double entries)
867  ng = 0
868  do i = 1, np
869  node => nodes(i)
870  do j = 1, node%nConnNodes
871  nodeneighbor => node%connNodes(j)
872  if(nodeneighbor%domainID /= node%domainID) then
873  ! yes, we found a ghost
874  ng = ng + 1
875  end if
876  end do
877  end do
878 
879  allocate(ghosttemp(ng), stat=stat)
880  if(stat/=0) call parallel_abort('ghostTemp allocation failure')
881 
882 #ifdef W3_DEBUGINIT
883  print *, 'np_global=', np_global
884 #endif
885  IF (allocated(ghostgl)) THEN
886  print *, 'ghostgl is already allocated'
887  END IF
888  allocate(ghostgl(np_global), stat=stat)
889  if(stat/=0) call parallel_abort('ghostgl allocation failure')
890  ghostgl = 0
891 
892  ! second loop. fill ghostlg. ignore double entries
893  ng = 0
894  ! iterate over all local nodes
895  do i = 1, np
896  node => nodes(i)
897 
898  ! check their neighbors
899  secondloop: do j = 1, node%nConnNodes
900  nodeneighbor => node%connNodes(j)
901 
902  if(nodeneighbor%domainID /= node%domainID) then
903  ! yes, we found a ghost
904  ! check if this ghost is allready in the ghost list
905  do k = 1, ng
906  nodeghost => nodes_global(ghosttemp(k))
907  if(nodeneighbor%id_global == nodeghost%id_global) then
908  ! yes, we allready know this ghost.
909  ! check the next neighbor
910  cycle secondloop
911  end if
912  end do
913 
914  ! no we don't know this ghost. insert it
915  ng = ng + 1
916  ghosttemp(ng) = nodeneighbor%id_global
917  ghostgl(nodeneighbor%id_global) = ng
918  end if
919  end do secondloop
920  end do
921 
922  ! reallocate the gosttemp array becouse it is longer then the new ng
923  if(allocated(ghostlg)) deallocate(ghostlg)
924  allocate(ghostlg(ng), stat=stat)
925  if(stat/=0) call parallel_abort('ghostlg allocation failure')
926  ghostlg = ghosttemp(1:ng)
927  deallocate(ghosttemp)
928 
929  npa = np + ng
930 
931  ! check if ghostlg contains only uniqe values
932  do i=1, ng
933  do j=i+1, ng
934  if(ghostlg(i) == ghostlg(j)) then
935  write(*,*) "double global ghost id in ghostlg(i,j)", i, j
936  stop "double global ghost id in ghostlg(i,j)"
937  endif
938  end do
939  end do
940 
941 
942  ! create the new node local to global mapping iplg with ghost. final one.
943  if(allocated(iplg)) deallocate(iplg)
944  allocate(iplg(npa), stat=stat)
945  if(stat/=0) call parallel_abort('iplg second allocation failure')
946  iplg = 0
947 
948  j = 1
949  do i = 1, np_global
950  node => nodes_global(i)
951  if(node%domainID == myrank+1) then
952  iplg(j) = i
953  j = j + 1
954  endif
955  end do
956 
957  iplg(np+1: npa) = ghostlg(1:ng)
958  end subroutine findghostnodes
959  !-------------------------------------------------------------------------------
960  ! find the number of connected domains and their ghosts
961  !-------------------------------------------------------------------------------
962 
967  subroutine findconndomains
969  use yownodepool, only: ghosts, ng, t_node
970  use yowdatapool, only: ntasks, myrank
972 
973  integer :: i, stat, itemp
974  type(t_node), pointer :: ghost
975 
976  ! # of ghost per neighbor domain
977  integer, allocatable :: numberGhostPerNeighborDomainTemp(:)
978  ! look up table. domainID to neighbor (ID)
979  integer, allocatable :: domainID2NeighborTemp(:)
980 
981  ! Part 1) find # neighbor domains
982 
983  ! allocate this array with a fixed size of nTasks. even if we not have
984  ! so many neighbor domains, nTasks will never be very large
985  allocate(numberghostperneighbordomaintemp(ntasks), stat=stat)
986  if(stat/=0) call parallel_abort('numberGhostPerNeighborDomainTemp allocation failure')
987  numberghostperneighbordomaintemp = 0
988 
989  allocate(domainid2neighbortemp(ntasks), stat=stat)
990  if(stat/=0) call parallel_abort('domainID2NeighborTemp allocation failure')
991  domainid2neighbortemp = 0
992 
993 
994  ! iterate over all ghost nodes an get their thread id
995  itemp = 0
996  do i = 1, ng
997  ghost => ghosts(i)
998 
999  ! sum how many ghost belongs to the ghost domainID
1000  numberghostperneighbordomaintemp(ghost%domainID) = numberghostperneighbordomaintemp(ghost%domainID) + 1
1001 
1002  ! check if this ghost domainID is allready in the domains list
1003  if(domainid2neighbortemp(ghost%domainID) /= 0) then
1004  ! yes we have allready insert this domain id
1005  cycle
1006  end if
1007 
1008  ! no we dont know this domain id. insert it
1009  itemp = itemp + 1
1010  domainid2neighbortemp(ghost%domainID) = itemp
1011  end do
1012 
1013  ! Part 2) assign the ghost nodes to their domains
1014  call initnbrdomains(itemp)
1015 
1016  do i = 1, ntasks
1017  if(numberghostperneighbordomaintemp(i) /= 0) then
1018  neighbordomains(domainid2neighbortemp(i) )%domainID = i
1019 
1020  allocate(neighbordomains(domainid2neighbortemp(i))%nodesToReceive(numberghostperneighbordomaintemp(i)), stat=stat)
1021  if(stat/=0) call parallel_abort('neighborDomains%ghosts allocation failure')
1022  neighbordomains(domainid2neighbortemp(i))%nodesToReceive = 0
1023  end if
1024  end do
1025 
1026  do i = 1, ng
1027  ghost => ghosts(i)
1028  itemp = domainid2neighbortemp(ghost%domainID)
1029 
1030  neighbordomains(itemp)%numNodesToReceive = neighbordomains(itemp)%numNodesToReceive + 1
1031  neighbordomains(itemp)%nodesToReceive(neighbordomains(itemp)%numNodesToReceive) = ghost%id_global
1032  end do
1033 
1034  if(allocated(numberghostperneighbordomaintemp)) deallocate(numberghostperneighbordomaintemp)
1035  if(allocated(domainid2neighbortemp)) deallocate(domainid2neighbortemp)
1036  end subroutine findconndomains
1037 
1038 
1039  !-------------------------------------------------------------------------------
1040  ! exchange Ghost Ids so every thread knows which nodes he has to send to the
1041  ! other parition
1042  !-------------------------------------------------------------------------------
1043 
1051  subroutine exchangeghostids
1053  use yownodepool, only: np, t_node, nodes
1054  use yowdatapool, only: ntasks, myrank, comm
1056  use mpi
1057 
1058  integer :: i, j, k
1059  integer :: ierr
1060  ! uniq tag that identify the sender and which information he sends
1061  integer :: tag
1062  ! we use non-blocking send and recv subroutines
1063  ! store the send status
1064  integer :: sendRequest(nConnDomains)
1065  ! store the revc status
1066  integer :: recvRequest(nConnDomains)
1067  ! status to verify if one communication fails or not
1068  integer :: status(MPI_STATUS_SIZE, nConnDomains);
1069 
1070 
1071  type(t_node), pointer :: node
1072 
1073  ! send to all domain neighbors how many ghosts nodes we want from him and which ones
1074  do i=1, nconndomains
1075  ! create a uniq tag for this domain
1076  tag = neighbordomains(i)%domainID*10 + 1
1077  ! send to the neighbor how many ghost nodes we want from him
1078  call mpi_isend(neighbordomains(i)%numNodesToReceive, &
1079  1, &
1080  mpi_int, &
1081  neighbordomains(i)%domainID-1, &
1082  tag, &
1083  comm, &
1084  sendrequest(i), &
1085  ierr);
1086  if(ierr/=mpi_success) then
1087  write(*,*) "mpi send failure"
1088  endif
1089 
1090  tag = neighbordomains(i)%domainID*10 + 2
1091  ! send to the neighbor which ghost nodes we want from him
1092  call mpi_isend(neighbordomains(i)%nodesToReceive, &
1093  neighbordomains(i)%numNodesToReceive, &
1094  mpi_int, &
1095  neighbordomains(i)%domainID-1, &
1096  tag, &
1097  comm, &
1099  sendrequest(i), &
1100  ierr);
1101  if(ierr/=mpi_success) then
1102  write(*,*) "mpi send failure"
1103  endif
1104 
1105  ! receive from neighbor how many ghost nodes we have to send him
1106  tag = (myrank+1)*10 + 1
1107  call mpi_irecv(neighbordomains(i)%numNodesToSend, &
1108  1, &
1109  mpi_int, &
1110  neighbordomains(i)%domainID-1, &
1111  tag, &
1112  comm, &
1113  recvrequest(i), &
1114  ierr)
1115  if(ierr/=mpi_success) then
1116  write(*,*) "mpi recv failure"
1117  endif
1118  end do
1119 
1120  ! wait for communication end
1121  call mpi_waitall(nconndomains, recvrequest, status, ierr)
1122 
1123  ! test for all neighbor domains
1124  do i=1, nconndomains
1125  ! test if the neighbor wants more ghost nodes than we have
1126  if(neighbordomains(i)%numNodesToSend > np) then
1127  write(*,'(i5, a, i5, a, i5,a, i5, a, /)', advance='no') myrank, " ERROR neighbordomain ", neighbordomains(i)%domainID, &
1128  " wants ", neighbordomains(i)%numNodesToSend, &
1129  " nodes, but we have only ", np, " nodes"
1130  CALL abort("")
1131  end if
1132  end do
1133 
1134  ! receive from all neighbor domains which nodes we must send him
1135  do i=1, nconndomains
1136  allocate(neighbordomains(i)%nodesToSend(neighbordomains(i)%numNodesToSend))
1137  neighbordomains(i)%nodesToSend = 0
1138 
1139  ! receive from neighbor which nodes we must send
1140  tag = (myrank+1)*10 + 2
1141  call mpi_irecv(neighbordomains(i)%nodesToSend, &
1142  neighbordomains(i)%numNodesToSend, &
1143  mpi_int, &
1144  neighbordomains(i)%domainID-1, &
1145  tag, &
1146  comm, &
1147  recvrequest(i), &
1148  ierr)
1149  if(ierr/=mpi_success) then
1150  CALL parallel_abort("mpi recv failure", ierr)
1151  endif
1152  end do
1153 
1154  ! wait for communication end
1155  call mpi_waitall(nconndomains, recvrequest, status, ierr)
1156 
1157  ! test for all neighbor domains
1158  do i=1, nconndomains
1159  ! test if the neighbor wants nodes that we don't own
1160  outerloop: do j=1, neighbordomains(i)%numNodesToSend
1161  ! compare with all local nodes
1162  do k=1, np
1163  node => nodes(k)
1164  if(node%id_global == neighbordomains(i)%nodesToSend(j)) then
1165  cycle outerloop
1166  end if
1167  end do
1168  write(*,*) myrank, "Neighbordomain", neighbordomains(i)%domainID, &
1169  " want Node", neighbordomains(i)%nodesToSend(j), &
1170  " but we don't own this node"
1171  stop
1172  end do outerloop
1173  end do
1174 
1175  call creatempitypes()
1176  end subroutine exchangeghostids
1177 
  !> Collects all data which depend on ghost information.
  !> Alters: ne, INE, ielg, x, y, z and the local ids of ghost nodes.
  !> Must run after findghostnodes has filled ghostlg/iplg.
  !> @param INE_global  global element connectivity: global node ids of the
  !>                    three corners of each of the ne_global elements
  subroutine postpartition2(INE_global)
    use yowerr, only: parallel_abort
    use yowdatapool, only: myrank
    use yownodepool, only: x, y, z
    use w3gdatmd, only: xgrd, ygrd, zb

    integer, intent(in) :: INE_global(3,ne_global)

    integer :: i, j, k, stat, IP_glob
    type(t_node), pointer :: node
    logical :: assigned

    ! to find the local elements, iterate over all global elements and check their node domain IDs

    ! step 1: calc the number of local elements
    ne = 0
    do i=1, ne_global
      if (belongto(ine_global(:,i))) then
        ne = ne +1
      endif
    end do

    ! step 2: fill the local element index array with LOCAL node ids
    if(allocated(ine)) deallocate(ine)
    allocate(ine(3, ne), stat=stat)
    if(stat/=0) call parallel_abort('INE allocation failure')
    ine = 0

    ne = 0
    do i=1, ne_global
      ! yes, this element belongs to this domain
      if (belongto(ine_global(:,i))) then
        ne = ne + 1
        do j=1, 3
          assigned = .false.
          node => nodes_global(ine_global(j,i))
          if(node%domainID == myrank+1) then
            ! resident node: use its local id directly
            ine(j, ne) = node%id
            assigned = .true.
          else
            ! the element has a ghost node: look it up in the
            ! ghost local-to-global mapping
            do k=1, ng
              if(node%id_global == ghostlg(k)) then
                ! conversion: the ghost nodes are stored behind the local nodes.
                if(ine(j,ne) /= 0) then
                  write(*,*) "will write to INE(j, ne) but there is allready a value", j, ne, ine(j, ne)
                endif
                ine(j, ne) = np + k
                ! side effect: the ghost node learns its local id (np+k)
                node%id = np+k
                assigned = .true.
                ! write(*,*) myrank, "node to ele", node%id_global-1, i-1, np+k
                exit
              endif
            end do
          endif
          ! neither resident nor a known ghost: should not happen
          if(assigned .eqv. .false.) then
            write(*,*) "Can't assign global node to INE", node%id_global
          endif
        end do
      endif
    end do

    ! sanity check: INE must not contain 0 (unassigned corner)
    do i=1, ne
      if(minval(abs(ine(:,i))) == 0) then
        write(*,*) "0 in INE ne=", ne
        stop "0 in INE"
      endif
    end do

    ! every local+ghost node should appear in some element;
    ! NOTE(review): only a warning, as isolated ghost nodes may be legal here
    if(maxval(ine) /= npa) then
      write(*,*) "MAXVAL(INE) /= npa ERROR?"
    endif

    ! create element local to global mapping ielg
    if(allocated(ielg)) deallocate(ielg)
    allocate(ielg(ne), stat=stat)
    if(stat/=0) call parallel_abort('ielg allocation failure')
    ielg = 0

    j = 0
    do i=1, ne_global
      if (belongto(ine_global(:,i))) then
        j = j +1
        ielg(j) = i
      end if
    end do

    ! fill the local x,y,z arrays (resident nodes first, then ghosts)
    if(allocated(x)) deallocate(x)
    allocate(x(npa), stat=stat)
    if(stat/=0) call parallel_abort('x allocation failure')

    if(allocated(y)) deallocate(y)
    allocate(y(npa), stat=stat)
    if(stat/=0) call parallel_abort('y allocation failure')

    if(allocated(z)) deallocate(z)
    allocate(z(npa), stat=stat)
    if(stat/=0) call parallel_abort('z allocation failure')

    do i=1, np
      ip_glob = iplg(i)
      x(i) = xgrd(1,ip_glob)
      y(i) = ygrd(1,ip_glob)
      z(i) = zb(ip_glob)
    end do

    ! ghost coordinates are appended behind the resident nodes
    do i=1, ng
      ip_glob = ghostlg(i)
      x(np+i) = xgrd(1,ip_glob)
      y(np+i) = ygrd(1,ip_glob)
      z(np+i) = zb(ip_glob)
    end do

  end subroutine postpartition2
1300  !**********************************************************************
1301  !* *
1302  !**********************************************************************
1303  subroutine computetria_ien_si_ccon
1306  use yowerr, only: parallel_abort
1307  use yowdatapool, only: myrank
1308  use yownodepool, only: np_global, np, iplg, t_node, ghostlg, ng, npa
1310 
1311  integer I1, I2, I3, stat, IE, NI(3)
1312  real :: DXP1, DXP2, DXP3, DYP1, DYP2, DYP3, DBLTMP, TRIA03
1313  logical :: CROSSES_DATELINE
1314 
1315  allocate(pdlib_si(npa), pdlib_ccon(npa), pdlib_ien(6,ne), pdlib_tria(ne), pdlib_tria03(ne), stat=stat)
1316  if(stat/=0) call parallel_abort('SI allocation failure')
1317 
1318  pdlib_si(:) = 0.0d0 ! Median Dual Patch Area of each Node
1319  pdlib_ccon(:) = 0 ! Number of connected Elements
1320  DO ie = 1 , ne
1321  i1 = ine(1,ie)
1322  i2 = ine(2,ie)
1323  i3 = ine(3,ie)
1324  ni = ine(:,ie)
1325 
1326  dxp1=x(i2) - x(i1)
1327  dyp1=y(i2) - y(i1)
1328  dxp2=x(i3) - x(i2)
1329  dyp2=y(i3) - y(i2)
1330  dxp3=x(i1) - x(i3)
1331  dyp3=y(i1) - y(i3)
1332  CALL element_crosses_dateline(dxp1, dxp2, dxp3, crosses_dateline)
1333  IF (crosses_dateline) THEN
1334  CALL correct_dx_gt180(dxp1)
1335  CALL correct_dx_gt180(dxp2)
1336  CALL correct_dx_gt180(dxp3)
1337  ENDIF
1338 
1339  pdlib_ien(1,ie) = - dyp2
1340  pdlib_ien(2,ie) = dxp2
1341  pdlib_ien(3,ie) = - dyp3
1342  pdlib_ien(4,ie) = dxp3
1343  pdlib_ien(5,ie) = - dyp1
1344  pdlib_ien(6,ie) = dxp1
1345  dbltmp = (dxp3*dyp1 - dyp3*dxp1)*0.5
1346  pdlib_tria(ie) = dbltmp
1347  IF (pdlib_tria(ie) .lt. tiny(1.)) THEN
1348  WRITE(*,*) pdlib_ien(:,ie)
1349  WRITE(*,*)
1350  WRITE(*,*) 'AREA SMALLER ZERO IN PDLIB', ie, ne, pdlib_tria(ie)
1351  stop
1352  ENDIF
1353 
1354  pdlib_ccon(i1) = pdlib_ccon(i1) + 1
1355  pdlib_ccon(i2) = pdlib_ccon(i2) + 1
1356  pdlib_ccon(i3) = pdlib_ccon(i3) + 1
1357  tria03 = pdlib_tria(ie)/3.d0
1358  pdlib_si(i1) = pdlib_si(i1) + tria03
1359  pdlib_si(i2) = pdlib_si(i2) + tria03
1360  pdlib_si(i3) = pdlib_si(i3) + tria03
1361  pdlib_tria03(ie) = tria03
1362  ENDDO
1364  end subroutine computetria_ien_si_ccon
1365  !**********************************************************************
1366  !* *
1367  !**********************************************************************
1368  subroutine element_crosses_dateline(RX1, RX2, RX3, CROSSES_DATELINE)
1369  ! Purpose: understanding if an element crosses the dateline.
1370  ! An element crossing the dateline has, e.g. a node with lon < 180
1371  ! and another 2 with lon > -180
1372 
1373  REAL(rkind), INTENT(IN) :: RX1, RX2, RX3
1374  LOGICAL, INTENT(OUT) :: CROSSES_DATELINE
1375  INTEGER :: R1GT180, R2GT180, R3GT180
1376  r1gt180 = merge(1, 0, abs(rx1).GT.180)
1377  r2gt180 = merge(1, 0, abs(rx2).GT.180)
1378  r3gt180 = merge(1, 0, abs(rx3).GT.180)
1379  ! if R1GT180+R2GT180+R3GT180 .eq. 0 the element does not cross the dateline
1380  ! if R1GT180+R2GT180+R3GT180 .eq. 1 the element contains the pole
1381  ! if R1GT180+R2GT180+R3GT180 .eq. 2 the element crosses the dateline
1382  crosses_dateline = r1gt180+r2gt180+r3gt180 .EQ. 2
1383  end subroutine element_crosses_dateline
1384  !**********************************************************************
1385  !* *
1386  !**********************************************************************
1387  subroutine correct_dx_gt180(DXP)
1388  ! Purpose: the absolute zonal distance between 2 points is always <= 180
1389  ! This subroutine corrects the zonal distance to satifsy
1390  ! this requirement
1391 
1392  REAL(rkind), INTENT(INOUT) :: DXP
1393  IF (dxp .le. -180) THEN
1394  dxp=dxp + 360
1395  END IF
1396  IF (dxp .ge. 180) THEN
1397  dxp=dxp - 360
1398  END IF
1399  end subroutine correct_dx_gt180
1400  !**********************************************************************
1401  !* *
1402  !**********************************************************************
  !> Builds the sparse-matrix helper structures for the npa resident+ghost
  !> nodes in Compressed Sparse Row (CSR) format:
  !>  - PDLIB_IE_CELL / PDLIB_POS_CELL (and the 2d *_CELL2 variants):
  !>    for every node, the connected elements and the node's position
  !>    (1..3) inside each element, in runtime loop order
  !>  - PDLIB_NNZ: number of nonzeros of the node-connectivity matrix
  !>  - PDLIB_IA / PDLIB_JA: 1-based CSR row pointers / column indices
  !>    (PDLIB_IA_P / PDLIB_JA_P: the same, 0-based)
  !>  - PDLIB_POSI, PDLIB_I_DIAG, PDLIB_JA_IE: positions of the matrix
  !>    entries belonging to each node/element and the diagonal
  subroutine computeia_ja_posi_nnz
    use yowelementpool, only: ne, ne_global, ine, ielg
    use yowerr, only: parallel_abort
    use yowdatapool, only: myrank

    ! per-node fill counter while building CELLVERTEX (automatic array)
    integer CHILF(npa)
    integer istat
    ! max. number of elements connected to any single node
    integer MAXMNECON
    integer IE, J, I, IP, K, IP_J, IP_K, IP_I
    integer I1, I2, I3, POS, POS_J, POS_K
    integer COUNT_MAX
    ! CELLVERTEX(ip,c,1): c-th element connected to node ip;
    ! CELLVERTEX(ip,c,2): position of ip inside that element.
    ! PTABLE: per (node, connected element) row of node ids, positions
    ! and the element number (columns documented at the fill loop below)
    integer, allocatable :: CELLVERTEX(:,:,:), PTABLE(:,:)
    ! scratch row mask: itmp(col) = 1 if column col occurs in current row
    integer :: ITMP(npa)
    maxmnecon = maxval(pdlib_ccon)
    ALLOCATE(cellvertex(npa,maxmnecon,2), stat=istat)
    IF (istat/=0) CALL parallel_abort('ComputeIA_JA_POSI_NNZ, allocate error 4')
    !
    ! invert the element->node mapping: for each node, collect the
    ! connected elements and the node's local position in them
    cellvertex(:,:,:) = 0
    chilf = 0
    DO ie = 1, ne
      DO j=1,3
        i = ine(j,ie)
        chilf(i) = chilf(i)+1
        cellvertex(i,chilf(i),1) = ie
        cellvertex(i,chilf(i),2) = j
      END DO
    ENDDO
    !
    ! Emulates loop structure and counts max. entries in the different pointers that have to be designed
    ! (count_max ends up as the sum over pdlib_ccon)
    !
    j = 0
    DO ip = 1, npa
      DO i = 1, pdlib_ccon(ip)
        j = j + 1
      END DO
    END DO
    count_max = j
    ALLOCATE (pdlib_ie_cell(count_max), pdlib_pos_cell(count_max), stat=istat)
    IF (istat/=0) CALL parallel_abort('wwm_fluctsplit, allocate error 5a')
    ALLOCATE (pdlib_ie_cell2(maxmnecon,npa), pdlib_pos_cell2(maxmnecon, npa), stat=istat)
    IF (istat/=0) CALL parallel_abort('wwm_fluctsplit, allocate error 5b')
    ! Just a remapping from CELLVERTEX ... Element number in the
    ! order of the occurence in the loop during runtime
    pdlib_ie_cell = 0
    ! Just a remapping from CELLVERTEX ... Position of the node
    ! in the Element index -"-
    pdlib_pos_cell = 0
    j = 0
    DO ip = 1, npa
      DO i = 1, pdlib_ccon(ip)
        j = j + 1
        pdlib_ie_cell(j) = cellvertex(ip,i,1)
        pdlib_pos_cell(j) = cellvertex(ip,i,2)
        pdlib_ie_cell2(i,ip) = cellvertex(ip,i,1)
        pdlib_pos_cell2(i,ip) = cellvertex(ip,i,2)
      END DO
    END DO
    deallocate(cellvertex)

    ALLOCATE(ptable(count_max,7), stat=istat)
    IF (istat/=0) CALL parallel_abort('wwm_fluctsplit, allocate error 6')
    ALLOCATE(pdlib_ja_ie(3,3,ne), stat=istat)
    IF (istat/=0) CALL parallel_abort('wwm_fluctsplit, allocate error 6.1')

    ! fill PTABLE: for each (node, connected element) pair, the node and
    ! its two cyclic neighbors within the element, their positions, and
    ! the element number. NOTE(review): the real literal 0. is converted
    ! to integer 0 here; kept byte-identical.
    j = 0
    ptable(:,:) = 0.
    DO ip = 1, npa
      DO i = 1, pdlib_ccon(ip)
        j = j + 1
        ie = pdlib_ie_cell(j)
        pos = pdlib_pos_cell(j)
        i1 = ine(1,ie)
        i2 = ine(2,ie)
        i3 = ine(3,ie)
        ! cyclic successor positions of pos within (1,2,3)
        IF (pos == 1) THEN
          pos_j = 2
          pos_k = 3
        ELSE IF (pos == 2) THEN
          pos_j = 3
          pos_k = 1
        ELSE
          pos_j = 1
          pos_k = 2
        END IF
        ip_i = ip
        ip_j = ine(pos_j,ie)
        ip_k = ine(pos_k,ie)
        ptable(j,1) = ip_i ! Node numbers of the connected elements
        ptable(j,2) = ip_j
        ptable(j,3) = ip_k
        ptable(j,4) = pos ! Position of the nodes in the element index
        ptable(j,5) = pos_j
        ptable(j,6) = pos_k
        ptable(j,7) = ie ! Element numbers same as PDLIB_IE_CELL
      END DO
    END DO
    !
    ! Count number of nonzero entries in the matrix ...
    ! Basically, each connected element may have two off-diagonal
    ! contribution and one diagonal related to the connected vertex itself ...
    !
    j = 0
    pdlib_nnz = 0
    itmp = 0
    DO ip = 1, npa
      ! mark every column touched by row ip, then add the distinct count
      itmp = 0
      DO i = 1, pdlib_ccon(ip)
        j = j + 1
        ip_j = ptable(j,2)
        ip_k = ptable(j,3)
        itmp(ip) = 1
        itmp(ip_j) = 1
        itmp(ip_k) = 1
      END DO
      pdlib_nnz = pdlib_nnz + sum(itmp)
    END DO
    !
    ! Allocate sparse matrix pointers using the Compressed Sparse Row Format CSR ... this is now done only of npa nodes
    ! The next step is to do it for the whole Matrix npa * MSC * MDC
    ! see ...:x
    !
    ALLOCATE (pdlib_ja(pdlib_nnz), pdlib_ia(npa+1), pdlib_ja_p(pdlib_nnz), stat=istat)
    IF (istat/=0) CALL parallel_abort('wwm_fluctsplit, allocate error 6a')
    ALLOCATE (pdlib_ia_p(npa+1), pdlib_posi(3,count_max), pdlib_i_diag(npa), stat=istat)
    IF (istat/=0) CALL parallel_abort('wwm_fluctsplit, allocate error 6b')
    pdlib_ja = 0
    pdlib_ia = 0
    pdlib_ja_p = 0
    pdlib_ia_p = 0
    pdlib_posi = 0
    ! Points to the position of the matrix entry in the mass matrix
    ! according to the CSR matrix format see p. 124
    j = 0
    k = 0
    pdlib_ia(1) = 1
    pdlib_ia_p(1) = 0
    DO ip = 1, npa ! Run through all rows
      itmp=0
      DO i = 1, pdlib_ccon(ip) ! Check how many entries there are ...
        j = j + 1
        ip_j = ptable(j,2)
        ip_k = ptable(j,3)
        itmp(ip) = 1
        itmp(ip_j) = 1
        itmp(ip_k) = 1
      END DO
      ! emit the marked columns in ascending order (1-based and 0-based)
      DO i = 1, npa ! Run through all columns
        IF (itmp(i) .GT. 0) THEN
          k = k + 1
          pdlib_ja(k) = i
          pdlib_ja_p(k) = i-1
        END IF
      END DO
      pdlib_ia(ip + 1) = k + 1
      pdlib_ia_p(ip + 1) = k
    END DO
    ! PDLIB_POSI(1:3,j): CSR index of the (ip,ip), (ip,ip_j), (ip,ip_k)
    ! entries for the j-th (node, connected element) pair;
    ! PDLIB_I_DIAG(ip): CSR index of the diagonal entry of row ip
    pdlib_posi = 0
    j = 0
    DO ip = 1, npa
      DO i = 1, pdlib_ccon(ip)
        j = j + 1
        ip_j = ptable(j,2)
        ip_k = ptable(j,3)
        DO k = pdlib_ia(ip), pdlib_ia(ip+1) - 1
          IF (ip == pdlib_ja(k)) pdlib_posi(1,j) = k
          IF (ip == pdlib_ja(k)) pdlib_i_diag(ip) = k
          IF (ip_j == pdlib_ja(k)) pdlib_posi(2,j) = k
          IF (ip_k == pdlib_ja(k)) pdlib_posi(3,j) = k
        END DO
      END DO
    END DO
    ! PDLIB_JA_IE(pos,1:3,ie): the same three CSR indices, addressed by
    ! element number and the node's position within the element
    j=0
    DO ip=1,npa
      DO i = 1, pdlib_ccon(ip)
        j = j + 1
        ie = pdlib_ie_cell(j)
        pos = pdlib_pos_cell(j)
        i1 = pdlib_posi(1,j)
        i2 = pdlib_posi(2,j)
        i3 = pdlib_posi(3,j)
        pdlib_ja_ie(pos,1,ie) = i1
        pdlib_ja_ie(pos,2,ie) = i2
        pdlib_ja_ie(pos,3,ie) = i3
      END DO
    END DO
    deallocate(ptable)
  end subroutine computeia_ja_posi_nnz
1594  !**********************************************************************
1595  !* *
1596  !**********************************************************************
  !> Releases all module-level data held by the pdlib helper modules:
  !> the rank module, the exchange module, the element pool and the
  !> node pool.
  subroutine finalizepd()
    use yownodepool, only: finalizenodepool

    ! NOTE(review): the teardown order is preserved as-is; inter-module
    ! dependencies between the finalizers are not visible from here.
    call finalizerankmodule()
    call finalizeexchangemodule()
    call finalizeelementpool()
    call finalizenodepool()
  end subroutine finalizepd
1608 
1609 end module yowpdlibmain
1610 !**********************************************************************
1611 !* *
1612 !**********************************************************************
yownodepool::ghostgl
integer, dimension(:), allocatable, public ghostgl
Ghost global to local mapping np_global long.
Definition: yownodepool.F90:129
yowerr::parallel_abort
subroutine parallel_abort(string, error)
Definition: yowerr.F90:43
yowpdlibmain::postpartition2
subroutine postpartition2(INE_global)
this collects all data which depends on ghost information alter: ne, INE, x, y, z,...
Definition: yowpdlibmain.F90:1181
include
cmake src_list cmake include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/check_switches.cmake) check_switches("$
Definition: CMakeLists.txt:15
yowelementpool::ielg
integer, dimension(:), allocatable, target, public ielg
global element array.
Definition: yowelementpool.F90:65
yownodepool::pdlib_ia
integer, dimension(:), allocatable, target, public pdlib_ia
Definition: yownodepool.F90:81
yowexchangemodule::pdlib_exchange1dreal
subroutine, public pdlib_exchange1dreal(U)
exchange values in U.
Definition: yowexchangeModule.F90:251
yowrankmodule::initrankmodule
subroutine, public initrankmodule()
allocate and exchange
Definition: yowrankModule.F90:76
yowpdlibmain::computetria_ien_si_ccon
subroutine computetria_ien_si_ccon
Definition: yowpdlibmain.F90:1304
w3gdatmd::ygrd
double precision, dimension(:,:), pointer ygrd
Definition: w3gdatmd.F90:1205
yowelementpool
Definition: yowelementpool.F90:38
yowexchangemodule::initnbrdomains
subroutine, public initnbrdomains(nConnD)
Definition: yowexchangeModule.F90:224
w3gdatmd::zb
real, dimension(:), pointer zb
Definition: w3gdatmd.F90:1195
yownodepool::pdlib_ie_cell
integer, dimension(:), allocatable, target, public pdlib_ie_cell
Definition: yownodepool.F90:83
yownodepool::maxconnnodes
integer, public maxconnnodes
max number of conntected nodes to a node
Definition: yownodepool.F90:106
yownodepool::pdlib_i_diag
integer, dimension(:), allocatable, target, public pdlib_i_diag
Definition: yownodepool.F90:85
w3gdatmd::xgrd
double precision, dimension(:,:), pointer xgrd
Definition: w3gdatmd.F90:1205
yowelementpool::finalizeelementpool
subroutine, public finalizeelementpool()
Definition: yowelementpool.F90:102
yowpdlibmain::assignmesh
subroutine assignmesh(MNP, MNE)
Definition: yowpdlibmain.F90:258
yowpdlibmain::exchangeghostids
subroutine exchangeghostids
exchange Ghost Ids so every thread knows which nodes he has to send to the other partition.
Definition: yowpdlibmain.F90:1052
yownodepool::iplg
integer, dimension(:), allocatable, public iplg
Node local to global mapping.
Definition: yownodepool.F90:116
yowpdlibmain::findghostnodes
subroutine findghostnodes
find the ghost nodes of the local domain alter: ng, ghosts(), ghostlg, ghostgl, npa,...
Definition: yowpdlibmain.F90:845
yowerr
Has some subroutine to make a nice error message.
Definition: yowerr.F90:39
yownodepool::npa
integer, public npa
number of ghost + resident nodes this partition holds
Definition: yownodepool.F90:99
yowpdlibmain::initfromgriddim
subroutine, public initfromgriddim(MNP, MNE, INE_global, secDim, MPIcomm)
Definition: yowpdlibmain.F90:66
yownodepool::connnodes_data
integer, dimension(:,:), allocatable, public connnodes_data
conntected Node Array.
Definition: yownodepool.F90:112
yownodepool::pdlib_ccon
integer, dimension(:), allocatable, target, public pdlib_ccon
Definition: yownodepool.F90:81
yowdatapool::rkind
integer, parameter rkind
double precision.
Definition: yowdatapool.F90:47
yownodepool::pdlib_ia_p
integer, dimension(:), allocatable, target, public pdlib_ia_p
Definition: yownodepool.F90:82
yownodepool::pdlib_si
real(rkind), dimension(:), allocatable, target, public pdlib_si
Definition: yownodepool.F90:80
yowelementpool::ne
integer, public ne
number of local elements
Definition: yowelementpool.F90:48
yowpdlibmain::findconndomains
subroutine findconndomains
find the number of connected domains and their ghosts 1) Iterate over all ghost nodes and look at the...
Definition: yowpdlibmain.F90:968
yownodepool::np_global
integer, public np_global
number of nodes, global
Definition: yownodepool.F90:89
yowsidepool::ns
integer ns
Number of sides, local.
Definition: yowsidepool.F90:46
yowrankmodule::ipgl_npa
integer, dimension(:), allocatable, public ipgl_npa
Definition: yowrankModule.F90:70
yowdatapool::myrank
integer, save myrank
The thread id.
Definition: yowdatapool.F90:62
yowdatapool::debugparmetis
logical, parameter debugparmetis
Definition: yowdatapool.F90:51
yowdatapool::debugprepartition
logical, parameter debugprepartition
Definition: yowdatapool.F90:49
yownodepool::ipgl
integer, dimension(:), allocatable, public ipgl
Node global to local mapping np_global long.
Definition: yownodepool.F90:120
yownodepool::y
real(rkind), dimension(:), allocatable, target, public y
Definition: yownodepool.F90:79
yownodepool::pdlib_ja_ie
integer, dimension(:,:,:), allocatable, target, public pdlib_ja_ie
Definition: yownodepool.F90:82
yownodepool
Has data that belong to nodes.
Definition: yownodepool.F90:39
yowpdlibmain::finalizepd
subroutine, public finalizepd()
Definition: yowpdlibmain.F90:1598
w3servmd
Definition: w3servmd.F90:3
yownodepool::pdlib_ja_p
integer, dimension(:), allocatable, target, public pdlib_ja_p
Definition: yownodepool.F90:82
yownodepool::ghosts
type(t_node) function, pointer, public ghosts(id)
return pointer to the (global) (ghost) node Ghost nodes are nodes in the global node array,...
Definition: yownodepool.F90:172
yownodepool::x
real(rkind), dimension(:), allocatable, target, public x
coordinates of the local + ghost nodes.
Definition: yownodepool.F90:79
yownodepool::pdlib_ja
integer, dimension(:), allocatable, target, public pdlib_ja
Definition: yownodepool.F90:81
yowrankmodule
Provides access to some information of all threads e.g.
Definition: yowrankModule.F90:44
yowpdlibmain::postpartition
subroutine postpartition
recalculate some variables parmetis change the number of sides per domain.
Definition: yowpdlibmain.F90:782
yowelementpool::belongto
logical function, public belongto(ele_in, rank)
Returns true if the element belongs to rank.
Definition: yowelementpool.F90:74
w3servmd::print_memcheck
subroutine print_memcheck(iun, msg)
Write memory statistics if requested.
Definition: w3servmd.F90:2033
yowpdlibmain::element_crosses_dateline
subroutine element_crosses_dateline(RX1, RX2, RX3, CROSSES_DATELINE)
Definition: yowpdlibmain.F90:1369
yownodepool::t_node
Holds the nodes data.
Definition: yownodepool.F90:48
yownodepool::pdlib_tria03
real(rkind), dimension(:), allocatable, target, public pdlib_tria03
Definition: yownodepool.F90:80
yownodepool::pdlib_pos_cell
integer, dimension(:), allocatable, target, public pdlib_pos_cell
Definition: yownodepool.F90:83
yownodepool::ghostlg
integer, dimension(:), allocatable, public ghostlg
Ghost local to global mapping ng long.
Definition: yownodepool.F90:125
yowexchangemodule::neighbordomains
type(t_neighbordomain), dimension(:), allocatable, public neighbordomains
Knows for all domain neighbors which nodes we must send to or recv from; neighbor domains run from 1 to nCon...
Definition: yowexchangeModule.F90:102
yownodepool::np
integer, public np
number of nodes, local
Definition: yownodepool.F90:93
yowexchangemodule::creatempitypes
subroutine, public creatempitypes()
Definition: yowexchangeModule.F90:236
yowdatapool
Has fancy data.
Definition: yowdatapool.F90:39
yowsidepool
Definition: yowsidepool.F90:38
yowexchangemodule
Has only the ghost nodes assign to a neighbor domain.
Definition: yowexchangeModule.F90:39
file
file(STRINGS ${CMAKE_BINARY_DIR}/switch switch_strings) separate_arguments(switches UNIX_COMMAND $
Definition: CMakeLists.txt:3
yowdatapool::debugpartition
logical, parameter debugpartition
write partition information into file fort.600.
Definition: yowdatapool.F90:55
yownodepool::z
real(rkind), dimension(:), allocatable, target, public z
Definition: yownodepool.F90:79
yowdatapool::comm
integer, save, public comm
MPI Communicator.
Definition: yowdatapool.F90:66
yowerr::abort
subroutine abort(string, line, file, errno)
print various error strings and exit.
Definition: yowerr.F90:93
yowdatapool::debugpostpartition
logical, parameter debugpostpartition
Definition: yowdatapool.F90:50
yowelementpool::ine
integer, dimension(:,:), allocatable, target, public ine
number of elements of the augmented domain
Definition: yowelementpool.F90:56
yowrankmodule::finalizerankmodule
subroutine, public finalizerankmodule()
Definition: yowrankModule.F90:249
yowelementpool::ne_global
integer, public ne_global
number of elements, global
Definition: yowelementpool.F90:45
yownodepool::pdlib_tria
real(rkind), dimension(:), allocatable, target, public pdlib_tria
Definition: yownodepool.F90:80
yownodepool::pdlib_posi
integer, dimension(:,:), allocatable, target, public pdlib_posi
Definition: yownodepool.F90:85
yowpdlibmain::runparmetis
subroutine runparmetis(MNP)
Collect all data for parmetis und partition the mesh after that, we know for every node the domain ID...
Definition: yowpdlibmain.F90:422
yownodepool::np_perproc
integer, dimension(:), allocatable, public np_perproc
Numbers of Nodes pro Processor.
Definition: yownodepool.F90:133
yownodepool::nodes_global
type(t_node), dimension(:), allocatable, target, public nodes_global
all nodes with their data.
Definition: yownodepool.F90:103
w3gdatmd
Definition: w3gdatmd.F90:16
yowdatapool::itype
integer, save itype
MPI Integer Type.
Definition: yowdatapool.F90:70
yowpdlibmain
Definition: yowpdlibmain.F90:42
yownodepool::pdlib_ien
real(rkind), dimension(:,:), allocatable, target, public pdlib_ien
Definition: yownodepool.F90:80
yowexchangemodule::setdimsize
subroutine, public setdimsize(second)
set the size of the second and third dimension for exchange
Definition: yowexchangeModule.F90:375
yownodepool::pdlib_ie_cell2
integer, dimension(:,:), allocatable, target, public pdlib_ie_cell2
Definition: yownodepool.F90:84
yownodepool::pdlib_pos_cell2
integer, dimension(:,:), allocatable, target, public pdlib_pos_cell2
Definition: yownodepool.F90:84
yowpdlibmain::findconnnodes
subroutine findconnnodes(INE_global)
create the connected Nodes array loop over all elements and their nodes.
Definition: yowpdlibmain.F90:342
yowexchangemodule::nconndomains
integer, public nconndomains
Number of neighbor domains.
Definition: yowexchangeModule.F90:105
yownodepool::nodes
type(t_node) function, pointer, public nodes(id_local)
return pointer to the (global) node from the local id.
Definition: yownodepool.F90:159
yownodepool::finalizenodepool
subroutine, public finalizenodepool()
Definition: yownodepool.F90:220
yowpdlibmain::real_mpi_barrier_pdlib
subroutine real_mpi_barrier_pdlib(TheComm, string)
Definition: yowpdlibmain.F90:178
yownodepool::np_perprocsum
integer, dimension(:), allocatable, public np_perprocsum
Number of Nodes pro Processor totalize.
Definition: yownodepool.F90:138
yowexchangemodule::finalizeexchangemodule
subroutine, public finalizeexchangemodule()
Definition: yowexchangeModule.F90:381
yowdatapool::ntasks
integer, save ntasks
Number of threads.
Definition: yowdatapool.F90:58
yownodepool::pdlib_nnz
integer, public pdlib_nnz
Definition: yownodepool.F90:90
yownodepool::ng
integer, public ng
number of ghost nodes this partition holds
Definition: yownodepool.F90:96
yowpdlibmain::prepartition
subroutine prepartition
pre-partition the mesh just divide the mesh into nTasks parts and create a premature iplg alter: np_p...
Definition: yowpdlibmain.F90:293
yowsidepool::ns_global
integer ns_global
Number of sides, global Number of nodes neighbors?.
Definition: yowsidepool.F90:43