WAVEWATCH III  beta 0.0.1
yowrankmodule Module Reference

Provides access to some information of all threads, e.g. iplg. More...

Data Types

type  t_rank
 

Functions/Subroutines

subroutine, public initrankmodule ()
 allocate and exchange More...
 
subroutine exchangeiplg ()
 send iplg from this thread to every neighbor thread More...
 
subroutine calcistart ()
 
subroutine, public finalizerankmodule ()
 

Variables

type(t_rank), dimension(:), allocatable, public rank
 Provides access to some information of all threads, e.g. iplg. More...
 
integer, dimension(:), allocatable, public ipgl_to_proc
 
integer, dimension(:), allocatable, public ipgl_tot
 
integer, dimension(:), allocatable, public ipgl_npa
 

Detailed Description

Provides access to some information of all threads, e.g. iplg.

Function/Subroutine Documentation

◆ calcistart()

subroutine yowrankmodule::calcistart

Definition at line 238 of file yowrankModule.F90.

238  use yowdatapool, only: ntasks, myrank
239  implicit none
240  integer :: ir
241 
242  rank(1)%IStart = 1
243  do ir=2, ntasks
244  rank(ir)%IStart = rank(ir-1)%IStart + rank(ir-1)%np
245  end do

References yowdatapool::myrank, yowdatapool::ntasks, and rank.

Referenced by initrankmodule().

◆ exchangeiplg()

subroutine yowrankmodule::exchangeiplg

send iplg from this thread to every neighbor thread

Definition at line 91 of file yowrankModule.F90.

91  use yownodepool, only: np, npa, iplg, np_global
92  use yowdatapool, only: ntasks, myrank, comm, itype
93  use mpi
94  implicit none
95  integer :: i, ierr, stat
96  integer :: sendRqst(nTasks), recvRqst(nTasks)
97  integer :: recvStat(MPI_STATUS_SIZE, nTasks), sendStat(MPI_STATUS_SIZE, nTasks)
98  integer IPglob, J, istat
99 
100  ! step1 exchange np
101  ! step2 exchange npa
102  ! step3 allocate rank%iplg
103  ! step4 exchange iplg
104 
105  ! step1 exchange np
106  ! post receives
107  do i=1, ntasks
108  if(i /= myrank+1) then
109  call mpi_irecv(rank(i)%np, 1, itype, i-1, &
110  42, comm, recvrqst(i), ierr)
111  if(ierr/=mpi_success) then
112  CALL parallel_abort("MPI_IRecv", ierr)
113  endif
114  else
115  recvrqst(i) = mpi_request_null
116  endif
117  end do
118 
119  ! post sends
120  do i=1, ntasks
121  if(i /= myrank+1) then
122  call mpi_isend(np, 1, itype, i-1, &
123  42, comm, sendrqst(i), ierr)
124  if(ierr/=mpi_success) then
125  CALL parallel_abort("MPI_ISend", ierr)
126  endif
127  else
128  sendrqst(i) = mpi_request_null
129  endif
130  end do
131 
132  rank(myrank+1)%np = np
133 
134  ! Wait for completion
135  call mpi_waitall(ntasks, recvrqst, recvstat,ierr)
136  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
137  call mpi_waitall(ntasks, sendrqst, sendstat,ierr)
138  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
139 
140  ! step2 exchange npa
141  ! post receives
142  do i=1, ntasks
143  if(i /= myrank+1) then
144  call mpi_irecv(rank(i)%npa, 1, itype, i-1, &
145  42, comm, recvrqst(i), ierr)
146  if(ierr/=mpi_success) then
147  CALL parallel_abort("MPI_IRecv", ierr)
148  endif
149  else
150  recvrqst(i) = mpi_request_null
151  endif
152  end do
153 
154  ! post sends
155  do i=1, ntasks
156  if(i /= myrank+1) then
157  call mpi_isend(npa, 1, itype, i-1, &
158  42, comm, sendrqst(i), ierr)
159  if(ierr/=mpi_success) then
160  CALL parallel_abort("MPI_ISend", ierr)
161  endif
162  else
163  sendrqst(i) = mpi_request_null
164  endif
165  end do
166 
167  rank(myrank+1)%npa = npa
168 
169  ! Wait for completion
170  call mpi_waitall(ntasks, recvrqst, recvstat,ierr)
171  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
172  call mpi_waitall(ntasks, sendrqst, sendstat,ierr)
173  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
174 
175  ! step3 allocate rank%iplg
176  do i=1, ntasks
177  if(allocated(rank(i)%iplg)) deallocate(rank(i)%iplg)
178  allocate(rank(i)%iplg(rank(i)%npa), stat=stat)
179  if(stat/=0) CALL abort('rank%iplg allocation failure')
180  rank(i)%iplg = 0
181  end do
182 
183  ! step4 exchange iplg
184  ! post receives
185  do i=1, ntasks
186  if(i /= myrank+1) then
187  call mpi_irecv(rank(i)%iplg, rank(i)%npa, itype, i-1, &
188  42, comm, recvrqst(i), ierr)
189  if(ierr/=mpi_success) then
190  CALL parallel_abort("MPI_IRecv", ierr)
191  endif
192  else
193  recvrqst(i) = mpi_request_null
194  endif
195  end do
196 
197  ! post sends
198  do i=1, ntasks
199  if(i /= myrank+1) then
200  call mpi_isend(iplg, npa, itype, i-1, &
201  42, comm, sendrqst(i), ierr)
202  if(ierr/=mpi_success) then
203  CALL parallel_abort("MPI_ISend", ierr)
204  endif
205  else
206  sendrqst(i) = mpi_request_null
207  endif
208  end do
209 
210  rank(myrank+1)%iplg = iplg
211 
212  ! Wait for completion
213  call mpi_waitall(ntasks, recvrqst, recvstat,ierr)
214  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
215  call mpi_waitall(ntasks, sendrqst, sendstat,ierr)
216  if(ierr/=mpi_success) CALL parallel_abort("waitall", ierr)
217 
218  allocate(ipgl_to_proc(np_global), ipgl_tot(np_global), stat=istat)
219  if(istat /= 0) CALL parallel_abort("allocatation error", 1)
220  do i=1,ntasks
221  DO j=1,rank(i)%np
222  ipglob=rank(i)%iplg(j)
223  ipgl_to_proc(ipglob)=i
224  ipgl_tot(ipglob)=j
225  END DO
226  END DO
227  allocate(ipgl_npa(np_global), stat=istat)
228  if(istat /= 0) CALL parallel_abort("allocatation error b", 1)
229  ipgl_npa=0
230  DO j=1,rank(myrank+1)%npa
231  ipglob=rank(myrank+1)%iplg(j)
232  ipgl_npa(ipglob)=j
233  END DO

References yowerr::abort(), yowdatapool::comm, ipgl_npa, ipgl_to_proc, ipgl_tot, yownodepool::iplg, yowdatapool::itype, yowdatapool::myrank, yownodepool::np, yownodepool::np_global, yownodepool::npa, yowdatapool::ntasks, yowerr::parallel_abort(), and rank.

Referenced by initrankmodule().

◆ finalizerankmodule()

subroutine, public yowrankmodule::finalizerankmodule

Definition at line 249 of file yowrankModule.F90.

249  implicit none
250  integer :: i
251 
252  if(allocated(rank)) then
253  do i=1, size(rank)
254  if(allocated(rank(i)%iplg)) deallocate(rank(i)%iplg)
255  end do
256  deallocate(rank)
257  endif

References rank.

Referenced by yowpdlibmain::finalizepd().

◆ initrankmodule()

subroutine, public yowrankmodule::initrankmodule

allocate and exchange

Definition at line 76 of file yowrankModule.F90.

76  use yowdatapool, only: ntasks, myrank
77  implicit none
78  integer :: stat
79 
80  if(allocated(rank)) deallocate(rank)
81  allocate(rank(ntasks), stat=stat)
82  if(stat/=0) CALL abort('rank allocation failure')
83 
84  call exchangeiplg()
85  call calcistart()

References yowerr::abort(), calcistart(), exchangeiplg(), yowdatapool::myrank, yowdatapool::ntasks, and rank.

Referenced by yowpdlibmain::initfromgriddim().

Variable Documentation

◆ ipgl_npa

◆ ipgl_to_proc

integer, dimension(:), allocatable, public yowrankmodule::ipgl_to_proc

Definition at line 69 of file yowrankModule.F90.

69  integer, public, allocatable :: IPGL_TO_PROC(:), ipgl_tot(:)

Referenced by exchangeiplg(), w3parall::get_jsea_ibelong(), w3parall::init_get_jsea_isproc(), and w3parall::synchronize_ipgl_etc_array().

◆ ipgl_tot

integer, dimension(:), allocatable, public yowrankmodule::ipgl_tot

◆ rank

type(t_rank), dimension(:), allocatable, public yowrankmodule::rank

Provides access to some information of all threads, e.g. iplg.

Note
range [1:nTasks]
rank(myrank) is filled with the values from this rank

Definition at line 68 of file yowrankModule.F90.

68  type(t_rank),public, allocatable :: rank(:)

Referenced by calcistart(), exchangeiplg(), finalizerankmodule(), initrankmodule(), w3parall::set_up_nseal_nsealm(), wmesmfmd::setupimpbmsk(), w3wdatmd::w3dimw(), and w3initmd::w3init().

yowerr::parallel_abort
subroutine parallel_abort(string, error)
Definition: yowerr.F90:43
yowrankmodule::ipgl_npa
integer, dimension(:), allocatable, public ipgl_npa
Definition: yowrankModule.F90:70
yownodepool
Has data that belong to nodes.
Definition: yownodepool.F90:39
yowdatapool
Has fancy data.
Definition: yowdatapool.F90:39