UPP (develop)
Loading...
Searching...
No Matches
SETUP_SERVERS.f
Go to the documentation of this file.
1
5
20 SUBROUTINE setup_servers(MYPE, &
21 & NPES, &
22 & INUMQ, &
23 & MPI_COMM_COMP, &
24 & MPI_COMM_INTER)
25!
26
27!- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
28 implicit none
29!
30 include 'mpif.h'
31
32 integer,intent(out) :: MYPE,NPES,INUMQ,MPI_COMM_COMP, &
33 MPI_COMM_INTER
34 integer comdup,ierr,npes_mod,iquilt_group,iqserver, &
35 istaq,iendq,icolor,istaxx,iendxx,ixx,irlr,icc,iss,issl, &
36 jj,i,kk
37 integer iworld,igroup,igroup_x,iworld_minus
38 integer, allocatable :: irank ( : )
39 logical yes
40 character*4 get
41!-----------------------------------------------------------------------
42!
43! INITIALIZE MPI
44! RETRIEVE THE NUMBER OF TOTAL MPI TASKS AND MY RANK
45!
46 call mpi_init(ierr)
47 call mpi_comm_rank(mpi_comm_world,mype,ierr)
48 write(*,*)' mype=',mype,' ierr=',ierr
49 call mpi_comm_size(mpi_comm_world,npes,ierr)
50 write(*,*)' npes=',npes,' ierr=',ierr
51!
52! SPECIFY ONE I/O SERVER AS LONG AS THERE ARE MORE THAN 1 MPI TASK
53!
54 if ( npes > 1 ) then
55! npes_mod = npes - 1
56 npes_mod = npes ! turn off quilt
57 else
58 npes_mod = 1
59 end if
60!
61! AT THIS POINT NPES IS THE TOTAL NUMBER OF MPI TASKS. WE WILL
62! RESET THIS AT THE END OF THE SUBROUTINE TO THE NUMBER OF MPI
63! TASKS THAT ARE WORKING ON THE POSTING
64!
65! FIRST, HOWEVER, WE NEED TO MAKE SURE THAT A SUFFICIENT NUMBER
66! OF MPI TASKS HAVE BEEN INITIATED. IF NOT, WE WILL STOP.
67!
68 IF ( npes < npes_mod ) THEN
69 print *, ' ***********************************************'
70 print *, ' ***********************************************'
71 print *, ' *************MAJOR PROBLEM*********************'
72 print *, ' *************MAJOR PROBLEM*********************'
73 print *, ' *************MAJOR PROBLEM*********************'
74 print *, ' *************MAJOR PROBLEM*********************'
75 print *
76 print *, ' THERE ARE INSUFFICIENT MPI TASKS TO CONTINUE'
77 print *, ' YOU MUST SPECIFY AT LEAST ',npes_mod,' TASKS'
78 print *, ' STOPPING NOW'
79 print *, ' HASTA LA VISTA BABY'
80 print *
81 print *, ' *************MAJOR PROBLEM*********************'
82 print *, ' *************MAJOR PROBLEM*********************'
83 print *, ' *************MAJOR PROBLEM*********************'
84 print *, ' *************MAJOR PROBLEM*********************'
85 print *, ' ***********************************************'
86 print *, ' ***********************************************'
87 CALL mpi_abort(mpi_comm_world,1,ierr)
88 END IF
89!
90! OK, WE HAVE A SUFFICIENT NUMBER OF MPI TASKS TO CONTINUE
91!
92! HOW MANY GROUPS OF SERVERS ? THE DEFAULT IS 1 GROUP
93! THE ENVIRONMENT VARIABLE, SERVER_GROUPS, CAN BE USED TO
94! SPECIFY MORE SERVER GROUPS
95!
96 get = '1'
97!was call getenv('SERVER_GROUPS',get)
98 read(get,fmt='(i4)') iquilt_group
99 iquilt_group = max(iquilt_group,1)
100!
101! ERROR CHECK NUMBER OF GROUPS - THE MAXIMUM IS 100 - THIS IS A LOT
102!
103 if ( iquilt_group > 100 ) then
104 print *, ' ***** IQUILT_GROUP IS GREATER THAN 100'
105 print *, ' ***** DO YOU REALLY WANT THIS ?'
106 print *, ' ***** IF SO THEN INCREASE SIZE IN mpp.h'
107 print *, ' ***** ALSO, CHANGE IF CHECK IN SETUP_SERVERS'
108 print *, ' ***** RESETTING THE NUMBER OF SERVER GROUPS TO 100'
109 print *, ' ***** WE ARE CONTINUING .... '
110 iquilt_group = 100
111 end if
112 if ( mype == 0 ) then
113 print *, ' we will try to run with ',iquilt_group,' server groups'
114 end if
115!
116! COMPUTE THE NUMBER OF SERVERS PER GROUP
117! ALL MPI TASKS BEYOND NPES_MOD WILL BE SERVERS
118! IF THE NUMBER OF SERVERS IS NOT EQUALLY DIVISIBLE BY
119! THE NUMBER OF GROUPS OF SERVERS THEN SOME GROUPS MAY HAVE
120! MORE SERVERS THEN OTHERS - THIS IS FINE
121! NOTE THAT WE REQUIrE AT LEAST ONE SERVER PER GROUP
122! WE MAY NEED TO REDUCE THE NUMBER OF SERVER GROUPS IF
123! IT EXCEEDS THE NUMBER OF SERVERS
124!
125 iqserver = npes - npes_mod
126 if ( iqserver == 0 ) then ! iquilt_group=0 for running with no quilt
127 if ( mype == 0 ) then
128 print *, ' *** you specified 0 I/O servers '
129 print *, ' CHKOUT will write a file'
130 end if
131 iquilt_group = 0
132 inumq = 0
133 else ! iquilt_group=1 for running with 1 quilt
134 call para_range(1,iqserver,1,0,istaq,iendq)
135 inumq = iendq-istaq+1
136 if ( mype == 0 ) print *, ' i, inumq = ',i+1,inumq
137 end if
138 if ( iquilt_group > iqserver ) then
139 iquilt_group = iqserver
140 print *, ' ***** NOT ENOUGH SERVERS'
141 print *, ' ***** WE NEED TO REDUCE THE NUMB OF SERVER GROUPS'
142 print *, ' ***** NUMB OF SERVER GROUPS IS ', iquilt_group
143 end if
144
145! do i = 0, iquilt_group - 1
146! call para_range(1,iqserver,iquilt_group,i,istaq,iendq)
147! inumq(i+1) = iendq-istaq+1
148! if ( mype == 0 ) print *, ' i, inumq = ',i+1,inumq(i+1)
149! end do
150
151
152!
153! SETUP THE "COLOR" FOR MPI_COMM_SPLIT
154! THOSE TASKS WHICH WILL DO MODEL INTEGRATION WILL BE COLOR 0
155! THE SERVER TASKS WILL HAVE THE COLOR OF THE GROUP NUMBER THAT
156! THEY WILL BELONG
157!
158 if ( mype < npes_mod ) then
159 icolor = 0
160 else
161 istaxx = npes_mod
162! do i = 1, 1 ! modification for using only one quilt server group
163 iendxx = istaxx + inumq - 1
164 if ( mype >= istaxx .and. mype <= iendxx ) then
165 icolor = 1
166 end if
167! istaxx = iendxx + 1
168! end do
169 end if
170! print *,'mype=',mype,'icolor=',icolor
171!
172! SPLIT THE COMMUNICATOR - THE NEW INTRACOMMUNICATOR FOR ALL TASKS
173! IS MPI_COMM_COMP. MPI_COMM_WORLD IS STILL AVAILABLE BUT IT DOES
174! REFER TO ALL THE MPI TASKS ( MODEL INTEGRATION AND I/O SERVING )
175!
176 call mpi_comm_dup(mpi_comm_world,comdup,ierr)
177 call mpi_comm_split(comdup,icolor,mype,mpi_comm_comp,ierr)
178! print *,'mype=',mype,'npes=',npes,'after comm split'
179!
180! AT THIS POINT WE HAVE A NEW COMMUNICATOR, MPI_COMM_COMP,
181! THAT CAN BE USED BY THE FORECASTS TASKS AND THE I/O SERVER TASKS
182! FOR THEIR INTERNAL COMMUNICATIONS. ONTO THE INTERCOMMUNICATORS ...
183!
184! NOW WE MUST CREATE THE INTERCOMMUNICATORS FOR USE BETWEEN THE MPI
185! TASKS DOING THE MODEL INTEGRATION AND THE MPI TASKS FOR EACH
186! SERVER GROUP. THE FIRST STEP IS TO EXCLUDE THE TASKS THAT DONT
187! BELONG. WE WILL DO THIS FOR EACH SERVER GROUP BY EXCLUDING THE TASKS
188! FROM ALL OF THE OTHER SERVER GROUPS.
189!
190 allocate ( irank( iqserver ) )
191 ixx = npes_mod
192 do i = 1, iquilt_group
193 yes = .true.
194 if ( mype < npes_mod ) then
195 irlr = ixx
196 else
197 irlr = 0
198 end if
199 icc = 0
200 iss = npes_mod
201! THIS IS THE FIRST POSSIBLE TASK ID THAT COULD BE EXCLUDED
202 do jj = 1, iquilt_group
203 if ( jj /= i ) then
204 issl = iss
205 do kk = 1, inumq
206 icc = icc + 1
207 irank(icc) = issl
208 if ( mype == issl ) yes = .false.
209 issl = issl + 1
210 end do
211 end if
212 iss = iss + inumq
213 end do
214!
215! AT THIS POINT WE HAVE AN ARRAY, IRANK, WITH TASK IDS TO EXCLUDE
216! THERE ARE ICC OF THEM.
217! CREATE A NEW GROUP WITH THE TASKS FROM THE OTHER SERVER GROUPS
218! EXCLUDED AND THEN CREATE A NEW COMMUNICATOR ( IWORLD_MINUS ) THAT
219! CONTAINS ONLY THE MPI TASKS DOING THE MODEL INTEGRATION AND THE
220! TASKS THAT BLONG TO THE SERVER GROUP WE ARE CONSIDERING.
221!
222 iworld = mpi_comm_world
223 call mpi_comm_group(iworld,igroup,ierr)
224 call mpi_group_excl(igroup,icc,irank,igroup_x,ierr)
225 call mpi_comm_create(iworld,igroup_x,iworld_minus,ierr)
226 call mpi_group_free(igroup,ierr)
227 call mpi_group_free(igroup_x,ierr)
228!
229! AT THIS POINT WE HAVE A COMMUNICATOR THAT EXCLUDES THE TASKS WE DONT WANT.
230! CREATE AN INTERCOMMUNICATOR FOR USE BETWEEN THE MPI TASKS DOING THE MODEL
231! INTEGRATION AND THE I/O SERVER GROUP WE ARE CONSIDERING. THIS PROCESS IS
232! A COLLECTIVE ROUTINE SO IT CAN ONLY BE DONE BY THE TASKS THAT HAVE NOT
233! BEEN EXCLUDED. SAVE THIS NEW COMMUNICATOR IN MPI_COMM_INTER FOR USE BY
234! THE TASKS THAT BELONG TO THE SERVER GROUP THAT WE ARE CONSIDERING. THE
235! TASKS THAT ARE PERFORMING THE MODEL INTEGRATION WILL REFERENCE
236! MPI_COMM_INTER_ARRAY() SINCE WE WILL NEED TO SELECT WHICH SERVER
237! GROUP WE WISH TO COMMUNICATE WITH.
238!
239 if ( yes ) then
240 call mpi_intercomm_create(mpi_comm_comp,0,iworld_minus,irlr,0, &
241 mpi_comm_inter,ierr)
242 end if
243!
244 call mpi_barrier(mpi_comm_world,ierr)
245!
246 end do ! end do for loop over the number of server groups
247!
248!***
249!*** NPES IS REALLY THE NUMBER OF TASKS WORKING ON THE MODEL INTEGRATION
250!***
251 npes = npes - iqserver
252 print *,'mype=',mype,'npes_new=',npes
253!
254 IF(mype==0) THEN
255 print *, ' The Posting is using ',npes,' MPI task'
256 print *, ' There are ',iqserver,' I/O servers'
257 END IF
258!***
259 deallocate ( irank )
260!
261 END
subroutine para_range(n1, n2, nprocs, irank, ista, iend)
Sets up decomposition values.
Definition PARA_RANGE.f:21
subroutine setup_servers(mype, npes, inumq, mpi_comm_comp, mpi_comm_inter)
This subroutine sets up the I/O servers.