MODFLOW 6 version 6.7.0.dev1
USGS Modular Hydrologic Model
mpiruncontrolmodule Module Reference

Data Types

type  mpiruncontroltype
 

Functions/Subroutines

class(runcontroltype) function, pointer, public create_mpi_run_control ()
 
subroutine mpi_ctrl_start (this)
 
subroutine wait_for_debugger (this)
 
subroutine mpi_ctrl_finish (this)
 
subroutine mpi_ctrl_after_con_cr (this)
 Actions after creating connections.
 

Function/Subroutine Documentation

◆ create_mpi_run_control()

class(runcontroltype) function, pointer, public mpiruncontrolmodule::create_mpi_run_control ()

Definition at line 34 of file MpiRunControl.F90.

  class(RunControlType), pointer :: controller
  ! local
  class(MpiRunControlType), pointer :: mpi_controller

  allocate (mpi_controller)
  controller => mpi_controller
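For orientation, a minimal usage sketch follows. It is illustrative rather than a verbatim excerpt of the MODFLOW 6 driver: the program name and the module names MpiRunControlModule and RunControlModule are assumptions, while start() and finish() correspond to the overrides documented below.

  ! hypothetical driver: obtain the MPI-aware run control through the
  ! factory and walk it through the start/finish life cycle
  program parallel_driver_sketch
    use MpiRunControlModule, only: create_mpi_run_control
    use RunControlModule, only: RunControlType
    implicit none
    class(RunControlType), pointer :: run_ctrl

    run_ctrl => create_mpi_run_control() ! allocates an MpiRunControlType
    call run_ctrl%start() ! initializes MPI (and PETSc when built with it)
    ! ... create and run the simulation here ...
    call run_ctrl%finish() ! finalizes MPI/PETSc, then the base class
    deallocate (run_ctrl) ! release the controller allocated by the factory
  end program parallel_driver_sketch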

◆ mpi_ctrl_after_con_cr()

subroutine mpiruncontrolmodule::mpi_ctrl_after_con_cr (class(mpiruncontroltype) this)

Definition at line 172 of file MpiRunControl.F90.

  class(MpiRunControlType) :: this
  ! local
  integer(I4B) :: i, j, id, irank
  integer(I4B) :: nr_models, nr_exgs, nr_remotes, max_nr_remotes
  type(STLVecInt) :: remote_models, remote_exgs
  integer(I4B), dimension(:, :), pointer :: remote_models_per_process
  integer(I4B), dimension(:, :), pointer :: remote_exgs_per_process
  class(VirtualModelType), pointer :: vm
  class(VirtualExchangeType), pointer :: ve
  type(MpiWorldType), pointer :: mpi_world
  integer :: ierr

  mpi_world => get_mpi_world()

  ! activate halo through base
  call this%RunControlType%after_con_cr()

  ! compose list of remote models/exchanges to receive
  call remote_models%init()
  nr_models = virtual_model_list%Count()
  do i = 1, nr_models
    vm => get_virtual_model_from_list(virtual_model_list, i)
    if (vm%is_active .and. .not. vm%is_local) then
      ! remote and active
      call remote_models%push_back(vm%id)
    end if
  end do
  call remote_exgs%init()
  nr_exgs = virtual_exchange_list%Count()
  do i = 1, nr_exgs
    ve => get_virtual_exchange_from_list(virtual_exchange_list, i)
    if (ve%is_active .and. .not. ve%is_local) then
      ! remote and active
      call remote_exgs%push_back(ve%id)
    end if
  end do

  ! Models: find max for allocation
  nr_remotes = remote_models%size
  call mpi_allreduce(nr_remotes, max_nr_remotes, 1, mpi_integer, mpi_max, &
                     mpi_world%comm, ierr)
  call check_mpi(ierr)

  allocate (remote_models_per_process(max_nr_remotes, nr_procs))
  remote_models_per_process = 0

  ! Models: fill local portion and reduce
  do i = 1, remote_models%size
    remote_models_per_process(i, proc_id + 1) = remote_models%at(i)
  end do
  call mpi_allreduce(mpi_in_place, remote_models_per_process, &
                     max_nr_remotes * nr_procs, mpi_integer, mpi_max, &
                     mpi_world%comm, ierr)
  call check_mpi(ierr)

  ! Models: set remotes to virtual models
  do i = 1, nr_procs
    do j = 1, max_nr_remotes
      id = remote_models_per_process(j, i)
      if (id > 0) then
        ! assign zero-based rank number to virtual model
        vm => get_virtual_model(id)
        if (vm%is_local) then
          ! only for local models
          irank = i - 1
          call vm%rcv_ranks%push_back_unique(irank)
        end if
      end if
    end do
  end do

  ! Exchanges: find max for allocation
  nr_remotes = remote_exgs%size
  call mpi_allreduce(nr_remotes, max_nr_remotes, 1, mpi_integer, mpi_max, &
                     mpi_world%comm, ierr)
  call check_mpi(ierr)

  allocate (remote_exgs_per_process(max_nr_remotes, nr_procs))
  remote_exgs_per_process = 0

  ! Exchanges: fill local portion and reduce
  do i = 1, remote_exgs%size
    remote_exgs_per_process(i, proc_id + 1) = remote_exgs%at(i)
  end do
  call mpi_allreduce(mpi_in_place, remote_exgs_per_process, &
                     max_nr_remotes * nr_procs, mpi_integer, mpi_max, &
                     mpi_world%comm, ierr)
  call check_mpi(ierr)

  ! Exchanges: set remotes to virtual exchanges
  do i = 1, nr_procs
    do j = 1, max_nr_remotes
      id = remote_exgs_per_process(j, i)
      if (id > 0) then
        ! assign zero-based rank number to virtual exchange
        ve => get_virtual_exchange(id)
        if (ve%is_local) then
          ! only for local exchanges
          irank = i - 1
          call ve%rcv_ranks%push_back_unique(irank)
        end if
      end if
    end do
  end do

  ! clean up
  call remote_models%destroy()
  call remote_exgs%destroy()

  deallocate (remote_models_per_process)
  deallocate (remote_exgs_per_process)
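The id exchange above follows a pad-and-reduce pattern: each rank writes its remote ids into its own column of a matrix sized to the global maximum list length, and one in-place mpi_allreduce with mpi_max makes every column visible on every rank (untouched slots stay zero). The stand-alone sketch below shows only that pattern; the program name and the dummy ids are invented for illustration.

  program pad_and_reduce_sketch
    use mpi
    implicit none
    integer :: ierr, nr_procs, proc_id, nr_local, max_nr, i
    integer, allocatable :: ids_per_process(:, :)

    call mpi_init(ierr)
    call mpi_comm_size(mpi_comm_world, nr_procs, ierr)
    call mpi_comm_rank(mpi_comm_world, proc_id, ierr)

    ! pretend each rank owns a different number of ids
    nr_local = proc_id + 1

    ! find the longest list so one rectangular buffer fits all ranks
    call mpi_allreduce(nr_local, max_nr, 1, mpi_integer, mpi_max, &
                       mpi_comm_world, ierr)

    allocate (ids_per_process(max_nr, nr_procs))
    ids_per_process = 0
    do i = 1, nr_local
      ids_per_process(i, proc_id + 1) = 100 * (proc_id + 1) + i ! dummy ids
    end do

    ! in-place max-reduce: each column is nonzero on exactly one rank,
    ! so afterwards every rank holds every other rank's list as well
    call mpi_allreduce(mpi_in_place, ids_per_process, max_nr * nr_procs, &
                       mpi_integer, mpi_max, mpi_comm_world, ierr)

    deallocate (ids_per_process)
    call mpi_finalize(ierr)
  end program pad_and_reduce_sketch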

◆ mpi_ctrl_finish()

subroutine mpiruncontrolmodule::mpi_ctrl_finish (class(mpiruncontroltype) this)
private

Definition at line 136 of file MpiRunControl.F90.

  class(MpiRunControlType) :: this
  ! local
  integer :: ierr
  integer(I4B) :: tmr_final_par

  ! timer
  tmr_final_par = -1
  call g_prof%start("Finalize MPI and PETSc", tmr_final_par)

  ! release MPI related memory in router before MPI_Finalize
  call this%virtual_data_mgr%router%finalize()

  ! finish mpi
#if defined(__WITH_PETSC__)
  ! NB: PetscFinalize calls MPI_Finalize only when MPI_Init
  ! was called before PetscInitialize
  call petscfinalize(ierr)
  chkerrq(ierr)
#else
  call mpi_finalize(ierr)
  call check_mpi(ierr)
#endif

  pstop_alternative => null()

  call g_prof%stop(tmr_final_par)

  ! finish everything else by calling parent
  call this%RunControlType%finish()

◆ mpi_ctrl_start()

subroutine mpiruncontrolmodule::mpi_ctrl_start (class(mpiruncontroltype) this)
private

Definition at line 44 of file MpiRunControl.F90.

  ! local
  integer(I4B) :: tmr_init_par

  class(MpiRunControlType) :: this
  ! local
  integer :: ierr
  character(len=*), parameter :: petsc_db_file = '.petscrc'
  logical(LGP) :: petsc_db_exists, wait_dbg, is_parallel_mode
  type(MpiWorldType), pointer :: mpi_world

  ! add timed section for parallel initialization
  tmr_init_par = -1
  call g_prof%start("Initialize MPI and PETSc", tmr_init_par)

  ! set mpi abort function
  pstop_alternative => mpi_stop

  wait_dbg = .false.
  mpi_world => get_mpi_world()

  ! if PETSc we need their initialize
#if defined(__WITH_PETSC__)
  ! PetscInitialize calls MPI_Init only when it is not called yet,
  ! which could be through the API. If it is already called, we
  ! should assign the MPI communicator to PETSC_COMM_WORLD first
  ! (PETSc manual)
  if (mpi_world%has_comm()) then
    petsc_comm_world = mpi_world%comm
  end if

  inquire (file=petsc_db_file, exist=petsc_db_exists)
  if (.not. petsc_db_exists) then
    call petscinitialize(petsc_null_character, ierr)
    chkerrq(ierr)
  else
    call petscinitialize(petsc_db_file, ierr)
    chkerrq(ierr)
  end if

  if (.not. mpi_world%has_comm()) then
    call mpi_world%set_comm(petsc_comm_world)
  end if

  call petscoptionshasname(petsc_null_options, petsc_null_character, &
                           '-wait_dbg', wait_dbg, ierr)
  chkerrq(ierr)
  call petscoptionshasname(petsc_null_options, petsc_null_character, &
                           '-p', is_parallel_mode, ierr)
  chkerrq(ierr)
#else
  if (.not. mpi_world%has_comm()) then
    call mpi_init(ierr)
    call check_mpi(ierr)
    call mpi_world%set_comm(mpi_comm_world)
  end if
#endif

  call mpi_world%init()

  call mpi_comm_size(mpi_world%comm, nr_procs, ierr)
  call mpi_comm_rank(mpi_world%comm, proc_id, ierr)

  ! possibly wait to attach debugger here
  if (wait_dbg) call this%wait_for_debugger()

  ! done with parallel pre-work
  call g_prof%stop(tmr_init_par)

  ! start everything else by calling parent
  call this%RunControlType%start()
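As the comments in the PETSc branch note, MPI may already be initialized before mpi_ctrl_start runs, for example when MODFLOW is driven through an API. A sketch of that path is given below; it assumes the MpiWorld singleton lives in a module named MpiWorldModule and that the subroutine name is invented, while get_mpi_world(), has_comm() and set_comm() are the calls used in the listing above.

  ! hypothetical API-side setup: MPI is initialized externally and the
  ! communicator is handed to MpiWorld before start() is called, so
  ! mpi_ctrl_start assigns it to PETSC_COMM_WORLD instead of letting
  ! PetscInitialize call MPI_Init a second time
  subroutine use_external_comm(external_comm)
    use MpiWorldModule, only: MpiWorldType, get_mpi_world
    implicit none
    integer, intent(in) :: external_comm
    type(MpiWorldType), pointer :: mpi_world

    mpi_world => get_mpi_world()
    if (.not. mpi_world%has_comm()) then
      call mpi_world%set_comm(external_comm)
    end if
  end subroutine use_external_comm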

◆ wait_for_debugger()

subroutine mpiruncontrolmodule::wait_for_debugger (class(mpiruncontroltype) this)

Definition at line 119 of file MpiRunControl.F90.

  class(MpiRunControlType) :: this
  ! local
  integer :: ierr
  integer(I4B) :: icnt
  type(MpiWorldType), pointer :: mpi_world

  mpi_world => get_mpi_world()
  if (proc_id == 0) then
    icnt = 0
    write (*, *) 'Hit enter to continue...'
    read (*, *)
  end if
  call mpi_barrier(mpi_world%comm, ierr)