MODFLOW 6 version 6.6.0.dev0
USGS Modular Hydrologic Model
mpiruncontrolmodule Module Reference

Data Types

type  mpiruncontroltype
 

Functions/Subroutines

class(runcontroltype) function, pointer, public create_mpi_run_control ()
 
subroutine mpi_ctrl_start (this)
 
subroutine wait_for_debugger (this)
 
subroutine mpi_ctrl_finish (this)
 
subroutine mpi_ctrl_after_con_cr (this)
 Actions after creating connections.
 

Function/Subroutine Documentation

◆ create_mpi_run_control()

class(runcontroltype) function, pointer, public mpiruncontrolmodule::create_mpi_run_control ( )

Definition at line 33 of file MpiRunControl.F90.

function create_mpi_run_control() result(controller)
  class(RunControlType), pointer :: controller
  ! local
  class(MpiRunControlType), pointer :: mpi_controller

  allocate (mpi_controller)
  controller => mpi_controller

end function create_mpi_run_control
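The factory returns the concrete MpiRunControlType through the abstract RunControlType base, so calling code stays independent of the parallel backend. A minimal usage sketch follows; the module names and the start/finish bindings follow this page, but the driver program itself is an assumption, not taken from the MODFLOW 6 sources:

  ! hypothetical driver; only the factory call is documented above
  program run_control_demo
    use MpiRunControlModule, only: create_mpi_run_control
    use RunControlModule, only: RunControlType
    implicit none
    class(RunControlType), pointer :: controller

    controller => create_mpi_run_control() ! allocates an MpiRunControlType
    call controller%start() ! dispatches to mpi_ctrl_start (MPI/PETSc init)
    ! ... create connections and run the simulation ...
    call controller%finish() ! dispatches to mpi_ctrl_finish
  end program run_control_demo

Returning the base-class pointer keeps the caller identical for serial and parallel runs; only the factory choice differs.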

◆ mpi_ctrl_after_con_cr()

subroutine mpiruncontrolmodule::mpi_ctrl_after_con_cr ( class(mpiruncontroltype) this )

Definition at line 152 of file MpiRunControl.F90.

subroutine mpi_ctrl_after_con_cr(this)
  class(MpiRunControlType) :: this
  ! local
  integer(I4B) :: i, j, id, irank
  integer(I4B) :: nr_models, nr_exgs, nr_remotes, max_nr_remotes
  type(STLVecInt) :: remote_models, remote_exgs
  integer(I4B), dimension(:, :), pointer :: remote_models_per_process
  integer(I4B), dimension(:, :), pointer :: remote_exgs_per_process
  class(VirtualModelType), pointer :: vm
  class(VirtualExchangeType), pointer :: ve
  type(MpiWorldType), pointer :: mpi_world
  integer :: ierr

  mpi_world => get_mpi_world()

  ! activate halo through base
  call this%RunControlType%after_con_cr()

  ! compose list of remote models/exchanges to receive
  call remote_models%init()
  nr_models = virtual_model_list%Count()
  do i = 1, nr_models
    vm => get_virtual_model_from_list(virtual_model_list, i)
    if (vm%is_active .and. .not. vm%is_local) then
      ! remote and active
      call remote_models%push_back(vm%id)
    end if
  end do
  call remote_exgs%init()
  nr_exgs = virtual_exchange_list%Count()
  do i = 1, nr_exgs
    ve => get_virtual_exchange_from_list(virtual_exchange_list, i)
    if (ve%is_active .and. .not. ve%is_local) then
      ! remote and active
      call remote_exgs%push_back(ve%id)
    end if
  end do

  ! Models: find max for allocation
  nr_remotes = remote_models%size
  call mpi_allreduce(nr_remotes, max_nr_remotes, 1, mpi_integer, mpi_max, &
                     mpi_world%comm, ierr)
  call check_mpi(ierr)

  allocate (remote_models_per_process(max_nr_remotes, nr_procs))
  remote_models_per_process = 0

  ! Models: fill local portion and reduce
  do i = 1, remote_models%size
    remote_models_per_process(i, proc_id + 1) = remote_models%at(i)
  end do
  call mpi_allreduce(mpi_in_place, remote_models_per_process, &
                     max_nr_remotes * nr_procs, mpi_integer, mpi_max, &
                     mpi_world%comm, ierr)
  call check_mpi(ierr)

  ! Models: set remotes to virtual models
  do i = 1, nr_procs
    do j = 1, max_nr_remotes
      id = remote_models_per_process(j, i)
      if (id > 0) then
        ! assign zero-based rank number to virtual model
        vm => get_virtual_model(id)
        if (vm%is_local) then
          ! only for local models
          irank = i - 1
          call vm%rcv_ranks%push_back_unique(irank)
        end if
      end if
    end do
  end do

  ! Exchanges: find max for allocation
  nr_remotes = remote_exgs%size
  call mpi_allreduce(nr_remotes, max_nr_remotes, 1, mpi_integer, mpi_max, &
                     mpi_world%comm, ierr)
  call check_mpi(ierr)

  allocate (remote_exgs_per_process(max_nr_remotes, nr_procs))
  remote_exgs_per_process = 0

  ! Exchanges: fill local portion and reduce
  do i = 1, remote_exgs%size
    remote_exgs_per_process(i, proc_id + 1) = remote_exgs%at(i)
  end do
  call mpi_allreduce(mpi_in_place, remote_exgs_per_process, &
                     max_nr_remotes * nr_procs, mpi_integer, mpi_max, &
                     mpi_world%comm, ierr)
  call check_mpi(ierr)

  ! Exchanges: set remotes to virtual exchanges
  do i = 1, nr_procs
    do j = 1, max_nr_remotes
      id = remote_exgs_per_process(j, i)
      if (id > 0) then
        ! assign zero-based rank number to virtual exchange
        ve => get_virtual_exchange(id)
        if (ve%is_local) then
          ! only for local exchanges
          irank = i - 1
          call ve%rcv_ranks%push_back_unique(irank)
        end if
      end if
    end do
  end do

  ! clean up
  call remote_models%destroy()
  call remote_exgs%destroy()

  deallocate (remote_models_per_process)
  deallocate (remote_exgs_per_process)

end subroutine mpi_ctrl_after_con_cr
271 
References:
type(listtype), public :: virtual_model_list
type(listtype), public :: virtual_exchange_list
class(virtualmodeltype) function, pointer, public get_virtual_model_from_list(model_list, idx)
class(virtualexchangetype) function, pointer, public get_virtual_exchange_from_list(list, idx)
class(virtualexchangetype) function, pointer, public get_virtual_exchange(exg_id)
 Returns a virtual exchange with the specified id. The Virtual Exchange is based on two Virtual Models and is therefore not always strictly local or remote.
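The routine exchanges variable-length id lists without point-to-point messaging: every rank writes its ids into its own column of a zero-initialized (max_nr_remotes x nr_procs) table, and an in-place mpi_allreduce with mpi_max merges the columns, since 0 never wins against a positive id. A self-contained sketch of that pattern, with illustrative names not taken from MpiRunControl.F90:

  program allreduce_gather_demo
    use mpi
    implicit none
    integer :: ierr, rank, nprocs, i
    integer :: nr_local, max_nr
    integer, allocatable :: ids(:), table(:, :)

    call MPI_Init(ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
    call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)

    ! give every rank a different number of (positive) ids
    nr_local = rank + 1
    allocate (ids(nr_local))
    ids = [(100 * (rank + 1) + i, i = 1, nr_local)]

    ! agree on the largest list size so all ranks allocate the same table
    call MPI_Allreduce(nr_local, max_nr, 1, MPI_INTEGER, MPI_MAX, &
                       MPI_COMM_WORLD, ierr)

    ! zero the table, fill only this rank's column, then merge with MPI_MAX;
    ! zero padding loses against any positive id, so the result is complete
    allocate (table(max_nr, nprocs))
    table = 0
    table(1:nr_local, rank + 1) = ids
    call MPI_Allreduce(MPI_IN_PLACE, table, max_nr * nprocs, MPI_INTEGER, &
                       MPI_MAX, MPI_COMM_WORLD, ierr)

    if (rank == 0) print '(a, *(1x, i0))', 'merged table:', table
    call MPI_Finalize(ierr)
  end program allreduce_gather_demo

Compared with mpi_allgatherv, this trades some redundant traffic (the zero padding) for a single collective and a fixed-shape receive buffer on every rank.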

◆ mpi_ctrl_finish()

subroutine mpiruncontrolmodule::mpi_ctrl_finish ( class(mpiruncontroltype) this )
private

Definition at line 126 of file MpiRunControl.F90.

subroutine mpi_ctrl_finish(this)
  class(MpiRunControlType) :: this
  ! local
  integer :: ierr

  ! finish mpi
#if defined(__WITH_PETSC__)
  ! NB: PetscFinalize calls MPI_Finalize only when MPI_Init
  ! was called before PetscInitialize
  call petscfinalize(ierr)
  chkerrq(ierr)
#else
  call mpi_finalize(ierr)
  call check_mpi(ierr)
#endif

  pstop_alternative => null()

  ! finish everything else by calling parent
  call this%RunControlType%finish()

end subroutine mpi_ctrl_finish
References:
procedure(pstop_iface), pointer :: pstop_alternative (defined at line 5 of file ErrorUtil.f90)
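The comment in the body matters when MODFLOW 6 is embedded: PetscFinalize only finalizes MPI if PETSc also initialized it. A small, hypothetical guard an embedding driver could use to avoid double finalization (plain MPI, no PETSc; not part of this module):

  program finalize_guard_demo
    use mpi
    implicit none
    integer :: ierr
    logical :: already_finalized

    call MPI_Init(ierr)
    ! ... run the coupled simulation, which may finalize MPI on its own ...
    ! MPI_Finalized reports whether MPI_Finalize was already called
    call MPI_Finalized(already_finalized, ierr)
    if (.not. already_finalized) call MPI_Finalize(ierr)
  end program finalize_guard_demo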

◆ mpi_ctrl_start()

subroutine mpiruncontrolmodule::mpi_ctrl_start ( class(mpiruncontroltype) this )
private

Definition at line 43 of file MpiRunControl.F90.

subroutine mpi_ctrl_start(this)

  class(MpiRunControlType) :: this
  ! local
  integer :: ierr
  character(len=*), parameter :: petsc_db_file = '.petscrc'
  logical(LGP) :: petsc_db_exists, wait_dbg, is_parallel_mode
  type(MpiWorldType), pointer :: mpi_world

  ! set mpi abort function
  pstop_alternative => mpi_stop

  wait_dbg = .false.
  mpi_world => get_mpi_world()

  ! if PETSc we need their initialize
#if defined(__WITH_PETSC__)
  ! PetscInitialize calls MPI_Init only when it is not called yet,
  ! which could be through the API. If it is already called, we
  ! should assign the MPI communicator to PETSC_COMM_WORLD first
  ! (PETSc manual)
  if (mpi_world%has_comm()) then
    petsc_comm_world = mpi_world%comm
  end if

  inquire (file=petsc_db_file, exist=petsc_db_exists)
  if (.not. petsc_db_exists) then
    call petscinitialize(petsc_null_character, ierr)
    chkerrq(ierr)
  else
    call petscinitialize(petsc_db_file, ierr)
    chkerrq(ierr)
  end if

  if (.not. mpi_world%has_comm()) then
    call mpi_world%set_comm(petsc_comm_world)
  end if

  call petscoptionshasname(petsc_null_options, petsc_null_character, &
                           '-wait_dbg', wait_dbg, ierr)
  chkerrq(ierr)
  call petscoptionshasname(petsc_null_options, petsc_null_character, &
                           '-p', is_parallel_mode, ierr)
  chkerrq(ierr)
#else
  if (.not. mpi_world%has_comm()) then
    call mpi_init(ierr)
    call check_mpi(ierr)
    call mpi_world%set_comm(mpi_comm_world)
  end if
#endif

  call mpi_world%init()

  call mpi_comm_size(mpi_world%comm, nr_procs, ierr)
  call mpi_comm_rank(mpi_world%comm, proc_id, ierr)

  ! possibly wait to attach debugger here
  if (wait_dbg) call this%wait_for_debugger()

  ! start everything else by calling parent
  call this%RunControlType%start()

end subroutine mpi_ctrl_start
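Because -wait_dbg and -p are resolved through the PETSc options database, they can be supplied on the command line (for example mpiexec -n 4 mf6 -p -wait_dbg, where the executable name is illustrative) or preset in the .petscrc file that this routine checks for. A hypothetical .petscrc enabling both options, using PETSc's one-option-per-line format:

  -p
  -wait_dbg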

◆ wait_for_debugger()

subroutine mpiruncontrolmodule::wait_for_debugger ( class(mpiruncontroltype) this )

Definition at line 109 of file MpiRunControl.F90.

subroutine wait_for_debugger(this)
  class(MpiRunControlType) :: this
  ! local
  integer :: ierr
  integer(I4B) :: icnt
  type(MpiWorldType), pointer :: mpi_world

  mpi_world => get_mpi_world()
  if (proc_id == 0) then
    icnt = 0
    write (*, *) 'Hit enter to continue...'
    read (*, *)
  end if
  call mpi_barrier(mpi_world%comm, ierr)

end subroutine wait_for_debugger
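The wait is intentionally simple: rank 0 blocks on stdin while all other ranks block in mpi_barrier, which leaves time to attach a debugger to any process before pressing Enter. A common per-rank variant, shown here as a hypothetical sketch (not part of this module), spins on a volatile flag that the attached debugger flips:

  program spin_wait_demo
    implicit none
    ! volatile forces a fresh read on each iteration, so a debugger
    ! (e.g. gdb: set var released = .true.) can break the loop
    logical, volatile :: released = .false.

    do while (.not. released)
      ! busy-wait; attach the debugger here and flip the flag
    end do
    print *, 'released, continuing'
  end program spin_wait_demo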