MODFLOW 6  version 6.7.0.dev1
USGS Modular Hydrologic Model
MpiRunControl.F90
Go to the documentation of this file.
2 #if defined(__WITH_PETSC__)
3 #include <petsc/finclude/petscksp.h>
4  ! cannot use all symbols because of clash with 'use mpi'
5  use petscksp, only: petsc_comm_world, petscinitialize, petscfinalize, &
6  petsc_null_character, petsc_null_options
7 #endif
8  use mpi
11  use simstagesmodule
12  use profilermodule
13  use kindmodule, only: i4b, lgp, dp
14  use stlvecintmodule
17  implicit none
18  private
19 
20  public :: create_mpi_run_control
21 
 !> @brief Run controller for parallel runs: extends the serial
 !! runcontroltype, adding MPI (and optionally PETSc) initialization,
 !! finalization, and post-connection halo/rank bookkeeping.
 !<
 type, public, extends(runcontroltype) :: mpiruncontroltype
 contains
 ! override
 procedure :: start => mpi_ctrl_start ! init MPI/PETSc, then parent start
 procedure :: finish => mpi_ctrl_finish ! finalize MPI/PETSc, then parent finish
 procedure :: after_con_cr => mpi_ctrl_after_con_cr ! exchange remote model/exchange ranks
 ! private
 procedure, private :: wait_for_debugger ! optional barrier to attach a debugger
 end type mpiruncontroltype
31 
32 contains
33 
34  function create_mpi_run_control() result(controller)
35  class(runcontroltype), pointer :: controller
36  ! local
37  class(mpiruncontroltype), pointer :: mpi_controller
38 
39  allocate (mpi_controller)
40  controller => mpi_controller
41 
42  end function create_mpi_run_control
43 
44  subroutine mpi_ctrl_start(this)
46  ! local
47  integer(I4B) :: tmr_init_par
48 
49  class(mpiruncontroltype) :: this
50  ! local
51  integer :: ierr
52  character(len=*), parameter :: petsc_db_file = '.petscrc'
53  logical(LGP) :: petsc_db_exists, wait_dbg, is_parallel_mode
54  type(mpiworldtype), pointer :: mpi_world
55 
56  ! add timed section for parallel initialization
57  tmr_init_par = -1
58  call g_prof%start("Initialize MPI and PETSc", tmr_init_par)
59 
60  ! set mpi abort function
62 
63  wait_dbg = .false.
64  mpi_world => get_mpi_world()
65 
66  ! if PETSc we need their initialize
67 #if defined(__WITH_PETSC__)
68  ! PetscInitialize calls MPI_Init only when it is not called yet,
69  ! which could be through the API. If it is already called, we
70  ! should assign the MPI communicator to PETSC_COMM_WORLD first
71  ! (PETSc manual)
72  if (mpi_world%has_comm()) then
73  petsc_comm_world = mpi_world%comm
74  end if
75 
76  inquire (file=petsc_db_file, exist=petsc_db_exists)
77  if (.not. petsc_db_exists) then
78  call petscinitialize(petsc_null_character, ierr)
79  chkerrq(ierr)
80  else
81  call petscinitialize(petsc_db_file, ierr)
82  chkerrq(ierr)
83  end if
84 
85  if (.not. mpi_world%has_comm()) then
86  call mpi_world%set_comm(petsc_comm_world)
87  end if
88 
89  call petscoptionshasname(petsc_null_options, petsc_null_character, &
90  '-wait_dbg', wait_dbg, ierr)
91  chkerrq(ierr)
92  call petscoptionshasname(petsc_null_options, petsc_null_character, &
93  '-p', is_parallel_mode, ierr)
94  chkerrq(ierr)
95 #else
96  if (.not. mpi_world%has_comm()) then
97  call mpi_init(ierr)
98  call check_mpi(ierr)
99  call mpi_world%set_comm(mpi_comm_world)
100  end if
101 #endif
102 
103  call mpi_world%init()
104 
105  call mpi_comm_size(mpi_world%comm, nr_procs, ierr)
106  call mpi_comm_rank(mpi_world%comm, proc_id, ierr)
107 
108  ! possibly wait to attach debugger here
109  if (wait_dbg) call this%wait_for_debugger()
110 
111  ! done with parallel pre-work
112  call g_prof%stop(tmr_init_par)
113 
114  ! start everything else by calling parent
115  call this%RunControlType%start()
116 
117  end subroutine mpi_ctrl_start
118 
119  subroutine wait_for_debugger(this)
120  class(mpiruncontroltype) :: this
121  ! local
122  integer :: ierr
123  integer(I4B) :: icnt
124  type(mpiworldtype), pointer :: mpi_world
125 
126  mpi_world => get_mpi_world()
127  if (proc_id == 0) then
128  icnt = 0
129  write (*, *) 'Hit enter to continue...'
130  read (*, *)
131  end if
132  call mpi_barrier(mpi_world%comm, ierr)
133 
134  end subroutine wait_for_debugger
135 
 !> @brief Tear down the parallel runtime in the proper order and then
 !! run the serial parent finish procedure.
 !<
 subroutine mpi_ctrl_finish(this)
 class(mpiruncontroltype) :: this
 ! local
 integer :: ierr
 integer(I4B) :: tmr_final_par

 ! timer
 tmr_final_par = -1
 call g_prof%start("Finalize MPI and PETSc", tmr_final_par)

 ! release MPI related memory in router before MPI_Finalize
 call this%virtual_data_mgr%router%finalize()

 ! finish mpi
#if defined(__WITH_PETSC__)
 ! NB: PetscFinalize calls MPI_Finalize only when MPI_Init
 ! was called before PetscInitialize
 call petscfinalize(ierr)
 chkerrq(ierr)
#else
 call mpi_finalize(ierr)
 call check_mpi(ierr)
#endif

 ! detach the MPI-aware abort handler; MPI is down at this point
 pstop_alternative => null()

 call g_prof%stop(tmr_final_par)

 ! finish everything else by calling parent
 call this%RunControlType%finish()

 end subroutine mpi_ctrl_finish
169 
170  !> @brief Actions after creating connections
171  !<
172  subroutine mpi_ctrl_after_con_cr(this)
180  class(mpiruncontroltype) :: this
181  ! local
182  integer(I4B) :: i, j, id, irank
183  integer(I4B) :: nr_models, nr_exgs, nr_remotes, max_nr_remotes
184  type(stlvecint) :: remote_models, remote_exgs
185  integer(I4B), dimension(:, :), pointer :: remote_models_per_process
186  integer(I4B), dimension(:, :), pointer :: remote_exgs_per_process
187  class(virtualmodeltype), pointer :: vm
188  class(virtualexchangetype), pointer :: ve
189  type(mpiworldtype), pointer :: mpi_world
190  integer :: ierr
191 
192  mpi_world => get_mpi_world()
193 
194  ! activate halo through base
195  call this%RunControlType%after_con_cr()
196 
197  ! compose list of remote models/exchanges to receive
198  call remote_models%init()
199  nr_models = virtual_model_list%Count()
200  do i = 1, nr_models
202  if (vm%is_active .and. .not. vm%is_local) then
203  ! remote and active
204  call remote_models%push_back(vm%id)
205  end if
206  end do
207  call remote_exgs%init()
208  nr_exgs = virtual_exchange_list%Count()
209  do i = 1, nr_exgs
211  if (ve%is_active .and. .not. ve%is_local) then
212  ! remote and active
213  call remote_exgs%push_back(ve%id)
214  end if
215  end do
216 
217  ! Models: find max for allocation
218  nr_remotes = remote_models%size
219  call mpi_allreduce(nr_remotes, max_nr_remotes, 1, mpi_integer, mpi_max, &
220  mpi_world%comm, ierr)
221  call check_mpi(ierr)
222 
223  allocate (remote_models_per_process(max_nr_remotes, nr_procs))
224  remote_models_per_process = 0
225 
226  ! Models: fill local portion and reduce
227  do i = 1, remote_models%size
228  remote_models_per_process(i, proc_id + 1) = remote_models%at(i)
229  end do
230  call mpi_allreduce(mpi_in_place, remote_models_per_process, &
231  max_nr_remotes * nr_procs, mpi_integer, mpi_max, &
232  mpi_world%comm, ierr)
233  call check_mpi(ierr)
234 
235  ! Models: set remotes to virtual models
236  do i = 1, nr_procs
237  do j = 1, max_nr_remotes
238  id = remote_models_per_process(j, i)
239  if (id > 0) then
240  ! assign zero-based rank number to virtual model
241  vm => get_virtual_model(id)
242  if (vm%is_local) then
243  ! only for local models
244  irank = i - 1
245  call vm%rcv_ranks%push_back_unique(irank)
246  end if
247  end if
248  end do
249  end do
250 
251  ! Exchanges: find max for allocation
252  nr_remotes = remote_exgs%size
253  call mpi_allreduce(nr_remotes, max_nr_remotes, 1, mpi_integer, mpi_max, &
254  mpi_world%comm, ierr)
255  call check_mpi(ierr)
256 
257  allocate (remote_exgs_per_process(max_nr_remotes, nr_procs))
258  remote_exgs_per_process = 0
259 
260  ! Exchanges: fill local portion and reduce
261  do i = 1, remote_exgs%size
262  remote_exgs_per_process(i, proc_id + 1) = remote_exgs%at(i)
263  end do
264  call mpi_allreduce(mpi_in_place, remote_exgs_per_process, &
265  max_nr_remotes * nr_procs, mpi_integer, mpi_max, &
266  mpi_world%comm, ierr)
267  call check_mpi(ierr)
268 
269  ! Exchanges: set remotes to virtual exchanges
270  do i = 1, nr_procs
271  do j = 1, max_nr_remotes
272  id = remote_exgs_per_process(j, i)
273  if (id > 0) then
274  ! assign zero-based rank number to virtual exchange
275  ve => get_virtual_exchange(id)
276  if (ve%is_local) then
277  ! only for local exchanges
278  irank = i - 1
279  call ve%rcv_ranks%push_back_unique(irank)
280  end if
281  end if
282  end do
283  end do
284 
285  ! clean up
286  call remote_models%destroy()
287  call remote_exgs%destroy()
288 
289  deallocate (remote_models_per_process)
290  deallocate (remote_exgs_per_process)
291 
292  end subroutine mpi_ctrl_after_con_cr
293 
294 end module mpiruncontrolmodule
procedure(pstop_iface), pointer pstop_alternative
Definition: ErrorUtil.f90:5
This module defines variable data types.
Definition: kind.f90:8
subroutine mpi_ctrl_finish(this)
subroutine wait_for_debugger(this)
class(runcontroltype) function, pointer, public create_mpi_run_control()
subroutine mpi_ctrl_after_con_cr(this)
Actions after creating connections.
subroutine mpi_ctrl_start(this)
type(mpiworldtype) function, pointer, public get_mpi_world()
Definition: MpiWorld.f90:32
subroutine, public mpi_stop(status)
Definition: MpiWorld.f90:131
subroutine, public check_mpi(mpi_error_code)
Check the MPI error code and report when it indicates failure.
Definition: MpiWorld.f90:116
type(profilertype), public g_prof
the global timer object (to reduce trivial lines of code)
Definition: Profiler.f90:65
subroutine start(this, title, section_id)
Start section timing, add when not exist yet (i.e. when id < 1)
Definition: Profiler.f90:144
This module contains simulation variables.
Definition: SimVariables.f90:9
integer(i4b) nr_procs
integer(i4b) proc_id
type(listtype), public virtual_model_list
type(listtype), public virtual_exchange_list
class(virtualexchangetype) function, pointer, public get_virtual_exchange_from_list(list, idx)
class(virtualexchangetype) function, pointer, public get_virtual_exchange(exg_id)
Returns a virtual exchange with the specified id.
class(virtualmodeltype) function, pointer, public get_virtual_model_from_list(model_list, idx)
The Virtual Exchange is based on two Virtual Models and is therefore not always strictly local or remote.