MODFLOW 6  version 6.6.0.dev0
USGS Modular Hydrologic Model
MpiRouter.f90
module mpiroutermodule
  use routerbasemodule
  use kindmodule, only: i4b, lgp
  use stlvecintmodule
  use simvariablesmodule, only: proc_id, nr_procs
  use simstagesmodule, only: stg_to_str
  use virtualdatalistsmodule, only: virtual_model_list, &
                                    virtual_exchange_list
  use virtualbasemodule, only: nr_vdc_element_maps
  use virtualdatacontainermodule
  use virtualexchangemodule, only: virtualexchangetype
  use virtualsolutionmodule
  use mpimessagebuildermodule
  use mpimessagecachemodule
  use mpiworldmodule
  use mpi
  implicit none
  private

  public :: create_mpi_router

  type, public, extends(routerbasetype) :: mpiroutertype
    integer(I4B), dimension(:), pointer :: model_proc_ids
    type(stlvecint) :: senders !< the process ids to receive data from
    type(stlvecint) :: receivers !< the process ids to send data to
    type(vdcptrtype), dimension(:), pointer :: all_models => null() !< all virtual models from the global list
    type(vdcptrtype), dimension(:), pointer :: all_exchanges => null() !< all virtual exchanges from the global list
    type(vdcptrtype), dimension(:), pointer :: rte_models => null() !< the currently active models to be routed
    type(vdcptrtype), dimension(:), pointer :: rte_exchanges => null() !< the currently active exchanges to be routed
    type(mpimessagebuildertype) :: message_builder
    type(mpimessagecachetype) :: msg_cache
    type(mpiworldtype), pointer :: mpi_world => null()
    integer(I4B) :: imon !< the output file unit for the mpi monitor
    logical(LGP) :: enable_monitor !< when true, log diagnostics
  contains
    procedure :: initialize => mr_initialize
    procedure :: route_all => mr_route_all
    procedure :: route_sln => mr_route_sln
    procedure :: destroy => mr_destroy
    ! private
    procedure, private :: activate
    procedure, private :: deactivate
    procedure, private :: update_senders
    procedure, private :: update_receivers
    procedure, private :: route_active
    procedure, private :: is_cached
    procedure, private :: compose_messages
    procedure, private :: load_messages
  end type mpiroutertype

contains

  !> Factory method to create MPI router
  !<
  function create_mpi_router() result(router)
    class(routerbasetype), pointer :: router
    ! local
    class(mpiroutertype), pointer :: mpi_router

    allocate (mpi_router)
    router => mpi_router

  end function create_mpi_router

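  ! illustrative use of the factory (hypothetical caller, based only on
  ! the interface above):
  !
  !   class(routerbasetype), pointer :: router
  !   router => create_mpi_router()
  !   call router%initialize()
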
  subroutine mr_initialize(this)
    use inputoutputmodule, only: getunit
    use constantsmodule, only: linelength
    class(mpiroutertype) :: this
    ! local
    integer :: ierr
    integer(I4B) :: i
    integer(I4B) :: nr_models, nr_exchanges
    class(virtualdatacontainertype), pointer :: vdc
    character(len=LINELENGTH) :: monitor_file

    ! routing over all when starting
    this%halo_activated = .false.

    ! to log or not to log
    this%enable_monitor = .false.

    ! initialize the MPI message builder and cache
    call this%message_builder%init()
    call this%msg_cache%init()

    ! get mpi world for our process
    this%mpi_world => get_mpi_world()

    ! init address list
    call this%senders%init()
    call this%receivers%init()

    ! find out where models are
    nr_models = virtual_model_list%Count()
    nr_exchanges = virtual_exchange_list%Count()
    allocate (this%model_proc_ids(nr_models))
    allocate (this%all_models(nr_models))
    allocate (this%all_exchanges(nr_exchanges))

    do i = 1, nr_models
      vdc => get_vdc_from_list(virtual_model_list, i)
      this%all_models(i)%ptr => vdc
      if (vdc%is_local) then
        this%model_proc_ids(i) = proc_id
      else
        this%model_proc_ids(i) = 0
      end if
    end do

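    ! each rank has set proc_id for its own (local) models and zero for
    ! all others, so a global sum leaves every entry equal to the rank
    ! that owns the model (each model is local to exactly one process)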
    call mpi_allreduce(mpi_in_place, this%model_proc_ids, nr_models, &
                       mpi_integer, mpi_sum, this%mpi_world%comm, ierr)
    call check_mpi(ierr)

    ! set the process id to the models and exchanges
    do i = 1, nr_models
      vdc => get_vdc_from_list(virtual_model_list, i)
      call vdc%set_orig_rank(this%model_proc_ids(i))
    end do

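    ! an exchange is routed from the rank of its first model, unless
    ! that model is local; then the second model's rank is taken, so a
    ! partially remote exchange always originates from the remote side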
    do i = 1, nr_exchanges
      vdc => get_vdc_from_list(virtual_exchange_list, i)
      this%all_exchanges(i)%ptr => vdc
      select type (vex => vdc)
      class is (virtualexchangetype)
        call vex%set_orig_rank(vex%v_model1%orig_rank)
        if (vex%v_model1%is_local) then
          call vex%set_orig_rank(vex%v_model2%orig_rank)
        end if
      end select
    end do

    ! open log file
    if (this%enable_monitor) then
      this%imon = getunit()
      write (monitor_file, '(a,i0,a)') "mpi.p", proc_id, ".log"
      open (unit=this%imon, file=monitor_file)
      call this%message_builder%set_monitor(this%imon)

      ! write initial info
      write (this%imon, '(a,/)') ">> initialize MPI Router:"
      write (this%imon, '(2x,a,i0)') "process id: ", proc_id
      write (this%imon, '(2x,a,i0)') "nr. of processes: ", nr_procs
      write (this%imon, '(2x,a,i0)') "nr. of models: ", nr_models
      write (this%imon, '(2x,a,i0)') "nr. of exchanges: ", nr_exchanges
      write (this%imon, '(2x,a)') "model id, processor id:"
      do i = 1, nr_models
        write (this%imon, '(4x,2i8)') i, this%model_proc_ids(i)
      end do
      write (this%imon, '(a,/)') "<< initialize done"
    end if

  end subroutine mr_initialize

  !> @brief Activate models and exchanges for routing
  !<
  subroutine activate(this, models, exchanges)
    class(mpiroutertype) :: this
    type(vdcptrtype), dimension(:), pointer :: models
    type(vdcptrtype), dimension(:), pointer :: exchanges

    this%rte_models => models
    this%rte_exchanges => exchanges
    call this%message_builder%attach_data(models, exchanges)

  end subroutine activate

  !> @brief Deactivate data after routing
  !<
  subroutine deactivate(this)
    class(mpiroutertype) :: this

    this%rte_models => null()
    this%rte_exchanges => null()
    call this%message_builder%release_data()

  end subroutine deactivate

  !> @brief This will route all remote data from the
  !! global models and exchanges over MPI, for a
  !< given stage
  subroutine mr_route_all(this, stage)
    use memorymanagermodule, only: mem_print_detailed
    class(mpiroutertype) :: this
    integer(I4B) :: stage

    if (this%enable_monitor) then
      write (this%imon, '(/,2a)') ">> routing all: ", stg_to_str(stage)
    end if

    ! route all
    call this%activate(this%all_models, this%all_exchanges)
    call this%route_active(0, stage)
    call this%deactivate()

    if (this%enable_monitor) then
      write (this%imon, '(a,/)') "<< end routing all"
      !call mem_print_detailed(this%imon)
    end if

  end subroutine mr_route_all

  !> @brief This will route all remote data from models
  !! and exchanges in a particular solution over MPI,
  !< for a given stage
  subroutine mr_route_sln(this, virtual_sol, stage)
    class(mpiroutertype) :: this
    type(virtualsolutiontype) :: virtual_sol
    integer(I4B) :: stage

    if (this%enable_monitor) then
      write (this%imon, '(/,a,i0,2a)') ">> routing solution: ", &
        virtual_sol%solution_id, ", ", stg_to_str(stage)
    end if

    ! route for this solution
    call this%activate(virtual_sol%models, virtual_sol%exchanges)
    call this%route_active(virtual_sol%solution_id, stage)
    call this%deactivate()

    if (this%enable_monitor) then
      write (this%imon, '(a)') "<< end routing solution"
    end if

  end subroutine mr_route_sln

  !> @brief Routes the models and exchanges over MPI,
  !! either constructing the message bodies in a sequence
  !! of communication steps, or by loading from cache
  !< for efficiency
  subroutine route_active(this, unit, stage)
    use simmodule, only: store_error
    class(mpiroutertype) :: this !< this mpi router
    integer(I4B) :: unit !< the solution id, or equal to 0 when global
    integer(I4B) :: stage !< the stage to route
    ! local
    integer(I4B) :: i
    integer(I4B) :: rnk
    integer :: ierr, msg_size
    logical(LGP) :: from_cache
    ! mpi handles
    integer, dimension(:), allocatable :: rcv_req
    integer, dimension(:), allocatable :: snd_req
    integer, dimension(:, :), allocatable :: rcv_stat

    ! message body
    integer, dimension(:), allocatable :: body_rcv_t
    integer, dimension(:), allocatable :: body_snd_t

    ! update address list
    call this%update_senders()
    call this%update_receivers()

    ! allocate body data
    allocate (body_rcv_t(this%senders%size))
    allocate (body_snd_t(this%receivers%size))

    ! allocate handles
    allocate (rcv_req(this%senders%size))
    allocate (snd_req(this%receivers%size))
    allocate (rcv_stat(mpi_status_size, this%senders%size))

    ! always initialize request handles
    rcv_req = mpi_request_null
    snd_req = mpi_request_null

    if (this%enable_monitor) then
      write (this%imon, '(2x,a,*(i3))') "process ids sending data: ", &
        this%senders%get_values()
      write (this%imon, '(2x,a,*(i3))') "process ids receiving data: ", &
        this%receivers%get_values()
    end if

    ! from cache or build
    from_cache = this%is_cached(unit, stage)
    if (.not. from_cache) then
      call this%compose_messages(unit, stage, body_snd_t, body_rcv_t)
    else
      call this%load_messages(unit, stage, body_snd_t, body_rcv_t)
    end if

    if (this%enable_monitor) then
      write (this%imon, '(2x,a)') "== communicating bodies =="
    end if

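    ! note: the body datatypes are built from absolute memory addresses,
    ! so mpi_bottom serves as the buffer argument and the datatype alone
    ! determines which memory is sent or received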
    ! recv bodies
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "receiving from process: ", rnk
      end if

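      ! a zero-size datatype means there is nothing to route for this
      ! stage, so no receive is posted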
      call mpi_type_size(body_rcv_t(i), msg_size, ierr)
      if (msg_size > 0) then
        call mpi_irecv(mpi_bottom, 1, body_rcv_t(i), rnk, stage, &
                       this%mpi_world%comm, rcv_req(i), ierr)
        call check_mpi(ierr)
      end if

      if (this%enable_monitor) then
        write (this%imon, '(6x,a,i0)') "message body size: ", msg_size
      end if
    end do

    ! send bodies
    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "sending to process: ", rnk
      end if

      call mpi_type_size(body_snd_t(i), msg_size, ierr)
      if (msg_size > 0) then
        call mpi_isend(mpi_bottom, 1, body_snd_t(i), rnk, stage, &
                       this%mpi_world%comm, snd_req(i), ierr)
        call check_mpi(ierr)
      end if

      if (this%enable_monitor) then
        write (this%imon, '(6x,a,i0)') "message body size: ", msg_size
        ! flush only when the monitor file is actually open
        call flush (this%imon)
      end if
    end do

    ! wait for exchange of all messages
    call mpi_waitall(this%senders%size, rcv_req, rcv_stat, ierr)
    call check_mpi(ierr)

    deallocate (rcv_req, snd_req, rcv_stat)
    deallocate (body_rcv_t, body_snd_t)

  end subroutine route_active

  !> @brief Constructs the message bodies' MPI datatypes.
  !!
  !! Constructs the message bodies' MPI datatypes for a
  !! unit (a solution) and a given stage. This is done in
  !! a sequence of 6 steps (distributed over 3 phases):
  !!
  !! == synchronizing headers (VdcHeaderType) ==
  !!
  !! step 1:
  !! Receive headers from remote addresses requesting data
  !! from virtual data containers (models, exchanges, ...) local to this process
  !! step 2:
  !! Send headers to remote addresses to indicate for which
  !! virtual data containers (models, exchanges, ...) data is requested
  !!
  !! == synchronizing maps (VdcReceiverMapsType) ==
  !!
  !! step 3:
  !! Based on the received headers, receive element maps (which elements are
  !! to be sent from a contiguous array) for outgoing data
  !! step 4:
  !! Send element maps to remote addresses to specify incoming data
  !!
  !! == construct message body data types (VirtualDataContainerType) ==
  !!
  !! step 5:
  !! Construct the message receive body based on the virtual
  !! data items in the virtual data containers, and cache
  !!
  !! step 6:
  !! Construct the message send body, based on received header data
  !! and maps, from the virtual data containers, and cache
  !<
  subroutine compose_messages(this, unit, stage, body_snd_t, body_rcv_t)
    class(mpiroutertype) :: this
    integer(I4B) :: unit
    integer(I4B) :: stage
    integer, dimension(:) :: body_snd_t
    integer, dimension(:) :: body_rcv_t
    ! local
    integer(I4B) :: i, j, k
    integer(I4B) :: rnk
    integer :: ierr
    ! mpi handles
    integer, dimension(:), allocatable :: rcv_req
    integer, dimension(:), allocatable :: snd_req
    integer, dimension(:, :), allocatable :: rcv_stat
    ! message header
    integer(I4B) :: max_headers
    type(vdcheadertype), dimension(:, :), allocatable :: headers
    integer, dimension(:), allocatable :: hdr_rcv_t
    integer, dimension(:), allocatable :: hdr_snd_t
    integer, dimension(:), allocatable :: hdr_rcv_cnt
    ! maps
    type(vdcreceivermapstype), dimension(:, :), allocatable :: rcv_maps
    integer, dimension(:), allocatable :: map_rcv_t
    integer, dimension(:), allocatable :: map_snd_t

    ! allocate handles
    allocate (rcv_req(this%receivers%size))
    allocate (snd_req(this%senders%size))
    allocate (rcv_stat(mpi_status_size, this%receivers%size))

    ! init handles
    rcv_req = mpi_request_null
    snd_req = mpi_request_null

    ! allocate header data
    max_headers = size(this%rte_models) + size(this%rte_exchanges)
    allocate (hdr_rcv_t(this%receivers%size))
    allocate (hdr_snd_t(this%senders%size))
    allocate (headers(max_headers, this%receivers%size))
    allocate (hdr_rcv_cnt(this%receivers%size))
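    ! column i of headers holds the (at most max_headers) header records
    ! received from process i; the number that actually arrived is
    ! determined later with mpi_get_count and kept in hdr_rcv_cnt(i)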

    ! allocate map data
    allocate (map_snd_t(this%senders%size))
    allocate (map_rcv_t(this%receivers%size))
    allocate (rcv_maps(max_headers, this%receivers%size)) ! for every header, we potentially need the maps

    if (this%enable_monitor) then
      write (this%imon, '(2x,a)') "== communicating headers =="
    end if

    ! first receive headers for outward data
    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "Ireceive header from process: ", rnk
      end if
      call this%message_builder%create_header_rcv(hdr_rcv_t(i))
      call mpi_irecv(headers(:, i), max_headers, hdr_rcv_t(i), rnk, stage, &
                     this%mpi_world%comm, rcv_req(i), ierr)
      call check_mpi(ierr)
    end do

    ! send header for incoming data
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "send header to process: ", rnk
      end if
      call this%message_builder%create_header_snd(rnk, stage, hdr_snd_t(i))
      call mpi_isend(mpi_bottom, 1, hdr_snd_t(i), rnk, stage, &
                     this%mpi_world%comm, snd_req(i), ierr)
      call check_mpi(ierr)
    end do

    ! wait for exchange of all headers
    call mpi_waitall(this%receivers%size, rcv_req, rcv_stat, ierr)
    call check_mpi(ierr)

    ! reinit handles
    rcv_req = mpi_request_null
    snd_req = mpi_request_null

    ! after the WaitAll, the number of header records that actually
    ! arrived from each process can be read from the receive statuses
    do i = 1, this%receivers%size
      call mpi_get_count(rcv_stat(:, i), hdr_rcv_t(i), hdr_rcv_cnt(i), ierr)

      if (this%enable_monitor) then
        rnk = this%receivers%at(i)
        write (this%imon, '(4x,a,i0)') "received headers from process: ", rnk
        write (this%imon, '(6x,a)') "expecting data for:"
        do j = 1, hdr_rcv_cnt(i)
          write (this%imon, '(6x,a,i0,a,a)') "id: ", headers(j, i)%id, &
            " type: ", trim(vdc_type_to_str(headers(j, i)%container_type))
          write (this%imon, '(6x,a,99i6)') "map sizes: ", headers(j, i)%map_sizes
        end do
      end if
    end do

    ! clean up types
    do i = 1, this%receivers%size
      call mpi_type_free(hdr_rcv_t(i), ierr)
    end do
    do i = 1, this%senders%size
      call mpi_type_free(hdr_snd_t(i), ierr)
    end do

    if (this%enable_monitor) then
      write (this%imon, '(2x,a)') "== communicating maps =="
    end if

    ! allocate space for receiving maps
    do i = 1, this%receivers%size
      do j = 1, hdr_rcv_cnt(i)
        call rcv_maps(j, i)%create(headers(j, i)%map_sizes)
      end do
    end do

    ! receive maps
    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "Ireceive maps from process: ", rnk
      end if

      call this%message_builder%create_map_rcv(rcv_maps(:, i), hdr_rcv_cnt(i), &
                                               map_rcv_t(i))
      call mpi_irecv(mpi_bottom, 1, map_rcv_t(i), rnk, stage, &
                     this%mpi_world%comm, rcv_req(i), ierr)
      call check_mpi(ierr)
    end do

    ! send maps
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "send map to process: ", rnk
      end if

      call this%message_builder%create_map_snd(rnk, stage, map_snd_t(i))
      call mpi_isend(mpi_bottom, 1, map_snd_t(i), rnk, stage, &
                     this%mpi_world%comm, snd_req(i), ierr)
      call check_mpi(ierr)
    end do

    ! wait on receiving maps
    call mpi_waitall(this%receivers%size, rcv_req, rcv_stat, ierr)
    call check_mpi(ierr)

    ! print maps
    if (this%enable_monitor) then
      do i = 1, this%receivers%size
        rnk = this%receivers%at(i)
        write (this%imon, '(4x,a,i0)') "received maps from process: ", rnk
        do j = 1, hdr_rcv_cnt(i)
          write (this%imon, '(6x,a,i0,a,a)') "id: ", headers(j, i)%id, &
            " type: ", trim(vdc_type_to_str(headers(j, i)%container_type))
          do k = 1, nr_vdc_element_maps
            write (this%imon, '(8x,i0,a,i0)') k, " nr. elements: ", &
              rcv_maps(j, i)%el_maps(k)%nr_virt_elems
            if (rcv_maps(j, i)%el_maps(k)%nr_virt_elems > 0) then
              write (this%imon, '(8x,*(i6))') &
                rcv_maps(j, i)%el_maps(k)%remote_elem_shift
            end if
          end do
        end do
      end do
    end if

    ! clean up types
    do i = 1, this%receivers%size
      call mpi_type_free(map_rcv_t(i), ierr)
    end do
    do i = 1, this%senders%size
      call mpi_type_free(map_snd_t(i), ierr)
    end do

    if (this%enable_monitor) then
      write (this%imon, '(2x,a)') "== composing message bodies =="
    end if

    ! construct recv bodies and cache
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "build recv body for process: ", rnk
      end if

      call this%message_builder%create_body_rcv(rnk, stage, body_rcv_t(i))
      call this%msg_cache%put(unit, rnk, stage, mpi_bdy_rcv, body_rcv_t(i))
    end do

    ! construct send bodies and cache
    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "build send body for process: ", rnk
      end if

      call this%message_builder%create_body_snd( &
        rnk, stage, headers(1:hdr_rcv_cnt(i), i), &
        rcv_maps(:, i), body_snd_t(i))
      call this%msg_cache%put(unit, rnk, stage, mpi_bdy_snd, body_snd_t(i))
    end do

    ! clean up element maps
    do i = 1, this%receivers%size
      do j = 1, hdr_rcv_cnt(i)
        call rcv_maps(j, i)%destroy()
      end do
    end do

    deallocate (rcv_req, snd_req, rcv_stat)
    deallocate (hdr_rcv_t, hdr_snd_t, hdr_rcv_cnt)
    deallocate (headers)
    deallocate (map_rcv_t, map_snd_t)

  end subroutine compose_messages

  !> @brief Load the message body MPI datatypes from cache
  !<
  subroutine load_messages(this, unit, stage, body_snd_t, body_rcv_t)
    class(mpiroutertype) :: this
    integer(I4B) :: unit
    integer(I4B) :: stage
    integer, dimension(:), allocatable :: body_snd_t
    integer, dimension(:), allocatable :: body_rcv_t
    ! local
    integer(I4B) :: i, rnk

    if (this%enable_monitor) then
      write (this%imon, '(2x,a)') "... running from cache ..."
    end if

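    ! the cache is keyed on (unit, remote rank, stage, direction),
    ! matching how compose_messages stored the datatypes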
    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      body_snd_t(i) = this%msg_cache%get(unit, rnk, stage, mpi_bdy_snd)
    end do
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      body_rcv_t(i) = this%msg_cache%get(unit, rnk, stage, mpi_bdy_rcv)
    end do

  end subroutine load_messages

  subroutine update_senders(this)
    class(mpiroutertype) :: this
    ! local
    integer(I4B) :: i
    class(virtualdatacontainertype), pointer :: vdc

    call this%senders%clear()

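    ! a process is a sender when it owns at least one active remote
    ! container (model or exchange) in the current route set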
    do i = 1, size(this%rte_models)
      vdc => this%rte_models(i)%ptr
      if (.not. vdc%is_local .and. vdc%is_active) then
        call this%senders%push_back_unique(vdc%orig_rank)
      end if
    end do
    do i = 1, size(this%rte_exchanges)
      vdc => this%rte_exchanges(i)%ptr
      if (.not. vdc%is_local .and. vdc%is_active) then
        call this%senders%push_back_unique(vdc%orig_rank)
      end if
    end do

  end subroutine update_senders

  subroutine update_receivers(this)
    class(mpiroutertype) :: this
    ! local
    integer(I4B) :: i, j
    class(virtualdatacontainertype), pointer :: vdc

    call this%receivers%clear()

    if (.not. this%halo_activated) then
      ! without halo activation the pattern is assumed symmetric:
      ! every process we receive from is also sent to
      do i = 1, this%senders%size
        call this%receivers%push_back(this%senders%at(i))
      end do
    else
      ! get the receivers from the VDCs
      do i = 1, size(this%rte_models)
        vdc => this%rte_models(i)%ptr
        do j = 1, vdc%rcv_ranks%size
          call this%receivers%push_back_unique(vdc%rcv_ranks%at(j))
        end do
      end do
      do i = 1, size(this%rte_exchanges)
        vdc => this%rte_exchanges(i)%ptr
        do j = 1, vdc%rcv_ranks%size
          call this%receivers%push_back_unique(vdc%rcv_ranks%at(j))
        end do
      end do
    end if

  end subroutine update_receivers

  !> @brief Check if this stage is cached
  !<
  function is_cached(this, unit, stage) result(in_cache)
    use simmodule, only: ustop
    class(mpiroutertype) :: this
    integer(I4B) :: unit
    integer(I4B) :: stage
    logical(LGP) :: in_cache
    ! local
    integer(I4B) :: i, rnk
    integer(I4B) :: no_cache_cnt
    integer :: cached_type

    in_cache = .false.
    no_cache_cnt = 0

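    ! probe the cache for every send and receive partner; a miss is
    ! reported as no_cached_value and counted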
    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      cached_type = this%msg_cache%get(unit, rnk, stage, mpi_bdy_snd)
      if (cached_type == no_cached_value) no_cache_cnt = no_cache_cnt + 1
    end do
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      cached_type = this%msg_cache%get(unit, rnk, stage, mpi_bdy_rcv)
      if (cached_type == no_cached_value) no_cache_cnt = no_cache_cnt + 1
    end do

    ! it should be all or nothing: either every datatype for this
    ! (unit, stage) is cached, or none of them is
    if (no_cache_cnt == this%receivers%size + this%senders%size) then
      in_cache = .false.
    else if (no_cache_cnt == 0) then
      in_cache = .true.
    else
      call ustop("Internal error: MPI message cache corrupt...")
    end if

  end function is_cached

  subroutine mr_destroy(this)
    class(mpiroutertype) :: this

    call this%msg_cache%destroy()

    call this%senders%destroy()
    call this%receivers%destroy()

    deallocate (this%model_proc_ids)
    deallocate (this%all_models)
    deallocate (this%all_exchanges)

  end subroutine mr_destroy

end module mpiroutermodule