MODFLOW 6  version 6.6.0.dev0
USGS Modular Hydrologic Model
mpiroutermodule Module Reference

Data Types

type  mpiroutertype
 

Functions/Subroutines

class(routerbasetype) function, pointer, public create_mpi_router ()
 Factory method to create MPI router. More...
 
subroutine mr_initialize (this)
 
subroutine activate (this, models, exchanges)
 Activate models and exchanges for routing. More...
 
subroutine deactivate (this)
 Deactivate data after routing. More...
 
subroutine mr_route_all (this, stage)
 This will route all remote data from the global models and exchanges over MPI, for a given stage. More...
 
subroutine mr_route_sln (this, virtual_sol, stage)
 This will route all remote data from models and exchanges in a particular solution over MPI, for a given stage. More...
 
subroutine route_active (this, unit, stage)
 Routes the models and exchanges over MPI, either constructing the message bodies in a sequence of communication steps or loading them from cache. More...
 
subroutine compose_messages (this, unit, stage, body_snd_t, body_rcv_t)
 Constructs the message bodies' MPI datatypes. More...
 
subroutine load_messages (this, unit, stage, body_snd_t, body_rcv_t)
 Load the message body MPI datatypes from cache. More...
 
subroutine update_senders (this)
 
subroutine update_receivers (this)
 
logical(lgp) function is_cached (this, unit, stage)
 Check if this stage is cached. More...
 
subroutine mr_destroy (this)
 

Function/Subroutine Documentation

◆ activate()

subroutine mpiroutermodule::activate ( class(mpiroutertype) this,
type(vdcptrtype), dimension(:), pointer  models,
type(vdcptrtype), dimension(:), pointer  exchanges 
)

Definition at line 156 of file MpiRouter.f90.

157  class(MpiRouterType) :: this
158  type(VdcPtrType), dimension(:), pointer :: models
159  type(VdcPtrType), dimension(:), pointer :: exchanges
160 
161  this%rte_models => models
162  this%rte_exchanges => exchanges
163  call this%message_builder%attach_data(models, exchanges)
164 

◆ compose_messages()

subroutine mpiroutermodule::compose_messages ( class(mpiroutertype) this,
integer(i4b)  unit,
integer(i4b)  stage,
integer, dimension(:)  body_snd_t,
integer, dimension(:)  body_rcv_t 
)

Constructs the message bodies' MPI datatypes for a unit (a solution) and a given stage. This is done in a sequence of 6 steps (distributed over 3 phases):

== synchronizing headers (VdcHeaderType) ==

step 1: Receive headers from remote addresses requesting data from virtual data containers (models, exchanges, ...) local to this process

step 2: Send headers to remote addresses to indicate for which virtual data containers (models, exchanges, ...) data is requested

== synchronizing maps (VdcReceiverMapsType) ==

step 3: Based on the received headers, receive element maps (which elements are to be sent from a contiguous array) for outgoing data

step 4: Send element maps to remote addresses to specify incoming data

== construct message body data types (VirtualDataContainerType) ==

step 5: Construct the message receive body based on the virtual data items in the virtual data containers, and cache it

step 6: Construct the message send body, based on received header data and maps, from the virtual data containers, and cache it
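
The sketch below illustrates the receive-then-count pattern of steps 1 and 2 with a minimal, self-contained program. It is not part of MpiRouter.f90: plain integers stand in for VdcHeaderType, and the peer, tag, and count choices are arbitrary. The key idea is that the receive is posted for the worst-case count and mpi_get_count recovers the actual number of headers afterwards.

    program header_handshake
      use mpi
      implicit none
      integer, parameter :: max_headers = 8
      integer :: comm, rank, nprocs, peer, ierr
      integer :: n_req, hdr_cnt
      integer :: headers_rcv(max_headers), headers_snd(max_headers)
      integer :: rcv_req, snd_req
      integer :: rcv_stat(MPI_STATUS_SIZE)

      call MPI_Init(ierr)
      comm = MPI_COMM_WORLD
      call MPI_Comm_rank(comm, rank, ierr)
      call MPI_Comm_size(comm, nprocs, ierr)
      peer = mod(rank + 1, nprocs)

      ! step 1: post a receive sized for the worst case; the true
      ! count is recovered from the status after the wait
      call MPI_Irecv(headers_rcv, max_headers, MPI_INTEGER, peer, 0, &
                     comm, rcv_req, ierr)

      ! step 2: send only the headers this rank actually requests
      n_req = 1 + mod(rank, max_headers)
      headers_snd(1:n_req) = rank
      call MPI_Isend(headers_snd, n_req, MPI_INTEGER, peer, 0, &
                     comm, snd_req, ierr)

      call MPI_Wait(rcv_req, rcv_stat, ierr)
      call MPI_Get_count(rcv_stat, MPI_INTEGER, hdr_cnt, ierr)
      print '(a,i0,a,i0,a)', "rank ", rank, " got ", hdr_cnt, " header(s)"

      call MPI_Wait(snd_req, MPI_STATUS_IGNORE, ierr)
      call MPI_Finalize(ierr)
    end program header_handshake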

Definition at line 366 of file MpiRouter.f90.

367  class(MpiRouterType) :: this
368  integer(I4B) :: unit
369  integer(I4B) :: stage
370  integer, dimension(:) :: body_snd_t
371  integer, dimension(:) :: body_rcv_t
372  ! local
373  integer(I4B) :: i, j, k
374  integer(I4B) :: rnk
375  integer :: ierr
376  ! mpi handles
377  integer, dimension(:), allocatable :: rcv_req
378  integer, dimension(:), allocatable :: snd_req
379  integer, dimension(:, :), allocatable :: rcv_stat
380  ! message header
381  integer(I4B) :: max_headers
382  type(VdcHeaderType), dimension(:, :), allocatable :: headers
383  integer, dimension(:), allocatable :: hdr_rcv_t
384  integer, dimension(:), allocatable :: hdr_snd_t
385  integer, dimension(:), allocatable :: hdr_rcv_cnt
386  ! maps
387  type(VdcReceiverMapsType), dimension(:, :), allocatable :: rcv_maps
388  integer, dimension(:), allocatable :: map_rcv_t
389  integer, dimension(:), allocatable :: map_snd_t
390 
391  ! allocate handles
392  allocate (rcv_req(this%receivers%size))
393  allocate (snd_req(this%senders%size))
394  allocate (rcv_stat(mpi_status_size, this%receivers%size))
395 
396  ! init handles
397  rcv_req = mpi_request_null
398  snd_req = mpi_request_null
399 
400  ! allocate header data
401  max_headers = size(this%rte_models) + size(this%rte_exchanges)
402  allocate (hdr_rcv_t(this%receivers%size))
403  allocate (hdr_snd_t(this%senders%size))
404  allocate (headers(max_headers, this%receivers%size))
405  allocate (hdr_rcv_cnt(this%receivers%size))
406 
407  ! allocate map data
408  allocate (map_snd_t(this%senders%size))
409  allocate (map_rcv_t(this%receivers%size))
410  allocate (rcv_maps(max_headers, this%receivers%size)) ! for every header, we potentially need the maps
411 
412  if (this%enable_monitor) then
413  write (this%imon, '(2x,a)') "== communicating headers =="
414  end if
415 
416  ! first receive headers for outward data
417  do i = 1, this%receivers%size
418  rnk = this%receivers%at(i)
419  if (this%enable_monitor) then
420  write (this%imon, '(4x,a,i0)') "Ireceive header from process: ", rnk
421  end if
422  call this%message_builder%create_header_rcv(hdr_rcv_t(i))
423  call mpi_irecv(headers(:, i), max_headers, hdr_rcv_t(i), rnk, stage, &
424  this%mpi_world%comm, rcv_req(i), ierr)
425  call check_mpi(ierr)
426  end do
427 
428  ! send header for incoming data
429  do i = 1, this%senders%size
430  rnk = this%senders%at(i)
431  if (this%enable_monitor) then
432  write (this%imon, '(4x,a,i0)') "send header to process: ", rnk
433  end if
434  call this%message_builder%create_header_snd(rnk, stage, hdr_snd_t(i))
435  call mpi_isend(mpi_bottom, 1, hdr_snd_t(i), rnk, stage, &
436  this%mpi_world%comm, snd_req(i), ierr)
437  call check_mpi(ierr)
438  end do
439 
440  ! wait for exchange of all headers
441  call mpi_waitall(this%receivers%size, rcv_req, rcv_stat, ierr)
442  call check_mpi(ierr)
443 
444  ! reinit handles
445  rcv_req = mpi_request_null
446  snd_req = mpi_request_null
447 
448  ! after WaitAll we can count incoming headers from statuses
449  do i = 1, this%receivers%size
450  call mpi_get_count(rcv_stat(:, i), hdr_rcv_t(i), hdr_rcv_cnt(i), ierr)
451 
452  if (this%enable_monitor) then
453  rnk = this%receivers%at(i)
454  write (this%imon, '(4x,a,i0)') "received headers from process: ", rnk
455  write (this%imon, '(6x,a)') "expecting data for:"
456  do j = 1, hdr_rcv_cnt(i)
457  write (this%imon, '(6x,a,i0,a,a)') "id: ", headers(j, i)%id, &
458  " type: ", trim(vdc_type_to_str(headers(j, i)%container_type))
459  write (this%imon, '(6x,a,99i6)') "map sizes: ", headers(j, i)%map_sizes
460  end do
461  end if
462  end do
463 
464  ! clean up types
465  do i = 1, this%receivers%size
466  call mpi_type_free(hdr_rcv_t(i), ierr)
467  end do
468  do i = 1, this%senders%size
469  call mpi_type_free(hdr_snd_t(i), ierr)
470  end do
471 
472  if (this%enable_monitor) then
473  write (this%imon, '(2x,a)') "== communicating maps =="
474  end if
475 
476  ! allocate space for receiving maps
477  do i = 1, this%receivers%size
478  do j = 1, hdr_rcv_cnt(i)
479  call rcv_maps(j, i)%create(headers(j, i)%map_sizes)
480  end do
481  end do
482 
483  ! receive maps
484  do i = 1, this%receivers%size
485  rnk = this%receivers%at(i)
486  if (this%enable_monitor) then
487  write (this%imon, '(4x,a,i0)') "Ireceive maps from process: ", rnk
488  end if
489 
490  call this%message_builder%create_map_rcv(rcv_maps(:, i), hdr_rcv_cnt(i), &
491  map_rcv_t(i))
492  call mpi_irecv(mpi_bottom, 1, map_rcv_t(i), rnk, stage, &
493  this%mpi_world%comm, rcv_req(i), ierr)
494  call check_mpi(ierr)
495  end do
496 
497  ! send maps
498  do i = 1, this%senders%size
499  rnk = this%senders%at(i)
500  if (this%enable_monitor) then
501  write (this%imon, '(4x,a,i0)') "send map to process: ", rnk
502  end if
503 
504  call this%message_builder%create_map_snd(rnk, stage, map_snd_t(i))
505  call mpi_isend(mpi_bottom, 1, map_snd_t(i), rnk, stage, &
506  this%mpi_world%comm, snd_req(i), ierr)
507  call check_mpi(ierr)
508  end do
509 
510  ! wait on receiving maps
511  call mpi_waitall(this%receivers%size, rcv_req, rcv_stat, ierr)
512  call check_mpi(ierr)
513 
514  ! print maps
515  if (this%enable_monitor) then
516  do i = 1, this%receivers%size
517  rnk = this%receivers%at(i)
518  write (this%imon, '(4x,a,i0)') "received maps from process: ", rnk
519  do j = 1, hdr_rcv_cnt(i)
520  write (this%imon, '(6x,a,i0,a,a)') "id: ", headers(j, i)%id, &
521  " type: ", trim(vdc_type_to_str(headers(j, i)%container_type))
522  do k = 1, nr_vdc_element_maps
523  write (this%imon, '(8x,i0, a,i0)') k, " nr. elements: ", &
524  rcv_maps(j, i)%el_maps(k)%nr_virt_elems
525  if (rcv_maps(j, i)%el_maps(k)%nr_virt_elems > 0) then
526  write (this%imon, '(8x,*(i6))') &
527  rcv_maps(j, i)%el_maps(k)%remote_elem_shift
528  end if
529  end do
530  end do
531  end do
532  end if
533 
534  ! clean up types
535  do i = 1, this%receivers%size
536  call mpi_type_free(map_rcv_t(i), ierr)
537  end do
538  do i = 1, this%senders%size
539  call mpi_type_free(map_snd_t(i), ierr)
540  end do
541 
542  if (this%enable_monitor) then
543  write (this%imon, '(2x,a)') "== composing message bodies =="
544  end if
545 
546  ! construct recv bodies and cache
547  do i = 1, this%senders%size
548  rnk = this%senders%at(i)
549  if (this%enable_monitor) then
550  write (this%imon, '(4x,a,i0)') "build recv body for process: ", rnk
551  end if
552 
553  call this%message_builder%create_body_rcv(rnk, stage, body_rcv_t(i))
554  call this%msg_cache%put(unit, rnk, stage, mpi_bdy_rcv, body_rcv_t(i))
555  end do
556 
557  ! construct send bodies and cache
558  do i = 1, this%receivers%size
559  rnk = this%receivers%at(i)
560  if (this%enable_monitor) then
561  write (this%imon, '(4x,a,i0)') "build send body for process: ", rnk
562  end if
563 
564  call this%message_builder%create_body_snd( &
565  rnk, stage, headers(1:hdr_rcv_cnt(i), i), &
566  rcv_maps(:, i), body_snd_t(i))
567  call this%msg_cache%put(unit, rnk, stage, mpi_bdy_snd, body_snd_t(i))
568  end do
569 
570  ! clean up element maps
571  do i = 1, this%receivers%size
572  do j = 1, hdr_rcv_cnt(i)
573  call rcv_maps(j, i)%destroy()
574  end do
575  end do
576 
577  deallocate (rcv_req, snd_req, rcv_stat)
578  deallocate (hdr_rcv_t, hdr_snd_t, hdr_rcv_cnt)
579  deallocate (headers)
580  deallocate (map_rcv_t, map_snd_t)
581  deallocate (rcv_maps)
582 
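
Throughout the listing, mpi_isend and mpi_irecv are called with mpi_bottom as the buffer: the committed datatypes address the program's data directly via absolute displacements, so no pack/unpack buffer is needed. Below is a minimal, self-contained sketch of that idiom; the two arrays and all values are illustrative, and this is not how the message builder actually constructs its types.

    program bottom_transfer
      use mpi
      implicit none
      integer :: rank, nprocs, ierr, dtype
      integer(kind=MPI_ADDRESS_KIND) :: displs(2)
      integer :: blocklens(2)
      double precision :: head(4), conc(4)
      integer :: stat(MPI_STATUS_SIZE)

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)

      head = 0.0d0
      conc = 0.0d0
      if (rank == 0) then
         head = 7.0d0   ! some payload to transfer
         conc = 77.0d0
      end if

      ! describe both arrays by their absolute addresses, so one send
      ! from MPI_BOTTOM covers them without copying into a buffer
      call MPI_Get_address(head, displs(1), ierr)
      call MPI_Get_address(conc, displs(2), ierr)
      blocklens = 4
      call MPI_Type_create_hindexed(2, blocklens, displs, &
                                    MPI_DOUBLE_PRECISION, dtype, ierr)
      call MPI_Type_commit(dtype, ierr)

      if (nprocs >= 2) then
         if (rank == 0) then
            call MPI_Send(MPI_BOTTOM, 1, dtype, 1, 1, MPI_COMM_WORLD, ierr)
         else if (rank == 1) then
            call MPI_Recv(MPI_BOTTOM, 1, dtype, 0, 1, MPI_COMM_WORLD, &
                          stat, ierr)
            print '(a,8f6.1)', "rank 1 received: ", head, conc
         end if
      end if

      call MPI_Type_free(dtype, ierr)
      call MPI_Finalize(ierr)
    end program bottom_transfer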

◆ create_mpi_router()

class(routerbasetype) function, pointer, public mpiroutermodule::create_mpi_router ()

Definition at line 55 of file MpiRouter.f90.

56  class(RouterBaseType), pointer :: router
57  ! local
58  class(MpiRouterType), pointer :: mpi_router
59 
60  allocate (mpi_router)
61  router => mpi_router
62 
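
The factory allocates the concrete router and returns it through a base-class pointer. A minimal sketch of this Fortran idiom with hypothetical stand-in types (not the actual RouterBaseType hierarchy):

    module router_demo
      implicit none

      type, abstract :: RouterBase
      end type RouterBase

      type, extends(RouterBase) :: MpiRouterDemo
      end type MpiRouterDemo

    contains

      ! allocate the concrete type, return it as a base-class pointer
      function create_router() result(router)
        class(RouterBase), pointer :: router
        class(MpiRouterDemo), pointer :: mpi_router
        allocate (mpi_router)
        router => mpi_router
      end function create_router

    end module router_demo

    program factory_demo
      use router_demo
      implicit none
      class(RouterBase), pointer :: r
      r => create_router()
      select type (r)
      class is (MpiRouterDemo)
        print *, "got an MpiRouterDemo"
      end select
      deallocate (r)
    end program factory_demo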

◆ deactivate()

subroutine mpiroutermodule::deactivate ( class(mpiroutertype) this )
private

Definition at line 169 of file MpiRouter.f90.

170  class(MpiRouterType) :: this
171 
172  this%rte_models => null()
173  this%rte_exchanges => null()
174  call this%message_builder%release_data()
175 

◆ is_cached()

logical(lgp) function mpiroutermodule::is_cached ( class(mpiroutertype) this,
integer(i4b)  unit,
integer(i4b)  stage 
)
private

Definition at line 667 of file MpiRouter.f90.

668  use SimModule, only: ustop
669  class(MpiRouterType) :: this
670  integer(I4B) :: unit
671  integer(I4B) :: stage
672  logical(LGP) :: in_cache
673  ! local
674  integer(I4B) :: i, rnk
675  integer(I4B) :: no_cache_cnt
676  integer :: cached_type
677 
678  in_cache = .false.
679  no_cache_cnt = 0
680 
681  do i = 1, this%receivers%size
682  rnk = this%receivers%at(i)
683  cached_type = this%msg_cache%get(unit, rnk, stage, mpi_bdy_snd)
684  if (cached_type == no_cached_value) no_cache_cnt = no_cache_cnt + 1
685  end do
686  do i = 1, this%senders%size
687  rnk = this%senders%at(i)
688  cached_type = this%msg_cache%get(unit, rnk, stage, mpi_bdy_rcv)
689  if (cached_type == no_cached_value) no_cache_cnt = no_cache_cnt + 1
690  end do
691 
692  ! it should be all or nothing
693  if (no_cache_cnt == this%receivers%size + this%senders%size) then
694  in_cache = .false.
695  else if (no_cache_cnt == 0) then
696  in_cache = .true.
697  else
698  call ustop("Internal error: MPI message cache corrupt...")
699  end if
700 
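
The all-or-nothing rule above guards cache integrity: for a given unit and stage, either every peer's body datatype was cached earlier (by compose_messages) or none was. A serial sketch of the invariant check, with an illustrative sentinel value standing in for the cache's NO_CACHED_VALUE:

    program cache_invariant
      implicit none
      integer, parameter :: no_cached_value = -1  ! stand-in sentinel
      integer :: cached(4)
      integer :: no_cache_cnt
      logical :: in_cache

      ! handles as a fully populated cache would return them; set one
      ! entry to the sentinel to see the corruption branch trigger
      cached = [11, 12, 13, 14]
      no_cache_cnt = count(cached == no_cached_value)

      if (no_cache_cnt == size(cached)) then
         in_cache = .false.   ! nothing cached yet: compose the messages
      else if (no_cache_cnt == 0) then
         in_cache = .true.    ! everything cached: load from cache
      else
         error stop "Internal error: MPI message cache corrupt..."
      end if

      print *, "in_cache = ", in_cache
    end program cache_invariant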

◆ load_messages()

subroutine mpiroutermodule::load_messages ( class(mpiroutertype) this,
integer(i4b)  unit,
integer(i4b)  stage,
integer, dimension(:), allocatable  body_snd_t,
integer, dimension(:), allocatable  body_rcv_t 
)
private

Definition at line 587 of file MpiRouter.f90.

588  class(MpiRouterType) :: this
589  integer(I4B) :: unit
590  integer(I4B) :: stage
591  integer, dimension(:), allocatable :: body_snd_t
592  integer, dimension(:), allocatable :: body_rcv_t
593  ! local
594  integer(I4B) :: i, rnk
595 
596  if (this%enable_monitor) then
597  write (this%imon, '(2x,a)') "... running from cache ..."
598  end if
599 
600  do i = 1, this%receivers%size
601  rnk = this%receivers%at(i)
602  body_snd_t(i) = this%msg_cache%get(unit, rnk, stage, mpi_bdy_snd)
603  end do
604  do i = 1, this%senders%size
605  rnk = this%senders%at(i)
606  body_rcv_t(i) = this%msg_cache%get(unit, rnk, stage, mpi_bdy_rcv)
607  end do
608 

◆ mr_destroy()

subroutine mpiroutermodule::mr_destroy ( class(mpiroutertype) this )

Definition at line 703 of file MpiRouter.f90.

704  class(MpiRouterType) :: this
705 
706  call this%msg_cache%destroy()
707 
708  call this%senders%destroy()
709  call this%receivers%destroy()
710 
711  deallocate (this%model_proc_ids)
712  deallocate (this%all_models)
713  deallocate (this%all_exchanges)
714 

◆ mr_initialize()

subroutine mpiroutermodule::mr_initialize ( class(mpiroutertype) this )
private

Definition at line 65 of file MpiRouter.f90.

66  use InputOutputModule, only: getunit
67  use ConstantsModule, only: LINELENGTH
68  class(MpiRouterType) :: this
69  ! local
70  integer :: ierr
71  integer(I4B) :: i
72  integer(I4B) :: nr_models, nr_exchanges
73  class(VirtualDataContainerType), pointer :: vdc
74  character(len=LINELENGTH) :: monitor_file
75 
76  ! routing over all when starting
77  this%halo_activated = .false.
78 
79  ! to log or not to log
80  this%enable_monitor = .false.
81 
82  ! initialize the MPI message builder and cache
83  call this%message_builder%init()
84  call this%msg_cache%init()
85 
86  ! get mpi world for our process
87  this%mpi_world => get_mpi_world()
88 
89  ! init address list
90  call this%senders%init()
91  call this%receivers%init()
92 
93  ! find out where models are
94  nr_models = virtual_model_list%Count()
95  nr_exchanges = virtual_exchange_list%Count()
96  allocate (this%model_proc_ids(nr_models))
97  allocate (this%all_models(nr_models))
98  allocate (this%all_exchanges(nr_exchanges))
99 
100  do i = 1, nr_models
101  vdc => get_vdc_from_list(virtual_model_list, i)
102  this%all_models(i)%ptr => vdc
103  if (vdc%is_local) then
104  this%model_proc_ids(i) = proc_id
105  else
106  this%model_proc_ids(i) = 0
107  end if
108  end do
109 
110  call mpi_allreduce(mpi_in_place, this%model_proc_ids, nr_models, &
111  mpi_integer, mpi_sum, this%mpi_world%comm, ierr)
112  call check_mpi(ierr)
113 
114  ! set the process id to the models and exchanges
115  do i = 1, nr_models
116  vdc => get_vdc_from_list(virtual_model_list, i)
117  call vdc%set_orig_rank(this%model_proc_ids(i))
118  end do
119 
120  do i = 1, nr_exchanges
121  vdc => get_vdc_from_list(virtual_exchange_list, i)
122  this%all_exchanges(i)%ptr => vdc
123  select type (vex => vdc)
124  class is (VirtualExchangeType)
125  call vex%set_orig_rank(vex%v_model1%orig_rank)
126  if (vex%v_model1%is_local) then
127  call vex%set_orig_rank(vex%v_model2%orig_rank)
128  end if
129  end select
130  end do
131 
132  ! open log file
133  if (this%enable_monitor) then
134  this%imon = getunit()
135  write (monitor_file, '(a,i0,a)') "mpi.p", proc_id, ".log"
136  open (unit=this%imon, file=monitor_file)
137  call this%message_builder%set_monitor(this%imon)
138 
139  ! write initial info
140  write (this%imon, '(a,/)') ">> initialize MPI Router:"
141  write (this%imon, '(2x,a,i0)') "process id: ", proc_id
142  write (this%imon, '(2x,a,i0)') "nr. of processes: ", nr_procs
143  write (this%imon, '(2x,a,i0)') "nr. of models: ", nr_models
144  write (this%imon, '(2x,a,i0)') "nr. of exchanges: ", nr_exchanges
145  write (this%imon, '(2x,2a)') "model id, processor id:"
146  do i = 1, nr_models
147  write (this%imon, '(4x,2i8)') i, this%model_proc_ids(i)
148  end do
149  write (this%imon, '(a,/)') "<< initialize done"
150  end if
151 
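
The mpi_allreduce with MPI_IN_PLACE and MPI_SUM above is a compact ownership-discovery pattern: each rank writes its process id only into the slots of the models it owns (zero elsewhere), and the sum yields the complete model-to-process map on every rank. A self-contained sketch, where round-robin ownership stands in for vdc%is_local:

    program owner_discovery
      use mpi
      implicit none
      integer, parameter :: nr_models = 6
      integer :: rank, nprocs, i, ierr
      integer :: model_proc_ids(nr_models)

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)

      ! each rank marks only the models it owns; other slots stay 0
      model_proc_ids = 0
      do i = 1, nr_models
         if (mod(i - 1, nprocs) == rank) model_proc_ids(i) = rank
      end do

      ! summing over all ranks fills in every slot (rank 0 contributes
      ! zeros, which is exactly its process id)
      call MPI_Allreduce(MPI_IN_PLACE, model_proc_ids, nr_models, &
                         MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr)

      if (rank == 0) print '(a,*(i3))', "model -> process: ", model_proc_ids
      call MPI_Finalize(ierr)
    end program owner_discovery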

◆ mr_route_all()

subroutine mpiroutermodule::mr_route_all ( class(mpiroutertype) this,
integer(i4b)  stage 
)
private

Definition at line 181 of file MpiRouter.f90.

183  class(MpiRouterType) :: this
184  integer(I4B) :: stage
185 
186  if (this%enable_monitor) then
187  write (this%imon, '(/,2a)') ">> routing all: ", stg_to_str(stage)
188  end if
189 
190  ! route all
191  call this%activate(this%all_models, this%all_exchanges)
192  call this%route_active(0, stage)
193  call this%deactivate()
194 
195  if (this%enable_monitor) then
196  write (this%imon, '(a,/)') "<< end routing all"
197  !call mem_print_detailed(this%imon)
198  end if
199 

◆ mr_route_sln()

subroutine mpiroutermodule::mr_route_sln ( class(mpiroutertype) this,
type(virtualsolutiontype) virtual_sol,
integer(i4b)  stage 
)

Definition at line 205 of file MpiRouter.f90.

206  class(MpiRouterType) :: this
207  type(VirtualSolutionType) :: virtual_sol
208  integer(I4B) :: stage
209 
210  if (this%enable_monitor) then
211  write (this%imon, '(/,a,i0,2a)') ">> routing solution: ", &
212  virtual_sol%solution_id, ", ", stg_to_str(stage)
213  end if
214 
215  ! route for this solution
216  call this%activate(virtual_sol%models, virtual_sol%exchanges)
217  call this%route_active(virtual_sol%solution_id, stage)
218  call this%deactivate()
219 
220  if (this%enable_monitor) then
221  write (this%imon, '(a)') "<< end routing solution"
222  end if
223 

◆ route_active()

subroutine mpiroutermodule::route_active ( class(mpiroutertype) this,
integer(i4b)  unit,
integer(i4b)  stage 
)
private
Parameters
    this     this mpi router
    unit     the solution id, or equal to 0 when global
    stage    the stage to route

Definition at line 230 of file MpiRouter.f90.

231  use SimModule, only: store_error
232  class(MpiRouterType) :: this !< this mpi router
233  integer(I4B) :: unit !< the solution id, or equal to 0 when global
234  integer(I4B) :: stage !< the stage to route
235  ! local
236  integer(I4B) :: i
237  integer(I4B) :: rnk
238  integer :: ierr, msg_size
239  logical(LGP) :: from_cache
240  ! mpi handles
241  integer, dimension(:), allocatable :: rcv_req
242  integer, dimension(:), allocatable :: snd_req
243  integer, dimension(:, :), allocatable :: rcv_stat
244 
245  ! message body
246  integer, dimension(:), allocatable :: body_rcv_t
247  integer, dimension(:), allocatable :: body_snd_t
248 
249  ! update address list
250  call this%update_senders()
251  call this%update_receivers()
252 
253  ! allocate body data
254  allocate (body_rcv_t(this%senders%size))
255  allocate (body_snd_t(this%receivers%size))
256 
257  ! allocate handles
258  allocate (rcv_req(this%senders%size))
259  allocate (snd_req(this%receivers%size))
260  allocate (rcv_stat(mpi_status_size, this%senders%size))
261 
262  ! always initialize request handles
263  rcv_req = mpi_request_null
264  snd_req = mpi_request_null
265 
266  if (this%enable_monitor) then
267  write (this%imon, '(2x,a,*(i3))') "process ids sending data: ", &
268  this%senders%get_values()
269  write (this%imon, '(2x,a,*(i3))') "process ids receiving data: ", &
270  this%receivers%get_values()
271  end if
272 
273  ! from cache or build
274  from_cache = this%is_cached(unit, stage)
275  if (.not. from_cache) then
276  call this%compose_messages(unit, stage, body_snd_t, body_rcv_t)
277  else
278  call this%load_messages(unit, stage, body_snd_t, body_rcv_t)
279  end if
280 
281  if (this%enable_monitor) then
282  write (this%imon, '(2x,a)') "== communicating bodies =="
283  end if
284 
285  ! recv bodies
286  do i = 1, this%senders%size
287  rnk = this%senders%at(i)
288  if (this%enable_monitor) then
289  write (this%imon, '(4x,a,i0)') "receiving from process: ", rnk
290  end if
291 
292  call mpi_type_size(body_rcv_t(i), msg_size, ierr)
293  if (msg_size > 0) then
294  call mpi_irecv(mpi_bottom, 1, body_rcv_t(i), rnk, stage, &
295  this%mpi_world%comm, rcv_req(i), ierr)
296  call check_mpi(ierr)
297  end if
298 
299  if (this%enable_monitor) then
300  write (this%imon, '(6x,a,i0)') "message body size: ", msg_size
301  end if
302  end do
303 
304  ! send bodies
305  do i = 1, this%receivers%size
306  rnk = this%receivers%at(i)
307  if (this%enable_monitor) then
308  write (this%imon, '(4x,a,i0)') "sending to process: ", rnk
309  end if
310 
311  call mpi_type_size(body_snd_t(i), msg_size, ierr)
312  if (msg_size > 0) then
313  call mpi_isend(mpi_bottom, 1, body_snd_t(i), rnk, stage, &
314  this%mpi_world%comm, snd_req(i), ierr)
315  call check_mpi(ierr)
316  end if
317 
318  if (this%enable_monitor) then
319  write (this%imon, '(6x,a,i0)') "message body size: ", msg_size
320  end if
321  call flush (this%imon)
322  end do
323 
324  ! wait for exchange of all messages
325  call mpi_waitall(this%senders%size, rcv_req, rcv_stat, ierr)
326  call check_mpi(ierr)
327 
328  deallocate (rcv_req, snd_req, rcv_stat)
329  deallocate (body_rcv_t, body_snd_t)
330 
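
Note the mpi_type_size guard above: bodies whose datatype has zero size are never posted, so no rank waits on a message that carries no data. A minimal sketch of that guard, where a zero-length contiguous type stands in for an empty message body:

    program empty_body_guard
      use mpi
      implicit none
      integer :: dtype, msg_size, ierr

      call MPI_Init(ierr)

      ! zero-block datatype plays the role of a body with no data items
      call MPI_Type_contiguous(0, MPI_DOUBLE_PRECISION, dtype, ierr)
      call MPI_Type_commit(dtype, ierr)

      call MPI_Type_size(dtype, msg_size, ierr)
      if (msg_size > 0) then
         print *, "would post mpi_isend/mpi_irecv here"
      else
         print *, "empty body: skip posting"
      end if

      call MPI_Type_free(dtype, ierr)
      call MPI_Finalize(ierr)
    end program empty_body_guard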

◆ update_receivers()

subroutine mpiroutermodule::update_receivers ( class(mpiroutertype) this )
private

Definition at line 634 of file MpiRouter.f90.

635  class(MpiRouterType) :: this
636  ! local
637  integer(I4B) :: i, j
638  class(VirtualDataContainerType), pointer :: vdc
639 
640  call this%receivers%clear()
641 
642  if (.not. this%halo_activated) then
643  ! assuming symmetry for now
644  do i = 1, this%senders%size
645  call this%receivers%push_back(this%senders%at(i))
646  end do
647  else
648  ! get the receivers from the VDCs
649  do i = 1, size(this%rte_models)
650  vdc => this%rte_models(i)%ptr
651  do j = 1, vdc%rcv_ranks%size
652  call this%receivers%push_back_unique(vdc%rcv_ranks%at(j))
653  end do
654  end do
655  do i = 1, size(this%rte_exchanges)
656  vdc => this%rte_exchanges(i)%ptr
657  do j = 1, vdc%rcv_ranks%size
658  call this%receivers%push_back_unique(vdc%rcv_ranks%at(j))
659  end do
660  end do
661  end if
662 

◆ update_senders()

subroutine mpiroutermodule::update_senders ( class(mpiroutertype) this )
private

Definition at line 611 of file MpiRouter.f90.

612  class(MpiRouterType) :: this
613  ! local
614  integer(I4B) :: i
615  class(VirtualDataContainerType), pointer :: vdc
616 
617  call this%senders%clear()
618 
619  do i = 1, size(this%rte_models)
620  vdc => this%rte_models(i)%ptr
621  if (.not. vdc%is_local .and. vdc%is_active) then
622  call this%senders%push_back_unique(vdc%orig_rank)
623  end if
624  end do
625  do i = 1, size(this%rte_exchanges)
626  vdc => this%rte_exchanges(i)%ptr
627  if (.not. vdc%is_local .and. vdc%is_active) then
628  call this%senders%push_back_unique(vdc%orig_rank)
629  end if
630  end do
631