Block-Structured AMR Software Framework
Loading...
Searching...
No Matches
AMReX_ParticleCommunication.H
Go to the documentation of this file.
1#ifndef AMREX_PARTICLECOMMUNICATION_H_
2#define AMREX_PARTICLECOMMUNICATION_H_
3#include <AMReX_Config.H>
4
5#include <AMReX_Gpu.H>
7#include <AMReX_IntVect.H>
9#include <AMReX_MFIter.H>
10#include <AMReX_OpenMP.H>
11#include <AMReX_Scan.H>
12#include <AMReX_TypeTraits.H>
13#include <AMReX_MakeParticle.H>
14#include <AMReX_ParmParse.H>
15
16#include <algorithm>
17#include <iterator>
18#include <map>
19#include <numeric>
20#include <utility>
21
22namespace amrex {
23
24class ParticleContainerBase;
25
27{
28 template <class PTile>
29 void resizeTiles (std::vector<PTile*>& tiles, const std::vector<int>& sizes, std::vector<int>& offsets) const
30 {
31 for(int i = 0; i < std::ssize(sizes); ++i)
32 {
33 int offset = tiles[i]->numTotalParticles();
34 int nn = tiles[i]->getNumNeighbors();
35 tiles[i]->setNumNeighbors(nn + sizes[i]);
36 offsets.push_back(offset);
37 }
38 }
39};
40
42{
43 template <class PTile>
44 void resizeTiles (std::vector<PTile*>& tiles, const std::vector<int>& sizes, std::vector<int>& offsets) const
45 {
46 int N = static_cast<int>(sizes.size());
47
48 std::map<PTile*, int> tile_sizes;
49 for(int i = 0; i < N; ++i) {
50 tile_sizes[tiles[i]] = tiles[i]->numParticles();
51 }
52
53 for(int i = 0; i < N; ++i)
54 {
55 offsets.push_back(tile_sizes[tiles[i]]);
56 tile_sizes[tiles[i]] += sizes[i];
57 }
58
59 for (auto& kv : tile_sizes) {
60 kv.first->resize(kv.second);
61 }
62 }
63};
64
66{
67 using TileKey = std::pair<int, int>;
68
74
75 void clear ();
76
77 void setNumLevels (int num_levels);
78
79 void resize (int gid, int tid, int lev, int size);
80
81 [[nodiscard]] int numCopies (TileKey const& index, int lev) const
82 {
83 if (m_boxes.size() <= lev) { return 0; }
84 auto mit = m_boxes[lev].find(index);
85 return mit == m_boxes[lev].end() ? 0 : int(mit->second.size());
86 }
87
88 [[nodiscard]] int numLevels () const { return int(m_boxes.size()); }
89};
90
92{
93 using TileKey = std::pair<int, int>;
94
98
99#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
    //! One batch of copy work handed to the OpenMP two-pass host algorithm:
    //! raw pointers into a single tile's copy descriptors plus the output
    //! array of destination slots.
    struct OmpCopyWork
    {
        const int* boxes = nullptr;    //!< destination box id per copy (negative entries are skipped)
        const int* levs = nullptr;     //!< destination level per copy
        const int* tiles = nullptr;    //!< destination tile id per copy
        int* dst_indices = nullptr;    //!< output: destination index per copy
        int num_copies = 0;            //!< number of entries in the arrays above
    };
108#endif
109
    {
        //! Scratch state shared by the buildCopies algorithms.  Constructed
        //! with the total number of destination buckets.
        explicit BuildWorkspace (int a_num_buckets)
            : num_buckets(a_num_buckets)
#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
            , omp_copy_offsets(1, 0)   // leading 0 so back() is always valid
#endif
        {}

        int num_buckets = 0;   //!< total number of (level, grid, tile) buckets

        // NOTE(review): later code reads/writes a h_box_counts member of this
        // workspace (host-side bucket counters); its declaration is not
        // visible in this view -- confirm it belongs here.

#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
        Vector<unsigned int> omp_thread_box_counts;   //!< per-(thread, bucket) counters
        Vector<OmpCopyWork> omp_copy_work;            //!< flattened copy batches
        Vector<Long> omp_copy_offsets;                //!< exclusive prefix sums of batch sizes
#endif
    };
128
129 template <class PC, class F>
130 void forEachCopyBatch (const PC& pc, const ParticleCopyOp& op, F&& f)
131 {
132 const int num_levels = op.numLevels();
133 m_dst_indices.resize(num_levels);
134 auto&& batch_fn = std::forward<F>(f);
135
136 for (int lev = 0; lev < num_levels; ++lev)
137 {
138 for (const auto& kv : pc.GetParticles(lev))
139 {
140 auto index = kv.first;
141 int num_copies = op.numCopies(index, lev);
142 if (num_copies == 0) { continue; }
143
144 auto& dst_indices = m_dst_indices[lev][index];
145 dst_indices.resize(num_copies);
146
147 batch_fn(lev, index, num_copies, dst_indices);
148 }
149 }
150 }
151
152 template <class PC, class GetBucket>
153 void buildCopies (const PC& pc, const ParticleCopyOp& op,
154 StableOrderedAlgorithm, BuildWorkspace& workspace, GetBucket const& getBucket)
155 {
156 BL_PROFILE("ParticleCopyPlan::buildCopiesStableOrdered");
157
158 forEachCopyBatch(pc, op,
159 [&] (int lev, TileKey const& index, int num_copies, Gpu::DeviceVector<int>& dst_indices)
160 {
161#ifdef AMREX_USE_GPU
162 const Gpu::DeviceVector<int>& d_boxes = op.m_boxes[lev].at(index);
163 Gpu::HostVector<int> h_boxes(d_boxes.size());
164 Gpu::copy(Gpu::deviceToHost, d_boxes.begin(), d_boxes.end(), h_boxes.begin());
165
166 const Gpu::DeviceVector<int>& d_levs = op.m_levels[lev].at(index);
167 Gpu::HostVector<int> h_levs(d_levs.size());
168 Gpu::copy(Gpu::deviceToHost, d_levs.begin(), d_levs.end(), h_levs.begin());
169
170 const Gpu::DeviceVector<int>& d_tiles = op.m_tiles[lev].at(index);
171 Gpu::HostVector<int> h_tiles(d_tiles.size());
172 Gpu::copy(Gpu::deviceToHost, d_tiles.begin(), d_tiles.end(), h_tiles.begin());
173
174 Gpu::HostVector<int> h_dst_indices(num_copies);
175#else
176 const Gpu::DeviceVector<int>& h_boxes = op.m_boxes[lev].at(index);
177 const Gpu::DeviceVector<int>& h_levs = op.m_levels[lev].at(index);
178 const Gpu::DeviceVector<int>& h_tiles = op.m_tiles[lev].at(index);
179
180 Gpu::DeviceVector<int>& h_dst_indices = dst_indices;
181#endif
182 for (int i = 0; i < num_copies; ++i) {
183 int dst_box = h_boxes[i];
184 if (dst_box >= 0) {
185 int dst_tile = h_tiles[i];
186 int dst_lev = h_levs[i];
187 int dst_index = static_cast<int>(workspace.h_box_counts[getBucket(dst_lev, dst_box, dst_tile)]++);
188 h_dst_indices[i] = dst_index;
189 }
190 }
191
192#ifdef AMREX_USE_GPU
194 h_dst_indices.begin(), h_dst_indices.end(),
195 dst_indices.begin());
196#endif
197 });
198 }
199
200#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
    //! Build destination indices on the host with OpenMP using a two-pass,
    //! counting-sort style algorithm:
    //!   pass 1: each thread counts, per bucket, the copies in its slice;
    //!   a prefix sum over (bucket, thread) turns counts into offsets;
    //!   pass 2: each thread assigns destination indices from its offsets.
    //! The result is deterministic (independent of thread scheduling).
    template <class PC, class GetBucket>
    void buildCopies (const PC& pc, const ParticleCopyOp& op,
                      TwoPassHostAlgorithm, BuildWorkspace& workspace, GetBucket const& getBucket)
    {
        BL_PROFILE("ParticleCopyPlan::buildCopiesTwoPassHost");

        // One counter per (thread, bucket) pair.
        workspace.omp_thread_box_counts.resize(OpenMP::get_max_threads()*workspace.num_buckets, 0U);

        // Flatten all per-tile batches into one work list with exclusive
        // prefix offsets so the threads can split the total range evenly.
        forEachCopyBatch(pc, op,
        [&op, &workspace] (int lev, TileKey const& index, int num_copies, Gpu::DeviceVector<int>& dst_indices)
        {
            const auto* p_boxes = op.m_boxes[lev].at(index).dataPtr();
            const auto* p_levs = op.m_levels[lev].at(index).dataPtr();
            const auto* p_tiles = op.m_tiles[lev].at(index).dataPtr();
            auto* p_dst_indices = dst_indices.dataPtr();

            workspace.omp_copy_work.push_back({p_boxes, p_levs, p_tiles, p_dst_indices, num_copies});
            workspace.omp_copy_offsets.push_back(workspace.omp_copy_offsets.back() + num_copies);
        });

        if (workspace.omp_copy_work.empty()) { return; }

        std::fill(workspace.omp_thread_box_counts.begin(), workspace.omp_thread_box_counts.end(), 0U);
        auto* p_omp_thread_box_counts = workspace.omp_thread_box_counts.data();
        const auto* p_omp_copy_work = workspace.omp_copy_work.data();
        const auto* p_omp_copy_offsets = workspace.omp_copy_offsets.data();
        const Long total_num_copies = workspace.omp_copy_offsets.back();

#pragma omp parallel
        {
            int thread_num = OpenMP::get_thread_num();
            int num_threads = OpenMP::get_num_threads();
            // Evenly split the global copy range [0, total_num_copies).
            Long ibegin = thread_num*total_num_copies/num_threads;
            Long iend = (thread_num+1)*total_num_copies/num_threads;
            auto* p_thread_box_counts = p_omp_thread_box_counts + thread_num*workspace.num_buckets;

            // Pass 1: count, per bucket, the accepted copies in this slice.
            if (ibegin < iend)
            {
                // Locate the first work batch overlapping this slice.
                int iwork = static_cast<int>(std::upper_bound(workspace.omp_copy_offsets.begin(),
                                                              workspace.omp_copy_offsets.end(),
                                                              ibegin)
                                             - workspace.omp_copy_offsets.begin()) - 1;
                while (iwork < static_cast<int>(workspace.omp_copy_work.size()) &&
                       p_omp_copy_offsets[iwork] < iend)
                {
                    auto const& work = p_omp_copy_work[iwork];
                    // Clip the batch to this thread's slice.
                    Long global_begin = std::max(ibegin, p_omp_copy_offsets[iwork]);
                    Long global_end = std::min(iend, p_omp_copy_offsets[iwork+1]);
                    int local_begin = static_cast<int>(global_begin - p_omp_copy_offsets[iwork]);
                    int local_end = static_cast<int>(global_end - p_omp_copy_offsets[iwork]);
                    for (int i = local_begin; i < local_end; ++i)
                    {
                        int dst_box = work.boxes[i];
                        if (dst_box >= 0)   // negative box id means this copy is skipped
                        {
                            int dst_tile = work.tiles[i];
                            int dst_lev = work.levs[i];
                            ++p_thread_box_counts[getBucket(dst_lev, dst_box, dst_tile)];
                        }
                    }
                    ++iwork;
                }
            }

#pragma omp barrier
            // Turn per-thread counts into per-thread starting offsets, bucket
            // by bucket, continuing from the global counters in h_box_counts.
#pragma omp for
            for (int ibucket = 0; ibucket < workspace.num_buckets; ++ibucket)
            {
                unsigned int offset = workspace.h_box_counts[ibucket];
                for (int tid = 0; tid < num_threads; ++tid)
                {
                    auto& count = p_omp_thread_box_counts[tid*workspace.num_buckets + ibucket];
                    unsigned int total = count;
                    count = offset;
                    offset += total;
                }
                workspace.h_box_counts[ibucket] = offset;
            }

            // Pass 2: re-walk the same slice, handing out destination indices
            // from this thread's reserved per-bucket offsets.
            if (ibegin < iend)
            {
                int iwork = static_cast<int>(std::upper_bound(workspace.omp_copy_offsets.begin(),
                                                              workspace.omp_copy_offsets.end(),
                                                              ibegin)
                                             - workspace.omp_copy_offsets.begin()) - 1;
                while (iwork < static_cast<int>(workspace.omp_copy_work.size()) &&
                       p_omp_copy_offsets[iwork] < iend)
                {
                    auto const& work = p_omp_copy_work[iwork];
                    Long global_begin = std::max(ibegin, p_omp_copy_offsets[iwork]);
                    Long global_end = std::min(iend, p_omp_copy_offsets[iwork+1]);
                    int local_begin = static_cast<int>(global_begin - p_omp_copy_offsets[iwork]);
                    int local_end = static_cast<int>(global_end - p_omp_copy_offsets[iwork]);
                    for (int i = local_begin; i < local_end; ++i)
                    {
                        int dst_box = work.boxes[i];
                        if (dst_box >= 0)
                        {
                            int dst_tile = work.tiles[i];
                            int dst_lev = work.levs[i];
                            int bucket = getBucket(dst_lev, dst_box, dst_tile);
                            work.dst_indices[i] = static_cast<int>(p_thread_box_counts[bucket]++);
                        }
                    }
                    ++iwork;
                }
            }
        }
    }
310#endif
311
312 template <class PC, class GetBucket>
313 void buildCopies (const PC& pc, const ParticleCopyOp& op,
315 {
316 BL_PROFILE("ParticleCopyPlan::buildCopiesAtomicScatter");
317
318 auto* p_dst_box_counts = m_box_counts_d.dataPtr();
319
320 forEachCopyBatch(pc, op,
321 [&op, &getBucket, p_dst_box_counts] (int lev, TileKey const& index,
322 int num_copies, Gpu::DeviceVector<int>& dst_indices)
323 {
324 const auto* p_boxes = op.m_boxes[lev].at(index).dataPtr();
325 const auto* p_levs = op.m_levels[lev].at(index).dataPtr();
326 const auto* p_tiles = op.m_tiles[lev].at(index).dataPtr();
327 auto* p_dst_indices = dst_indices.dataPtr();
328
329 amrex::ParallelFor(num_copies, [=] AMREX_GPU_DEVICE (int i)
330 {
331 int dst_box = p_boxes[i];
332 if (dst_box >= 0)
333 {
334 int dst_tile = p_tiles[i];
335 int dst_lev = p_levs[i];
336 int dst_index = static_cast<int>(HostDevice::Atomic::FetchAdd(
337 &p_dst_box_counts[getBucket(dst_lev, dst_box, dst_tile)], 1U));
338 p_dst_indices[i] = dst_index;
339 }
340 });
341 });
342 }
343
344 void finalizeBuildBoxCounts (BuildWorkspace const& workspace, bool use_host_box_counters)
345 {
346 if (use_host_box_counters) {
348 workspace.h_box_counts.begin(), workspace.h_box_counts.end(),
349 m_box_counts_d.begin());
350 }
351
353 m_box_offsets.begin());
354 }
355
356public:
357
359
363
370
372 int m_nrcvs = 0;
375
378
381
384
386
392
395
398
401
404
406
    //! Build the communication plan for the copies described by \p op.
    //!
    //! \param pc             the particle container (source of the buffer map)
    //! \param op             per-tile copy descriptors (dst box/tile/level)
    //! \param int_comp_mask  which integer components to communicate
    //! \param real_comp_mask which real components to communicate
    //! \param local          nonzero if only neighbor processes communicate
    //!
    //! Chooses the destination-index algorithm (stable host, OpenMP two-pass,
    //! or atomic scatter), computes per-bucket counts/offsets and the size in
    //! bytes of one "superparticle" in the buffer, then starts the MPI
    //! handshake via buildMPIStart().
    template <class PC, std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
    void build (const PC& pc,
                const ParticleCopyOp& op,
                const Vector<int>& int_comp_mask,
                const Vector<int>& real_comp_mask,
                int local)
    {
        BL_PROFILE("ParticleCopyPlan::build");

        ParmParse pp("particles");
        pp.query("do_one_sided_comms", m_do_one_sided_comms);
        const int num_buckets = pc.BufferMap().numBuckets();

        m_local = local;
        if (local)
        {
            m_neighbor_procs = pc.NeighborProcs(local);
        }
        else
        {
            // NOTE(review): a line resizing m_neighbor_procs (one entry per
            // rank) is not visible in this view; confirm it precedes the iota.
            std::iota(m_neighbor_procs.begin(), m_neighbor_procs.end(), 0);
        }

        m_box_counts_d.resize(0);
        m_box_counts_d.resize(num_buckets+1, 0);
        m_box_offsets.resize(num_buckets+1);

#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
        constexpr bool use_host_bucket_counters = true;
#else
        constexpr bool use_host_bucket_counters = false;
#endif
        BuildWorkspace workspace(num_buckets);
        // Host-side counters are needed for reproducible ordering or when the
        // OpenMP two-pass algorithm is in use.
        bool use_host_box_counters = pc.stableRedistribute() || use_host_bucket_counters;
        if (use_host_box_counters) {
            workspace.h_box_counts.resize(m_box_counts_d.size(), 0);
        }

        if (pc.stableRedistribute())
        {
            buildCopies(pc, op, StableOrderedAlgorithm{}, workspace, pc.BufferMap().getHostBucketFunctor());
        }
        else
        {
#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
            buildCopies(pc, op, TwoPassHostAlgorithm{}, workspace, pc.BufferMap().getBucketFunctor());
#else
            buildCopies(pc, op, AtomicScatterAlgorithm{}, workspace, pc.BufferMap().getBucketFunctor());
#endif
        }

        finalizeBuildBoxCounts(workspace, use_host_box_counters);

        m_box_counts_h.resize(m_box_counts_d.size());
        // NOTE(review): the head of a Gpu::copy(Gpu::deviceToHost, ...) call
        // mirroring the device counts into m_box_counts_h is not visible here.
            m_box_counts_h.begin());

        m_snd_pad_correction_h.resize(0);
        // NOTE(review): lines initializing m_snd_pad_correction_h and the
        // head of the copy into m_snd_pad_correction_d are not visible here.

            m_snd_pad_correction_d.begin());

        d_int_comp_mask.resize(int_comp_mask.size());
        Gpu::copyAsync(Gpu::hostToDevice, int_comp_mask.begin(), int_comp_mask.end(),
                       d_int_comp_mask.begin());
        d_real_comp_mask.resize(real_comp_mask.size());
        Gpu::copyAsync(Gpu::hostToDevice, real_comp_mask.begin(), real_comp_mask.end(),
                       d_real_comp_mask.begin());

        // NOTE(review): one line (possibly a stream synchronize after the
        // async copies above) is not visible in this view.

        int NStructReal = PC::ParticleContainerType::NStructReal;
        int NStructInt = PC::ParticleContainerType::NStructInt;

        // Count the real components that travel in the buffer.  For AoS
        // (non-SoA) particles the first AMREX_SPACEDIM + NStructReal entries
        // are carried inside the particle struct itself, so they are skipped.
        int num_real_comm_comp = 0;
        int comm_comps_start = 0;
        if constexpr (!PC::ParticleType::is_soa_particle) {
            comm_comps_start += AMREX_SPACEDIM + NStructReal;
        }
        for (int i = comm_comps_start; i < std::ssize(real_comp_mask); ++i) {
            if (real_comp_mask[i]) {++num_real_comm_comp;}
        }

        // Likewise, the first 2 + NStructInt integer entries (presumably id
        // and cpu plus struct ints) live in the particle struct.
        int num_int_comm_comp = 0;
        for (int i = 2 + NStructInt; i < std::ssize(int_comp_mask); ++i) {
            if (int_comp_mask[i]) {++num_int_comm_comp;}
        }

        // Bytes occupied by a single particle in the communication buffer.
        if constexpr (PC::ParticleType::is_soa_particle) {
            m_superparticle_size = sizeof(uint64_t); // idcpu
        } else {
            m_superparticle_size = sizeof(typename PC::ParticleType);
        }
        m_superparticle_size += num_real_comm_comp * sizeof(typename PC::ParticleType::RealType)
            + num_int_comm_comp * sizeof(int);

        buildMPIStart(pc, pc.BufferMap(), m_superparticle_size);
    }
508
509 void clear ();
510
511 void buildMPIFinish (const ParticleBufferMap& map);
512
513private:
514
515 void buildMPIStart (const ParticleContainerBase& pc, const ParticleBufferMap& map, Long psize);
516
517 //
    // Snds - a Vector with the number of bytes that this process will send to each proc.
519 // Rcvs - a Vector that, after calling this method, will contain the
520 // number of bytes this process will receive from each proc.
521 //
522 void doHandShake (const ParticleContainerBase& pc, const Vector<Long>& Snds, Vector<Long>& Rcvs) const;
523
524 //
525 // In the local version of this method, each proc knows which other
526 // procs it could possibly receive messages from, meaning we can do
527 // this purely with point-to-point communication.
528 //
529 void doHandShakeLocal (const Vector<Long>& Snds, Vector<Long>& Rcvs) const;
530
531 //
532 // In the global version, we don't know who we'll receive from, so we
533 // need to do some collective communication first.
534 //
535 static void doHandShakeReduceScatter (const Vector<Long>& Snds, Vector<Long>& Rcvs);
536
537 //
538 // Another version of the global handshake implemented with MPI-3
539 // one-sided communication.
540 //
541 static void doHandShakeOneSided (const ParticleContainerBase& pc,
542 const Vector<Long>& Snds, Vector<Long>& Rcvs);
543
544 //
545 // Another version of the above that is implemented using MPI All-to-All
546 //
547 static void doHandShakeAllToAll (const Vector<Long>& Snds, Vector<Long>& Rcvs);
548
549 bool m_local = false;
550 int m_do_one_sided_comms = 0;
551};
552
554{
555 const unsigned int* m_box_offsets;
556 const std::size_t* m_pad_correction;
557
560
562 : m_box_offsets(plan.m_box_offsets.dataPtr()),
563 m_pad_correction(plan.m_snd_pad_correction_d.dataPtr()),
564 m_get_pid(map.getPIDFunctor()),
565 m_get_bucket(map.getBucketFunctor())
566 {}
567
569 Long operator() (int dst_box, int dst_tile, int dst_lev, std::size_t psize, int i) const
570 {
571 int dst_pid = m_get_pid(dst_lev, dst_box, dst_tile);
572 Long dst_offset = Long(psize)*(m_box_offsets[m_get_bucket(dst_lev, dst_box, dst_tile)] + i);
573 dst_offset += Long(m_pad_correction[dst_pid]);
574 return dst_offset;
575 }
576};
577
//! Pack the particles described by \p op into the send buffer \p snd_buffer
//! at the offsets computed by \p plan, applying periodic shifts to the packed
//! positions where the copy descriptors request it.  Falls back to a pinned
//! host buffer when the buffer's device arena is out of memory.
template <class PC, class Buffer,
          std::enable_if_t<IsParticleContainer<PC>::value &&
                           std::is_base_of_v<PolymorphicArenaAllocator<typename Buffer::value_type>,
                                             Buffer>, int> foo = 0>
void packBuffer (const PC& pc, const ParticleCopyOp& op, const ParticleCopyPlan& plan,
                 Buffer& snd_buffer)
{
    BL_PROFILE("amrex::packBuffer");

    Long psize = plan.superParticleSize();

    int num_levels = op.numLevels();
    int num_buckets = pc.BufferMap().numBuckets();

    // Total bytes needed: either the last entry of the bucket-offset scan
    // (no remote sends recorded) or the padded per-rank send offsets.
    std::size_t total_buffer_size = 0;
    if (plan.m_snd_offsets.empty())
    {
        unsigned int np = 0;
        Gpu::copy(Gpu::deviceToHost, plan.m_box_offsets.begin() + num_buckets,
                  plan.m_box_offsets.begin() + num_buckets + 1, &np);
        total_buffer_size = np*psize;
    }
    else
    {
        total_buffer_size = plan.m_snd_offsets.back();
    }

    // If device memory is exhausted, fall back to a pinned host buffer.
    if (! snd_buffer.arena()->hasFreeDeviceMemory(total_buffer_size)) {
        snd_buffer.clear();
        snd_buffer.setArena(The_Pinned_Arena());
    }
    snd_buffer.resize(total_buffer_size);

    const auto* p_comm_real = plan.d_real_comp_mask.dataPtr();
    const auto* p_comm_int = plan.d_int_comp_mask.dataPtr();

    const auto plo = pc.Geom(0).ProbLoArray();
    const auto phi = pc.Geom(0).ProbHiArray();
    const auto is_per = pc.Geom(0).isPeriodicArray();
#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
    // One batch of packing work for the OpenMP path (one source tile).
    struct OmpPackWork
    {
        typename PC::ParticleTileType const* src_tile = nullptr;
        const int* boxes = nullptr;              // destination box per copy (negative: skip)
        const int* levels = nullptr;             // destination level per copy
        const int* tiles = nullptr;              // destination tile per copy
        const int* src_indices = nullptr;        // source particle index per copy
        const IntVect* periodic_shift = nullptr; // requested periodic shift per copy
        const int* dst_indices = nullptr;        // destination slot per copy (from the plan)
        int num_copies = 0;
    };
    Vector<OmpPackWork> omp_pack_work;
    Vector<Long> omp_pack_offsets(1, 0);   // exclusive prefix sums of batch sizes
#endif
    for (int lev = 0; lev < num_levels; ++lev)
    {
        auto& plev = pc.GetParticles(lev);
        for (auto& kv : plev)
        {
            auto index = kv.first;
            auto& src_tile = plev.at(index);
            int num_copies = op.numCopies(index, lev);
            if (num_copies == 0) { continue; }

            const auto* p_boxes = op.m_boxes[lev].at(index).dataPtr();
            const auto* p_levels = op.m_levels[lev].at(index).dataPtr();
            const auto* p_tiles = op.m_tiles[lev].at(index).dataPtr();
            const auto* p_src_indices = op.m_src_indices[lev].at(index).dataPtr();
            const auto* p_periodic_shift = op.m_periodic_shift[lev].at(index).dataPtr();
            const auto* p_dst_indices = plan.m_dst_indices[lev].at(index).dataPtr();
#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
            // Defer the actual packing; batches are processed in parallel below.
            omp_pack_work.push_back({&src_tile, p_boxes, p_levels, p_tiles,
                                     p_src_indices, p_periodic_shift, p_dst_indices, num_copies});
            omp_pack_offsets.push_back(omp_pack_offsets.back() + num_copies);
#else
            const auto& ptd = src_tile.getConstParticleTileData();
            auto* p_snd_buffer = snd_buffer.dataPtr();
            GetSendBufferOffset get_offset(plan, pc.BufferMap());
            amrex::ParallelFor(num_copies, [=] AMREX_GPU_DEVICE (int i)
            {
                int dst_box = p_boxes[i];
                if (dst_box >= 0)   // negative box id means this copy is skipped
                {
                    int dst_tile = p_tiles[i];
                    int dst_lev = p_levels[i];
                    auto dst_offset = get_offset(dst_box, dst_tile, dst_lev, psize, p_dst_indices[i]);
                    int src_index = p_src_indices[i];
                    ptd.packParticleData(p_snd_buffer, src_index, dst_offset, p_comm_real, p_comm_int);

                    // Apply the requested periodic shift directly to the
                    // position data already packed into the buffer.
                    const IntVect& pshift = p_periodic_shift[i];
                    bool do_periodic_shift =
                        AMREX_D_TERM( (is_per[0] && pshift[0] != 0),
                                   || (is_per[1] && pshift[1] != 0),
                                   || (is_per[2] && pshift[2] != 0) );

                    if (do_periodic_shift)
                    {
                        ParticleReal pos[AMREX_SPACEDIM];
                        Long pos_offset = dst_offset;
                        // for pure SoA positions come after idcpu
                        if constexpr (PC::ParticleType::is_soa_particle) {
                            pos_offset += sizeof(uint64_t);
                        }
                        amrex::Gpu::memcpy(&pos[0], &p_snd_buffer[pos_offset],
                                           AMREX_SPACEDIM*sizeof(ParticleReal));
                        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
                        {
                            if (! is_per[idim]) { continue; }
                            if (pshift[idim] > 0) {
                                pos[idim] += phi[idim] - plo[idim];
                            } else if (pshift[idim] < 0) {
                                pos[idim] -= phi[idim] - plo[idim];
                            }
                        }
                        amrex::Gpu::memcpy(&p_snd_buffer[pos_offset], &pos[0],
                                           AMREX_SPACEDIM*sizeof(ParticleReal));
                    }
                }
            });
#endif
        }
    }

#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
    if (!omp_pack_work.empty())
    {
        auto* p_snd_buffer = snd_buffer.dataPtr();
        GetSendBufferOffset get_offset(plan, pc.BufferMap());
        const Long total_num_copies = omp_pack_offsets.back();

#pragma omp parallel
        {
            int thread_num = OpenMP::get_thread_num();
            int num_threads = OpenMP::get_num_threads();
            // Evenly split the global copy range across threads.
            Long ibegin = thread_num*total_num_copies/num_threads;
            Long iend = (thread_num+1)*total_num_copies/num_threads;

            if (ibegin < iend)
            {
                // Locate the first batch overlapping this thread's slice.
                int iwork = static_cast<int>(std::upper_bound(omp_pack_offsets.begin(),
                                                              omp_pack_offsets.end(),
                                                              ibegin)
                                             - omp_pack_offsets.begin()) - 1;
                while (iwork < static_cast<int>(omp_pack_work.size()) &&
                       omp_pack_offsets[iwork] < iend)
                {
                    auto const& work = omp_pack_work[iwork];
                    auto const& ptd = work.src_tile->getConstParticleTileData();
                    // Clip the batch to this thread's slice.
                    Long global_begin = std::max(ibegin, omp_pack_offsets[iwork]);
                    Long global_end = std::min(iend, omp_pack_offsets[iwork+1]);
                    int local_begin = static_cast<int>(global_begin - omp_pack_offsets[iwork]);
                    int local_end = static_cast<int>(global_end - omp_pack_offsets[iwork]);
                    for (int i = local_begin; i < local_end; ++i)
                    {
                        int dst_box = work.boxes[i];
                        if (dst_box >= 0)
                        {
                            int dst_tile = work.tiles[i];
                            int dst_lev = work.levels[i];
                            auto dst_offset = get_offset(dst_box, dst_tile, dst_lev, psize,
                                                         work.dst_indices[i]);
                            int src_index = work.src_indices[i];
                            ptd.packParticleData(p_snd_buffer, src_index, dst_offset,
                                                 p_comm_real, p_comm_int);

                            const IntVect& pshift = work.periodic_shift[i];
                            bool do_periodic_shift =
                                AMREX_D_TERM( (is_per[0] && pshift[0] != 0),
                                           || (is_per[1] && pshift[1] != 0),
                                           || (is_per[2] && pshift[2] != 0) );

                            if (do_periodic_shift)
                            {
                                ParticleReal pos[AMREX_SPACEDIM];
                                Long pos_offset = dst_offset;
                                // for pure SoA positions come after idcpu
                                if constexpr (PC::ParticleType::is_soa_particle) {
                                    pos_offset += sizeof(uint64_t);
                                }
                                amrex::Gpu::memcpy(&pos[0], &p_snd_buffer[pos_offset],
                                                   AMREX_SPACEDIM*sizeof(ParticleReal));
                                for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
                                {
                                    if (! is_per[idim]) { continue; }
                                    if (pshift[idim] > 0) {
                                        pos[idim] += phi[idim] - plo[idim];
                                    } else if (pshift[idim] < 0) {
                                        pos[idim] -= phi[idim] - plo[idim];
                                    }
                                }
                                amrex::Gpu::memcpy(&p_snd_buffer[pos_offset], &pos[0],
                                                   AMREX_SPACEDIM*sizeof(ParticleReal));
                            }
                        }
                    }
                    ++iwork;
                }
            }
        }
    }
#endif
}
779
//! Unpack locally-copied particles from \p snd_buffer back into the tiles of
//! \p pc.  \p policy (an unpack policy with a resizeTiles member) controls
//! how destination tiles grow and where the new particles are placed.
template <class PC, class Buffer, class UnpackPolicy,
          std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
void unpackBuffer (PC& pc, const ParticleCopyPlan& plan, const Buffer& snd_buffer, UnpackPolicy const& policy)
{
    BL_PROFILE("amrex::unpackBuffer");

    using PTile = typename PC::ParticleTileType;

    int num_levels = pc.BufferMap().numLevels();
    Long psize = plan.superParticleSize();

    // count how many particles we have to add to each tile
    std::vector<int> sizes;
    std::vector<PTile*> tiles;
    for (int lev = 0; lev < num_levels; ++lev)
    {
        for(MFIter mfi = pc.MakeMFIter(lev); mfi.isValid(); ++mfi)
        {
            int gid = mfi.index();
            int tid = mfi.LocalTileIndex();
            auto& tile = pc.DefineAndReturnParticleTile(lev, gid, tid);
            int num_copies = plan.m_box_counts_h[pc.BufferMap().gridAndTileAndLevToBucket(gid, tid, lev)];
            sizes.push_back(num_copies);
            tiles.push_back(&tile);
        }
    }

    // resize the tiles and compute offsets
    std::vector<int> offsets;
    policy.resizeTiles(tiles, sizes, offsets);

    const auto* p_comm_real = plan.d_real_comp_mask.dataPtr();
    const auto* p_comm_int = plan.d_int_comp_mask.dataPtr();

#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
    // One batch of unpack work for the OpenMP path (one destination tile).
    struct OmpUnpackWork
    {
        PTile* tile = nullptr;
        int gid = 0;      // destination grid id
        int tid = 0;      // destination tile id
        int lev = 0;      // destination level
        int offset = 0;   // first free slot in the tile
        int size = 0;     // number of particles to unpack
    };
    Vector<OmpUnpackWork> omp_unpack_work;
    Vector<Long> omp_unpack_offsets(1, 0);   // exclusive prefix sums of batch sizes
#endif

    // local unpack
    int uindex = 0;
    for (int lev = 0; lev < num_levels; ++lev)
    {
        auto& plev = pc.GetParticles(lev);
        for(MFIter mfi = pc.MakeMFIter(lev); mfi.isValid(); ++mfi)
        {
            int gid = mfi.index();
            int tid = mfi.LocalTileIndex();
            auto index = std::make_pair(gid, tid);

            auto& tile = plev[index];

            GetSendBufferOffset get_offset(plan, pc.BufferMap());

            int offset = offsets[uindex];
            int size = sizes[uindex];
            ++uindex;

#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
            // Defer the actual unpacking; batches run in parallel below.
            omp_unpack_work.push_back({&tile, gid, tid, lev, offset, size});
            omp_unpack_offsets.push_back(omp_unpack_offsets.back() + size);
#else
            auto p_snd_buffer = snd_buffer.dataPtr();
            auto ptd = tile.getParticleTileData();
            amrex::ParallelFor(size, [=] AMREX_GPU_DEVICE (int i)
            {
                auto src_offset = get_offset(gid, tid, lev, psize, i);
                int dst_index = offset + i;
                ptd.unpackParticleData(p_snd_buffer, src_offset, dst_index, p_comm_real, p_comm_int);
            });
#endif
        }
    }

#if defined(AMREX_USE_OMP) && !defined(AMREX_USE_GPU)
    if (!omp_unpack_work.empty())
    {
        GetSendBufferOffset get_offset(plan, pc.BufferMap());
        auto p_snd_buffer = snd_buffer.dataPtr();
        const Long total_num_copies = omp_unpack_offsets.back();

#pragma omp parallel
        {
            int thread_num = OpenMP::get_thread_num();
            int num_threads = OpenMP::get_num_threads();
            // Evenly split the global copy range across threads.
            Long ibegin = thread_num*total_num_copies/num_threads;
            Long iend = (thread_num+1)*total_num_copies/num_threads;

            if (ibegin < iend)
            {
                // Locate the first batch overlapping this thread's slice.
                int iwork = static_cast<int>(std::upper_bound(omp_unpack_offsets.begin(),
                                                              omp_unpack_offsets.end(),
                                                              ibegin)
                                             - omp_unpack_offsets.begin()) - 1;
                while (iwork < static_cast<int>(omp_unpack_work.size()) &&
                       omp_unpack_offsets[iwork] < iend)
                {
                    auto const& work = omp_unpack_work[iwork];
                    auto ptd = work.tile->getParticleTileData();
                    // Clip the batch to this thread's slice.
                    Long global_begin = std::max(ibegin, omp_unpack_offsets[iwork]);
                    Long global_end = std::min(iend, omp_unpack_offsets[iwork+1]);
                    int local_begin = static_cast<int>(global_begin - omp_unpack_offsets[iwork]);
                    int local_end = static_cast<int>(global_end - omp_unpack_offsets[iwork]);
                    for (int i = local_begin; i < local_end; ++i)
                    {
                        auto src_offset = get_offset(work.gid, work.tid, work.lev, psize, i);
                        int dst_index = work.offset + i;
                        ptd.unpackParticleData(p_snd_buffer, src_offset, dst_index,
                                               p_comm_real, p_comm_int);
                    }
                    ++iwork;
                }
            }
        }
    }
#endif
}
906
//! Post the non-blocking MPI receives and sends for the particle data in
//! \p snd_buffer; sizes \p rcv_buffer and stores the requests in \p plan
//! (completed later by communicateParticlesFinish).
template <class PC, class SndBuffer, class RcvBuffer,
          std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
void communicateParticlesStart (const PC& pc, ParticleCopyPlan& plan, const SndBuffer& snd_buffer, RcvBuffer& rcv_buffer)
{
    BL_PROFILE("amrex::communicateParticlesStart");

#ifdef AMREX_USE_MPI
    Long psize = plan.superParticleSize();
    const int NProcs = ParallelContext::NProcsSub();
    const int MyProc = ParallelContext::MyProcSub();

    if (NProcs == 1) { return; }

    Vector<int> RcvProc;    // ranks we will receive from
    Vector<Long> rOffset;   // byte offset of each rank's data in rcv_buffer

    plan.m_rcv_pad_correction_h.resize(0);
    plan.m_rcv_pad_correction_h.push_back(0);

    // Compute aligned receive offsets; each message is padded up to the
    // alignment of the MPI data type selected for its byte count.
    Long TotRcvBytes = 0;
    for (int i = 0; i < NProcs; ++i) {
        if (plan.m_rcv_num_particles[i] > 0) {
            RcvProc.push_back(i);
            Long nbytes = plan.m_rcv_num_particles[i]*psize;
            std::size_t acd = ParallelDescriptor::sizeof_selected_comm_data_type(nbytes);
            TotRcvBytes = Long(amrex::aligned_size(acd, TotRcvBytes));
            rOffset.push_back(TotRcvBytes);
            TotRcvBytes += Long(amrex::aligned_size(acd, nbytes));
            plan.m_rcv_pad_correction_h.push_back(plan.m_rcv_pad_correction_h.back() + nbytes);
        }
    }

    // Convert the running unpadded byte counts into per-sender padding
    // corrections (aligned offset minus unpadded offset).
    // NOTE(review): the loop bound reads plan.m_nrcvs, which is assigned
    // from RcvProc.size() only further below -- confirm the intended order.
    for (int i = 0; i < plan.m_nrcvs; ++i)
    {
        plan.m_rcv_pad_correction_h[i] = rOffset[i] - plan.m_rcv_pad_correction_h[i];
    }

    // NOTE(review): lines resizing plan.m_rcv_pad_correction_d and the head
    // of a host-to-device copy into it are not visible in this view.
        plan.m_rcv_pad_correction_d.begin());

    rcv_buffer.resize(TotRcvBytes);

    plan.m_nrcvs = int(RcvProc.size());

    plan.m_particle_rstats.resize(0);
    plan.m_particle_rstats.resize(plan.m_nrcvs);

    plan.m_particle_rreqs.resize(0);
    plan.m_particle_rreqs.resize(plan.m_nrcvs);

    plan.m_particle_sstats.resize(0);
    plan.m_particle_sreqs.resize(0);

    const int SeqNum = ParallelDescriptor::SeqNum();

    // Post receives.
    for (int i = 0; i < plan.m_nrcvs; ++i) {
        const auto Who = RcvProc[i];
        const auto offset = rOffset[i];
        Long nbytes = plan.m_rcv_num_particles[Who]*psize;
        std::size_t acd = ParallelDescriptor::sizeof_selected_comm_data_type(nbytes);
        const auto Cnt = amrex::aligned_size(acd, nbytes);

        AMREX_ASSERT(Cnt > 0);
        AMREX_ASSERT(Who >= 0 && Who < NProcs);
        AMREX_ASSERT(amrex::aligned_size(acd, nbytes) % acd == 0);

        plan.m_particle_rreqs[i] =
            ParallelDescriptor::Arecv((char*) (rcv_buffer.dataPtr() + offset), Cnt, Who, SeqNum, ParallelContext::CommunicatorSub()).req();
    }

    if (plan.m_NumSnds == 0) { return; }

    // Send.
    for (int i = 0; i < NProcs; ++i)
    {
        if (i == MyProc) { continue; }
        const auto Who = i;
        const auto Cnt = plan.m_snd_counts[i];
        if (Cnt == 0) { continue; }

        auto snd_offset = plan.m_snd_offsets[i];
        AMREX_ASSERT(plan.m_snd_counts[i] % ParallelDescriptor::sizeof_selected_comm_data_type(plan.m_snd_num_particles[i]*psize) == 0);
        AMREX_ASSERT(Who >= 0 && Who < NProcs);

        // NOTE(review): the tail of this Asend call (communicator argument
        // and .req()) is not visible in this view.
        plan.m_particle_sreqs.push_back(ParallelDescriptor::Asend((char const*)(snd_buffer.dataPtr()+snd_offset), Cnt, Who, SeqNum,
    }

    plan.m_particle_sstats.resize(plan.m_particle_sreqs.size());

#else
    amrex::ignore_unused(pc,plan,snd_buffer,rcv_buffer);
#endif // MPI
}
1004
1005void communicateParticlesFinish (const ParticleCopyPlan& plan);
1006
//! Unpack particles received from other MPI ranks (in \p rcv_buffer) into the
//! tiles of \p pc, using the box/tile/level/sender metadata gathered during
//! the MPI handshake.  \p policy controls tile resizing, as in unpackBuffer().
template <class PC, class Buffer, class UnpackPolicy,
          std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
void unpackRemotes (PC& pc, const ParticleCopyPlan& plan, Buffer& rcv_buffer, UnpackPolicy const& policy)
{
    BL_PROFILE("amrex::unpackRemotes");

#ifdef AMREX_USE_MPI
    const int NProcs = ParallelContext::NProcsSub();
    if (NProcs == 1) { return; }

    const int MyProc = ParallelContext::MyProcSub();
    amrex::ignore_unused(MyProc);
    using PTile = typename PC::ParticleTileType;

    if (plan.m_nrcvs > 0)
    {
        const auto* p_comm_real = plan.d_real_comp_mask.dataPtr();
        const auto* p_comm_int = plan.d_int_comp_mask.dataPtr();
        auto* p_rcv_buffer = rcv_buffer.dataPtr();

        // Gather the per-destination-tile receive counts so the tiles can be
        // resized once, up front.
        std::vector<int> sizes;
        std::vector<PTile*> tiles;
        for (int i = 0; i < std::ssize(plan.m_rcv_box_counts); ++i)
        {
            int copy_size = plan.m_rcv_box_counts[i];
            int lev = plan.m_rcv_box_levs[i];
            int gid = plan.m_rcv_box_ids[i];
            int tid = plan.m_rcv_box_tids[i];
            auto& tile = pc.DefineAndReturnParticleTile(lev, gid, tid);
            sizes.push_back(copy_size);
            tiles.push_back(&tile);
        }

        Vector<int> offsets;
        policy.resizeTiles(tiles, sizes, offsets);
        int uindex = 0;
        // procindex selects which sender's pad correction applies; it is
        // advanced whenever the sending rank (m_rcv_box_pids) changes.
        int procindex = 0, rproc = plan.m_rcv_box_pids[0];
        for (int i = 0; i < std::ssize(plan.m_rcv_box_counts); ++i)
        {
            int lev = plan.m_rcv_box_levs[i];
            int gid = plan.m_rcv_box_ids[i];
            int tid = plan.m_rcv_box_tids[i];
            auto offset = plan.m_rcv_box_offsets[i];
            procindex = (rproc == plan.m_rcv_box_pids[i]) ? procindex : procindex+1;
            rproc = plan.m_rcv_box_pids[i];

            auto& tile = pc.DefineAndReturnParticleTile(lev, gid, tid);
            auto ptd = tile.getParticleTileData();

            AMREX_ASSERT(MyProc ==
                         ParallelContext::global_to_local_rank(pc.ParticleDistributionMap(lev)[gid]));

            int dst_offset = offsets[uindex];
            int size = sizes[uindex];
            ++uindex;

            Long psize = plan.superParticleSize();
            const auto* p_pad_adjust = plan.m_rcv_pad_correction_d.dataPtr();

            amrex::ParallelForOMP(size, [=] AMREX_GPU_DEVICE (int ip) {
                // Source bytes: particle slot plus the sender's padding shift.
                Long src_offset = psize * static_cast<Long>(offset + ip)
                    + static_cast<Long>(p_pad_adjust[procindex]);
                int dst_index = dst_offset + ip;
                ptd.unpackParticleData(p_rcv_buffer, src_offset, dst_index,
                                       p_comm_real, p_comm_int);
            });
        }
    }
#else
    amrex::ignore_unused(pc,plan,rcv_buffer,policy);
#endif // MPI
}
1080
1081} // namespace amrex
1082
1083#endif // AMREX_PARTICLECOMMUNICATION_H_
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define AMREX_ASSERT(EX)
Definition AMReX_BLassert.H:38
#define AMREX_FORCE_INLINE
Definition AMReX_Extension.H:119
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
amrex::ParmParse pp
Input file parser instance for the given namespace.
Definition AMReX_HypreIJIface.cpp:15
Array4< int const > offset
Definition AMReX_HypreMLABecLap.cpp:1139
#define AMREX_D_TERM(a, b, c)
Definition AMReX_SPACE.H:172
Iterator for looping over tiles and boxes of amrex::FabArray based containers.
Definition AMReX_MFIter.H:88
Dynamically allocated vector for trivially copyable data.
Definition AMReX_PODVector.H:308
size_type size() const noexcept
Definition AMReX_PODVector.H:648
iterator begin() noexcept
Definition AMReX_PODVector.H:674
iterator end() noexcept
Definition AMReX_PODVector.H:678
T * dataPtr() noexcept
Definition AMReX_PODVector.H:670
MPI_Request req() const
Definition AMReX_ParallelDescriptor.H:74
Parse Parameters From Command Line and Input Files.
Definition AMReX_ParmParse.H:349
int query(std::string_view name, bool &ref, int ival=FIRST) const
Same as querykth() but searches for the last occurrence of name.
Definition AMReX_ParmParse.cpp:1947
Definition AMReX_ParticleBufferMap.H:59
Definition AMReX_ParticleContainerBase.H:43
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
T * dataPtr() noexcept
get access to the underlying data pointer
Definition AMReX_Vector.H:49
Long size() const noexcept
Definition AMReX_Vector.H:53
amrex_particle_real ParticleReal
Floating Point Type for Particles.
Definition AMReX_REAL.H:90
amrex_long Long
Definition AMReX_INT.H:30
OutIter exclusive_scan(InIter begin, InIter end, OutIter result)
Definition AMReX_Scan.H:1193
void ParallelForOMP(T n, L const &f) noexcept
Performance-portable kernel launch function with optional OpenMP threading.
Definition AMReX_GpuLaunch.H:326
Arena * The_Pinned_Arena()
Definition AMReX_Arena.cpp:860
void copy(HostToDevice, InIter begin, InIter end, OutIter result) noexcept
A host-to-device copy routine. Note this is just a wrapper around memcpy, so it assumes contiguous st...
Definition AMReX_GpuContainers.H:128
void copyAsync(HostToDevice, InIter begin, InIter end, OutIter result) noexcept
A host-to-device copy routine. Note this is just a wrapper around memcpy, so it assumes contiguous st...
Definition AMReX_GpuContainers.H:228
static constexpr DeviceToHost deviceToHost
Definition AMReX_GpuContainers.H:106
static constexpr HostToDevice hostToDevice
Definition AMReX_GpuContainers.H:105
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:310
__host__ __device__ void * memcpy(void *dest, const void *src, std::size_t count)
Definition AMReX_GpuUtility.H:226
__host__ __device__ AMREX_FORCE_INLINE T FetchAdd(T *const sum, T const value) noexcept
Definition AMReX_GpuAtomic.H:644
constexpr int get_thread_num()
Definition AMReX_OpenMP.H:37
constexpr int get_num_threads()
Definition AMReX_OpenMP.H:35
constexpr int get_max_threads()
Definition AMReX_OpenMP.H:36
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition AMReX_ParallelContext.H:70
int MyProcSub() noexcept
my sub-rank in current frame
Definition AMReX_ParallelContext.H:76
int global_to_local_rank(int rank) noexcept
Definition AMReX_ParallelContext.H:98
int NProcsSub() noexcept
number of ranks in current frame
Definition AMReX_ParallelContext.H:74
Message Asend(const T *, size_t n, int pid, int tag)
Definition AMReX_ParallelDescriptor.H:1172
int SeqNum() noexcept
Returns sequential message sequence numbers, usually used as tags for send/recv.
Definition AMReX_ParallelDescriptor.H:696
Message Arecv(T *, size_t n, int pid, int tag)
Definition AMReX_ParallelDescriptor.H:1214
Definition AMReX_Amr.cpp:50
__host__ __device__ void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition AMReX.H:139
void communicateParticlesStart(const PC &pc, ParticleCopyPlan &plan, const SndBuffer &snd_buffer, RcvBuffer &rcv_buffer)
Definition AMReX_ParticleCommunication.H:909
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition AMReX_CTOParallelForImpl.H:193
void unpackRemotes(PC &pc, const ParticleCopyPlan &plan, Buffer &rcv_buffer, UnpackPolicy const &policy)
Definition AMReX_ParticleCommunication.H:1009
void communicateParticlesFinish(const ParticleCopyPlan &plan)
Definition AMReX_ParticleCommunication.cpp:445
const int[]
Definition AMReX_BLProfiler.cpp:1664
std::size_t aligned_size(std::size_t align_requirement, std::size_t size) noexcept
Given a minimum required size in bytes, this returns the smallest size greater or equal to size that ...
Definition AMReX_Arena.H:33
void unpackBuffer(PC &pc, const ParticleCopyPlan &plan, const Buffer &snd_buffer, UnpackPolicy const &policy)
Definition AMReX_ParticleCommunication.H:782
void packBuffer(const PC &pc, const ParticleCopyOp &op, const ParticleCopyPlan &plan, Buffer &snd_buffer)
Definition AMReX_ParticleCommunication.H:582
Definition AMReX_ParticleBufferMap.H:38
Definition AMReX_ParticleBufferMap.H:14
Definition AMReX_ParticleCommunication.H:554
const unsigned int * m_box_offsets
Definition AMReX_ParticleCommunication.H:555
GetPID m_get_pid
Definition AMReX_ParticleCommunication.H:558
GetBucket m_get_bucket
Definition AMReX_ParticleCommunication.H:559
const std::size_t * m_pad_correction
Definition AMReX_ParticleCommunication.H:556
GetSendBufferOffset(const ParticleCopyPlan &plan, const ParticleBufferMap &map)
Definition AMReX_ParticleCommunication.H:561
__device__ Long operator()(int dst_box, int dst_tile, int dst_lev, std::size_t psize, int i) const
Definition AMReX_ParticleCommunication.H:569
Definition AMReX_ParticleCommunication.H:27
void resizeTiles(std::vector< PTile * > &tiles, const std::vector< int > &sizes, std::vector< int > &offsets) const
Definition AMReX_ParticleCommunication.H:29
Definition AMReX_ParticleCommunication.H:66
void resize(int gid, int tid, int lev, int size)
Definition AMReX_ParticleCommunication.cpp:27
void setNumLevels(int num_levels)
Definition AMReX_ParticleCommunication.cpp:18
Vector< std::map< TileKey, Gpu::DeviceVector< IntVect > > > m_periodic_shift
Definition AMReX_ParticleCommunication.H:73
int numLevels() const
Definition AMReX_ParticleCommunication.H:88
Vector< std::map< TileKey, Gpu::DeviceVector< int > > > m_levels
Definition AMReX_ParticleCommunication.H:70
Vector< std::map< TileKey, Gpu::DeviceVector< int > > > m_boxes
Definition AMReX_ParticleCommunication.H:69
int numCopies(TileKey const &index, int lev) const
Definition AMReX_ParticleCommunication.H:81
std::pair< int, int > TileKey
Definition AMReX_ParticleCommunication.H:67
Vector< std::map< TileKey, Gpu::DeviceVector< int > > > m_tiles
Definition AMReX_ParticleCommunication.H:71
Vector< std::map< TileKey, Gpu::DeviceVector< int > > > m_src_indices
Definition AMReX_ParticleCommunication.H:72
void clear()
Definition AMReX_ParticleCommunication.cpp:9
Definition AMReX_ParticleCommunication.H:97
Definition AMReX_ParticleCommunication.H:111
BuildWorkspace(int a_num_buckets)
Definition AMReX_ParticleCommunication.H:112
Gpu::HostVector< unsigned int > h_box_counts
Definition AMReX_ParticleCommunication.H:120
int num_buckets
Definition AMReX_ParticleCommunication.H:119
Definition AMReX_ParticleCommunication.H:95
Definition AMReX_ParticleCommunication.H:96
Definition AMReX_ParticleCommunication.H:92
Vector< int > m_rcv_box_ids
Definition AMReX_ParticleCommunication.H:366
Vector< std::size_t > m_snd_offsets
Definition AMReX_ParticleCommunication.H:393
Vector< int > m_rcv_box_counts
Definition AMReX_ParticleCommunication.H:364
Vector< std::size_t > m_snd_counts
Definition AMReX_ParticleCommunication.H:394
void finalizeBuildBoxCounts(BuildWorkspace const &workspace, bool use_host_box_counters)
Definition AMReX_ParticleCommunication.H:344
Long m_NumSnds
Definition AMReX_ParticleCommunication.H:371
void buildMPIFinish(const ParticleBufferMap &map)
Definition AMReX_ParticleCommunication.cpp:223
Vector< int > m_neighbor_procs
Definition AMReX_ParticleCommunication.H:385
void clear()
Definition AMReX_ParticleCommunication.cpp:41
Vector< int > m_rcv_box_pids
Definition AMReX_ParticleCommunication.H:368
void buildCopies(const PC &pc, const ParticleCopyOp &op, AtomicScatterAlgorithm, BuildWorkspace &, GetBucket const &getBucket)
Definition AMReX_ParticleCommunication.H:313
Vector< int > m_rcv_box_levs
Definition AMReX_ParticleCommunication.H:369
void build(const PC &pc, const ParticleCopyOp &op, const Vector< int > &int_comp_mask, const Vector< int > &real_comp_mask, int local)
Definition AMReX_ParticleCommunication.H:408
Gpu::DeviceVector< int > d_real_comp_mask
Definition AMReX_ParticleCommunication.H:402
std::pair< int, int > TileKey
Definition AMReX_ParticleCommunication.H:93
Gpu::DeviceVector< std::size_t > m_snd_pad_correction_d
Definition AMReX_ParticleCommunication.H:397
void forEachCopyBatch(const PC &pc, const ParticleCopyOp &op, F &&f)
Definition AMReX_ParticleCommunication.H:130
Long m_superparticle_size
Definition AMReX_ParticleCommunication.H:403
Vector< int > m_rcv_box_tids
Definition AMReX_ParticleCommunication.H:367
Vector< Long > m_Snds
Definition AMReX_ParticleCommunication.H:387
Vector< MPI_Request > m_particle_sreqs
Definition AMReX_ParticleCommunication.H:380
Vector< std::map< TileKey, Gpu::DeviceVector< int > > > m_dst_indices
Definition AMReX_ParticleCommunication.H:358
Gpu::DeviceVector< unsigned int > m_box_counts_d
Definition AMReX_ParticleCommunication.H:360
Vector< Long > m_Rcvs
Definition AMReX_ParticleCommunication.H:388
Vector< int > m_rcv_box_offsets
Definition AMReX_ParticleCommunication.H:365
Vector< std::size_t > m_snd_pad_correction_h
Definition AMReX_ParticleCommunication.H:396
Vector< MPI_Status > m_particle_sstats
Definition AMReX_ParticleCommunication.H:379
Gpu::DeviceVector< unsigned int > m_box_offsets
Definition AMReX_ParticleCommunication.H:362
Gpu::DeviceVector< std::size_t > m_rcv_pad_correction_d
Definition AMReX_ParticleCommunication.H:400
Vector< std::size_t > m_rOffset
Definition AMReX_ParticleCommunication.H:390
Vector< MPI_Status > m_particle_rstats
Definition AMReX_ParticleCommunication.H:376
Vector< Long > m_snd_num_particles
Definition AMReX_ParticleCommunication.H:382
Vector< MPI_Request > m_particle_rreqs
Definition AMReX_ParticleCommunication.H:377
Long superParticleSize() const
Definition AMReX_ParticleCommunication.H:405
Gpu::HostVector< int > m_rcv_data
Definition AMReX_ParticleCommunication.H:391
void buildCopies(const PC &pc, const ParticleCopyOp &op, StableOrderedAlgorithm, BuildWorkspace &workspace, GetBucket const &getBucket)
Definition AMReX_ParticleCommunication.H:153
Vector< MPI_Status > m_build_stats
Definition AMReX_ParticleCommunication.H:373
Vector< int > m_RcvProc
Definition AMReX_ParticleCommunication.H:389
Vector< std::size_t > m_rcv_pad_correction_h
Definition AMReX_ParticleCommunication.H:399
int m_nrcvs
Definition AMReX_ParticleCommunication.H:372
Gpu::HostVector< unsigned int > m_box_counts_h
Definition AMReX_ParticleCommunication.H:361
Vector< MPI_Request > m_build_rreqs
Definition AMReX_ParticleCommunication.H:374
Vector< Long > m_rcv_num_particles
Definition AMReX_ParticleCommunication.H:383
Gpu::DeviceVector< int > d_int_comp_mask
Definition AMReX_ParticleCommunication.H:402
Definition AMReX_ParticleCommunication.H:42
void resizeTiles(std::vector< PTile * > &tiles, const std::vector< int > &sizes, std::vector< int > &offsets) const
Definition AMReX_ParticleCommunication.H:44