Block-Structured AMR Software Framework
Loading...
Searching...
No Matches
AMReX_ParticleContainerI.H
Go to the documentation of this file.
2
3#include <algorithm>
4#include <iterator>
5#include <set>
6#include <stdexcept>
7#include <string>
8#include <type_traits>
9#include <utility>
10#include <vector>
11
12namespace amrex {
13
14template <typename ParticleType, int NArrayReal, int NArrayInt,
15 template<class> class Allocator, class CellAssignor>
16void
18{
19 num_real_comm_comps = 0;
20 int comm_comps_start = 0;
21 if constexpr (!ParticleType::is_soa_particle) {
22 comm_comps_start += AMREX_SPACEDIM + NStructReal;
23 }
24 for (int i = comm_comps_start; i < comm_comps_start + NumRealComps(); ++i) {
25 if (h_redistribute_real_comp[i]) {++num_real_comm_comps;}
26 }
27
28 num_int_comm_comps = 0;
29 comm_comps_start = 2 + NStructInt;
30 for (int i = comm_comps_start; i < comm_comps_start + NumIntComps(); ++i) {
31 if (h_redistribute_int_comp[i]) {++num_int_comm_comps;}
32 }
33
34 if constexpr (ParticleType::is_soa_particle) {
35 particle_size = sizeof(uint64_t); // idcpu
36 } else {
37 particle_size = sizeof(ParticleType);
38 }
39 superparticle_size = particle_size +
40 num_real_comm_comps*sizeof(ParticleReal) + num_int_comm_comps*sizeof(int);
41}
42
43template <typename ParticleType, int NArrayReal, int NArrayInt,
44 template<class> class Allocator, class CellAssignor>
45void
47{
48 levelDirectoriesCreated = false;
49 usePrePost = false;
50 doUnlink = true;
51
52 // by default communicate all components
53 if constexpr (ParticleType::is_soa_particle)
54 {
55 h_redistribute_real_comp.resize(NArrayReal, true);
56 } else {
57 h_redistribute_real_comp.resize(AMREX_SPACEDIM + NStructReal + NArrayReal, true);
58 }
59 h_redistribute_int_comp.resize(2 + NStructInt + NArrayInt, true);
60
61 SetParticleSize();
62
63 // add default names for SoA Real and Int compile-time arguments
64 m_soa_rdata_names.clear();
65 for (int i=0; i<NArrayReal; ++i)
66 {
67 m_soa_rdata_names.push_back(getDefaultCompNameReal<ParticleType>(i));
68 }
69 m_soa_idata_names.clear();
70 for (int i=0; i<NArrayInt; ++i)
71 {
72 m_soa_idata_names.push_back(getDefaultCompNameInt<ParticleType>(i));
73 }
74
75 static bool initialized = false;
76 if ( ! initialized)
77 {
78 static_assert(sizeof(ParticleType)%sizeof(RealType) == 0,
79 "sizeof ParticleType is not a multiple of sizeof RealType");
80
81 ParmParse pp("particles");
82 pp.queryAdd("do_tiling", do_tiling);
83 Vector<int> tilesize(AMREX_SPACEDIM);
84 if (pp.queryarr("tile_size", tilesize, 0, AMREX_SPACEDIM)) {
85 for (int i=0; i<AMREX_SPACEDIM; ++i) { tile_size[i] = tilesize[i]; }
86 }
87
88 static_assert(std::is_standard_layout_v<ParticleType>,
89 "Particle type must be standard layout");
90 // && std::is_trivial<ParticleType>::value,
91 // "Particle type must be standard layout and trivial.");
92
93 pp.query("use_prepost", usePrePost);
94 pp.query("do_unlink", doUnlink);
95 pp.queryAdd("do_mem_efficient_sort", memEfficientSort);
96 pp.queryAdd("use_comms_arena", use_comms_arena);
97
98 initialized = true;
99 }
100}
101
102template<
103 typename ParticleType,
104 int NArrayReal,
105 int NArrayInt,
106 template<class> class Allocator,
107 class CellAssignor
108>
109void
111 std::vector<std::string> const & rdata_name, std::vector<std::string> const & idata_name
112)
113{
114 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(std::ssize(rdata_name) == NArrayReal, "rdata_name must be equal to NArrayReal");
115 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(std::ssize(idata_name) == NArrayInt, "idata_name must be equal to NArrayInt");
116
117 // ensure names for components are unique
118 std::set<std::string> const unique_r_names(rdata_name.begin(), rdata_name.end());
119 std::set<std::string> const unique_i_names(idata_name.begin(), idata_name.end());
120 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(rdata_name.size() == unique_r_names.size(), "SetSoACompileTimeNames: Provided names in rdata_name are not unique!");
121 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(idata_name.size() == unique_i_names.size(), "SetSoACompileTimeNames: Provided names in idata_name are not unique!");
122
123 for (int i=0; i<NArrayReal; ++i)
124 {
125 m_soa_rdata_names.at(i) = rdata_name.at(i);
126 }
127 for (int i=0; i<NArrayInt; ++i)
128 {
129 m_soa_idata_names.at(i) = idata_name.at(i);
130 }
131}
132
133template<
134 typename ParticleType,
135 int NArrayReal,
136 int NArrayInt,
137 template<class> class Allocator,
138 class CellAssignor
139>
140bool
142{
143 return std::find(m_soa_rdata_names.begin(), m_soa_rdata_names.end(), name) != std::end(m_soa_rdata_names);
144}
145
146template <typename ParticleType, int NArrayReal, int NArrayInt,
147 template<class> class Allocator, class CellAssignor>
148bool
150{
151 return std::find(m_soa_idata_names.begin(), m_soa_idata_names.end(), name) != std::end(m_soa_idata_names);
152}
153
154template<
155 typename ParticleType,
156 int NArrayReal,
157 int NArrayInt,
158 template<class> class Allocator,
159 class CellAssignor
160>
161int
163{
164 const auto it = std::find(m_soa_rdata_names.begin(), m_soa_rdata_names.end(), name);
165
166 if (it == m_soa_rdata_names.end())
167 {
168 throw std::runtime_error("GetRealCompIndex: Component " + name + " does not exist!");
169 }
170 else
171 {
172 return std::distance(m_soa_rdata_names.begin(), it);
173 }
174}
175
176template<
177 typename ParticleType,
178 int NArrayReal,
179 int NArrayInt,
180 template<class> class Allocator,
181 class CellAssignor
182>
183int
185{
186 const auto it = std::find(m_soa_idata_names.begin(), m_soa_idata_names.end(), name);
187
188 if (it == m_soa_idata_names.end())
189 {
190 throw std::runtime_error("GetIntCompIndex: Component " + name + " does not exist!");
191 }
192 else
193 {
194 return std::distance(m_soa_idata_names.begin(), it);
195 }
196}
197
198template <typename ParticleType, int NArrayReal, int NArrayInt,
199 template<class> class Allocator, class CellAssignor>
200template <typename P, typename Assignor>
203{
204 const Geometry& geom = Geom(lev);
205 const auto& domain = geom.Domain();
206 const auto& plo = geom.ProbLoArray();
207 const auto& dxi = geom.InvCellSizeArray();
208
209 return Assignor{}(p, plo, dxi, domain);
210}
211
212template <typename ParticleType, int NArrayReal, int NArrayInt,
213 template<class> class Allocator, class CellAssignor>
214template <typename P>
215bool
217::Where (const P& p,
218 ParticleLocData& pld,
219 int lev_min,
220 int lev_max,
221 int nGrow,
222 int local_grid) const
223{
224
225 AMREX_ASSERT(m_gdb != nullptr);
226
227 if (lev_max == -1) {
228 lev_max = finestLevel();
229 }
230
231 AMREX_ASSERT(lev_max <= finestLevel());
232
233 AMREX_ASSERT(nGrow == 0 || (nGrow >= 0 && lev_min == lev_max));
234
235 std::vector< std::pair<int, Box> > isects;
236
237 for (int lev = lev_max; lev >= lev_min; lev--) {
238 const IntVect& iv = Index(p, lev);
239 if (lev == pld.m_lev) {
240 // The fact that we are here means this particle does not belong to any finer grids.
241 if (pld.m_grid >= 0) {
242 if (pld.m_grown_gridbox.contains(iv)) {
243 pld.m_cell = iv;
244 if (!pld.m_tilebox.contains(iv)) {
245 pld.m_tile = getTileIndex(iv, pld.m_gridbox, do_tiling, tile_size, pld.m_tilebox);
246 }
247 return true;
248 }
249 }
250 }
251
252 int grid;
253 const BoxArray& ba = ParticleBoxArray(lev);
255
256 if (local_grid < 0) {
257 bool findfirst = (nGrow == 0) ? true : false;
258 ba.intersections(Box(iv, iv), isects, findfirst, nGrow);
259 grid = isects.empty() ? -1 : isects[0].first;
260 if (nGrow > 0 && std::ssize(isects) > 1) {
261 for (auto & isect : isects) {
262 Box bx = ba.getCellCenteredBox(isect.first);
263 for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) {
264 Box gbx = bx;
266 gr[dir] = nGrow;
267 gbx.grow(gr);
268 if (gbx.contains(iv)) {
269 grid = isect.first;
270 }
271 }
272 }
273 }
274 } else {
275 grid = (*redistribute_mask_ptr)[local_grid](iv, 0);
276 }
277
278 if (grid >= 0) {
279 const Box& bx = ba.getCellCenteredBox(grid);
280 pld.m_lev = lev;
281 pld.m_grid = grid;
282 pld.m_tile = getTileIndex(iv, bx, do_tiling, tile_size, pld.m_tilebox);
283 pld.m_cell = iv;
284 pld.m_gridbox = bx;
285 pld.m_grown_gridbox = amrex::grow(bx, nGrow);
286 return true;
287 }
288 }
289
290 return false;
291}
292
293template <typename ParticleType, int NArrayReal, int NArrayInt,
294 template<class> class Allocator, class CellAssignor>
295template <typename P>
296bool
299 ParticleLocData& pld,
300 int lev_min,
301 int lev_max,
302 int local_grid) const
303{
304
305 AMREX_ASSERT(m_gdb != nullptr);
306
307 if (!Geom(0).isAnyPeriodic()) { return false; }
308
309 if (lev_max == -1) {
310 lev_max = finestLevel();
311 }
312
313 AMREX_ASSERT(lev_max <= finestLevel());
314
315 // Create a copy "dummy" particle to check for periodic outs.
316 Particle<0, 0> p_prime;
317 AMREX_D_TERM(p_prime.pos(0) = p.pos(0);,
318 p_prime.pos(1) = p.pos(1);,
319 p_prime.pos(2) = p.pos(2));
320 if (PeriodicShift(p_prime)) {
321 std::vector< std::pair<int,Box> > isects;
322 for (int lev = lev_max; lev >= lev_min; lev--) {
323
324 int grid;
325 IntVect iv;
326 const BoxArray& ba = ParticleBoxArray(lev);
328
329 if (local_grid < 0) {
330 iv = Index<amrex::Particle<0, 0>, DefaultAssignor>(p_prime, lev);
331 ba.intersections(Box(iv, iv), isects, true, 0);
332 grid = isects.empty() ? -1 : isects[0].first;
333 } else {
334 iv = Index<amrex::Particle<0, 0>, DefaultAssignor>(p_prime, lev);
335 if (ba[local_grid].contains(iv))
336 {
337 grid = local_grid;
338 }
339 else
340 {
341 ba.intersections(Box(iv, iv), isects, true, 0);
342 grid = isects.empty() ? -1 : isects[0].first;
343 if(grid == -1)
344 {
345 grid = (*redistribute_mask_ptr)[local_grid](Index(p, lev), 0);
346 }
347 }
348 }
349
350 if (grid >= 0) {
351 AMREX_D_TERM(p.pos(0) = p_prime.pos(0);,
352 p.pos(1) = p_prime.pos(1);,
353 p.pos(2) = p_prime.pos(2););
354
355 const Box& bx = ba.getCellCenteredBox(grid);
356
357 pld.m_lev = lev;
358 pld.m_grid = grid;
359 pld.m_tile = getTileIndex(iv, bx, do_tiling, tile_size, pld.m_tilebox);
360 pld.m_cell = iv;
361 pld.m_gridbox = bx;
362 pld.m_grown_gridbox = bx;
363 return true;
364 }
365 }
366 }
367
368 return false;
369}
370
371
372template <typename ParticleType, int NArrayReal, int NArrayInt,
373 template<class> class Allocator, class CellAssignor>
374template <typename P>
375bool
377::PeriodicShift (P& p) const
378{
379 const auto& geom = Geom(0);
380 const auto plo = geom.ProbLoArray();
381 const auto phi = geom.ProbHiArray();
382 const auto rlo = geom.ProbLoArrayInParticleReal();
383 const auto rhi = geom.ProbHiArrayInParticleReal();
384 const auto is_per = geom.isPeriodicArray();
386 return enforcePeriodic(p, plo, phi, rlo, rhi, is_per);
387}
388
389template <typename ParticleType, int NArrayReal, int NArrayInt,
390 template<class> class Allocator, class CellAssignor>
394 bool /*update*/,
395 bool verbose,
396 ParticleLocData pld) const
397{
398 AMREX_ASSERT(m_gdb != nullptr);
399
400 bool ok = Where(p, pld);
401
402 if (!ok && Geom(0).isAnyPeriodic())
403 {
404 // Attempt to shift the particle back into the domain if it
405 // crossed a periodic boundary.
406 PeriodicShift(p);
407 ok = Where(p, pld);
408 }
409
410 if (!ok) {
411 // invalidate the particle.
412 if (verbose) {
413 amrex::AllPrint()<< "Invalidating out-of-domain particle: " << p << '\n';
414 }
415
416 AMREX_ASSERT(p.id().is_valid());
417
418 p.id().make_invalid();
419 }
420
421 return pld;
422}
423
// NOTE(review): the body of reserveData() (original lines 427-431) and the
// signature lines of both functions were lost in the HTML extraction that
// produced this file; restore them from upstream AMReX before compiling.
424template <typename ParticleType, int NArrayReal, int NArrayInt,
425 template<class> class Allocator, class CellAssignor>
426void
432
433template <typename ParticleType, int NArrayReal, int NArrayInt,
434 template<class> class Allocator, class CellAssignor>
435void
437{
// Size the per-level particle map to match the current finest level.
439 int nlevs = std::max(0, finestLevel()+1);
440 m_particles.resize(nlevs);
441}
442
443template <typename ParticleType, int NArrayReal, int NArrayInt,
444 template<class> class Allocator, class CellAssignor>
445template <typename P>
446void
448 int lev_min, int lev_max, int nGrow, int local_grid) const
449{
450 bool success;
451 if (Geom(0).outsideRoundoffDomain(AMREX_D_DECL(p.pos(0), p.pos(1), p.pos(2))))
452 {
453 // Note that EnforcePeriodicWhere may shift the particle if it is successful.
454 success = EnforcePeriodicWhere(p, pld, lev_min, lev_max, local_grid);
455 if (!success && lev_min == 0)
456 {
457 // The particle has left the domain; invalidate it.
458 p.id().make_invalid();
459 success = true;
460 }
461 }
462 else
463 {
464 success = Where(p, pld, lev_min, lev_max, 0, local_grid);
465 }
466
467 if (!success)
468 {
469 success = (nGrow > 0) && Where(p, pld, lev_min, lev_min, nGrow);
470 pld.m_grown_gridbox = pld.m_gridbox; // reset grown box for subsequent calls.
471 }
473 if (!success)
475 amrex::Abort("ParticleContainer::locateParticle(): invalid particle.");
476 }
477}
478
479template <typename ParticleType, int NArrayReal, int NArrayInt,
480 template<class> class Allocator, class CellAssignor>
481Long
483{
484 Long nparticles = 0;
485 for (int lev = 0; lev <= finestLevel(); lev++) {
486 nparticles += NumberOfParticlesAtLevel(lev,only_valid,true);
487 }
488 if (!only_local) {
490 }
491 return nparticles;
492}
493
494template <typename ParticleType, int NArrayReal, int NArrayInt,
495 template<class> class Allocator, class CellAssignor>
498{
499 AMREX_ASSERT(lev >= 0 && lev < std::ssize(m_particles));
500
501 LayoutData<Long> np_per_grid_local(ParticleBoxArray(lev),
502 ParticleDistributionMap(lev));
503
504 for (ParConstIterType pti(*this, lev); pti.isValid(); ++pti)
505 {
506 int gid = pti.index();
507 if (only_valid)
508 {
509 const auto& ptile = ParticlesAt(lev, pti);
510 const int np = ptile.numParticles();
511 auto const ptd = ptile.getConstParticleTileData();
512
514 ReduceData<int> reduce_data(reduce_op);
515 using ReduceTuple = typename decltype(reduce_data)::Type;
516
517 reduce_op.eval(np, reduce_data,
518 [=] AMREX_GPU_DEVICE (int i) -> ReduceTuple
519 {
520 return (ptd.id(i).is_valid()) ? 1 : 0;
521 });
522
523 int np_valid = amrex::get<0>(reduce_data.value(reduce_op));
524 np_per_grid_local[gid] += np_valid;
525 } else
526 {
527 np_per_grid_local[gid] += pti.numParticles();
528 }
530
531 Vector<Long> nparticles(np_per_grid_local.size(), 0);
532 if (only_local)
533 {
534 for (ParConstIterType pti(*this, lev); pti.isValid(); ++pti)
535 {
536 nparticles[pti.index()] = np_per_grid_local[pti.index()];
537 }
538 }
539 else
540 {
541 ParallelDescriptor::GatherLayoutDataToVector(np_per_grid_local, nparticles,
543 ParallelDescriptor::Bcast(nparticles.data(), nparticles.size(),
546
547 return nparticles;
548}
549
550template <typename ParticleType, int NArrayReal, int NArrayInt,
551 template<class> class Allocator, class CellAssignor>
553{
554 Long nparticles = 0;
555
556 if (level < 0 || level >= std::ssize(m_particles)) { return nparticles; }
557
558 if (only_valid) {
559 ReduceOps<ReduceOpSum> reduce_op;
560 ReduceData<unsigned long long> reduce_data(reduce_op);
561 using ReduceTuple = typename decltype(reduce_data)::Type;
562
563 for (const auto& kv : GetParticles(level)) {
564 const auto& ptile = kv.second;
565 auto const ptd = ptile.getConstParticleTileData();
566
567 reduce_op.eval(ptile.numParticles(), reduce_data,
568 [=] AMREX_GPU_DEVICE (int i) -> ReduceTuple
569 {
570 return (ptd.id(i).is_valid()) ? 1 : 0;
571 });
573
574 nparticles = static_cast<Long>(amrex::get<0>(reduce_data.value(reduce_op)));
575 }
576 else {
577 for (const auto& kv : GetParticles(level)) {
578 const auto& ptile = kv.second;
579 nparticles += ptile.numParticles();
580 }
581 }
582
583 if (!only_local) {
585 }
586
587 return nparticles;
589
// NOTE(review): the qualified function-name line (original line 594) and the
// loop's closing brace (line 606) were lost in the HTML extraction; the
// function's name cannot be recovered from this view — restore from upstream.
// What is visible: it accumulates each local grid's tile capacity (bytes or
// element count, depending on pti.capacity() semantics — confirm) into a
// LayoutData<I> indexed by grid id, for an integral type I at least as wide
// as Long.
590template <typename ParticleType, int NArrayReal, int NArrayInt,
591 template<class> class Allocator, class CellAssignor>
592template <typename I, std::enable_if_t<std::is_integral_v<I> && (sizeof(I) >= sizeof(Long)), int> FOO>
593void
595{
596 AMREX_ASSERT(lev >= 0 && lev < std::ssize(m_particles));
597
// The output LayoutData must share this container's BoxArray/DistributionMap.
598 AMREX_ASSERT(BoxArray::SameRefs(mem.boxArray(), ParticleBoxArray(lev)) &&
599 mem.DistributionMap() == ParticleDistributionMap(lev));
600
601 [[maybe_unused]] Gpu::NoSyncRegion no_sync{};
602 for (ParConstIterType pti(*this, lev); pti.isValid(); ++pti)
603 {
604 int gid = pti.index();
605 mem[gid] += static_cast<I>(pti.capacity());
607}
608
609//
610// This includes both valid and invalid particles since invalid particles still take up space.
611//
612
// NOTE(review): the signature (original lines 616-618), one closing brace
// (627), three reduction statements (637-639), and the final closing brace
// (655) were lost in the HTML extraction — restore from upstream AMReX.
// Visible behavior: totals numParticles() over all levels/tiles, converts to
// bytes via a fixed per-particle size, and prints min/max/total across ranks.
613template <typename ParticleType, int NArrayReal, int NArrayInt,
614 template<class> class Allocator, class CellAssignor>
615std::array<Long, 3>
619 Long cnt = 0;
620
621 for (unsigned lev = 0; lev < m_particles.size(); lev++) {
622 const auto& pmap = m_particles[lev];
623 for (const auto& kv : pmap) {
624 const auto& ptile = kv.second;
625 cnt += ptile.numParticles();
626 }
628
629 Long mn = cnt, mx = mn;
630
631 const int IOProc = ParallelContext::IOProcessorNumberSub();
// Bytes per particle: struct + runtime Real and int components.
632 const Long sz = sizeof(ParticleType)+NumRealComps()*sizeof(ParticleReal)+NumIntComps()*sizeof(int);
633
634#ifdef AMREX_LAZY
635 Lazy::QueueReduction( [=] () mutable {
636#endif
640
641 amrex::Print() << "ParticleContainer spread across MPI nodes - bytes (num particles): [Min: "
642 << mn*sz
643 << " (" << mn << ")"
644 << ", Max: "
645 << mx*sz
646 << " (" << mx << ")"
647 << ", Total: "
648 << cnt*sz
649 << " (" << cnt << ")]\n";
650#ifdef AMREX_LAZY
651 });
652#endif
653
654 return {mn*sz, mx*sz, cnt*sz};
656
// NOTE(review): the signature (original lines 660-661), one interior line
// (674), and three reduction statements (680-682) were lost in the HTML
// extraction — restore from upstream AMReX. Visible behavior: totals tile
// capacity() over all levels/tiles and prints min/max/total across ranks.
657template <typename ParticleType, int NArrayReal, int NArrayInt,
658 template<class> class Allocator, class CellAssignor>
659std::array<Long, 3>
662{
663 Long cnt = 0;
664
665 for (unsigned lev = 0; lev < m_particles.size(); lev++) {
666 const auto& pmap = m_particles[lev];
667 for (const auto& kv : pmap) {
668 const auto& ptile = kv.second;
669 cnt += ptile.capacity();
670 }
671 }
672
673 Long mn = cnt, mx = mn;
675 const int IOProc = ParallelContext::IOProcessorNumberSub();
676
677#ifdef AMREX_LAZY
678 Lazy::QueueReduction( [=] () mutable {
679#endif
683
684 amrex::Print() << "ParticleContainer spread across MPI nodes - bytes: [Min: "
685 << mn
686 << ", Max: "
687 << mx
688 << ", Total: "
689 << cnt
690 << "]\n";
691#ifdef AMREX_LAZY
692 });
693#endif
694
695 return {mn, mx, cnt};
696}
697
698template <typename ParticleType, int NArrayReal, int NArrayInt,
699 template<class> class Allocator, class CellAssignor>
700void
702{
703 for (unsigned lev = 0; lev < m_particles.size(); lev++) {
704 auto& pmap = m_particles[lev];
705 for (auto& kv : pmap) {
706 auto& ptile = kv.second;
707 ptile.shrink_to_fit();
708 }
709 }
710}
711
718template <typename ParticleType, int NArrayReal, int NArrayInt,
719 template<class> class Allocator, class CellAssignor>
720void
722{
723 BL_PROFILE("ParticleContainer::Increment");
724
725 AMREX_ASSERT(OK());
726 if (m_particles.empty()) { return; }
727 AMREX_ASSERT(lev >= 0 && lev < std::ssize(m_particles));
728 AMREX_ASSERT(numParticlesOutOfRange(*this, 0) == 0);
729
730 const auto& geom = Geom(lev);
731 const auto plo = geom.ProbLoArray();
732 const auto dxi = geom.InvCellSizeArray();
733 const auto domain = geom.Domain();
734 amrex::ParticleToMesh(*this, mf, lev,
735 [=] AMREX_GPU_DEVICE (const typename ParticleTileType::ConstParticleTileDataType& ptd, int ip,
736 amrex::Array4<amrex::Real> const& count)
737 {
738 const auto p = ptd[ip];
739 CellAssignor assignor;
740 IntVect iv = assignor(p, plo, dxi, domain);
741 amrex::Gpu::Atomic::AddNoRet(&count(iv), 1.0_rt);
742 }, false);
743}
744
745template <typename ParticleType, int NArrayReal, int NArrayInt,
746 template<class> class Allocator, class CellAssignor>
747Long
749{
750 BL_PROFILE("ParticleContainer::IncrementWithTotal(lev)");
751 Increment(mf, lev);
752 return TotalNumberOfParticles(true, local);
753}
754
755template <typename ParticleType, int NArrayReal, int NArrayInt,
756 template<class> class Allocator, class CellAssignor>
757void
759{
760 BL_PROFILE("ParticleContainer::RemoveParticlesAtLevel()");
761 if (level >= std::ssize(this->m_particles)) { return; }
762
763 if (!this->m_particles[level].empty())
764 {
765 ParticleLevel().swap(this->m_particles[level]);
766 }
767}
768
769template <typename ParticleType, int NArrayReal, int NArrayInt,
770 template<class> class Allocator, class CellAssignor>
771void
773{
774 BL_PROFILE("ParticleContainer::RemoveParticlesNotAtFinestLevel()");
775 AMREX_ASSERT(this->finestLevel()+1 == std::ssize(this->m_particles));
776
777 Long cnt = 0;
778
779 for (unsigned lev = 0; lev < m_particles.size() - 1; ++lev) {
780 auto& pmap = m_particles[lev];
781 if (!pmap.empty()) {
782 for (auto& kv : pmap) {
783 const auto& pbx = kv.second;
784 cnt += pbx.numParticles();
785 }
786 ParticleLevel().swap(pmap);
787 }
788 }
789
790 //
791 // Print how many particles removed on each processor if any were removed.
792 //
793 if (this->m_verbose > 1 && cnt > 0) {
794 amrex::AllPrint() << "Processor " << ParallelContext::MyProcSub() << " removed " << cnt
795 << " particles not in finest level\n";
796 }
797}
798
// NOTE(review): the struct-name lines (original 799, 820), the member
// declarations (802-804), the first constructor line (806), the device
// annotations (812, 823), and one assignment in the transformer (829, by
// analogy with TransformerGhost presumably setting a virtual-particle id)
// were lost in the HTML extraction — restore from upstream AMReX.
// Visible behavior, filter: selects particles whose cell does NOT fall in a
// buffer grid (first == -1 lookup); transformer: copies the particle and
// zeroes its cpu field.
800{
801
805
807 const GpuArray<Real, AMREX_SPACEDIM> & dxi, const Box& domain)
808 : m_assign_buffer_grid(assign_buffer_grid), m_plo(plo), m_dxi(dxi), m_domain(domain)
809 {}
810
811 template <typename SrcData>
813 int operator() (const SrcData& src, int src_i) const noexcept
814 {
815 auto iv = getParticleCell(src, src_i, m_plo, m_dxi, m_domain);
816 return (m_assign_buffer_grid(iv).first != -1);
817 }
818};
819
821{
822 template <typename DstData, typename SrcData>
824 void operator() (DstData& dst, const SrcData& src,
825 int src_i, int dst_i) const noexcept
826 {
827 copyParticle(dst, src, src_i, dst_i);
828
830 dst.cpu(dst_i) = 0;
831 }
832};
833
834
835template <typename ParticleType, int NArrayReal, int NArrayInt,
836 template<class> class Allocator, class CellAssignor>
837void
838ParticleContainer_impl<ParticleType, NArrayReal, NArrayInt, Allocator, CellAssignor>
839::CreateVirtualParticles (int level, AoS& virts) const
840{
841 ParticleTileType ptile;
842 CreateVirtualParticles(level, ptile);
843 ptile.GetArrayOfStructs().swap(virts);
844}
845
// NOTE(review): several lines were lost in the HTML extraction — the
// qualified-name lines (original 849-850), a locator declaration (897), the
// tail of two lambda parameter lists (904-905, 937-938), an id assignment in
// the scatter loop (984), and lines 1019/1031/1034 near the end. Restore
// from upstream AMReX before compiling.
846template <typename ParticleType, int NArrayReal, int NArrayInt,
847 template<class> class Allocator, class CellAssignor>
848void
851{
852 BL_PROFILE("ParticleContainer::CreateVirtualParticles()");
853 AMREX_ASSERT(level > 0);
854 AMREX_ASSERT(virts.empty());
855
856 if (level >= std::ssize(m_particles)) {
857 return;
858 }
859
860 std::string const& aggregation_type = AggregationType();
861 int aggregation_buffer = AggregationBuffer();
862
// "None": every particle on the level becomes one virtual particle.
863 if (aggregation_type == "None")
864 {
865 auto virts_offset = virts.numParticles();
866 for(ParConstIterType pti(*this, level); pti.isValid(); ++pti)
867 {
868 const auto& src_tile = ParticlesAt(level, pti);
869
870 auto np = src_tile.numParticles();
871 virts.resize(virts_offset+np);
872 transformParticles(virts, src_tile, 0, virts_offset, np, TransformerVirt());
873 virts_offset += np;
874 }
875 }
// "Cell": particles away from the buffer region are aggregated into one
// rdata(0)-weighted virtual particle per cell; particles inside the buffer
// are copied through unchanged at the end.
876 if (aggregation_type == "Cell")
877 {
878 //Components would be based on
879 int nComp = AMREX_SPACEDIM + NStructReal + NArrayReal;
880 // NArrayReal, NStructInt, NArrayInt behavior as before
881 int nGhost = 0;
882 MultiFab mf(ParticleBoxArray(level), ParticleDistributionMap(level), nComp, nGhost);
883
884 nComp = 1 + NStructInt + NArrayInt;
885 iMultiFab imf(ParticleBoxArray(level), ParticleDistributionMap(level), nComp, nGhost);
886
887 const auto& geom = Geom(level);
888 const auto plo = geom.ProbLoArray();
889 const auto dxi = geom.InvCellSizeArray();
890 const auto domain = geom.Domain();
891
// Buffer = complement of this level's grids, grown by aggregation_buffer.
892 BoxList bl_buffer;
893 bl_buffer.complementIn(Geom(level).Domain(), ParticleBoxArray(level));
894 BoxArray buffer(std::move(bl_buffer));
895 buffer.grow(aggregation_buffer);
896
898 locator.build(buffer, geom, do_tiling, tile_size);
899 AssignGrid<DenseBinIteratorFactory<Box>> assign_buffer_grid = locator.getGridAssignor();
900
901 amrex::ParticleToMesh(*this, mf, level,
902 [=] AMREX_GPU_DEVICE (const ParticleType& p,
903 amrex::Array4<amrex::Real> const& partData,
906 {
907 auto iv = getParticleCell(p, plo_loc, dxi_loc, domain);
908 if(assign_buffer_grid(iv).first == -1)
909 {
910 //Ordering may make this not unique
911 for (int i = 0; i < NArrayReal; ++i)
912 {
913 amrex::Gpu::Atomic::AddNoRet(&partData(iv,AMREX_SPACEDIM+NStructReal+i), partData(iv,AMREX_SPACEDIM)!=0.0 ? static_cast<Real>(0) : static_cast<Real>(p.rdata(NStructReal+i)));
914 }
915 //Add the rdata(0)-weighted sum of position
916 for (int i = 0; i < AMREX_SPACEDIM; ++i)
917 {
918 amrex::Gpu::Atomic::AddNoRet(&partData(iv,i), static_cast<Real>((p.rdata(0)*p.pos(i))));
919 }
920 //Add the rdata(0)-weighted sum of other rdata fields
921 for (int i = 1; i < NStructReal; ++i)
922 {
923 amrex::Gpu::Atomic::AddNoRet(&partData(iv,AMREX_SPACEDIM+i), static_cast<Real>((p.rdata(0)*p.rdata(i))));
924 }
925 //Add the rdata(0) sum
926 for (int i = 0; i < 1; ++i)
927 {
928 amrex::Gpu::Atomic::AddNoRet(&partData(iv,AMREX_SPACEDIM+i), static_cast<Real>(p.rdata(0)));
929 }
930 }
931
932 }); //skipping extra false argument, doing mf.setVal(0) at beginning
933
// Second pass stores the idata of the "first" particle seen in each cell.
934 amrex::ParticleToMesh(*this, imf, level,
935 [=] AMREX_GPU_DEVICE (const ParticleType& p,
936 amrex::Array4<int> const& partData,
939 {
940
941 auto iv = getParticleCell(p, plo_loc, dxi_loc, domain);
942 if(assign_buffer_grid(iv).first == -1)
943 {
944 //if this cell has no particle id info, do a straight copy to store idata
945 if(partData(iv,0)==0)
946 {
947 //Add 1 to indicate at least 1 particle at cell iv
948 amrex::Gpu::Atomic::AddNoRet(&partData(iv,0), 1);
949 for (int i = 0; i < NStructInt; ++i)
950 {
951 amrex::Gpu::Atomic::AddNoRet(&partData(iv,1+i), p.idata(i));
952 }
953 for (int i = 0; i < NArrayInt; ++i)
954 {
955 amrex::Gpu::Atomic::AddNoRet(&partData(iv,1+NStructInt+i), p.idata(NStructInt+i));
956 }
957 }
958 }
959 });
960
961 //There may be a better way to ensure virts is the right length
962 virts.resize(imf.sum(0));
963
// Scatter the aggregated per-cell data into actual virtual particles,
// using an exclusive scan of the occupancy flags as destination indices.
964 int last_offset = 0;
965 for (MFIter mfi(mf); mfi.isValid(); ++mfi)
966 {
967 const auto bx = mfi.tilebox();
968 const auto partData = mf.array(mfi);
969 const auto imf_arr = imf.array(mfi);
970
971 Gpu::DeviceVector<int> offsets(bx.numPts());
972 auto *offsets_ptr = offsets.dataPtr();
973 int next_offset = Scan::ExclusiveSum((int) bx.numPts(),(imf_arr.ptr(bx.smallEnd(),0)),(offsets.dataPtr()),Scan::retSum);
974 auto dst = virts.getParticleTileData();
975 ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k)
976 {
977 if(imf_arr(i,j,k,0)!=0)
978 {
979 const auto idx = last_offset + offsets_ptr[
980 imf_arr.get_offset(IntVectND<3>{i,j,k})
981 ];
982
983 dst.cpu(idx) = 0;
985
986 auto& p = dst[idx];
987 //Set rdata(0) first so we can normalize the weighted fields
988 //Note that this does not work for soa PC
989 p.rdata(0) = static_cast<ParticleReal>(partData(i,j,k,AMREX_SPACEDIM));;
990 //Set pos with the normalized weighted field
991 for (int n = 0; n < AMREX_SPACEDIM; ++n)
992 {
993 p.pos(n) = static_cast<ParticleReal>(partData(i,j,k,n) / p.rdata(0));
994 }
995 //Set rdata(n>0) with the normalized weighted field for NStructReal
996 for (int n = 1; n < NStructReal; ++n)
997 {
998 p.rdata(n) = static_cast<ParticleReal>(partData(i,j,k,AMREX_SPACEDIM+n) / p.rdata(0));
999 }
1000 //Set rdata(n>0) with the normalized weighted field for NArrayReal
1001 for (int n = 0; n < NArrayReal; ++n)
1002 {
1003 dst.rdata(n)[idx] = static_cast<ParticleReal>(partData(i,j,k,AMREX_SPACEDIM+NStructReal+n));
1004 }
1005 //Set idata with the "first" particles idata field for NStructInt
1006 for (int n = 0; n < NStructInt; ++n)
1007 {
1008 p.idata(n) = imf_arr(i,j,k,1+n);
1009 }
1010 //Set idata with the "first" particles idata field for NArrayInt
1011 for (int n = 0; n < NArrayInt; ++n)
1012 {
1013 dst.idata(n)[idx] = imf_arr(i,j,k,1+NStructInt+n);
1014 }
1015 }
1016
1017 });
1018 last_offset+=next_offset;
1020 }
1021
1022 // last_offset should equal virts.numParticles()
// Buffer-region particles are appended unaggregated via the filter.
1023 auto virts_offset = last_offset;
1024 for(ParConstIterType pti(*this, level); pti.isValid(); ++pti)
1025 {
1026 const auto& src_tile = ParticlesAt(level, pti);
1027
1028 auto np = src_tile.numParticles();
1029 virts.resize(virts_offset+np);
1030 virts_offset += filterAndTransformParticles(virts, src_tile, FilterVirt(assign_buffer_grid,plo,dxi,domain), TransformerVirt(),0,virts_offset);
1032 }
1033 virts.resize(virts_offset);
1035 }
1036}
1037
// NOTE(review): the struct-name lines (original 1038, 1065), the member
// declarations (1041-1042), the constructor's documentation/signature lines
// (1044-1048), and the device annotations (1053, 1069) were lost in the HTML
// extraction — restore from upstream AMReX.
// Visible behavior, filter: selects particles of grid m_gid whose finest
// owning level equals m_lev_max within the given grow radius; transformer:
// copies the particle, stamps it with GhostParticleID and cpu 0.
1039{
1040
1043
1049 : m_lev_min(level), m_lev_max(level+1), m_nGrow(nGrow), m_gid(gid), m_assign_grid(assign_grid)
1050 {}
1051
1052 template <typename SrcData>
1054 int operator() (const SrcData& src, int src_i) const noexcept
1055 {
1056 const auto tup_min = (m_assign_grid)(src[src_i], m_lev_min, m_lev_max, m_nGrow, DefaultAssignor{});
1057 const auto tup_max = (m_assign_grid)(src[src_i], m_lev_max, m_lev_max, m_nGrow, DefaultAssignor{});
1058 const auto p_boxes = amrex::get<0>(tup_min);
1059 const auto p_boxes_max = amrex::get<0>(tup_max);
1060 const auto p_levs_max = amrex::get<2>(tup_max);
1061 return p_boxes_max >=0 && p_boxes == m_gid && p_levs_max == m_lev_max;
1062 }
1063};
1064
1066{
1067
1068 template <typename DstData, typename SrcData>
1070 void operator() (DstData& dst, const SrcData& src,
1071 int src_i, int dst_i) const noexcept
1072 {
1073 copyParticle(dst, src, src_i, dst_i);
1074
1075 dst.id(dst_i) = LongParticleIds::GhostParticleID;
1076 dst.cpu(dst_i) = 0;
1077 }
1078};
1079
1080template <typename ParticleType, int NArrayReal, int NArrayInt,
1081 template<class> class Allocator, class CellAssignor>
1082void
1083ParticleContainer_impl<ParticleType, NArrayReal, NArrayInt, Allocator, CellAssignor>
1084::CreateGhostParticles (int level, int nGrow, AoS& ghosts) const
1085{
1086 ParticleTileType ptile;
1087 CreateGhostParticles(level, nGrow, ptile);
1088 ptile.GetArrayOfStructs().swap(ghosts);
1089}
1090
1091template <typename ParticleType, int NArrayReal, int NArrayInt,
1092 template<class> class Allocator, class CellAssignor>
1093void
1095::CreateGhostParticles (int level, int nGrow, ParticleTileType& ghosts) const
1096{
1097 BL_PROFILE("ParticleContainer::CreateGhostParticles()");
1098 AMREX_ASSERT(ghosts.empty());
1099 AMREX_ASSERT(level < finestLevel());
1100
1101 if (level >= std::ssize(m_particles)) {
1102 return;
1103 }
1104
1105 if (! m_particle_locator.isValid(GetParGDB(), do_tiling, tile_size)) {
1106 m_particle_locator.build(GetParGDB(), do_tiling, tile_size);
1107 }
1108
1109 m_particle_locator.setGeometry(GetParGDB());
1110 AmrAssignGrid<DenseBinIteratorFactory<Box>> assign_grid = m_particle_locator.getGridAssignor();
1111 auto ghost_offset = ghosts.numParticles();
1112 for(ParConstIterType pti(*this, level); pti.isValid(); ++pti)
1113 {
1114 const auto& src_tile = ParticlesAt(level, pti);
1115 int gid = pti.index();
1116
1117 auto np = src_tile.numParticles();
1118 ghosts.resize(ghost_offset+np);
1119 ghost_offset += filterAndTransformParticles(ghosts, src_tile, AssignGridFilter(assign_grid,gid,level,nGrow), TransformerGhost(),0,ghost_offset);
1120 }
1121 ghosts.resize(ghost_offset);
1123}
1124
1125template <typename ParticleType, int NArrayReal, int NArrayInt,
1126 template<class> class Allocator, class CellAssignor>
1127void
1130{
1131 BL_PROFILE("ParticleContainer::clearParticles()");
1132
1133 for (int lev = 0; lev < std::ssize(m_particles); ++lev)
1134 {
1135 for (auto& kv : m_particles[lev]) { kv.second.resize(0); }
1136 particle_detail::clearEmptyEntries(m_particles[lev]);
1137 }
1138}
1139
1140template <typename ParticleType, int NArrayReal, int NArrayInt,
1141 template<class> class Allocator, class CellAssignor>
1142template <class PCType, std::enable_if_t<IsParticleContainer<PCType>::value, int> foo>
1143void
1145copyParticles (const PCType& other, bool local)
1146{
1147 using PData = typename ParticleTileType::ConstParticleTileDataType;
1148 copyParticles(other, [] AMREX_GPU_HOST_DEVICE (const PData& /*data*/, int /*i*/) { return 1; }, local);
1149}
1150
1151template <typename ParticleType, int NArrayReal, int NArrayInt,
1152 template<class> class Allocator, class CellAssignor>
1153template <class PCType, std::enable_if_t<IsParticleContainer<PCType>::value, int> foo>
1154void
1156addParticles (const PCType& other, bool local)
1157{
1158 using PData = typename ParticleTileType::ConstParticleTileDataType;
1159 addParticles(other, [] AMREX_GPU_HOST_DEVICE (const PData& /*data*/, int /*i*/) { return 1; }, local);
1160}
1161
1162template <typename ParticleType, int NArrayReal, int NArrayInt,
1163 template<class> class Allocator, class CellAssignor>
1164template <class F, class PCType,
1165 std::enable_if_t<IsParticleContainer<PCType>::value, int> foo,
1166 std::enable_if_t<! std::is_integral_v<F>, int> bar>
1167void
1169copyParticles (const PCType& other, F&& f, bool local)
1170{
1171 BL_PROFILE("ParticleContainer::copyParticles");
1172 clearParticles();
1173 addParticles(other, std::forward<F>(f), local);
1174}
1175
1176template <typename ParticleType, int NArrayReal, int NArrayInt,
1177 template<class> class Allocator, class CellAssignor>
1178template <class F, class PCType,
1179 std::enable_if_t<IsParticleContainer<PCType>::value, int> foo,
1180 std::enable_if_t<! std::is_integral_v<F>, int> bar>
1181void
1183addParticles (const PCType& other, F const& f, bool local)
1184{
1185 BL_PROFILE("ParticleContainer::addParticles");
1186
1187 // touch all tiles in serial
1188 for (int lev = 0; lev < other.numLevels(); ++lev)
1189 {
1190 [[maybe_unused]] Gpu::NoSyncRegion no_sync{};
1191 const auto& plevel_other = other.GetParticles(lev);
1192 for(MFIter mfi = other.MakeMFIter(lev); mfi.isValid(); ++mfi)
1193 {
1194 auto index = std::make_pair(mfi.index(), mfi.LocalTileIndex());
1195 if(!plevel_other.contains(index)) { continue; }
1196
1197 DefineAndReturnParticleTile(lev, mfi.index(), mfi.LocalTileIndex());
1198 }
1199 }
1200
1201#ifdef AMREX_USE_OMP
1202#pragma omp parallel if (Gpu::notInLaunchRegion())
1203#endif
1204 for (int lev = 0; lev < other.numLevels(); ++lev)
1205 {
1206 const auto& plevel_other = other.GetParticles(lev);
1207 for(MFIter mfi = other.MakeMFIter(lev); mfi.isValid(); ++mfi)
1208 {
1209 auto index = std::make_pair(mfi.index(), mfi.LocalTileIndex());
1210 if(!plevel_other.contains(index)) { continue; }
1211
1212 // this has already had define() called above
1213 auto& ptile = ParticlesAt(lev, mfi.index(), mfi.LocalTileIndex());
1214 const auto& ptile_other = plevel_other.at(index);
1215 auto np = ptile_other.numParticles();
1216 if (np == 0) { continue; }
1217
1218 auto dst_index = ptile.numParticles();
1219 ptile.resize(dst_index + np);
1220
1221 auto count = amrex::filterParticles(ptile, ptile_other, f,
1222 static_cast<decltype(np)>(0), dst_index, np);
1223
1224 ptile.resize(dst_index + count);
1225 }
1226 }
1227
1228 if (! local) { Redistribute(); }
1229}
1230
1231//
1232// This redistributes valid particles and discards invalid ones.
1233//
1234template <typename ParticleType, int NArrayReal, int NArrayInt,
1235 template<class> class Allocator, class CellAssignor>
1236void
1238::Redistribute (int lev_min, int lev_max, int nGrow, int local, bool remove_negative)
1240 BL_PROFILE_SYNC_START_TIMED("SyncBeforeComms: Redist");
1241
1243 !is_rtsoa_pc || NumRuntimeRealComps() >= AMREX_SPACEDIM,
1244 "ParticleContainer with RTSoA requires at least AMREX_SPACEDIM "
1245 "runtime real components for positions"
1246 );
1247
1248 Redistribute_impl(lev_min, lev_max, nGrow, local, remove_negative);
1249
1251}
1252
1253template <typename ParticleType, int NArrayReal, int NArrayInt,
1254 template<class> class Allocator, class CellAssignor>
1255template <class index_type>
1256void
1258::ReorderParticles (int lev, const MFIter& mfi, const index_type* permutations)
1260 auto& ptile = ParticlesAt(lev, mfi);
1261 const size_t np = ptile.numParticles();
1262 const size_t np_total = np + ptile.numNeighborParticles();
1263
1264 if (memEfficientSort) {
1265 amrex::ReorderParticles(ptile, permutations);
1266 } else {
1267 ParticleTileType ptile_tmp;
1268 ptile_tmp.define(m_num_runtime_real, m_num_runtime_int,
1269 &m_soa_rdata_names, &m_soa_idata_names, arena());
1270 ptile_tmp.resize(np_total);
1271 // copy re-ordered particles
1272 gatherParticles(ptile_tmp, ptile, np, permutations);
1273 // copy neighbor particles
1274 amrex::copyParticles(ptile_tmp, ptile, np, np, np_total-np);
1275 ptile.swap(ptile_tmp);
1276 }
1277}
1278
1279template <typename ParticleType, int NArrayReal, int NArrayInt,
1280 template<class> class Allocator, class CellAssignor>
1281void
1287template <typename ParticleType, int NArrayReal, int NArrayInt,
1288 template<class> class Allocator, class CellAssignor>
1289void
1292{
1293 BL_PROFILE("ParticleContainer::SortParticlesByBin()");
1294
1295 if (bin_size == IntVect::TheZeroVector()) { return; }
1296
1297 for (int lev = 0; lev < numLevels(); ++lev)
1298 {
1299 const Geometry& geom = Geom(lev);
1300 const auto dxi = geom.InvCellSizeArray();
1301 const auto plo = geom.ProbLoArray();
1302 const auto domain = geom.Domain();
1303
1304 for(MFIter mfi = MakeMFIter(lev); mfi.isValid(); ++mfi)
1305 {
1306 auto& ptile = ParticlesAt(lev, mfi);
1307 const size_t np = ptile.numParticles();
1308
1309 const Box& box = mfi.validbox();
1310
1311 int ntiles = numTilesInBox(box, true, bin_size);
1312
1313 m_bins.build(np, ptile.getParticleTileData(), ntiles,
1314 GetParticleBin{.plo = plo, .dxi = dxi, .domain = domain,
1315 .bin_size = bin_size, .box = box});
1316 ReorderParticles(lev, mfi, m_bins.permutationPtr());
1317 }
1318 }
1319}
1320
1321template <typename ParticleType, int NArrayReal, int NArrayInt,
1322 template<class> class Allocator, class CellAssignor>
1323void
1326{
1327 BL_PROFILE("ParticleContainer::SortParticlesForDeposition()");
1328
1329 for (int lev = 0; lev < numLevels(); ++lev)
1330 {
1331 const Geometry& geom = Geom(lev);
1332
1333 for(MFIter mfi = MakeMFIter(lev); mfi.isValid(); ++mfi)
1334 {
1335 const auto& ptile = ParticlesAt(lev, mfi);
1336 const size_t np = ptile.numParticles();
1337
1338 const Box& box = mfi.validbox();
1339
1340 using index_type = typename decltype(m_bins)::index_type;
1342 PermutationForDeposition<index_type>(perm, np, ptile, box, geom, idx_type);
1343 ReorderParticles(lev, mfi, perm.dataPtr());
1344 }
1345 }
1346}
1347
1348template <typename ParticleType, int NArrayReal, int NArrayInt,
1349 template<class> class Allocator, class CellAssignor>
1351int
1353::hostPartitionTile (ParticleTileType& src_tile,
1354 int lev, int gid, int tid,
1355 int lev_min, int lev_max, int nGrow, int local,
1356 bool remove_negative, int myproc,
1358 Gpu::DeviceVector<int>& levels,
1360 Gpu::DeviceVector<int>& src_indices,
1361 Gpu::DeviceVector<IntVect>& periodic_shift)
1362{
1363 ParticleLocData pld;
1364 auto ptd = src_tile.getParticleTileData();
1365 int num_move = 0;
1366
1367 Long last = src_tile.numParticles() - 1;
1368 Long pindex = 0;
1369 int who = -1;
1370 while (pindex <= last) {
1371 decltype(auto) p = ptd[pindex];
1372
1373 // remove invalid if needed
1374 if (!ptd.id(pindex).is_valid()) {
1375 if (remove_negative) {
1376 copyParticle(ptd, ptd, last, pindex);
1377 correctCellVectors(last, pindex, gid, p);
1378 --last;
1379 } else {
1380 ++pindex;
1381 }
1382 continue;
1383 }
1384
1385 // Fast path: if on the finest level, try the last good location first
1386 // If not on finest level, we need to do the full locate because even if
1387 // the last assignment works, the particle might belong on a finer level.
1388 if (lev == lev_max && pld.m_tile == tid && pld.m_grid == gid && pld.m_lev == lev && who == myproc) {
1389 const auto iv = Index(p, lev);
1390 if (pld.m_tilebox.contains(iv)) {
1391 ++pindex;
1392 continue; // don't need correctCellVectors or particlePostLocate if it stays where it is
1393 }
1394 }
1395
1396 // full locate
1397 locateParticle(p, pld, lev_min, lev_max, nGrow, local ? gid : -1);
1398 particlePostLocate(p, pld, lev);
1399
1400 // particle may have been invalidated, so check if we need to remove again.
1401 if (!ptd.id(pindex).is_valid()) {
1402 copyParticle(ptd, ptd, last, pindex);
1403 correctCellVectors(last, pindex, gid, p);
1404 --last;
1405 continue;
1406 }
1407
1408 // flag particle for move if needed
1409 who = BufferMap().procID(pld.m_grid, pld.m_tile, pld.m_lev);
1410 if (pld.m_lev != lev || pld.m_grid != gid || pld.m_tile != tid || who != myproc) {
1411 // the particle is valid but needs to be sent somewhere else
1412 swapParticle(ptd, ptd, last, pindex);
1413 correctCellVectors(last, pindex, gid, p);
1414 boxes[num_move] = pld.m_grid;
1415 levels[num_move] = pld.m_lev;
1416 tiles[num_move] = pld.m_tile;
1417 src_indices[num_move] = static_cast<int>(last);
1418 ++num_move;
1419 --last;
1420 continue;
1421 }
1422
1423 ++pindex; // if here, particle stays
1424 }
1425
1426 boxes.resize(num_move);
1427 levels.resize(num_move);
1428 tiles.resize(num_move);
1429 src_indices.resize(num_move);
1430 periodic_shift.resize(num_move);
1431
1432 return static_cast<int>(pindex);
1433}
1434
1435//
1436// Shared implementation of Redistribute
1437//
1438template <typename ParticleType, int NArrayReal, int NArrayInt,
1439 template<class> class Allocator, class CellAssignor>
1440void
1441ParticleContainer_impl<ParticleType, NArrayReal, NArrayInt, Allocator, CellAssignor>
1442::Redistribute_impl (int lev_min, int lev_max, int nGrow, int local, bool remove_negative)
1443{
1444 if (local) { AMREX_ASSERT(numParticlesOutOfRange(*this, lev_min, lev_max, local) == 0); }
1445
1446 BL_PROFILE("ParticleContainer::Redistribute_impl()");
1447 BL_PROFILE_VAR_NS("Redistribute_partition", blp_partition);
1448
1449 int theEffectiveFinestLevel = m_gdb->finestLevel();
1450 while (!m_gdb->LevelDefined(theEffectiveFinestLevel)) { theEffectiveFinestLevel--; }
1451
1452 if (std::ssize(m_particles) < theEffectiveFinestLevel+1) {
1453 if (Verbose()) {
1454 amrex::Print() << "ParticleContainer::Redistribute() resizing containers from "
1455 << m_particles.size() << " to "
1456 << theEffectiveFinestLevel + 1 << '\n';
1457 }
1458 m_particles.resize(theEffectiveFinestLevel+1);
1459 m_dummy_mf.resize(theEffectiveFinestLevel+1);
1460 }
1461
1462 for (int lev = 0; lev < theEffectiveFinestLevel+1; ++lev) { RedefineDummyMF(lev); }
1463
1464 int finest_lev_particles;
1465 if (lev_max == -1) {
1466 lev_max = theEffectiveFinestLevel;
1467 finest_lev_particles = m_particles.size() - 1;
1468 } else {
1469 finest_lev_particles = lev_max;
1470 }
1471 AMREX_ASSERT(lev_max <= finestLevel());
1472
1473 this->defineBufferMap();
1474
1475#ifndef AMREX_USE_GPU
1476 if (local > 0) { BuildRedistributeMask(0, local); }
1477#else
1478 if (! m_particle_locator.isValid(GetParGDB(), do_tiling, tile_size)) { m_particle_locator.build(GetParGDB(), do_tiling, tile_size); }
1479 m_particle_locator.setGeometry(GetParGDB());
1480#endif
1481
1482 BL_PROFILE_VAR_START(blp_partition);
1483 ParticleCopyOp op;
1484 int num_levels = finest_lev_particles + 1;
1485 op.setNumLevels(num_levels);
1486 Vector<std::map<std::pair<int, int>, int> > new_sizes(num_levels);
1487#ifndef AMREX_USE_GPU
1488 const int myproc = ParallelContext::MyProcSub();
1489#endif
1490#ifdef AMREX_USE_GPU
1491 auto assign_grid = m_particle_locator.getGridAssignor();
1492 const auto plo = Geom(0).ProbLoArray();
1493 const auto phi = Geom(0).ProbHiArray();
1494 const auto rlo = Geom(0).ProbLoArrayInParticleReal();
1495 const auto rhi = Geom(0).ProbHiArrayInParticleReal();
1496 const auto is_per = Geom(0).isPeriodicArray();
1497#endif
1498
1499#if defined(AMREX_USE_OMP) || defined(AMREX_USE_GPU)
1500 Vector<std::pair<int, int> > grid_tile_ids;
1501 Vector<ParticleTileType*> ptile_ptrs;
1502 Vector<int> plevs;
1503 Vector<int*> new_size_ptrs;
1504#ifndef AMREX_USE_GPU
1506 Vector<Gpu::DeviceVector<int>*> level_ptrs;
1508 Vector<Gpu::DeviceVector<int>*> src_index_ptrs;
1509 Vector<Gpu::DeviceVector<IntVect>*> periodic_shift_ptrs;
1510#endif
1511 std::size_t num_tiles = 0;
1512 for (int lev = lev_min; lev <= finest_lev_particles; ++lev) {
1513 for (auto const& kv : m_particles[lev]) {
1514 if (kv.second.numParticles() != 0) {
1515 ++num_tiles;
1516 }
1517 }
1518 }
1519 grid_tile_ids.reserve(num_tiles);
1520 ptile_ptrs.reserve(num_tiles);
1521 plevs.reserve(num_tiles);
1522 new_size_ptrs.reserve(num_tiles);
1523#ifndef AMREX_USE_GPU
1524 box_ptrs.reserve(num_tiles);
1525 level_ptrs.reserve(num_tiles);
1526 tile_ptrs.reserve(num_tiles);
1527 src_index_ptrs.reserve(num_tiles);
1528 periodic_shift_ptrs.reserve(num_tiles);
1529#endif
1530 for (int lev = lev_min; lev <= finest_lev_particles; lev++) {
1531 auto& pmap = m_particles[lev];
1532 for (auto& kv : pmap)
1533 {
1534 const auto np = kv.second.numParticles();
1535 if (np == 0) { continue; }
1536
1537 grid_tile_ids.push_back(kv.first);
1538 ptile_ptrs.push_back(&(kv.second));
1539 plevs.push_back(lev);
1540 auto index = std::make_pair(kv.first.first, kv.first.second);
1541 auto& new_size = new_sizes[lev][index];
1542 new_size = 0;
1543 new_size_ptrs.push_back(&new_size);
1544#ifndef AMREX_USE_GPU
1545 op.resize(kv.first.first, kv.first.second, lev, static_cast<int>(np));
1546 auto& boxes = op.m_boxes[lev][index];
1547 auto& levels = op.m_levels[lev][index];
1548 auto& tiles = op.m_tiles[lev][index];
1549 auto& src_indices = op.m_src_indices[lev][index];
1550 auto& periodic_shift = op.m_periodic_shift[lev][index];
1551 box_ptrs.push_back(&boxes);
1552 level_ptrs.push_back(&levels);
1553 tile_ptrs.push_back(&tiles);
1554 src_index_ptrs.push_back(&src_indices);
1555 periodic_shift_ptrs.push_back(&periodic_shift);
1556#endif
1557 }
1558 }
1559#endif
1560
1561#if defined(AMREX_USE_OMP) || defined(AMREX_USE_GPU)
1562#ifdef AMREX_USE_OMP
1563#pragma omp parallel for
1564#endif
1565 for (int pmap_it = 0; pmap_it < static_cast<int>(ptile_ptrs.size()); ++pmap_it)
1566 {
1567 int lev = plevs[pmap_it];
1568 int gid = grid_tile_ids[pmap_it].first;
1569 int tid = grid_tile_ids[pmap_it].second;
1570 auto& src_tile = *ptile_ptrs[pmap_it];
1571 const size_t np = src_tile.numParticles();
1572 if (np == 0) {continue;}
1573
1574#ifndef AMREX_USE_GPU
1575 auto& boxes = *box_ptrs[pmap_it];
1576 auto& levels = *level_ptrs[pmap_it];
1577 auto& tiles = *tile_ptrs[pmap_it];
1578 auto& src_indices = *src_index_ptrs[pmap_it];
1579 auto& periodic_shift = *periodic_shift_ptrs[pmap_it];
1580 *new_size_ptrs[pmap_it] = hostPartitionTile(src_tile,
1581 lev, gid, tid,
1582 lev_min, lev_max, nGrow, local,
1583 remove_negative, myproc,
1584 boxes, levels, tiles,
1585 src_indices, periodic_shift);
1586#else // GPU algorithm
1587 int num_stay = partitionParticlesByDest(src_tile, assign_grid,
1588 std::forward<CellAssignor>(CellAssignor{}),
1589 BufferMap(),
1590 plo, phi, rlo, rhi, is_per, lev, gid, tid,
1591 lev_min, lev_max, nGrow, remove_negative);
1592
1593 int num_move = np - num_stay;
1594 *new_size_ptrs[pmap_it] = num_stay;
1595 op.resize(gid, tid, lev, num_move);
1596
1597 auto index = std::make_pair(gid, tid);
1598 auto* p_boxes = op.m_boxes[lev][index].dataPtr();
1599 auto* p_levs = op.m_levels[lev][index].dataPtr();
1600 auto* p_tiles = op.m_tiles[lev][index].dataPtr();
1601 auto* p_src_indices = op.m_src_indices[lev][index].dataPtr();
1602 auto* p_periodic_shift = op.m_periodic_shift[lev][index].dataPtr();
1603 auto ptd = src_tile.getParticleTileData();
1604
1605 amrex::ParallelFor(num_move, [=] AMREX_GPU_DEVICE (int i)
1606 {
1607 const auto p = ptd[i + num_stay];
1608
1609 if (!p.id().is_valid())
1610 {
1611 p_boxes[i] = -1;
1612 p_tiles[i] = -1;
1613 p_levs[i] = -1;
1614 }
1615 else
1616 {
1617 const auto tup = assign_grid(p, lev_min, lev_max, nGrow,
1618 std::forward<CellAssignor>(CellAssignor{}));
1619 p_boxes[i] = amrex::get<0>(tup);
1620 p_tiles[i] = amrex::get<1>(tup);
1621 p_levs[i] = amrex::get<2>(tup);
1622 }
1623 p_periodic_shift[i] = IntVect(AMREX_D_DECL(0,0,0));
1624 p_src_indices[i] = i+num_stay;
1625 });
1626#endif
1627 }
1628#else
1629 for (int lev = lev_min; lev <= finest_lev_particles; ++lev) {
1630 auto& pmap = m_particles[lev];
1631 for (auto& kv : pmap)
1632 {
1633 auto& src_tile = kv.second;
1634 const auto np = src_tile.numParticles();
1635 if (np == 0) { continue; }
1636
1637 int gid = kv.first.first;
1638 int tid = kv.first.second;
1639 auto index = std::make_pair(gid, tid);
1640 auto& new_size = new_sizes[lev][index];
1641 new_size = 0;
1642
1643 op.resize(gid, tid, lev, static_cast<int>(np));
1644 auto& boxes = op.m_boxes[lev][index];
1645 auto& levels = op.m_levels[lev][index];
1646 auto& tiles = op.m_tiles[lev][index];
1647 auto& src_indices = op.m_src_indices[lev][index];
1648 auto& periodic_shift = op.m_periodic_shift[lev][index];
1649 new_size = hostPartitionTile(src_tile,
1650 lev, gid, tid,
1651 lev_min, lev_max, nGrow, local,
1652 remove_negative, myproc,
1653 boxes, levels, tiles,
1654 src_indices, periodic_shift);
1655 }
1656 }
1657#endif
1658 BL_PROFILE_VAR_STOP(blp_partition);
1659
1660 ParticleCopyPlan plan;
1661
1662 plan.build(*this, op, h_redistribute_int_comp,
1663 h_redistribute_real_comp, local);
1664
1665 // by default, this uses The_Arena();
1668
1669 if (use_comms_arena) {
1670 snd_buffer.setArena(The_Comms_Arena());
1671 rcv_buffer.setArena(The_Comms_Arena());
1672 }
1673
1674 packBuffer(*this, op, plan, snd_buffer);
1675
1676 // clear particles from container
1677 for (int lev = lev_min; lev <= lev_max; ++lev)
1678 {
1679 auto& plev = m_particles[lev];
1680 for (auto& kv : plev)
1681 {
1682 int gid = kv.first.first;
1683 int tid = kv.first.second;
1684 auto index = std::make_pair(gid, tid);
1685 auto& tile = plev[index];
1686 tile.resize(new_sizes[lev][index]);
1687 }
1688 }
1689
1690 for (int lev = lev_min; lev <= lev_max; lev++)
1691 {
1692 particle_detail::clearEmptyEntries(m_particles[lev]);
1693 }
1694
1695 if (std::ssize(m_particles) > theEffectiveFinestLevel+1) {
1696 if (m_verbose > 0) {
1697 amrex::Print() << "ParticleContainer::Redistribute() resizing m_particles from "
1698 << m_particles.size() << " to " << theEffectiveFinestLevel+1 << '\n';
1699 }
1700 AMREX_ASSERT(std::ssize(m_particles) >= 2);
1701
1702 m_particles.resize(theEffectiveFinestLevel + 1);
1703 m_dummy_mf.resize(theEffectiveFinestLevel + 1);
1704 }
1705
1707 {
1708 plan.buildMPIFinish(BufferMap());
1709 communicateParticlesStart(*this, plan, snd_buffer, rcv_buffer);
1710 this->ReserveForRedistribute(plan);
1711 unpackBuffer(*this, plan, snd_buffer, RedistributeUnpackPolicy());
1713 unpackRemotes(*this, plan, rcv_buffer, RedistributeUnpackPolicy());
1714 }
1715 else
1716 {
1718 Gpu::PinnedVector<char> pinned_snd_buffer;
1719 Gpu::PinnedVector<char> pinned_rcv_buffer;
1720
1721 if (snd_buffer.arena()->isPinned()) {
1722 plan.buildMPIFinish(BufferMap());
1724 communicateParticlesStart(*this, plan, snd_buffer, pinned_rcv_buffer);
1725 } else {
1726 pinned_snd_buffer.resize(snd_buffer.size());
1727 Gpu::dtoh_memcpy_async(pinned_snd_buffer.dataPtr(), snd_buffer.dataPtr(), snd_buffer.size());
1728 plan.buildMPIFinish(BufferMap());
1730 communicateParticlesStart(*this, plan, pinned_snd_buffer, pinned_rcv_buffer);
1731 }
1732
1733 this->ReserveForRedistribute(plan);
1734
1735 rcv_buffer.resize(pinned_rcv_buffer.size());
1736 unpackBuffer(*this, plan, snd_buffer, RedistributeUnpackPolicy());
1738 Gpu::htod_memcpy_async(rcv_buffer.dataPtr(), pinned_rcv_buffer.dataPtr(), pinned_rcv_buffer.size());
1739 unpackRemotes(*this, plan, rcv_buffer, RedistributeUnpackPolicy());
1740 }
1741
1743 AMREX_ASSERT(numParticlesOutOfRange(*this, lev_min, lev_max, nGrow) == 0);
1744}
1745
1746template <typename ParticleType, int NArrayReal, int NArrayInt,
1747 template<class> class Allocator, class CellAssignor>
1748void
1749ParticleContainer_impl<ParticleType, NArrayReal, NArrayInt, Allocator, CellAssignor>
1750::ReserveForRedistribute (ParticleCopyPlan const& plan)
1751{
1752 BL_PROFILE("ParticleContainer::ReserveForRedistribute()");
1753
1754 std::map<ParticleTileType*, int> addsizes;
1755
1756 for (int lev = 0; lev < this->BufferMap().numLevels(); ++lev) {
1757 for (MFIter mfi = this->MakeMFIter(lev); mfi.isValid(); ++mfi) {
1758 int gid = mfi.index();
1759 int tid = mfi.LocalTileIndex();
1760 auto& tile = this->DefineAndReturnParticleTile(lev, gid, tid);
1761 int num_copies = plan.m_box_counts_h[this->BufferMap().gridAndTileAndLevToBucket(gid, tid, lev)];
1762 if (num_copies > 0) {
1763 addsizes[&tile] += num_copies;
1764 }
1765 }
1766 }
1767
1768 if (plan.m_nrcvs > 0) {
1769 for (int i = 0; i < std::ssize(plan.m_rcv_box_counts); ++i) {
1770 int copy_size = plan.m_rcv_box_counts[i];
1771 int lev = plan.m_rcv_box_levs[i];
1772 int gid = plan.m_rcv_box_ids[i];
1773 int tid = plan.m_rcv_box_tids[i];
1774 auto& tile = this->DefineAndReturnParticleTile(lev, gid, tid);
1775 addsizes[&tile] += copy_size;
1776 }
1777 }
1778
1779 ParticleTileType::reserve(addsizes);
1780}
1781
1782template <typename ParticleType, int NArrayReal, int NArrayInt,
1783 template<class> class Allocator, class CellAssignor>
1784bool
1786{
1787 BL_PROFILE("ParticleContainer::OK()");
1788
1789 if (lev_max == -1) {
1790 lev_max = finestLevel();
1791 }
1792
1793 return (m_dummy_mf.size() >= lev_max+1 && numParticlesOutOfRange(*this, lev_min, lev_max, nGrow) == 0);
1794}
1795
1796template <typename ParticleType, int NArrayReal, int NArrayInt,
1797 template<class> class Allocator, class CellAssignor>
1798void
1800::AddParticlesAtLevel (AoS& particles, int level, int nGrow)
1801{
1802 ParticleTileType ptile;
1803 ptile.GetArrayOfStructs().swap(particles);
1804 AddParticlesAtLevel(ptile, level, nGrow);
1805}
1806
1807template <typename ParticleType, int NArrayReal, int NArrayInt,
1808 template<class> class Allocator, class CellAssignor>
1809void
1811::AddParticlesAtLevel (ParticleTileType& particles, int level, int nGrow)
1812{
1813 BL_PROFILE("ParticleContainer::AddParticlesAtLevel()");
1814
1815 if (std::ssize(m_particles) < level+1)
1816 {
1817 if (Verbose())
1818 {
1819 amrex::Print() << "ParticleContainer::AddParticlesAtLevel resizing m_particles from "
1820 << m_particles.size()
1821 << " to "
1822 << level+1 << '\n';
1823 }
1824 m_particles.resize(level+1);
1825 m_dummy_mf.resize(level+1);
1826 for (int lev = 0; lev < level+1; ++lev) {
1827 RedefineDummyMF(lev);
1828 }
1829 }
1830
1831 auto& ptile = DefineAndReturnParticleTile(level, 0, 0);
1832 int old_np = ptile.size();
1833 int num_to_add = particles.size();
1834 int new_np = old_np + num_to_add;
1835 ptile.resize(new_np);
1836 amrex::copyParticles(ptile, particles, 0, old_np, num_to_add);
1837 Redistribute(level, level, nGrow);
1838 particles.resize(0);
1839}
1840
1841// This is the single-level version for cell-centered density
1842template <typename ParticleType, int NArrayReal, int NArrayInt,
1843 template<class> class Allocator, class CellAssignor>
1844void
1846AssignCellDensitySingleLevel (int rho_index,
1847 MultiFab& mf_to_be_filled,
1848 int lev,
1849 int ncomp,
1850 int particle_lvl_offset) const
1851{
1852 BL_PROFILE("ParticleContainer::AssignCellDensitySingleLevel()");
1853
1854 if (rho_index != 0) { amrex::Abort("AssignCellDensitySingleLevel only works if rho_index = 0"); }
1855
1856 MultiFab* mf_pointer;
1857
1858 if (OnSameGrids(lev, mf_to_be_filled)) {
1859 // If we are already working with the internal mf defined on the
1860 // particle_box_array, then we just work with this.
1861 mf_pointer = &mf_to_be_filled;
1862 }
1863 else {
1864 // If mf_to_be_filled is not defined on the particle_box_array, then we need
1865 // to make a temporary here and copy into mf_to_be_filled at the end.
1866 mf_pointer = new MultiFab(ParticleBoxArray(lev),
1867 ParticleDistributionMap(lev),
1868 ncomp, mf_to_be_filled.nGrow());
1869 }
1870
1871 // We must have ghost cells for each FAB so that a particle in one grid can spread
1872 // its effect to an adjacent grid by first putting the value into ghost cells of its
1873 // own grid. The mf->SumBoundary call then adds the value from one grid's ghost cell
1874 // to another grid's valid region.
1875 if (mf_pointer->nGrow() < 1) {
1876 amrex::Error("Must have at least one ghost cell when in AssignCellDensitySingleLevel");
1877 }
1878
1879 const auto strttime = amrex::second();
1880
1881 const auto dxi = Geom(lev).InvCellSizeArray();
1882 const auto plo = Geom(lev).ProbLoArray();
1883 const auto pdxi = Geom(lev + particle_lvl_offset).InvCellSizeArray();
1884
1885 if (Geom(lev).isAnyPeriodic() && ! Geom(lev).isAllPeriodic())
1886 {
1887 amrex::Error("AssignCellDensitySingleLevel: problem must be periodic in no or all directions");
1888 }
1889
1890 mf_pointer->setVal(0);
1891
1893#ifdef AMREX_USE_OMP
1894#pragma omp parallel if (Gpu::notInLaunchRegion())
1895#endif
1896 {
1897 FArrayBox local_rho;
1898 for (ParConstIter pti(*this, lev); pti.isValid(); ++pti) {
1899 const Long np = pti.numParticles();
1900 auto ptd = pti.GetParticleTile().getConstParticleTileData();
1901 FArrayBox& fab = (*mf_pointer)[pti];
1902 auto rhoarr = fab.array();
1903#ifdef AMREX_USE_OMP
1904 Box tile_box;
1905 if (Gpu::notInLaunchRegion())
1906 {
1907 tile_box = pti.tilebox();
1908 tile_box.grow(mf_pointer->nGrow());
1909 local_rho.resize(tile_box,ncomp);
1910 local_rho.setVal<RunOn::Host>(0.0);
1911 rhoarr = local_rho.array();
1912 }
1913#endif
1914
1915 if (particle_lvl_offset == 0)
1916 {
1918 {
1919 auto p = ptd[i];
1920 amrex_deposit_cic(p, ncomp, rhoarr, plo, dxi);
1921 });
1922 }
1923 else
1924 {
1926 {
1927 auto p = ptd[i];
1928 amrex_deposit_particle_dx_cic(p, ncomp, rhoarr, plo, dxi, pdxi);
1929 });
1930 }
1931
1932#ifdef AMREX_USE_OMP
1933 if (Gpu::notInLaunchRegion())
1934 {
1935 fab.atomicAdd<RunOn::Host>(local_rho, tile_box, tile_box, 0, 0, ncomp);
1936 }
1937#endif
1938 }
1939 }
1940
1941 mf_pointer->SumBoundary(Geom(lev).periodicity());
1942
1943 // If ncomp > 1, first divide the momenta (component n)
1944 // by the mass (component 0) in order to get velocities.
1945 // Be careful not to divide by zero.
1946 for (int n = 1; n < ncomp; n++)
1947 {
1948 for (MFIter mfi(*mf_pointer); mfi.isValid(); ++mfi)
1949 {
1950 (*mf_pointer)[mfi].protected_divide<RunOn::Device>((*mf_pointer)[mfi],0,n,1);
1951 }
1952 }
1953
1954 // Only multiply the first component by (1/vol) because this converts mass
1955 // to density. If there are additional components (like velocity), we don't
1956 // want to divide those by volume.
1957 const Real* dx = Geom(lev).CellSize();
1958 const Real vol = AMREX_D_TERM(dx[0], *dx[1], *dx[2]);
1959
1960 mf_pointer->mult(Real(1.0)/vol, 0, 1, mf_pointer->nGrow());
1961
1962 // If mf_to_be_filled is not defined on the particle_box_array, then we need
1963 // to copy here from mf_pointer into mf_to_be_filled.
1964 if (mf_pointer != &mf_to_be_filled)
1965 {
1966 mf_to_be_filled.ParallelCopy(*mf_pointer,0,0,ncomp,0,0);
1967 delete mf_pointer;
1968 }
1969
1970 if (m_verbose > 1)
1971 {
1972 auto stoptime = amrex::second() - strttime;
1973
1974 ParallelReduce::Max(stoptime, ParallelContext::IOProcessorNumberSub(),
1975 ParallelContext::CommunicatorSub());
1976
1977 amrex::Print() << "ParticleContainer::AssignCellDensitySingleLevel) time: "
1978 << stoptime << '\n';
1979 }
1980}
1981
1982template <typename ParticleType, int NArrayReal, int NArrayInt,
1983 template<class> class Allocator, class CellAssignor>
1984void
1986ResizeRuntimeRealComp (int new_size, bool communicate)
1987{
1988 int old_size = m_num_runtime_real;
1989
1990 m_runtime_comps_defined = (new_size > 0);
1991 m_num_runtime_real = new_size;
1992 int cur_size = h_redistribute_real_comp.size();
1993 h_redistribute_real_comp.resize(cur_size-old_size+new_size, communicate);
1994 SetParticleSize();
1995
1996 for (int lev = 0; lev < numLevels(); ++lev) {
1997 for (ParIterType pti(*this,lev); pti.isValid(); ++pti) {
1998 auto& tile = DefineAndReturnParticleTile(lev, pti);
1999 auto np = tile.numParticles();
2000 if (np > 0 && new_size > old_size) {
2001 auto& soa = tile.GetStructOfArrays();
2002 soa.resize(np);
2003 }
2004 }
2005 }
2006}
2007
2008template <typename ParticleType, int NArrayReal, int NArrayInt,
2009 template<class> class Allocator, class CellAssignor>
2010void
2012ResizeRuntimeIntComp (int new_size, bool communicate)
2013{
2014 int old_size = m_num_runtime_int;
2015
2016 m_runtime_comps_defined = (new_size > 0);
2017 m_num_runtime_int = new_size;
2018 int cur_size = h_redistribute_int_comp.size();
2019 h_redistribute_int_comp.resize(cur_size-old_size+new_size, communicate);
2020 SetParticleSize();
2021
2022 for (int lev = 0; lev < numLevels(); ++lev) {
2023 for (ParIterType pti(*this,lev); pti.isValid(); ++pti) {
2024 auto& tile = DefineAndReturnParticleTile(lev, pti);
2025 auto np = tile.numParticles();
2026 if (np > 0 && new_size > old_size) {
2027 auto& soa = tile.GetStructOfArrays();
2028 soa.resize(np);
2029 }
2030 }
2031 }
2032}
2033
2034}
#define BL_PROFILE_VAR_START(vname)
Definition AMReX_BLProfiler.H:562
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define BL_PROFILE_VAR_STOP(vname)
Definition AMReX_BLProfiler.H:563
#define BL_PROFILE_SYNC_STOP()
Definition AMReX_BLProfiler.H:645
#define BL_PROFILE_SYNC_START_TIMED(fname)
Definition AMReX_BLProfiler.H:644
#define BL_PROFILE_VAR_NS(fname, vname)
Definition AMReX_BLProfiler.H:561
#define AMREX_ALWAYS_ASSERT_WITH_MESSAGE(EX, MSG)
Definition AMReX_BLassert.H:49
#define AMREX_ASSERT(EX)
Definition AMReX_BLassert.H:38
#define AMREX_FORCE_INLINE
Definition AMReX_Extension.H:119
#define AMREX_HOST_DEVICE_FOR_1D(...)
Definition AMReX_GpuLaunchMacrosC.nolint.H:105
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
#define AMREX_GPU_HOST_DEVICE
Definition AMReX_GpuQualifiers.H:20
amrex::ParmParse pp
Input file parser instance for the given namespace.
Definition AMReX_HypreIJIface.cpp:15
#define AMREX_D_TERM(a, b, c)
Definition AMReX_SPACE.H:172
#define AMREX_D_DECL(a, b, c)
Definition AMReX_SPACE.H:171
Print on all processors of the default communicator.
Definition AMReX_Print.H:113
BaseFab< T > & atomicAdd(const BaseFab< T > &x) noexcept
Atomic FAB addition (a[i] <- a[i] + b[i]).
Definition AMReX_BaseFab.H:2481
Array4< T const > array() const noexcept
Definition AMReX_BaseFab.H:382
void setVal(T const &x, const Box &bx, int dcomp, int ncomp) noexcept
The setVal functions set sub-regions in the BaseFab to a constant value. This most general form speci...
Definition AMReX_BaseFab.H:1399
A collection of Boxes stored in an Array.
Definition AMReX_BoxArray.H:564
IndexType ixType() const noexcept
Return index type of this BoxArray.
Definition AMReX_BoxArray.H:854
BoxArray & grow(int n)
Grow each Box in the BoxArray by the specified amount.
Definition AMReX_BoxArray.cpp:706
std::vector< std::pair< int, Box > > intersections(const Box &bx) const
Return intersections of Box and BoxArray.
Definition AMReX_BoxArray.cpp:1186
Box getCellCenteredBox(int index) const noexcept
Return cell-centered box at element index of this BoxArray.
Definition AMReX_BoxArray.H:744
static bool SameRefs(const BoxArray &lhs, const BoxArray &rhs)
whether two BoxArrays share the same data
Definition AMReX_BoxArray.H:837
A class for managing a List of Boxes that share a common IndexType. This class implements operations ...
Definition AMReX_BoxList.H:52
BoxList & complementIn(const Box &b, const BoxList &bl)
Definition AMReX_BoxList.cpp:307
__host__ __device__ BoxND & grow(int i) noexcept
Definition AMReX_Box.H:641
__host__ __device__ bool contains(const IntVectND< dim > &p) const noexcept
Return true if argument is contained within BoxND.
Definition AMReX_Box.H:212
GpuArray< Real, 3 > InvCellSizeArray() const noexcept
Definition AMReX_CoordSys.H:87
A Fortran Array of REALs.
Definition AMReX_FArrayBox.H:231
void resize(const Box &b, int N=1, Arena *ar=nullptr)
For debugging purposes we hide BaseFab version and do some extra work.
Definition AMReX_FArrayBox.cpp:178
int size() const noexcept
Return the number of FABs in the FabArray.
Definition AMReX_FabArrayBase.H:110
int nGrow(int direction=0) const noexcept
Return the grow factor that defines the region of definition.
Definition AMReX_FabArrayBase.H:78
const DistributionMapping & DistributionMap() const noexcept
Return constant reference to associated DistributionMapping.
Definition AMReX_FabArrayBase.H:131
const BoxArray & boxArray() const noexcept
Return a constant reference to the BoxArray that defines the valid region associated with this FabArr...
Definition AMReX_FabArrayBase.H:95
void ParallelCopy(const FabArray< FAB > &src, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition AMReX_FabArray.H:855
Array4< typename FabArray< FAB >::value_type const > array(const MFIter &mfi) const noexcept
Definition AMReX_FabArray.H:569
void setVal(value_type val)
Set all components in the entire region of each FAB to val.
Definition AMReX_FabArray.H:2666
Rectangular problem domain geometry.
Definition AMReX_Geometry.H:75
const Box & Domain() const noexcept
Returns our rectangular domain.
Definition AMReX_Geometry.H:216
GpuArray< Real, 3 > ProbLoArray() const noexcept
Definition AMReX_Geometry.H:192
static void streamSynchronize() noexcept
Definition AMReX_GpuDevice.cpp:856
__host__ __device__ bool cellCentered() const noexcept
True if the IndexTypeND is CELL based in all directions.
Definition AMReX_IndexType.H:104
__host__ static __device__ constexpr IntVectND< dim > TheZeroVector() noexcept
This static member function returns a reference to a constant IntVectND object, all of whose dim argu...
Definition AMReX_IntVect.H:771
a one-thingy-per-box distributed object
Definition AMReX_LayoutData.H:13
Iterator for looping ever tiles and boxes of amrex::FabArray based containers.
Definition AMReX_MFIter.H:88
bool isValid() const noexcept
Is the iterator valid i.e. is it associated with a FAB?
Definition AMReX_MFIter.H:172
A collection (stored as an array) of FArrayBox objects.
Definition AMReX_MultiFab.H:40
void mult(Real val, int comp, int num_comp, int nghost=0)
Scales the value of each cell in the specified subregion of the MultiFab by the scalar val (a[i] <- a...
Definition AMReX_MultiFab.cpp:1417
Dynamically allocated vector for trivially copyable data.
Definition AMReX_PODVector.H:308
size_type size() const noexcept
Definition AMReX_PODVector.H:648
void resize(size_type a_new_size, GrowthStrategy strategy=GrowthStrategy::Poisson)
Definition AMReX_PODVector.H:728
T * dataPtr() noexcept
Definition AMReX_PODVector.H:670
Definition AMReX_ParIter.H:147
Definition AMReX_ParIter.H:118
int queryAdd(std::string_view name, T &ref)
If name is found, the value in the ParmParse database will be stored in the ref argument....
Definition AMReX_ParmParse.H:1045
int queryarr(std::string_view name, std::vector< int > &ref, int start_ix=FIRST, int num_val=ALL) const
Same as queryktharr() but searches for last occurrence of name.
Definition AMReX_ParmParse.cpp:2033
int query(std::string_view name, bool &ref, int ival=FIRST) const
Same as querykth() but searches for the last occurrence of name.
Definition AMReX_ParmParse.cpp:1947
virtual void reserveData()
Definition AMReX_ParticleContainerBase.cpp:41
virtual void resizeData()
Definition AMReX_ParticleContainerBase.cpp:46
A distributed container for Particles sorted onto the levels, grids, and tiles of a block-structured ...
Definition AMReX_ParticleContainer.H:149
void addParticles(const PCType &other, bool local=false)
Add particles from other to this ParticleContainer. local controls whether or not to call Redistribut...
Definition AMReX_ParticleContainerI.H:1156
IntVect Index(const P &p, int lev) const
Definition AMReX_ParticleContainerI.H:202
std::map< std::pair< int, int >, ParticleTileType > ParticleLevel
Definition AMReX_ParticleContainer.H:196
void SetSoACompileTimeNames(std::vector< std::string > const &rdata_name, std::vector< std::string > const &idata_name)
Definition AMReX_ParticleContainerI.H:110
typename ParticleTileType::AoS AoS
Definition AMReX_ParticleContainer.H:199
void SetParticleSize()
Definition AMReX_ParticleContainerI.H:17
void RemoveParticlesAtLevel(int level)
The Following methods are for managing Virtual and Ghost Particles.
Definition AMReX_ParticleContainerI.H:758
void clearParticles()
Clear all the particles in this container. This does not free memory.
Definition AMReX_ParticleContainerI.H:1129
void Increment(MultiFab &mf, int level)
Definition AMReX_ParticleContainerI.H:721
bool HasIntComp(std::string const &name)
Definition AMReX_ParticleContainerI.H:149
void ShrinkToFit()
Definition AMReX_ParticleContainerI.H:701
bool HasRealComp(std::string const &name)
Definition AMReX_ParticleContainerI.H:141
void resizeData() override
This resizes the vector of dummy MultiFabs used by the ParticleContainer for the current number of le...
Definition AMReX_ParticleContainerI.H:436
Long IncrementWithTotal(MultiFab &mf, int level, bool local=false)
Definition AMReX_ParticleContainerI.H:748
Long NumberOfParticlesAtLevel(int level, bool only_valid=true, bool only_local=false) const
Returns # of particles at specified the level.
Definition AMReX_ParticleContainerI.H:552
void SortParticlesByCell()
Sort the particles on each tile by cell, using Fortran ordering.
Definition AMReX_ParticleContainerI.H:1282
int GetRealCompIndex(std::string const &name)
Definition AMReX_ParticleContainerI.H:162
int GetIntCompIndex(std::string const &name)
Definition AMReX_ParticleContainerI.H:184
void RemoveParticlesNotAtFinestLevel()
Definition AMReX_ParticleContainerI.H:772
Vector< Long > NumberOfParticlesInGrid(int level, bool only_valid=true, bool only_local=false) const
Definition AMReX_ParticleContainerI.H:497
void copyParticles(const PCType &other, bool local=false)
Copy particles from other to this ParticleContainer. Will clear all the particles from this container...
Definition AMReX_ParticleContainerI.H:1145
ParticleLocData Reset(ParticleType &prt, bool update, bool verbose=true, ParticleLocData pld=ParticleLocData()) const
Updates a particle's location (Where), tries to periodic shift any particles that have left the domai...
Definition AMReX_ParticleContainerI.H:393
void CapacityOfParticlesInGrid(LayoutData< I > &mem, int lev) const
Return capacity of memory for particles at specific grid.
Definition AMReX_ParticleContainerI.H:594
std::conditional_t< is_rtsoa_pc, ParticleTileRT< typename ParticleType::RealType, typename ParticleType::IntType >, ParticleTile< ParticleType, NArrayReal, NArrayInt, Allocator > > ParticleTileType
Definition AMReX_ParticleContainer.H:191
Long TotalNumberOfParticles(bool only_valid=true, bool only_local=false) const
Returns # of particles at all levels.
Definition AMReX_ParticleContainerI.H:482
void reserveData() override
This reserves data in the vector of dummy MultiFabs used by the ParticleContainer for the maximum num...
Definition AMReX_ParticleContainerI.H:427
T_ParticleType ParticleType
Definition AMReX_ParticleContainer.H:151
Definition AMReX_ParticleLocator.H:123
void build(const BoxArray &ba, const Geometry &geom, bool a_do_tiling=false, const IntVect &a_tile_size=IntVect(1024000, 1024000, 1024000))
Definition AMReX_ParticleLocator.H:130
AssignGrid< BinIteratorFactory > getGridAssignor() const noexcept
Definition AMReX_ParticleLocator.H:206
This class provides the user with a few print options.
Definition AMReX_Print.H:35
Definition AMReX_Reduce.H:453
Type value()
Definition AMReX_Reduce.H:488
Definition AMReX_Reduce.H:612
std::enable_if_t< IsFabArray< MF >::value > eval(MF const &mf, IntVect const &nghost, D &reduce_data, F &&f)
Definition AMReX_Reduce.H:748
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
Long size() const noexcept
Definition AMReX_Vector.H:53
A Collection of IArrayBoxes.
Definition AMReX_iMultiFab.H:34
Long sum(int comp, int nghost=0, bool local=false) const
Returns the sum in component comp.
Definition AMReX_iMultiFab.cpp:454
amrex_real Real
Floating Point Type for Fields.
Definition AMReX_REAL.H:79
amrex_particle_real ParticleReal
Floating Point Type for Particles.
Definition AMReX_REAL.H:90
amrex_long Long
Definition AMReX_INT.H:30
T ExclusiveSum(N n, T const *in, T *out, RetSum a_ret_sum=retSum)
Exclusive sum.
Definition AMReX_Scan.H:1042
void SumBoundary(const Periodicity &period=Periodicity::NonPeriodic(), bool deterministic=false)
Sum values in overlapped cells.
Definition AMReX_FabArray.H:3597
__host__ __device__ BoxND< dim > grow(const BoxND< dim > &b, int i) noexcept
Grow BoxND in all directions by given amount.
Definition AMReX_Box.H:1280
Arena * The_Comms_Arena()
Definition AMReX_Arena.cpp:880
void Min(KeyValuePair< K, V > &vi, int root, MPI_Comm comm)
Definition AMReX_ParallelReduce.H:287
void Max(KeyValuePair< K, V > &vi, int root, MPI_Comm comm)
Definition AMReX_ParallelReduce.H:254
void Sum(T &v, MPI_Comm comm)
Definition AMReX_ParallelReduce.H:221
void Sum(T &v, int root, MPI_Comm comm)
Definition AMReX_ParallelReduce.H:352
__host__ __device__ AMREX_FORCE_INLINE void AddNoRet(T *sum, T value) noexcept
Definition AMReX_GpuAtomic.H:283
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:310
void dtoh_memcpy_async(void *p_h, const void *p_d, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:435
void htod_memcpy_async(void *p_d, const void *p_h, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:421
void QueueReduction(Func f)
Definition AMReX_Lazy.cpp:7
constexpr Long GhostParticleID
Definition AMReX_Particle.H:19
constexpr Long VirtualParticleID
Definition AMReX_Particle.H:20
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition AMReX_ParallelContext.H:70
int MyProcSub() noexcept
my sub-rank in current frame
Definition AMReX_ParallelContext.H:76
int IOProcessorNumberSub() noexcept
IO sub-rank in current frame.
Definition AMReX_ParallelContext.H:78
bool UseGpuAwareMpi()
Definition AMReX_ParallelDescriptor.H:113
void Bcast(void *, int, MPI_Datatype, int, MPI_Comm)
Definition AMReX_ParallelDescriptor.cpp:1295
void GatherLayoutDataToVector(const LayoutData< T > &sendbuf, Vector< T > &recvbuf, int root)
Gather LayoutData values to a vector on root.
Definition AMReX_ParallelDescriptor.H:1295
static constexpr RetSum retSum
Definition AMReX_Scan.H:32
Definition AMReX_Amr.cpp:50
int nComp(FabArrayBase const &fa)
Definition AMReX_FabArrayBase.cpp:2852
__host__ __device__ void swapParticle(const ParticleTileData< T_ParticleType, NAR, NAI > &dst, const ParticleTileData< T_ParticleType, NAR, NAI > &src, int src_i, int dst_i) noexcept
A general single particle swapping routine that can run on the GPU.
Definition AMReX_ParticleTransformation.H:120
__host__ __device__ int getTileIndex(const IntVect &iv, const Box &box, const bool a_do_tiling, const IntVect &a_tile_size, Box &tbx)
Definition AMReX_ParticleUtil.H:185
void communicateParticlesStart(const PC &pc, ParticleCopyPlan &plan, const SndBuffer &snd_buffer, RcvBuffer &rcv_buffer)
Definition AMReX_ParticleCommunication.H:909
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition AMReX_CTOParallelForImpl.H:193
__host__ __device__ void copyParticle(const ParticleTileData< T_ParticleType, NAR, NAI > &dst, const ConstParticleTileData< T_ParticleType, NAR, NAI > &src, int src_i, int dst_i) noexcept
A general single particle copying routine that can run on the GPU.
Definition AMReX_ParticleTransformation.H:32
void unpackRemotes(PC &pc, const ParticleCopyPlan &plan, Buffer &rcv_buffer, UnpackPolicy const &policy)
Definition AMReX_ParticleCommunication.H:1009
void copyParticles(DstTile &dst, const SrcTile &src) noexcept
Copy particles from src to dst. This version copies all the particles, writing them to the beginning ...
Definition AMReX_ParticleTransformation.H:222
int partitionParticlesByDest(PTile &ptile, const PLocator &ploc, CellAssignor const &assignor, const ParticleBufferMap &pmap, const GpuArray< Real, 3 > &plo, const GpuArray< Real, 3 > &phi, const GpuArray< ParticleReal, 3 > &rlo, const GpuArray< ParticleReal, 3 > &rhi, const GpuArray< int, 3 > &is_per, int lev, int gid, int tid, int lev_min, int lev_max, int nGrow, bool remove_negative)
Definition AMReX_ParticleUtil.H:648
Index filterAndTransformParticles(DstTile &dst, const SrcTile &src, Index *mask, F const &f, Index src_start, Index dst_start) noexcept
Conditionally copy particles from src to dst based on the value of mask. A transformation will also b...
Definition AMReX_ParticleTransformation.H:519
void ReorderParticles(PTile &ptile, const index_type *permutations)
Reorder particles on the tile ptile using a the permutations array.
Definition AMReX_ParticleUtil.H:947
__host__ __device__ bool enforcePeriodic(P &p, amrex::GpuArray< amrex::Real, 3 > const &plo, amrex::GpuArray< amrex::Real, 3 > const &phi, amrex::GpuArray< amrex::ParticleReal, 3 > const &rlo, amrex::GpuArray< amrex::ParticleReal, 3 > const &rhi, amrex::GpuArray< int, 3 > const &is_per) noexcept
Definition AMReX_ParticleUtil.H:417
__host__ __device__ int numTilesInBox(const Box &box, const bool a_do_tiling, const IntVect &a_tile_size)
Definition AMReX_ParticleUtil.H:233
BoxND< 3 > Box
Box is an alias for amrex::BoxND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:30
double second() noexcept
Definition AMReX_Utility.cpp:940
void communicateParticlesFinish(const ParticleCopyPlan &plan)
Definition AMReX_ParticleCommunication.cpp:445
Index filterParticles(DstTile &dst, const SrcTile &src, const Index *mask) noexcept
Conditionally copy particles from src to dst based on the value of mask.
Definition AMReX_ParticleTransformation.H:393
IntVectND< 3 > IntVect
IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:33
void ParticleToMesh(PC const &pc, const Vector< MultiFab * > &mf, int lev_min, int lev_max, F &&f, bool zero_out_input=true, bool vol_weight=true)
Deposit particles onto a hierarchy of MultiFabs.
Definition AMReX_AmrParticles.H:188
void Error(const std::string &msg)
Print out message to cerr and exit via amrex::Abort().
Definition AMReX.cpp:235
void gatherParticles(PTile &dst, const PTile &src, N np, const Index *inds)
Gather particles copies particles into contiguous order from an arbitrary order. Specifically,...
Definition AMReX_ParticleTransformation.H:739
int numParticlesOutOfRange(Iterator const &pti, int nGrow)
Returns the number of particles that are more than nGrow cells from the box correspond to the input i...
Definition AMReX_ParticleUtil.H:35
int Verbose() noexcept
Definition AMReX.cpp:181
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition AMReX.cpp:241
const int[]
Definition AMReX_BLProfiler.cpp:1664
void transformParticles(DstTile &dst, const SrcTile &src, F &&f) noexcept
Apply the function f to all the particles in src, writing the result to dst. This version does all th...
Definition AMReX_ParticleTransformation.H:274
void unpackBuffer(PC &pc, const ParticleCopyPlan &plan, const Buffer &snd_buffer, UnpackPolicy const &policy)
Definition AMReX_ParticleCommunication.H:782
void packBuffer(const PC &pc, const ParticleCopyOp &op, const ParticleCopyPlan &plan, Buffer &snd_buffer)
Definition AMReX_ParticleCommunication.H:582
__host__ __device__ IntVect getParticleCell(P const &p, amrex::GpuArray< amrex::Real, 3 > const &plo, amrex::GpuArray< amrex::Real, 3 > const &dxi) noexcept
Returns the cell index for a given particle using the provided lower bounds and cell sizes.
Definition AMReX_ParticleUtil.H:337
Definition AMReX_ParticleLocator.H:248
A multidimensional array accessor.
Definition AMReX_Array4.H:283
Definition AMReX_ParticleContainerI.H:1039
amrex::AmrAssignGrid< amrex::DenseBinIteratorFactory< amrex::Box > > m_assign_grid
Definition AMReX_ParticleContainerI.H:1042
int m_lev_max
Definition AMReX_ParticleContainerI.H:1041
AssignGridFilter(amrex::AmrAssignGrid< amrex::DenseBinIteratorFactory< amrex::Box > > assign_grid, int gid, int level, int nGrow)
This filters based on matching grids.
Definition AMReX_ParticleContainerI.H:1048
int m_nGrow
Definition AMReX_ParticleContainerI.H:1041
int m_lev_min
Definition AMReX_ParticleContainerI.H:1041
AMREX_GPU_HOST_DEVICE int operator()(const SrcData &src, int src_i) const noexcept
Definition AMReX_ParticleContainerI.H:1054
int m_gid
Definition AMReX_ParticleContainerI.H:1041
Definition AMReX_ParticleLocator.H:17
Definition AMReX_ParticleUtil.H:390
Definition AMReX_DenseBins.H:32
Definition AMReX_ParticleContainerI.H:800
Box m_domain
Definition AMReX_ParticleContainerI.H:804
GpuArray< Real, 3 > m_dxi
Definition AMReX_ParticleContainerI.H:803
FilterVirt(const amrex::AssignGrid< amrex::DenseBinIteratorFactory< amrex::Box > > &assign_buffer_grid, const GpuArray< Real, 3 > &plo, const GpuArray< Real, 3 > &dxi, const Box &domain)
Definition AMReX_ParticleContainerI.H:806
AMREX_GPU_HOST_DEVICE int operator()(const SrcData &src, int src_i) const noexcept
Definition AMReX_ParticleContainerI.H:813
amrex::AssignGrid< amrex::DenseBinIteratorFactory< amrex::Box > > m_assign_buffer_grid
Definition AMReX_ParticleContainerI.H:802
GpuArray< Real, 3 > m_plo
Definition AMReX_ParticleContainerI.H:803
Definition AMReX_ParticleUtil.H:304
Fixed-size array that can be used on GPU.
Definition AMReX_Array.H:43
Definition AMReX_GpuControl.H:180
Definition AMReX_ParticleCommunication.H:92
Vector< int > m_rcv_box_ids
Definition AMReX_ParticleCommunication.H:366
Vector< int > m_rcv_box_counts
Definition AMReX_ParticleCommunication.H:364
Vector< int > m_rcv_box_levs
Definition AMReX_ParticleCommunication.H:369
Vector< int > m_rcv_box_tids
Definition AMReX_ParticleCommunication.H:367
int m_nrcvs
Definition AMReX_ParticleCommunication.H:372
Gpu::HostVector< unsigned int > m_box_counts_h
Definition AMReX_ParticleCommunication.H:361
A struct used for storing a particle's position in the AMR hierarchy.
Definition AMReX_ParticleContainer.H:93
Box m_grown_gridbox
Definition AMReX_ParticleContainer.H:100
IntVect m_cell
Definition AMReX_ParticleContainer.H:97
int m_grid
Definition AMReX_ParticleContainer.H:95
int m_tile
Definition AMReX_ParticleContainer.H:96
int m_lev
Definition AMReX_ParticleContainer.H:94
Box m_tilebox
Definition AMReX_ParticleContainer.H:99
Box m_gridbox
Definition AMReX_ParticleContainer.H:98
The struct used to store particles.
Definition AMReX_Particle.H:405
__host__ __device__ RealVect pos() const &
Definition AMReX_Particle.H:456
Definition AMReX_ParticleContainerI.H:1066
AMREX_GPU_HOST_DEVICE void operator()(DstData &dst, const SrcData &src, int src_i, int dst_i) const noexcept
Definition AMReX_ParticleContainerI.H:1070
Definition AMReX_ParticleContainerI.H:821
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void operator()(DstData &dst, const SrcData &src, int src_i, int dst_i) const noexcept
Definition AMReX_ParticleContainerI.H:824