Block-Structured AMR Software Framework (AMReX)
AMReX_NeighborParticlesI.H — implementation of NeighborParticleContainer_impl
(neighbor/ghost-particle caching, communication setup, and neighbor-list construction).
1
2#include <iterator>
3
4namespace amrex {
5
6namespace detail {
7template <class>
8inline constexpr bool always_false_v = false;
9}
10
11template <typename T_ParticleType, int NArrayReal, int NArrayInt,
12 template<class> class Allocator, class CellAssignor>
14
15template <typename T_ParticleType, int NArrayReal, int NArrayInt,
16 template<class> class Allocator, class CellAssignor>
18
19template <typename T_ParticleType, int NArrayReal, int NArrayInt,
20 template<class> class Allocator, class CellAssignor>
21NeighborParticleContainer_impl<T_ParticleType, NArrayReal, NArrayInt, Allocator, CellAssignor>
22::NeighborParticleContainer_impl (ParGDBBase* gdb, int ncells)
24 m_num_neighbor_cells(ncells)
25{
27}
28
29template <typename T_ParticleType, int NArrayReal, int NArrayInt,
30 template<class> class Allocator, class CellAssignor>
33 const DistributionMapping & dmap,
34 const BoxArray & ba,
35 int nneighbor)
36 : ParticleContainerType(geom, dmap, ba),
37 m_num_neighbor_cells(nneighbor)
38{
40}
41
42template <typename T_ParticleType, int NArrayReal, int NArrayInt,
43 template<class> class Allocator, class CellAssignor>
46 const Vector<DistributionMapping> & dmap,
47 const Vector<BoxArray> & ba,
48 const Vector<int> & rr,
49 int nneighbor)
50 : ParticleContainerType(geom, dmap, ba, rr),
51 m_num_neighbor_cells(nneighbor)
52{
54}
55
56template <typename T_ParticleType, int NArrayReal, int NArrayInt,
57 template<class> class Allocator, class CellAssignor>
58void
61 for (int ii = 0; ii < numRealCommComps(); ++ii) {
62 ghost_real_comp.push_back(1);
63 }
64 for (int ii = 0; ii < numIntCommComps(); ++ii) {
65 ghost_int_comp.push_back(1);
66 }
67 calcCommSize();
68}
69
70template <typename T_ParticleType, int NArrayReal, int NArrayInt,
71 template<class> class Allocator, class CellAssignor>
72void
74::setRealCommComp (int i, bool value) {
75 ghost_real_comp[i] = value;
76 calcCommSize();
77}
78
79template <typename T_ParticleType, int NArrayReal, int NArrayInt,
80 template<class> class Allocator, class CellAssignor>
81void
83::setIntCommComp (int i, bool value) {
84 ghost_int_comp[i] = value;
85 calcCommSize();
86}
87
88template <typename T_ParticleType, int NArrayReal, int NArrayInt,
89 template<class> class Allocator, class CellAssignor>
90void
93 size_t comm_size = 0;
94 for (int ii = 0; ii < numRealCommComps(); ++ii) {
95 if (ghost_real_comp[ii]) {
96 comm_size += sizeof(ParticleReal);
97 }
98 }
99 for (int ii = 0; ii < numIntCommComps(); ++ii) {
100 if (ghost_int_comp[ii]) {
101 comm_size += sizeof(int);
102 }
103 }
104 if ( enableInverse() ) { comm_size += 4*sizeof(int); }
105 cdata_size = comm_size;
106}
107
108template <typename T_ParticleType, int NArrayReal, int NArrayInt,
109 template<class> class Allocator, class CellAssignor>
110void
112::Regrid (const DistributionMapping &dmap, const BoxArray &ba ) {
113 const int lev = 0;
114 AMREX_ASSERT(this->finestLevel() == 0);
115 this->SetParticleBoxArray(lev, ba);
116 this->SetParticleDistributionMap(lev, dmap);
117 this->Redistribute();
118}
119
120template <typename T_ParticleType, int NArrayReal, int NArrayInt,
121 template<class> class Allocator, class CellAssignor>
122void
124::Regrid (const DistributionMapping &dmap, const BoxArray &ba, int lev) {
125 AMREX_ASSERT(lev <= this->finestLevel());
126 this->SetParticleBoxArray(lev, ba);
127 this->SetParticleDistributionMap(lev, dmap);
128 this->Redistribute();
129}
130
131template <typename T_ParticleType, int NArrayReal, int NArrayInt,
132 template<class> class Allocator, class CellAssignor>
133void
136 AMREX_ASSERT(ba.size() == this->finestLevel()+1);
137 for (int lev = 0; lev < this->numLevels(); ++lev)
138 {
139 this->SetParticleBoxArray(lev, ba[lev]);
140 this->SetParticleDistributionMap(lev, dmap[lev]);
141 }
142 this->Redistribute();
143}
144
145template <typename T_ParticleType, int NArrayReal, int NArrayInt,
146 template<class> class Allocator, class CellAssignor>
147bool
150
151 BL_PROFILE("NeighborParticleContainer::areMasksValid");
152
153 resizeContainers(this->numLevels());
154
155 for (int lev = 0; lev < this->numLevels(); ++lev)
156 {
157 BoxArray ba = this->ParticleBoxArray(lev);
158 const DistributionMapping& dmap = this->ParticleDistributionMap(lev);
159
160 if (mask_ptr[lev] == nullptr ||
161 ! BoxArray::SameRefs(mask_ptr[lev]->boxArray(), ba) ||
162 ! DistributionMapping::SameRefs(mask_ptr[lev]->DistributionMap(), dmap))
163 {
164 return false;
165 }
166 }
167 return true;
168}
169
170template <typename T_ParticleType, int NArrayReal, int NArrayInt,
171 template<class> class Allocator, class CellAssignor>
172void
175
176 BL_PROFILE("NeighborParticleContainer::BuildMasks");
177
178 if (this->numLevels() == 1) { use_mask = true; }
179 else { use_mask = false; }
180
181 resizeContainers(this->numLevels());
182
183 for (int lev = 0; lev < this->numLevels(); ++lev)
184 {
185 BoxArray ba = this->ParticleBoxArray(lev);
186 const DistributionMapping& dmap = this->ParticleDistributionMap(lev);
187
188 const Geometry& geom = this->Geom(lev);
189
190 mask_ptr[lev] = std::make_unique<iMultiFab>(ba, dmap, int(num_mask_comps), m_num_neighbor_cells);
191 mask_ptr[lev]->setVal(-1, m_num_neighbor_cells);
192
193#ifdef AMREX_USE_OMP
194#pragma omp parallel
195#endif
196 for (MFIter mfi(*mask_ptr[lev],this->do_tiling ? this->tile_size : IntVect::TheZeroVector());
197 mfi.isValid(); ++mfi) {
198 const Box& box = mfi.tilebox();
199 const int grid_id = mfi.index();
200 const int tile_id = mfi.LocalTileIndex();
201 (*mask_ptr[lev])[mfi].template setVal<RunOn::Host>(grid_id, box, MaskComps::grid, 1);
202 (*mask_ptr[lev])[mfi].template setVal<RunOn::Host>(tile_id, box, MaskComps::tile, 1);
203 (*mask_ptr[lev])[mfi].template setVal<RunOn::Host>(lev , box, MaskComps::level, 1);
204 }
205
206 mask_ptr[lev]->FillBoundary(geom.periodicity());
207 }
208}
209
210template <typename T_ParticleType, int NArrayReal, int NArrayInt,
211 template<class> class Allocator, class CellAssignor>
212void
215{
216 BL_PROFILE("NeighborParticleContainer::GetNeighborCommTags");
217
218 local_neighbors.clear();
219 neighbor_procs.clear();
220
221 if (use_mask)
222 {
223 AMREX_ASSERT(this->finestLevel() == 0);
224 const int lev = 0;
225 for (MFIter mfi(*mask_ptr[lev],this->do_tiling ? this->tile_size : IntVect::TheZeroVector());
226 mfi.isValid(); ++mfi) {
227 const Box& box = mfi.growntilebox();
228 for (IntVect iv = box.smallEnd(); iv <= box.bigEnd(); box.next(iv)) {
229 const int grid = (*mask_ptr[lev])[mfi](iv, MaskComps::grid);
230 if (grid >= 0) {
231 const int tile = (*mask_ptr[lev])[mfi](iv, MaskComps::tile);
232 const int level = (*mask_ptr[lev])[mfi](iv, MaskComps::level);
233 const int global_proc = this->ParticleDistributionMap(level)[grid];
234 const int proc = ParallelContext::global_to_local_rank(global_proc);
235 NeighborCommTag comm_tag(proc, level, grid, tile);
236 local_neighbors.push_back(comm_tag);
237 if (proc != ParallelContext::MyProcSub()) {
238 neighbor_procs.push_back(proc);
239 }
240 }
241 }
242 }
243 }
244 else
245 {
246 for (int lev = 0; lev < this->numLevels(); ++lev)
247 {
248 for (MFIter mfi(*mask_ptr[lev],this->do_tiling ? this->tile_size : IntVect::TheZeroVector());
249 mfi.isValid(); ++mfi) {
250 const Box& box = mfi.validbox();
251 Vector<NeighborCommTag> comm_tags;
252 GetCommTagsBox(comm_tags, lev, box);
253 for (auto const& tag : comm_tags) {
254 local_neighbors.push_back(tag);
255 if (tag.proc_id != ParallelContext::MyProcSub()) {
256 neighbor_procs.push_back(tag.proc_id);
257 }
258 }
259 }
260 }
261 }
262
263 RemoveDuplicates(local_neighbors);
264 RemoveDuplicates(neighbor_procs);
265}
266
267template <typename T_ParticleType, int NArrayReal, int NArrayInt,
268 template<class> class Allocator, class CellAssignor>
271::computeRefFac (int src_lev, int lev)
272{
273 IntVect ref_fac(1);
274 if (src_lev < lev) {
275 for (int l = src_lev; l < lev; ++l) {
276 ref_fac *= this->GetParGDB()->refRatio(l);
277 }
278 } else if (src_lev > lev) {
279 for (int l = src_lev; l > lev; --l) {
280 ref_fac *= this->GetParGDB()->refRatio(l-1);
281 }
282 ref_fac *= -1;
283 }
284 return ref_fac;
285}
286
287template <typename T_ParticleType, int NArrayReal, int NArrayInt,
288 template<class> class Allocator, class CellAssignor>
289void
291::GetCommTagsBox (Vector<NeighborCommTag>& tags, int src_lev, const Box& in_box)
292{
293 std::vector< std::pair<int, Box> > isects;
294 Box tbx;
295
296 for (int lev = 0; lev < this->numLevels(); ++lev) {
297 Box box = in_box;
298 const IntVect& ref_fac = computeRefFac(src_lev, lev);
299 if (ref_fac < IntVect::TheZeroVector())
300 {
301 box.coarsen(-1*ref_fac);
302 }
303 else if (ref_fac > IntVect::TheZeroVector())
304 {
305 box.refine(ref_fac);
306 }
307 box.grow(computeRefFac(0, lev)*m_num_neighbor_cells);
308 const Periodicity& periodicity = this->Geom(lev).periodicity();
309 const std::vector<IntVect>& pshifts = periodicity.shiftIntVect();
310 const BoxArray& ba = this->ParticleBoxArray(lev);
311
312 for (auto const& pshift : pshifts)
313 {
314 const Box& pbox = box + pshift;
315 bool first_only = false;
316 ba.intersections(pbox, isects, first_only, 0);
317 for (const auto& isec : isects) {
318 const int grid = isec.first;
319 const int global_proc = this->ParticleDistributionMap(lev)[grid];
320 const int proc = ParallelContext::global_to_local_rank(global_proc);
321 for (IntVect iv = pbox.smallEnd(); iv <= pbox.bigEnd(); pbox.next(iv))
322 {
323 if (ba[grid].contains(iv))
324 {
325 int tile = getTileIndex(iv, ba[grid],
326 this->do_tiling, this->tile_size, tbx);
327 tags.push_back(NeighborCommTag(proc, lev, grid, tile));
328 }
329 }
330 }
331 }
332 }
333}
334
335template <typename T_ParticleType, int NArrayReal, int NArrayInt,
336 template<class> class Allocator, class CellAssignor>
337void
340
341 BL_PROFILE("NeighborParticleContainer::cacheNeighborInfo");
342
343 AMREX_ASSERT(this->OK());
344
345 resizeContainers(this->numLevels());
346
347 clearNeighbors();
348
349 AMREX_ASSERT(hasNeighbors() == false);
350
351 const int MyProc = ParallelContext::MyProcSub();
352
354 std::map<NeighborCommTag, Vector<NeighborIndexMap> > remote_map;
355
356 // tmp data structures used for OMP reduction
358 std::map<NeighborCommTag, Vector<Vector<NeighborIndexMap> > > tmp_remote_map;
359
360 local_map.resize(this->numLevels());
361 tmp_local_map.resize(this->numLevels());
362
363 int num_threads = OpenMP::get_max_threads();
364
365 for (int lev = 0; lev < this->numLevels(); ++lev) {
366 // resize our temporaries in serial
367 for (int i = 0; i < std::ssize(local_neighbors); ++i) {
368 const NeighborCommTag& comm_tag = local_neighbors[i];
369 tmp_remote_map[comm_tag].resize(num_threads);
370 remote_map[comm_tag];
371 PairIndex index(comm_tag.grid_id, comm_tag.tile_id);
372 tmp_local_map[lev][index].resize(num_threads);
373 local_map[lev][index];
374 buffer_tag_cache[lev][index].resize(num_threads);
375 }
376 }
377
378 for (int lev = 0; lev < this->numLevels(); ++lev) {
379 // First pass - each thread collects the NeighborIndexMaps it owes to other
380 // grids / tiles / procs
381#ifdef AMREX_USE_OMP
382#pragma omp parallel
383#endif
384 {
386 tags.reserve(AMREX_D_TERM(3, *3, *3));
387 for (MyParIter pti(*this, lev); pti.isValid(); ++pti) {
388 int thread_num = OpenMP::get_thread_num();
389 const int& grid = pti.index();
390 const int& tile = pti.LocalTileIndex();
391 PairIndex src_index(grid, tile);
392
393 NeighborCopyTag src_tag(lev, grid, tile);
394
395 auto& cache = buffer_tag_cache[lev][src_index][thread_num];
396
397 auto const ptd = pti.GetParticleTile().getConstParticleTileData();
398 for (int i = 0; i < pti.numParticles(); ++i) {
399 getNeighborTags(tags, ptd[i], m_num_neighbor_cells, src_tag, pti);
400
401 // Add neighbors to buffers
402 for (int j = 0; j < std::ssize(tags); ++j) {
403 NeighborCopyTag& tag = tags[j];
404 PairIndex dst_index(tag.grid, tag.tile);
405 if (tag.grid < 0) { continue; }
406
407 tag.src_index = i;
408 const int cache_index = cache.size();
409 cache.push_back(tag);
410
411 const int global_who = this->ParticleDistributionMap(tag.level)[tag.grid];
412 const int who = ParallelContext::global_to_local_rank(global_who);
413 NeighborIndexMap nim(tag.level, dst_index.first, dst_index.second, -1,
414 lev, src_index.first, src_index.second,
415 cache_index, thread_num);
416 if (who == MyProc) {
417 auto& tmp = tmp_local_map[tag.level][dst_index];
418 Vector<NeighborIndexMap>& buffer = tmp[thread_num];
419 buffer.push_back(nim);
420 } else {
421 NeighborCommTag comm_tag(who, tag.level, tag.grid, tag.tile);
422 Vector<NeighborIndexMap>& buffer = tmp_remote_map[comm_tag][thread_num];
423 buffer.push_back(nim);
424 }
425 }
426 tags.clear();
427 }
428 }
429 }
430 }
431
432 for (int lev = 0; lev < this->numLevels(); ++lev) {
433 // second pass - for each tile, collect the neighbors owed from all threads
434#ifdef AMREX_USE_OMP
435#pragma omp parallel
436#endif
437 for (MFIter mfi = this->MakeMFIter(lev); mfi.isValid(); ++mfi) {
438 const int grid = mfi.index();
439 const int tile = mfi.LocalTileIndex();
440 PairIndex index(grid, tile);
441 for (int i = 0; i < num_threads; ++i) {
442 local_map[lev][index].insert(local_map[lev][index].end(),
443 tmp_local_map[lev][index][i].begin(),
444 tmp_local_map[lev][index][i].end());
445 tmp_local_map[lev][index][i].erase(tmp_local_map[lev][index][i].begin(),
446 tmp_local_map[lev][index][i].end());
447 }
448 }
449 }
450
451 // do the same for the remote neighbors
452 typename std::map<NeighborCommTag, Vector<Vector<NeighborIndexMap> > >::iterator it;
453#ifdef AMREX_USE_OMP
454#pragma omp parallel
455#pragma omp single nowait
456#endif
457 for (it=tmp_remote_map.begin(); it != tmp_remote_map.end(); it++) {
458#ifdef AMREX_USE_OMP
459#pragma omp task firstprivate(it)
460#endif
461 {
462 const NeighborCommTag& tag = it->first;
463 Vector<Vector<NeighborIndexMap> >& tmp = it->second;
464 for (int i = 0; i < num_threads; ++i) {
465 remote_map[tag].insert(remote_map[tag].end(), tmp[i].begin(), tmp[i].end());
466 tmp[i].erase(tmp[i].begin(), tmp[i].end());
467 }
468 }
469 }
470
471 for (int lev = 0; lev < this->numLevels(); ++lev) {
472 // now for the local neighbors, allocate buffers and cache
473 for (MFIter mfi = this->MakeMFIter(lev); mfi.isValid(); ++mfi) {
474 const int grid = mfi.index();
475 const int tile = mfi.LocalTileIndex();
476 PairIndex dst_index(grid, tile);
477 const Vector<NeighborIndexMap>& map = local_map[lev][dst_index];
478 const int num_ghosts = map.size();
479 neighbors[lev][dst_index].define(this->NumRuntimeRealComps(),
480 this->NumRuntimeIntComps(),
481 nullptr, nullptr, this->arena());
482 neighbors[lev][dst_index].resize(num_ghosts);
483 local_neighbor_sizes[lev][dst_index] = neighbors[lev][dst_index].size();
484 }
485 }
486
487 for (int lev = 0; lev < this->numLevels(); ++lev) {
488 for (MFIter mfi = this->MakeMFIter(lev); mfi.isValid(); ++mfi) {
489 const int grid = mfi.index();
490 const int tile = mfi.LocalTileIndex();
491 PairIndex dst_index(grid, tile);
492 const Vector<NeighborIndexMap>& map = local_map[lev][dst_index];
493 const int num_ghosts = map.size();
494#ifdef AMREX_USE_OMP
495#pragma omp parallel for
496#endif
497 for (int i = 0; i < num_ghosts; ++i) {
498 const NeighborIndexMap& nim = map[i];
499 PairIndex src_index(nim.src_grid, nim.src_tile);
500 Vector<NeighborCopyTag>& tags = buffer_tag_cache[nim.src_level][src_index][nim.thread_num];
501 AMREX_ASSERT(nim.src_index < tags.size());
502 tags[nim.src_index].dst_index = i;
503 AMREX_ASSERT(size_t(tags[nim.src_index].dst_index) < neighbors[nim.dst_level][dst_index].size());
504 }
505 }
506 }
507
508 // now we allocate the send buffers and cache the remotes
509 std::map<int, int> tile_counts;
510 for (const auto& kv: remote_map) {
511 tile_counts[kv.first.proc_id] += 1;
512 }
513
514 for (const auto& kv: remote_map) {
515 if (kv.first.proc_id == MyProc) { continue; }
516 Vector<char>& buffer = send_data[kv.first.proc_id];
517 buffer.resize(sizeof(int));
518 std::memcpy(buffer.data(), &tile_counts[kv.first.proc_id], sizeof(int));
519 }
520
521 for (auto& kv : remote_map) {
522 if (kv.first.proc_id == MyProc) { continue; }
523 int np = kv.second.size();
524 int data_size = np * cdata_size;
525 Vector<char>& buffer = send_data[kv.first.proc_id];
526 size_t old_size = buffer.size();
527 size_t new_size = buffer.size() + 4*sizeof(int) + data_size;
528 buffer.resize(new_size);
529 char* dst = &buffer[old_size];
530 std::memcpy(dst, &(kv.first.level_id), sizeof(int)); dst += sizeof(int);
531 std::memcpy(dst, &(kv.first.grid_id ), sizeof(int)); dst += sizeof(int);
532 std::memcpy(dst, &(kv.first.tile_id ), sizeof(int)); dst += sizeof(int);
533 std::memcpy(dst, &data_size, sizeof(int)); dst += sizeof(int);
534 size_t buffer_offset = old_size + 4*sizeof(int);
535#ifdef AMREX_USE_OMP
536#pragma omp parallel for
537#endif
538 for (int i = 0; i < np; ++i) {
539 const NeighborIndexMap& nim = kv.second[i];
540 PairIndex src_index(nim.src_grid, nim.src_tile);
541 Vector<NeighborCopyTag>& tags = buffer_tag_cache[nim.src_level][src_index][nim.thread_num];
542 tags[nim.src_index].dst_index = buffer_offset + i*cdata_size;
543 }
544 }
545
546 if ( enableInverse() )
547 {
548 for (int lev = 0; lev < this->numLevels(); ++lev)
549 {
550 for (const auto& kv : neighbors[lev])
551 {
552 inverse_tags[lev][kv.first].resize(kv.second.size());
553 }
554 }
555 }
556}
557
558template <typename T_ParticleType, int NArrayReal, int NArrayInt,
559 template<class> class Allocator, class CellAssignor>
560template <typename P>
561void
564 int nGrow, const NeighborCopyTag& src_tag, const MyParIter& pti)
565{
566 getNeighborTags(tags, p, IntVect(AMREX_D_DECL(nGrow, nGrow, nGrow)), src_tag, pti);
567}
568
569template <typename T_ParticleType, int NArrayReal, int NArrayInt,
570 template<class> class Allocator, class CellAssignor>
571template <typename P>
572void
575 const IntVect& nGrow, const NeighborCopyTag& src_tag, const MyParIter& pti)
576{
577 Box shrink_box = pti.tilebox();
578 shrink_box.grow(-nGrow);
579
580 if (use_mask) {
581 const BaseFab<int>& mask = (*mask_ptr[src_tag.level])[src_tag.grid];
582 AMREX_ASSERT(this->finestLevel() == 0);
583 AMREX_ASSERT(src_tag.level == 0);
584
585 const int lev = 0;
586 const IntVect& iv = this->Index(p, lev);
587 if (shrink_box.contains(iv)) { return; }
588
589 const Periodicity& periodicity = this->Geom(lev).periodicity();
590 const Box& domain = this->Geom(lev).Domain();
591 const IntVect& lo = domain.smallEnd();
592 const IntVect& hi = domain.bigEnd();
593
594 // Figure out all our neighbors, removing duplicates
596 for (int ii = -nGrow[0]; ii < nGrow[0] + 1; ii += nGrow[0]) {,
597 for (int jj = -nGrow[1]; jj < nGrow[1] + 1; jj += nGrow[1]) {,
598 for (int kk = -nGrow[2]; kk < nGrow[2] + 1; kk += nGrow[2]) {)
599 if (AMREX_D_TERM((ii == 0), && (jj == 0), && (kk == 0))) { continue; }
600 IntVect shift(AMREX_D_DECL(ii, jj, kk));
601 IntVect neighbor_cell = iv + shift;
602
603 NeighborCopyTag tag;
604 tag.grid = mask(neighbor_cell, MaskComps::grid);
605 tag.tile = mask(neighbor_cell, MaskComps::tile);
606 tag.level = mask(neighbor_cell, MaskComps::level);
607 if (periodicity.isAnyPeriodic()) {
608 for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) {
609 if (! periodicity.isPeriodic(dir)) { continue; }
610 if (neighbor_cell[dir] < lo[dir]) {
611 tag.periodic_shift[dir] = -1;
612 } else if (neighbor_cell[dir] > hi[dir]) {
613 tag.periodic_shift[dir] = 1;
614 }
615 }
616 }
617
618 if (tag != src_tag) { tags.push_back(tag); }
619
621 },
622 },
623 })
624
625 RemoveDuplicates(tags);
626 return;
627 }
628 else
629 {
630 std::vector< std::pair<int, Box> > isects;
631 Box tbx;
632 for (int lev = 0; lev < this->numLevels(); ++lev)
633 {
634 IntVect ref_fac = computeRefFac(0, lev);
635 const Periodicity& periodicity = this->Geom(lev).periodicity();
636 const std::vector<IntVect>& pshifts = periodicity.shiftIntVect();
637 const BoxArray& ba = this->ParticleBoxArray(lev);
638 const IntVect& iv = this->Index(p, lev);
639 for (auto const& pshift : pshifts)
640 {
641 Box pbox = amrex::grow(Box(iv, iv), ref_fac*nGrow) + pshift;
642 bool first_only = false;
643 ba.intersections(pbox, isects, first_only, 0);
644 for (const auto& isec : isects)
645 {
646 const Box& grid_box = ba[isec.first];
647 for (IntVect cell = pbox.smallEnd(); cell <= pbox.bigEnd(); pbox.next(cell)) {
648 if ( !grid_box.contains(cell) ) { continue; }
649 int tile = getTileIndex(cell, grid_box,
650 this->do_tiling, this->tile_size, tbx);
651 auto nbor = NeighborCopyTag(lev, isec.first, tile);
652 nbor.periodic_shift = -pshift;
653 if (src_tag != nbor) { tags.push_back(nbor); }
654 }
655 }
656 }
657 }
658
659 RemoveDuplicates(tags);
660 return;
661 }
662}
663
664template <typename T_ParticleType, int NArrayReal, int NArrayInt,
665 template<class> class Allocator, class CellAssignor>
666void
669#ifdef AMREX_USE_GPU
670 fillNeighborsGPU();
671#else
672 fillNeighborsCPU();
673#endif
674 m_has_neighbors = true;
675}
676
677template <typename T_ParticleType, int NArrayReal, int NArrayInt,
678 template<class> class Allocator, class CellAssignor>
679void
681::sumNeighbors (int real_start_comp, int real_num_comp,
682 int int_start_comp, int int_num_comp) {
683#ifdef AMREX_USE_GPU
684 amrex::ignore_unused(real_start_comp,real_num_comp,int_start_comp,int_num_comp);
685 amrex::Abort("NeighborParticleContainer::sumNeighbors is not implemented for GPU builds");
686#else
687 sumNeighborsCPU(real_start_comp, real_num_comp, int_start_comp, int_num_comp);
688#endif
689}
690
691template <typename T_ParticleType, int NArrayReal, int NArrayInt,
692 template<class> class Allocator, class CellAssignor>
693void
695::updateNeighbors (bool boundary_neighbors_only)
696{
697
698 AMREX_ASSERT(hasNeighbors());
699
700#ifdef AMREX_USE_GPU
701 updateNeighborsGPU(boundary_neighbors_only);
702#else
703 amrex::ignore_unused(boundary_neighbors_only);
704 updateNeighborsCPU(true);
705#endif
706 m_has_neighbors = true;
707}
708
709template <typename T_ParticleType, int NArrayReal, int NArrayInt,
710 template<class> class Allocator, class CellAssignor>
711void
714{
715#ifdef AMREX_USE_GPU
716 clearNeighborsGPU();
717#else
718 clearNeighborsCPU();
719#endif
720 m_has_neighbors = false;
721}
722
723template <typename T_ParticleType, int NArrayReal, int NArrayInt,
724 template<class> class Allocator, class CellAssignor>
725template <class CheckPair>
726void
728buildNeighborList (CheckPair const& check_pair, bool /*sort*/)
729{
730 AMREX_ASSERT(numParticlesOutOfRange(*this, m_num_neighbor_cells) == 0);
731
732 BL_PROFILE("NeighborParticleContainer::buildNeighborList");
733
734 resizeContainers(this->numLevels());
735
736 for (int lev = 0; lev < this->numLevels(); ++lev)
737 {
738 m_neighbor_list[lev].clear();
739
740 for (MyParIter pti(*this, lev); pti.isValid(); ++pti) {
741 PairIndex index(pti.index(), pti.LocalTileIndex());
742 m_neighbor_list[lev][index];
743 }
744
745#ifndef AMREX_USE_GPU
746 neighbor_list[lev].clear();
747 for (MyParIter pti(*this, lev); pti.isValid(); ++pti) {
748 PairIndex index(pti.index(), pti.LocalTileIndex());
749 neighbor_list[lev][index];
750 }
751#endif
752
753 auto& plev = this->GetParticles(lev);
754 const auto& geom = this->Geom(lev);
755
756#ifdef AMREX_USE_OMP
757#pragma omp parallel if (Gpu::notInLaunchRegion())
758#endif
759 for (MyParIter pti(*this, lev); pti.isValid(); ++pti)
760 {
761 int gid = pti.index();
762 int tid = pti.LocalTileIndex();
763 auto index = std::make_pair(gid, tid);
764
765 auto& ptile = plev[index];
766
767 if (ptile.numParticles() == 0) { continue; }
768
769 Box bx = pti.tilebox();
770 int ng = computeRefFac(0, lev).max()*m_num_neighbor_cells;
771 bx.grow(ng);
772
773 Gpu::DeviceVector<int> off_bins_v;
778
779 off_bins_v.push_back(0);
780 off_bins_v.push_back(int(bx.numPts()));
781 lo_v.push_back(lbound(bx));
782 hi_v.push_back(ubound(bx));
783 dxi_v.push_back(geom.InvCellSizeArray());
784 plo_v.push_back(geom.ProbLoArray());
785
786 m_neighbor_list[lev][index].build(ptile,
787 check_pair,
788 off_bins_v, dxi_v, plo_v, lo_v, hi_v, ng);
789
790#ifndef AMREX_USE_GPU
791 const auto& counts = m_neighbor_list[lev][index].GetCounts();
792 const auto& list = m_neighbor_list[lev][index].GetList();
793
794 int li = 0;
795 for (int i = 0; i < ptile.numParticles(); ++i)
796 {
797 auto cnt = counts[i];
798 neighbor_list[lev][index].push_back(cnt);
799 for (size_t j = 0; j < cnt; ++j)
800 {
801 neighbor_list[lev][index].push_back(list[li++]+1);
802 }
803 }
804#endif
805 }
806 }
807}
808
// Cross-container variant: builds neighbor lists pairing this container's
// particles with those of `other` (which must share iterators/layout).
// NOTE(review): this listing is a Doxygen extraction; original source lines
// 813, 816 and 825-826 were dropped (hyperlinked lines) and could not be
// reconstructed from context — restore them from the upstream file.
809template <typename T_ParticleType, int NArrayReal, int NArrayInt,
810 template<class> class Allocator, class CellAssignor>
811template <class CheckPair, class OtherPCType>
812void
814buildNeighborList (CheckPair const& check_pair, OtherPCType& other,
815 Vector<std::map<std::pair<int, int>,
817 bool /*sort*/)
818{
819 BL_PROFILE("NeighborParticleContainer::buildNeighborList");
820
821 AMREX_ASSERT(numParticlesOutOfRange(*this, m_num_neighbor_cells) == 0);
822 AMREX_ASSERT(numParticlesOutOfRange(other, m_num_neighbor_cells) == 0);
823 AMREX_ASSERT(SameIteratorsOK(*this, other));
824
827
828 resizeContainers(this->numLevels());
829 neighbor_lists.resize(this->numLevels());
830
831 for (int lev = 0; lev < this->numLevels(); ++lev)
832 {
833 neighbor_lists[lev].clear();
834
// Default-construct an entry per (grid, tile) pair before the parallel fill.
835 for (MyParIter pti(*this, lev); pti.isValid(); ++pti) {
836 PairIndex index(pti.index(), pti.LocalTileIndex());
837 neighbor_lists[lev][index];
838 }
839
840 auto& plev = this->GetParticles(lev);
841 const auto& geom = this->Geom(lev);
842
843#ifdef AMREX_USE_OMP
844#pragma omp parallel if (Gpu::notInLaunchRegion())
845#endif
846 for (MyParIter pti(*this, lev); pti.isValid(); ++pti)
847 {
848 int gid = pti.index();
849 int tid = pti.LocalTileIndex();
850 auto index = std::make_pair(gid, tid);
851
852 const auto& ptile = plev[index];
853 auto& other_ptile = other.ParticlesAt(lev, pti);
854 if (ptile.numParticles() == 0) { continue; }
855
// Grow the tile box by the (level-scaled) neighbor halo for binning.
856 Box bx = pti.tilebox();
857 int ng = computeRefFac(0, lev).max()*m_num_neighbor_cells;
858 bx.grow(ng);
859
// NOTE(review): lines 861-864 (the dxi_v/plo_v/lo_v/hi_v declarations)
// were dropped by the extraction — restore from upstream.
860 Gpu::DeviceVector<int> off_bins_v;
865
866 off_bins_v.push_back(0);
867 off_bins_v.push_back(int(bx.numPts()));
868 lo_v.push_back(lbound(bx));
869 hi_v.push_back(ubound(bx));
870 dxi_v.push_back(geom.InvCellSizeArray());
871 plo_v.push_back(geom.ProbLoArray());
872
873 neighbor_lists[lev][index].build(ptile, other_ptile,
874 check_pair,
875 off_bins_v, dxi_v, plo_v, lo_v, hi_v, ng);
876 }
877 }
878}
879
880template <typename T_ParticleType, int NArrayReal, int NArrayInt,
881 template<class> class Allocator, class CellAssignor>
882template <class CheckPair>
883void
885buildNeighborList (CheckPair const& check_pair, int type_ind, int* ref_ratio,
886 int num_bin_types, bool /*sort*/)
887{
888 AMREX_ASSERT(numParticlesOutOfRange(*this, m_num_neighbor_cells) == 0);
889
890 if (num_bin_types == 1) { AMREX_ASSERT(ref_ratio[0] == 1); }
891
892 BL_PROFILE("NeighborParticleContainer::buildNeighborList");
893
894 resizeContainers(this->numLevels());
895
896 for (int lev = 0; lev < this->numLevels(); ++lev)
897 {
898 m_neighbor_list[lev].clear();
899
900 for (MyParIter pti(*this, lev); pti.isValid(); ++pti) {
901 PairIndex index(pti.index(), pti.LocalTileIndex());
902 m_neighbor_list[lev][index];
903 }
904
905#ifndef AMREX_USE_GPU
906 neighbor_list[lev].clear();
907 for (MyParIter pti(*this, lev); pti.isValid(); ++pti) {
908 PairIndex index(pti.index(), pti.LocalTileIndex());
909 neighbor_list[lev][index];
910 }
911#endif
912
913 auto& plev = this->GetParticles(lev);
914 const auto& geom = this->Geom(lev);
915
916#ifdef AMREX_USE_OMP
917#pragma omp parallel if (Gpu::notInLaunchRegion())
918#endif
919 for (MyParIter pti(*this, lev); pti.isValid(); ++pti)
920 {
921 int gid = pti.index();
922 int tid = pti.LocalTileIndex();
923 auto index = std::make_pair(gid, tid);
924 auto& ptile = plev[index];
925
926 if (ptile.numParticles() == 0) { continue; }
927
928 Box bx = pti.tilebox();
929 int ng = 1;
930
931 auto& soa = pti.GetStructOfArrays();
932 auto TypeVec = soa.GetIntData(type_ind);
933 int* bin_type_array = TypeVec.data();
934
935 Gpu::DeviceVector<int> off_bins_v(num_bin_types+1,0);
936 Gpu::DeviceVector<int> nbins_v(num_bin_types+1,0);
937 Gpu::DeviceVector<Dim3> lo_v(num_bin_types);
938 Gpu::DeviceVector<Dim3> hi_v(num_bin_types);
941
942 for (int type(0); type<num_bin_types; ++type) {
943 // Domain, RB, Coord, Per
944 Box dom = geom.Domain();
945 const Real* plo = geom.ProbLo();
946 const Real* phi = geom.ProbHi();
947 auto lcoord = geom.Coord();
948 Array<int,AMREX_SPACEDIM> lper = geom.isPeriodic();
949
950 // Refined tile box and domain
951 Box lbx( bx.smallEnd(), bx.bigEnd(), bx.ixType() );
952 Box ldom( dom.smallEnd(),dom.bigEnd(),dom.ixType() );
953 lbx.refine( ref_ratio[type] );
954 ldom.refine( ref_ratio[type] );
955
956 // Local copy of RB for refined geom
957 RealBox lrb(plo,phi);
958
959 // New geometry with refined domain
960 Geometry lgeom(ldom,lrb,lcoord,lper);
961
962 // Grow for ghost cells
963 int NGhost = ref_ratio[type]*m_num_neighbor_cells;
964 lbx.grow(NGhost);
965
966 // Store for memcpy
967 auto nbins = int(lbx.numPts());
968 Dim3 lo = lbound( lbx );
969 Dim3 hi = ubound( lbx );
970
971 auto dxInv = lgeom.InvCellSizeArray();
972 auto ploa = lgeom.ProbLoArray();
973
974 Gpu::htod_memcpy_async( dxi_v.data() + type, dxInv.data(), sizeof(dxInv) );
975 Gpu::htod_memcpy_async( plo_v.data() + type, ploa.data() , sizeof(ploa) );
976 Gpu::htod_memcpy_async( lo_v.data() + type, &lo , sizeof(lo) );
977 Gpu::htod_memcpy_async( hi_v.data() + type, &hi , sizeof(hi) );
978 Gpu::htod_memcpy_async( nbins_v.data() + type, &nbins , sizeof(nbins) );
979 }
980
981 Gpu::exclusive_scan(nbins_v.begin(), nbins_v.end(), off_bins_v.begin());
982
983 m_neighbor_list[lev][index].build(ptile,
984 check_pair,
985 off_bins_v, dxi_v, plo_v, lo_v, hi_v,
986 ng, num_bin_types, bin_type_array);
987
988#ifndef AMREX_USE_GPU
989 BL_PROFILE_VAR("CPU_CopyNeighborList()",CPUCNL);
990
991 const auto& counts = m_neighbor_list[lev][index].GetCounts();
992 const auto& list = m_neighbor_list[lev][index].GetList();
993
994 int li = 0;
995 for (int i = 0; i < ptile.numParticles(); ++i) {
996 auto cnt = counts[i];
997 neighbor_list[lev][index].push_back(cnt);
998 for (size_t j = 0; j < cnt; ++j) {
999 neighbor_list[lev][index].push_back(list[li++]+1);
1000 }
1001 }
1002
1003 BL_PROFILE_VAR_STOP(CPUCNL);
1004#endif
1005 } //ParIter
1006 } //Lev
1007}
1008
1009template <typename T_ParticleType, int NArrayReal, int NArrayInt,
1010 template<class> class Allocator, class CellAssignor>
1011template <class CheckPair>
1012void
1014selectActualNeighbors (CheckPair const& check_pair, int num_cells)
1015{
1016 BL_PROFILE("NeighborParticleContainer::selectActualNeighbors");
1017 const auto& geom_fine = this->Geom(0);
1018 const auto& ba_fine = this->ParticleBoxArray(0);
1019 if (ba_fine.size() == 1 && !geom_fine.isAnyPeriodic()) {
1020 return;
1021 }
1022
1023 for (int lev = 0; lev < this->numLevels(); ++lev)
1024 {
1025 // clear previous neighbor particle ids
1026 if (!m_boundary_particle_ids.empty()) {
1027 for (auto& keyval: m_boundary_particle_ids[lev]) {
1028 keyval.second.clear();
1029 }
1030 }
1031
1032 for (MyParIter pti(*this, lev); pti.isValid(); ++pti) {
1033 PairIndex index(pti.index(), pti.LocalTileIndex());
1034
1035 // id of actual particles that need to be sent
1036 m_boundary_particle_ids[lev][index];
1037 m_boundary_particle_ids[lev][index].resize(pti.numNeighborParticles());
1038 auto* p_boundary_particle_ids = m_boundary_particle_ids[lev][index].dataPtr();
1039
1040 auto const& ptile = this->ParticlesAt(lev, pti);
1041 auto ptile_data = ptile.getConstParticleTileData();
1042
1043 Box box = pti.tilebox();
1044 Box grownBox = box;
1045 grownBox.grow(computeRefFac(0, lev).max()*m_num_neighbor_cells);
1046 const auto lo = lbound(grownBox);
1047 const auto hi = ubound(grownBox);
1048
1049 const auto& geom = this->Geom(lev);
1050 const auto domain = geom.Domain();
1051 const auto dxi = geom.InvCellSizeArray();
1052 const auto plo = geom.ProbLoArray();
1053
1054 const size_t np_real = pti.numRealParticles();
1055 const size_t np_total = ptile.numTotalParticles();
1056
1057 DenseBins<decltype(ptile_data)> bins;
1058 bins.build(np_total, ptile_data, grownBox,
1059 [=] AMREX_GPU_DEVICE (auto const& p) noexcept -> IntVect
1060 {
1061 AMREX_D_TERM(int i = static_cast<int>(amrex::Math::floor((p.pos(0)-plo[0])*dxi[0]) - lo.x);,
1062 int j = static_cast<int>(amrex::Math::floor((p.pos(1)-plo[1])*dxi[1]) - lo.y);,
1063 int k = static_cast<int>(amrex::Math::floor((p.pos(2)-plo[2])*dxi[2]) - lo.z));
1064 AMREX_D_TERM(AMREX_ASSERT(i >= 0);, AMREX_ASSERT(j >= 0);, AMREX_ASSERT(k >= 0));
1065
1066 return IntVect(AMREX_D_DECL(i, j, k));
1067 });
1068
1069 auto pperm = bins.permutationPtr();
1070 auto poffset = bins.offsetsPtr();
1071
1072 Gpu::Buffer<unsigned int> np_boundary({0});
1073 unsigned int* p_np_boundary = np_boundary.data();
1074
1075 AMREX_FOR_1D ( np_real, i,
1076 {
1077 IntVect iv = getParticleCell(ptile_data, i, plo, dxi, domain);
1078 iv -= grownBox.smallEnd();
1079 auto iv3 = iv.dim3();
1080
1081 int ix = iv3.x;
1082 int iy = iv3.y;
1083 int iz = iv3.z;
1084
1085 int nx = hi.x-lo.x+1;
1086 int ny = hi.y-lo.y+1;
1087 int nz = hi.z-lo.z+1;
1088
1089 bool isActualNeighbor = false;
1090 // These loops mirror the way the neighbor particles were originally constructed, except
1091 // it adds a call to check_pair. Thus the maximum number value of "loc" below is the
1092 // number of neighbors minus 1, and the buffer pointed to by p_boundary_particle_ids
1093 // will not be overwritten.
1094 for (int ii = amrex::max(ix-num_cells, 0); ii <= amrex::min(ix+num_cells, nx-1); ++ii) {
1095 for (int jj = amrex::max(iy-num_cells, 0); jj <= amrex::min(iy+num_cells, ny-1); ++jj) {
1096 for (int kk = amrex::max(iz-num_cells, 0); kk <= amrex::min(iz+num_cells, nz-1); ++kk) {
1097 if (isActualNeighbor) { break; }
1098 int nbr_cell_id = (ii * ny + jj) * nz + kk;
1099 for (auto p = poffset[nbr_cell_id]; p < poffset[nbr_cell_id+1]; ++p) {
1100 if (pperm[p] == int(i)) { continue; }
1101 if (detail::call_check_pair(check_pair, ptile_data, ptile_data, i, pperm[p])) {
1102 IntVect cell_ijk = getParticleCell(ptile_data, pperm[p], plo, dxi, domain);
1103 if (!box.contains(cell_ijk)) {
1104 unsigned int loc = Gpu::Atomic::Add(p_np_boundary, 1U);
1105 p_boundary_particle_ids[loc] = i;
1106 isActualNeighbor = true;
1107 break;
1108 }
1109 }// end if check_pair
1110 }
1111 }
1112 }
1113 }
1114 });// end amrex_for_1d
1115
1116 unsigned int* p_np_boundary_h = np_boundary.copyToHost();
1117 m_boundary_particle_ids[lev][index].resize(*p_np_boundary_h);
1118
1119 }// end mypariter
1120 }// end lev
1121}
1122template <typename T_ParticleType, int NArrayReal, int NArrayInt,
1123 template<class> class Allocator, class CellAssignor>
1124void
1127{
1128 BL_PROFILE("NeighborParticleContainer::printNeighborList");
1129
1130 for (int lev = 0; lev < this->numLevels(); ++lev)
1131 {
1132 for(MFIter mfi = this->MakeMFIter(lev); mfi.isValid(); ++mfi)
1133 {
1134 int gid = mfi.index();
1135 int tid = mfi.LocalTileIndex();
1136 auto index = std::make_pair(gid, tid);
1137 m_neighbor_list[lev][index].print();
1138 }
1139 }
1140}
1141
1142template <typename T_ParticleType, int NArrayReal, int NArrayInt,
1143 template<class> class Allocator, class CellAssignor>
1144void
1146resizeContainers (int num_levels)
1147{
1148 this->reserveData();
1149 this->resizeData();
1150 if ( std::ssize(neighbors) <= num_levels )
1151 {
1152 neighbors.resize(num_levels);
1153 m_neighbor_list.resize(num_levels);
1154 neighbor_list.resize(num_levels);
1155 mask_ptr.resize(num_levels);
1156 buffer_tag_cache.resize(num_levels);
1157 local_neighbor_sizes.resize(num_levels);
1158 if ( enableInverse() ) { inverse_tags.resize(num_levels); }
1159 }
1160
1161 AMREX_ASSERT((neighbors.size() == m_neighbor_list.size()) &&
1162 (neighbors.size() == mask_ptr.size() ) );
1163}
1164
1165}
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define BL_PROFILE_VAR_STOP(vname)
Definition AMReX_BLProfiler.H:563
#define BL_PROFILE_VAR(fname, vname)
Definition AMReX_BLProfiler.H:560
#define AMREX_ASSERT(EX)
Definition AMReX_BLassert.H:38
#define AMREX_FOR_1D(...)
Definition AMReX_GpuLaunchMacrosC.nolint.H:97
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
Array4< int const > mask
Definition AMReX_InterpFaceRegister.cpp:93
#define AMREX_D_TERM(a, b, c)
Definition AMReX_SPACE.H:172
#define AMREX_D_DECL(a, b, c)
Definition AMReX_SPACE.H:171
A FortranArrayBox(FAB)-like object.
Definition AMReX_BaseFab.H:190
A collection of Boxes stored in an Array.
Definition AMReX_BoxArray.H:564
std::vector< std::pair< int, Box > > intersections(const Box &bx) const
Return intersections of Box and BoxArray.
Definition AMReX_BoxArray.cpp:1186
static bool SameRefs(const BoxArray &lhs, const BoxArray &rhs)
whether two BoxArrays share the same data
Definition AMReX_BoxArray.H:837
__host__ __device__ BoxND & grow(int i) noexcept
Definition AMReX_Box.H:641
__host__ __device__ const IntVectND< dim > & bigEnd() const &noexcept
Return the inclusive upper bound of the box.
Definition AMReX_Box.H:123
__host__ __device__ Long numPts() const noexcept
Return the number of points contained in the BoxND.
Definition AMReX_Box.H:356
__host__ __device__ bool contains(const IntVectND< dim > &p) const noexcept
Return true if argument is contained within BoxND.
Definition AMReX_Box.H:212
__host__ __device__ IndexTypeND< dim > ixType() const noexcept
Return the indexing type.
Definition AMReX_Box.H:135
__host__ __device__ BoxND & coarsen(int ref_ratio) noexcept
Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/rati...
Definition AMReX_Box.H:722
__host__ __device__ BoxND & refine(int ref_ratio) noexcept
Refine BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo*ratio...
Definition AMReX_Box.H:698
__host__ __device__ Long index(const IntVectND< dim > &v) const noexcept
Return offset of point from smallend; i.e. index(smallend) -> 0, bigend would return numPts()-1....
Definition AMReX_Box.H:1055
__host__ __device__ void next(IntVectND< dim > &) const noexcept
Step through the rectangle. It is a runtime error to give a point not inside rectangle....
Definition AMReX_Box.H:1121
__host__ __device__ const IntVectND< dim > & smallEnd() const &noexcept
Return the inclusive lower bound of the box.
Definition AMReX_Box.H:111
GpuArray< Real, 3 > InvCellSizeArray() const noexcept
Definition AMReX_CoordSys.H:87
A container for storing items in a set of bins.
Definition AMReX_DenseBins.H:77
void build(N nitems, const_pointer_input_type v, const Box &bx, F &&f)
Populate the bins with a set of items.
Definition AMReX_DenseBins.H:130
Calculates the distribution of FABs to MPI processes.
Definition AMReX_DistributionMapping.H:43
static bool SameRefs(const DistributionMapping &lhs, const DistributionMapping &rhs)
Definition AMReX_DistributionMapping.H:189
Rectangular problem domain geometry.
Definition AMReX_Geometry.H:75
Periodicity periodicity() const noexcept
Definition AMReX_Geometry.H:361
GpuArray< Real, 3 > ProbLoArray() const noexcept
Definition AMReX_Geometry.H:192
Definition AMReX_GpuBuffer.H:23
T const * data() const noexcept
Definition AMReX_GpuBuffer.H:50
__host__ static __device__ constexpr IntVectND< dim > TheZeroVector() noexcept
This static member function returns a reference to a constant IntVectND object, all of whose dim argu...
Definition AMReX_IntVect.H:771
__host__ __device__ constexpr int max() const noexcept
maximum (no absolute values) value
Definition AMReX_IntVect.H:313
__host__ __device__ constexpr Dim3 dim3() const noexcept
Definition AMReX_IntVect.H:265
Iterator for looping ever tiles and boxes of amrex::FabArray based containers.
Definition AMReX_MFIter.H:88
bool isValid() const noexcept
Is the iterator valid i.e. is it associated with a FAB?
Definition AMReX_MFIter.H:172
Definition AMReX_NeighborList.H:311
Definition AMReX_NeighborParticles.H:39
std::pair< int, int > PairIndex
Definition AMReX_NeighborParticles.H:210
void selectActualNeighbors(CheckPair const &check_pair, int num_cells=1)
Definition AMReX_NeighborParticlesI.H:1014
static bool enable_inverse
Definition AMReX_NeighborParticles.H:447
void buildNeighborList(CheckPair const &check_pair, bool sort=false)
Definition AMReX_NeighborParticlesI.H:728
void printNeighborList()
Definition AMReX_NeighborParticlesI.H:1126
void getNeighborTags(Vector< NeighborCopyTag > &tags, const P &p, int nGrow, const NeighborCopyTag &src_tag, const MyParIter &pti)
Definition AMReX_NeighborParticlesI.H:563
static bool use_mask
Definition AMReX_NeighborParticles.H:445
void initializeCommComps()
Definition AMReX_NeighborParticlesI.H:60
typename ParticleContainerType::ParIterType MyParIter
Definition AMReX_NeighborParticles.H:209
void resizeContainers(int num_levels)
Definition AMReX_NeighborParticlesI.H:1146
Dynamically allocated vector for trivially copyable data.
Definition AMReX_PODVector.H:308
iterator begin() noexcept
Definition AMReX_PODVector.H:674
iterator end() noexcept
Definition AMReX_PODVector.H:678
T * data() noexcept
Definition AMReX_PODVector.H:666
void push_back(const T &a_value)
Definition AMReX_PODVector.H:629
Definition AMReX_ParGDB.H:13
A distributed container for Particles sorted onto the levels, grids, and tiles of a block-structured ...
Definition AMReX_ParticleContainer.H:149
This provides length of period for periodic domains. 0 means it is not periodic in that direction....
Definition AMReX_Periodicity.H:17
std::vector< IntVect > shiftIntVect(IntVect const &nghost=IntVect(0)) const
Definition AMReX_Periodicity.cpp:8
bool isAnyPeriodic() const noexcept
Definition AMReX_Periodicity.H:22
bool isPeriodic(int dir) const noexcept
Definition AMReX_Periodicity.H:26
A Box with real dimensions.
Definition AMReX_RealBox.H:28
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
Long size() const noexcept
Definition AMReX_Vector.H:53
amrex_real Real
Floating Point Type for Fields.
Definition AMReX_REAL.H:79
amrex_particle_real ParticleReal
Floating Point Type for Particles.
Definition AMReX_REAL.H:90
OutIter exclusive_scan(InIter begin, InIter end, OutIter result)
Definition AMReX_Scan.H:1193
__host__ __device__ Dim3 ubound(Array4< T > const &a) noexcept
Return the inclusive upper bounds of an Array4 in Dim3 form.
Definition AMReX_Array4.H:1331
__host__ __device__ Dim3 lbound(Array4< T > const &a) noexcept
Return the inclusive lower bounds of an Array4 in Dim3 form.
Definition AMReX_Array4.H:1317
__host__ __device__ BoxND< dim > grow(const BoxND< dim > &b, int i) noexcept
Grow BoxND in all directions by given amount.
Definition AMReX_Box.H:1280
std::array< T, N > Array
Definition AMReX_Array.H:26
__host__ __device__ AMREX_FORCE_INLINE T Add(T *sum, T value) noexcept
Definition AMReX_GpuAtomic.H:200
void htod_memcpy_async(void *p_d, const void *p_h, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:421
constexpr int get_thread_num()
Definition AMReX_OpenMP.H:37
constexpr int get_max_threads()
Definition AMReX_OpenMP.H:36
int MyProcSub() noexcept
my sub-rank in current frame
Definition AMReX_ParallelContext.H:76
int global_to_local_rank(int rank) noexcept
Definition AMReX_ParallelContext.H:98
Definition AMReX_Amr.cpp:50
__host__ __device__ void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition AMReX.H:139
__host__ __device__ int getTileIndex(const IntVect &iv, const Box &box, const bool a_do_tiling, const IntVect &a_tile_size, Box &tbx)
Definition AMReX_ParticleUtil.H:185
DistributionMapping const & DistributionMap(FabArrayBase const &fa)
Definition AMReX_FabArrayBase.cpp:2867
void EnsureThreadSafeTiles(PC &pc)
Definition AMReX_ParticleUtil.H:731
BoxND< 3 > Box
Box is an alias for amrex::BoxND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:30
__host__ __device__ constexpr const T & min(const T &a, const T &b) noexcept
Definition AMReX_Algorithm.H:24
__host__ __device__ Dim3 begin(BoxND< dim > const &box) noexcept
Definition AMReX_Box.H:2006
__host__ __device__ BoxND< dim > shift(const BoxND< dim > &b, int dir, int nzones) noexcept
Return a BoxND with indices shifted by nzones in dir direction.
Definition AMReX_Box.H:1495
IntVectND< 3 > IntVect
IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:33
__host__ __device__ constexpr const T & max(const T &a, const T &b) noexcept
Definition AMReX_Algorithm.H:44
bool SameIteratorsOK(const PC1 &pc1, const PC2 &pc2)
Definition AMReX_ParticleUtil.H:719
int numParticlesOutOfRange(Iterator const &pti, int nGrow)
Returns the number of particles that are more than nGrow cells from the box correspond to the input i...
Definition AMReX_ParticleUtil.H:35
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition AMReX.cpp:241
const int[]
Definition AMReX_BLProfiler.cpp:1664
IntVect computeRefFac(const ParGDBBase *a_gdb, int src_lev, int lev)
Definition AMReX_ParticleUtil.cpp:6
void RemoveDuplicates(Vector< T > &vec)
Definition AMReX_Vector.H:211
__host__ __device__ Dim3 end(BoxND< dim > const &box) noexcept
Definition AMReX_Box.H:2015
__host__ __device__ IntVect getParticleCell(P const &p, amrex::GpuArray< amrex::Real, 3 > const &plo, amrex::GpuArray< amrex::Real, 3 > const &dxi) noexcept
Returns the cell index for a given particle using the provided lower bounds and cell sizes.
Definition AMReX_ParticleUtil.H:337
BoxArray const & boxArray(FabArrayBase const &fa)
Definition AMReX_FabArrayBase.cpp:2862
Definition AMReX_Dim3.H:12
int x
Definition AMReX_Dim3.H:12