Block-Structured AMR Software Framework
Loading...
Searching...
No Matches
AMReX_NonLocalBCImpl.H
Go to the documentation of this file.
1#ifndef AMREX_NONLOCAL_BC_H_
2#include "AMReX_NonLocalBC.H"
3#endif
4
5#ifndef AMREX_NONLOCAL_BC_IMPL_H_
6#define AMREX_NONLOCAL_BC_IMPL_H_
7#include <AMReX_Config.H>
8#include <AMReX_TypeTraits.H>
9
10namespace amrex::NonLocalBC {
//! Index mapping that rotates indices 90 degrees clockwise about the z-axis:
//! (i,j,k) -> (j, -1-i, k). The -1 offset keeps the image on the ghost side
//! of a domain whose small end is at 0.
struct Rotate90ClockWise {
    IntVect operator() (IntVect const& iv) const noexcept {
        return IntVect{AMREX_D_DECL(iv[1], -1-iv[0], iv[2])};
    }

    Dim3 operator() (Dim3 const& a) const noexcept {
        return Dim3{.x = a.y, .y = -1-a.x, .z = a.z};
    }

    //! Rotate a whole Box. The two corners are picked so that, after the
    //! rotation, the first argument is the small end and the second the big
    //! end (the rotation swaps/negates components, so the untransformed
    //! corners are mixed from bigEnd and smallEnd).
    Box operator() (Box const& box) const noexcept {
        return Box(operator()(IntVect{AMREX_D_DECL(box.bigEnd  (0),
                                                   box.smallEnd(1),
                                                   box.smallEnd(2))}),
                   operator()(IntVect{AMREX_D_DECL(box.smallEnd(0),
                                                   box.bigEnd  (1),
                                                   box.bigEnd  (2))}));
    }
};
31
//! Index mapping that rotates indices 90 degrees counterclockwise about the
//! z-axis: (i,j,k) -> (-1-j, i, k). Inverse of Rotate90ClockWise.
struct Rotate90CounterClockWise {
    IntVect operator() (IntVect const& iv) const noexcept {
        return IntVect{AMREX_D_DECL(-1-iv[1], iv[0], iv[2])};
    }

    Dim3 operator() (Dim3 const& a) const noexcept {
        return Dim3{.x = -1-a.y, .y = a.x, .z = a.z};
    }

    //! Rotate a whole Box; corners are chosen so the result's small end is
    //! the image of the first corner and its big end the image of the second.
    Box operator() (Box const& box) const noexcept {
        return Box(operator()(IntVect{AMREX_D_DECL(box.smallEnd(0),
                                                   box.bigEnd  (1),
                                                   box.smallEnd(2))}),
                   operator()(IntVect{AMREX_D_DECL(box.bigEnd  (0),
                                                   box.smallEnd(1),
                                                   box.bigEnd  (2))}));
    }
};
52
//! Destination-to-source mapping for the Rotate90 boundary: ghost cells on
//! the x-lo side (x < 0) come from the clockwise image, all others from the
//! counterclockwise image.
struct Rotate90DstToSrc
{
    Dim3 operator() (Dim3 const& a) const noexcept {
        if (a.x < 0) {
            return Rotate90ClockWise()(a);
        } else {
            return Rotate90CounterClockWise()(a);
        }
    }
};
64
//! Index mapping for a 180-degree rotation: (i,j,k) -> (-1-i, Ly-1-j, k),
//! where Ly is the domain length in y.
struct Rotate180Fn {
    int Ly;  // domain length in the y direction

    IntVect operator() (IntVect const& iv) const noexcept {
        return IntVect{AMREX_D_DECL(-1-iv[0], Ly-1-iv[1], iv[2])};
    }

    Dim3 operator() (Dim3 const& a) const noexcept {
        return Dim3{.x = -1-a.x, .y = Ly-1-a.y, .z = a.z};
    }

    //! Rotate a whole Box; x and y corners swap under the reflection, z is
    //! unchanged.
    Box operator() (Box const& box) const noexcept {
        return Box(operator()(IntVect{AMREX_D_DECL(box.bigEnd  (0),
                                                   box.bigEnd  (1),
                                                   box.smallEnd(2))}),
                   operator()(IntVect{AMREX_D_DECL(box.smallEnd(0),
                                                   box.smallEnd(1),
                                                   box.bigEnd  (2))}));
    }
};
87
//! Index mapping across the polar (x) boundaries: i is reflected across the
//! nearer x-boundary and j is shifted by half the domain (the opposite
//! azimuth). Presumably used for polar/spherical grids -- confirm against
//! callers (FillPolar).
struct PolarFn {
    int Lx, Ly;  // domain lengths in x and y

    //! Reflect i across x-lo (for the lower half) or x-hi (upper half).
    [[nodiscard]] AMREX_GPU_HOST_DEVICE
    int i_index (int i) const noexcept {
        return (i < Lx/2) ? -1-i : 2*Lx-1-i;
    }

    //! Shift j by Ly/2, wrapping within [0, Ly).
    [[nodiscard]] AMREX_GPU_HOST_DEVICE
    int j_index (int j) const noexcept {
        return (j < Ly/2) ? j+Ly/2 : j-Ly/2;
    }

    [[nodiscard]] AMREX_GPU_HOST_DEVICE
    IntVect operator() (IntVect const& iv) const noexcept {
        return IntVect{AMREX_D_DECL(i_index(iv[0]), j_index(iv[1]), iv[2])};
    }

    [[nodiscard]] AMREX_GPU_HOST_DEVICE
    Dim3 operator() (Dim3 const& a) const noexcept {
        return Dim3{.x = i_index(a.x), .y = j_index(a.y), .z = a.z};
    }

    //! Map a whole Box; the x corners swap under the reflection.
    [[nodiscard]] Box operator() (Box const& box) const noexcept {
        return Box(operator()(IntVect{AMREX_D_DECL(box.bigEnd  (0),
                                                   box.smallEnd(1),
                                                   box.smallEnd(2))}),
                   operator()(IntVect{AMREX_D_DECL(box.smallEnd(0),
                                                   box.bigEnd  (1),
                                                   box.bigEnd  (2))}));
    }
};
120
//! Variant of PolarFn for the x-y corner ghost regions. i is reflected like
//! PolarFn; j is shifted by half the domain with the direction chosen so
//! that out-of-domain j land back inside and in-domain j map to the other
//! azimuthal half.
struct PolarFn2 { // for the x-y corners
    int Lx, Ly;  // domain lengths in x and y

    //! Same reflection as PolarFn::i_index.
    [[nodiscard]] AMREX_GPU_HOST_DEVICE
    int i_index (int i) const noexcept {
        return (i < Lx/2) ? -1-i : 2*Lx-1-i;
    }

    [[nodiscard]] AMREX_GPU_HOST_DEVICE
    int j_index (int j) const noexcept {
        if (j < 0) { // NOLINT
            return j+Ly/2;   // below the domain: shift up into [0,Ly)
        } else if (j >= Ly) { // NOLINT
            return j-Ly/2;   // above the domain: shift down into [0,Ly)
        } else if (j < Ly/2) {
            return j-Ly/2;   // interior lower half: to the opposite azimuth
        } else {
            return j+Ly/2;   // interior upper half: to the opposite azimuth
        }
    }

    [[nodiscard]] AMREX_GPU_HOST_DEVICE
    IntVect operator() (IntVect const& iv) const noexcept {
        return IntVect{AMREX_D_DECL(i_index(iv[0]), j_index(iv[1]), iv[2])};
    }

    [[nodiscard]] AMREX_GPU_HOST_DEVICE
    Dim3 operator() (Dim3 const& a) const noexcept {
        return Dim3{.x = i_index(a.x), .y = j_index(a.y), .z = a.z};
    }

    //! Map a whole Box; the x corners swap under the reflection.
    [[nodiscard]] Box operator() (Box const& box) const noexcept {
        return Box(operator()(IntVect{AMREX_D_DECL(box.bigEnd  (0),
                                                   box.smallEnd(1),
                                                   box.smallEnd(2))}),
                   operator()(IntVect{AMREX_D_DECL(box.smallEnd(0),
                                                   box.bigEnd  (1),
                                                   box.bigEnd  (2))}));
    }
};
161
//! Host-side copy of all local (same-rank) tags from src to dest.
//!
//! For each destination cell (i,j,k) in a tag's dbox, the source cell is
//! obtained via the destination-to-source index mapping dtos, and the value
//! is passed through the component projection proj.
//!
//! \param dcomp first destination component
//! \param scomp first source component
//! \param ncomp number of components to copy
template <class FAB, class DTOS, class Proj>
std::enable_if_t<IsBaseFab<FAB>() && IsCallableR<Dim3, DTOS, Dim3>() && IsFabProjection<Proj, FAB>()>
local_copy_cpu (FabArray<FAB>& dest, const FabArray<FAB>& src, int dcomp, int scomp, int ncomp,
                FabArrayBase::CopyComTagsContainer const& local_tags, DTOS const& dtos, Proj const& proj) noexcept {
    const auto N_locs = static_cast<int>(local_tags.size());
    if (N_locs == 0) { return; }
#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
    for (int itag = 0; itag < N_locs; ++itag) {
        const auto& tag = local_tags[itag];
        auto const& sfab = src.const_array(tag.srcIndex);
        auto const& dfab = dest.array     (tag.dstIndex);
        // Capture by value: the lambda may run on OMP worker threads.
        amrex::LoopConcurrentOnCpu(tag.dbox, ncomp, [=] (int i, int j, int k, int n) noexcept
        {
            auto const si = dtos(Dim3{.x = i, .y = j, .z = k});
            dfab(i,j,k,dcomp+n) = proj(sfab,si,scomp+n);
        });
    }
}
206
//! Host-side unpack of MPI receive buffers into mf.
//!
//! Each receive buffer holds, for every tag in the corresponding container,
//! a contiguous chunk laid out as an Array4 over tag.sbox with ncomp
//! components. Values are written into tag.dbox of the destination fab
//! through the index mapping dtos and projection proj.
template <class FAB, class DTOS, class Proj>
std::enable_if_t<IsBaseFab<FAB>() && IsCallableR<Dim3, DTOS, Dim3>() && IsFabProjection<Proj, FAB>()>
unpack_recv_buffer_cpu (FabArray<FAB>& mf, int dcomp, int ncomp, Vector<char*> const& recv_data,
                        Vector<std::size_t> const& recv_size,
                        Vector<FabArrayBase::CopyComTagsContainer const*> const& recv_cctc,
                        DTOS const& dtos, Proj const& proj) noexcept {
    amrex::ignore_unused(recv_size); // only used by the assert below

    const auto N_rcvs = static_cast<int>(recv_cctc.size());
    if (N_rcvs == 0) { return; }

    using T = typename FAB::value_type;
#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
    for (int ircv = 0; ircv < N_rcvs; ++ircv) {
        const char* dptr = recv_data[ircv];
        auto const& cctc = *recv_cctc[ircv];
        for (auto const& tag : cctc) {
            auto const& dfab = mf.array(tag.dstIndex);
            auto const& sfab = amrex::makeArray4((T const*)(dptr), tag.sbox, ncomp);
            amrex::LoopConcurrentOnCpu(tag.dbox, ncomp, [=](int i, int j, int k, int n) noexcept {
                auto const si = dtos(Dim3{.x = i, .y = j, .z = k});
                dfab(i, j, k, dcomp + n) = proj(sfab, si, n);
            });
            // Advance past this tag's chunk in the flat receive buffer.
            dptr += tag.sbox.numPts() * ncomp * sizeof(T);
            AMREX_ASSERT(dptr <= recv_data[ircv] + recv_size[ircv]);
        }
    }
}
242
243#ifdef AMREX_USE_GPU
//! POD tag pairing a destination Array4, a source Array4, and the
//! destination Box to fill; used to batch many copies into one GPU kernel.
template <class T>
struct Array4Array4Box {
    Array4<T      > dfab;  // destination data
    Array4<T const> sfab;  // source data (often a view into a recv buffer)
    Box dbox;              // region of dfab to fill

    //! Box accessor required by the tag-based ParallelFor.
    Box const& box () const noexcept { return dbox; }
};
253
254template <class FAB, class DTOS, class Proj>
255std::enable_if_t<IsBaseFab<FAB>() && IsCallableR<Dim3, DTOS, Dim3>() && IsFabProjection<Proj, FAB>()>
256local_copy_gpu (FabArray<FAB>& dest, const FabArray<FAB>& src, int dcomp, int scomp, int ncomp,
257 FabArrayBase::CopyComTagsContainer const& local_tags, DTOS const& dtos, Proj const& proj) noexcept {
258 int N_locs = local_tags.size();
259 if (N_locs == 0) { return; }
260
261 using T = typename FAB::value_type;
262 Vector<Array4Array4Box<T> > loc_copy_tags;
263 loc_copy_tags.reserve(N_locs);
264 for (auto const& tag : local_tags) {
265 loc_copy_tags.push_back(Array4Array4Box<T>{.dfab = dest.array(tag.dstIndex),
266 .sfab = src.const_array(tag.srcIndex),
267 .dbox = tag.dbox});
268 }
269
270 ParallelFor(loc_copy_tags, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n,
271 Array4Array4Box<T> const& tag) noexcept
272 {
273 auto const si = dtos(Dim3{.x = i, .y = j, .z = k});
274 tag.dfab(i,j,k,dcomp+n) = proj(tag.sfab, si, scomp+n);
275 });
276}
277
278template <class FAB, class DTOS, class Proj>
279std::enable_if_t<IsBaseFab<FAB>() && IsCallableR<Dim3, DTOS, Dim3>() && IsFabProjection<Proj, FAB>()>
280unpack_recv_buffer_gpu (FabArray<FAB>& mf, int scomp, int ncomp,
281 Vector<char*> const& recv_data,
282 Vector<std::size_t> const& recv_size,
283 Vector<FabArrayBase::CopyComTagsContainer const*> const& recv_cctc,
284 DTOS const& dtos, Proj const& proj)
285{
286 amrex::ignore_unused(recv_size);
287
288 const int N_rcvs = recv_cctc.size();
289 if (N_rcvs == 0) { return; }
290
291 char* pbuffer = recv_data[0];
292#if 0
293 std::size_t szbuffer = 0;
294 // For linear solver test on summit, this is slower than writing to
295 // pinned memory directly on device.
296 if (not ParallelDescriptor::UseGpuAwareMpi()) {
297 // Memory in recv_data is pinned.
298 szbuffer = (recv_data[N_rcvs-1]-recv_data[0]) + recv_size[N_rcvs-1];
299 pbuffer = (char*)The_Arena()->alloc(szbuffer);
300 Gpu::copyAsync(Gpu::hostToDevice,recv_data[0],recv_data[0]+szbuffer,pbuffer);
301 Gpu::streamSynchronize();
302 }
303#endif
304
305 using T = typename FAB::value_type;
306 using TagType = Array4Array4Box<T>;
307 Vector<TagType> tags;
308 tags.reserve(N_rcvs);
309
310 for (int k = 0; k < N_rcvs; ++k)
311 {
312 std::size_t offset = recv_data[k]-recv_data[0];
313 const char* dptr = pbuffer + offset;
314 auto const& cctc = *recv_cctc[k];
315 for (auto const& tag : cctc)
316 {
317 tags.emplace_back(TagType{mf.array(tag.dstIndex),
318 amrex::makeArray4((T const*)dptr, tag.sbox, ncomp),
319 tag.dbox});
320 dptr += tag.dbox.numPts() * ncomp * sizeof(T);
321 BL_ASSERT(dptr <= pbuffer + offset + recv_size[k]);
322 }
323 }
324
325 ParallelFor(tags, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n,
326 Array4Array4Box<T> const& tag) noexcept
327 {
328 auto const si = dtos(Dim3{.x = i, .y = j, .z = k});
329 tag.dfab(i,j,k,scomp+n) = proj(tag.sfab, si ,n);
330 });
331
332 // There is Gpu::streamSynchronize in ParallelFor above
333
334 if (pbuffer != recv_data[0]) {
335 The_Arena()->free(pbuffer);
336 }
337}
338#endif
339
//! Build communication metadata from two FabArrays; delegates to the
//! BoxArray/DistributionMapping constructor using the arrays' own layouts.
template <typename DTOS, typename>
MultiBlockCommMetaData::MultiBlockCommMetaData (const FabArrayBase& dst, const Box& dstbox, const FabArrayBase& src,
                                                const IntVect& ngrow, DTOS const& dtos)
    : MultiBlockCommMetaData(dst.boxArray(), dst.DistributionMap(), dstbox, src.boxArray(),
                             src.DistributionMap(), ngrow, dtos) {}
345
//! Build communication metadata from explicit layouts; all work is done in
//! define().
template <typename DTOS, typename>
MultiBlockCommMetaData::MultiBlockCommMetaData (const BoxArray& dstba, const DistributionMapping& dstdm,
                                                const Box& dstbox, const BoxArray& srcba,
                                                const DistributionMapping& srcdm, const IntVect& ngrow, DTOS const& dtos) {
    define(dstba, dstdm, dstbox, srcba, srcdm, ngrow, dtos);
}
352
//! Build local / send / receive copy tags for a multi-block ParallelCopy.
//!
//! For every destination box (grown by ngrow and clipped to dstbox), the
//! region is mapped into source index space with dtos, intersected with the
//! source BoxArray, and each intersection is mapped back to yield a
//! (dst box, src box) tag. Tags are classified as local (same owner),
//! send (this rank owns the source) or receive (this rank owns the dest).
template <typename DTOS>
std::enable_if_t<IsIndexMapping<DTOS>::value>
MultiBlockCommMetaData::define (const BoxArray& dstba, const DistributionMapping& dstdm, const Box& dstbox,
                                const BoxArray& srcba, const DistributionMapping& srcdm, const IntVect& ngrow,
                                DTOS const& dtos) {
    m_LocTags = std::make_unique<FabArrayBase::CopyComTagsContainer>();
    m_SndTags = std::make_unique<FabArrayBase::MapOfCopyComTagContainers>();
    m_RcvTags = std::make_unique<FabArrayBase::MapOfCopyComTagContainers>();
    const int myproc = ParallelDescriptor::MyProc();
    for (int i = 0, N = static_cast<int>(dstba.size()); i < N; ++i) {
        const int dest_owner = dstdm[i];
        const Box partial_dstbox = grow(dstba[i], ngrow) & dstbox;
        if (partial_dstbox.isEmpty()) {
            continue;
        }
        // Image of the destination region in source index space.
        const Box partial_dstbox_mapped_in_src = Image(dtos, partial_dstbox).setType(srcba.ixType());
        enum { not_first_only = 0, first_only = 1 };
        std::vector<std::pair<int, Box>> boxes_from_src =
            srcba.intersections(partial_dstbox_mapped_in_src, not_first_only, ngrow);
        for (std::pair<int, Box> counted_box : boxes_from_src) {
            const int k = counted_box.first;
            const Box src_box = counted_box.second;
            AMREX_ASSERT(k < srcba.size());
            const int src_owner = srcdm[k];
            // Only record tags this rank participates in.
            if (dest_owner == myproc || src_owner == myproc) {
                if (src_owner == dest_owner) {
                    // Local copy: tile for better parallelism.
                    const BoxList tilelist(src_box, FabArrayBase::comm_tile_size);
                    for (const Box& tilebox : tilelist) {
                        const Box inverse_image = InverseImage(dtos, tilebox).setType(dstba.ixType());
                        if ((inverse_image & partial_dstbox).ok()) {
                            m_LocTags->emplace_back(inverse_image, tilebox, i, k);
                        }
                    }
                } else {
                    const Box inverse_image = InverseImage(dtos, src_box).setType(dstba.ixType());
                    if ((inverse_image & partial_dstbox).ok()) {
                        // Send tags are keyed by the destination owner,
                        // receive tags by the source owner.
                        FabArrayBase::CopyComTagsContainer& copy_tags =
                            (src_owner == myproc) ? (*m_SndTags)[dest_owner]
                                                  : (*m_RcvTags)[src_owner];
                        copy_tags.emplace_back(inverse_image, src_box, i, k);
                    }
                }
            }
        }
    }
}
399
//! Start a non-blocking boundary exchange described by cmd on mf.
//!
//! Posts receives, packs and posts sends, and performs the local copies.
//! Returns a CommHandler to be passed to Comm_finish. In a serial run (or
//! without MPI) only the local copies are done and an empty handler is
//! returned.
template <class FAB, class DTOS, class Proj>
#ifdef AMREX_USE_MPI
// NOTE(review): the doc extraction appears to have dropped a line here
// (likely a [[nodiscard]]-style attribute) -- confirm against the repo.
#endif
CommHandler
Comm_nowait (FabArray<FAB>& mf, int scomp, int ncomp, FabArrayBase::CommMetaData const& cmd,
             DTOS const& dtos, Proj const& proj)
{
#ifdef AMREX_USE_MPI
    if (ParallelContext::NProcsSub() == 1)
#endif
    {
        // Serial path: everything is a local copy.
        if (cmd.m_LocTags->empty()) { return CommHandler{}; }
#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion()) {
            local_copy_gpu(mf, mf, scomp, scomp, ncomp, *cmd.m_LocTags, dtos, proj);
        } else
#endif
        {
            local_copy_cpu(mf, mf, scomp, scomp, ncomp, *cmd.m_LocTags, dtos, proj);
        }
        return CommHandler{};
    }

#ifdef AMREX_USE_MPI
    //
    // Do this before prematurely exiting if running in parallel.
    // Otherwise sequence numbers will not match across MPI processes.
    //
    int SeqNum = ParallelDescriptor::SeqNum();

    const auto N_locs = cmd.m_LocTags->size();
    const auto N_rcvs = cmd.m_RcvTags->size();
    const auto N_snds = cmd.m_SndTags->size();

    if (N_locs == 0 && N_rcvs == 0 && N_snds == 0) {
        // No work to do.
        return CommHandler{};
    }

    CommHandler handler{};
    handler.mpi_tag = SeqNum;

    // Post receives first so matching sends can complete eagerly.
    if (N_rcvs > 0) {
        handler.recv.the_data = FabArray<FAB>::PostRcvs(*cmd.m_RcvTags, handler.recv.data, handler.recv.size,
                                                        handler.recv.rank, handler.recv.request, ncomp, SeqNum);
    }

    if (N_snds > 0) {
        handler.send.the_data =
            FabArray<FAB>::PrepareSendBuffers(*cmd.m_SndTags, handler.send.data, handler.send.size,
                                              handler.send.rank, handler.send.request, handler.send.cctc, ncomp);

#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion()) {
            FabArray<FAB>::pack_send_buffer_gpu(mf, scomp, ncomp, handler.send.data,
                                                handler.send.size, handler.send.cctc, handler.send.id);
        } else
#endif
        {
            FabArray<FAB>::pack_send_buffer_cpu(mf, scomp, ncomp, handler.send.data,
                                                handler.send.size, handler.send.cctc);
        }

        FabArray<FAB>::PostSnds(handler.send.data, handler.send.size, handler.send.rank, handler.send.request, SeqNum);
    }

    // Overlap local copies with the communication in flight.
    if (N_locs > 0)
    {
#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion()) {
            local_copy_gpu(mf, mf, scomp, scomp, ncomp, *cmd.m_LocTags, dtos, proj);
        } else
#endif
        {
            local_copy_cpu(mf, mf, scomp, scomp, ncomp, *cmd.m_LocTags, dtos, proj);
        }
    }

    return handler;
#endif
}
482
483#ifdef AMREX_USE_MPI
//! Finish the non-blocking exchange started by Comm_nowait: wait for the
//! receives, unpack them into mf, and wait for the sends to complete.
template <class FAB, class DTOS, class Proj>
void
Comm_finish (FabArray<FAB>& mf, int scomp, int ncomp, FabArrayBase::CommMetaData const& cmd,
             CommHandler handler, DTOS const& dtos, Proj const& proj)
{
    // Serial runs did all work in Comm_nowait.
    if (ParallelContext::NProcsSub() == 1) { return; }

    const auto N_rcvs = static_cast<int>(cmd.m_RcvTags->size());
    if (N_rcvs > 0)
    {
        // Reattach each receive buffer to its tag container (keyed by rank).
        handler.recv.cctc.resize(N_rcvs, nullptr);
        for (int k = 0; k < N_rcvs; ++k) {
            auto const& cctc = cmd.m_RcvTags->at(handler.recv.rank[k]);
            handler.recv.cctc[k] = &cctc;
        }
        handler.recv.stats.resize(handler.recv.request.size());
        ParallelDescriptor::Waitall(handler.recv.request, handler.recv.stats);
#ifdef AMREX_DEBUG
        if (!CheckRcvStats(handler.recv.stats, handler.recv.size, handler.mpi_tag)) {
            amrex::Abort("NonLocalBC::Comm_finish failed with wrong message size");
        }
#endif

#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion())
        {
            unpack_recv_buffer_gpu(mf, scomp, ncomp, handler.recv.data,
                                   handler.recv.size, handler.recv.cctc, dtos, proj);
        } else
#endif
        {
            unpack_recv_buffer_cpu(mf, scomp, ncomp, handler.recv.data,
                                   handler.recv.size, handler.recv.cctc, dtos, proj);
        }
    }

    // Sends must complete before their buffers (owned by handler) are freed.
    if ( ! cmd.m_SndTags->empty() ) {
        handler.send.stats.resize(handler.send.request.size());
        ParallelDescriptor::Waitall(handler.send.request, handler.send.stats);
    }
}
525#endif
526
//! Fill ghost cells at the x-lo and y-lo domain boundaries by a 90-degree
//! rotation (for domains where the two boundaries are physically the same
//! edge). The x-y corner block at (-nghost..-1, -nghost..-1) is filled
//! afterwards by the point reflection (i,j) -> (-1-i,-1-j).
template <class FAB>
std::enable_if_t<IsBaseFab<FAB>::value>
Rotate90 (FabArray<FAB>& mf, int scomp, int ncomp, IntVect const& nghost, Box const& domain)
{
    BL_PROFILE("Rotate90");

    AMREX_ASSERT(domain.cellCentered());
    AMREX_ASSERT(domain.smallEnd() == 0);
    AMREX_ASSERT(domain.length(0) == domain.length(1)); // rotation needs a square x-y domain
    AMREX_ASSERT(mf.is_cell_centered());
    AMREX_ASSERT(scomp < mf.nComp() && scomp+ncomp <= mf.nComp());
    AMREX_ASSERT(nghost.allLE(mf.nGrowVect()) && nghost[0] == nghost[1]);

    if (nghost[0] <= 0) { return; }

    const FabArrayBase::RB90& TheRB90 = mf.getRB90(nghost, domain);

    auto handler = Comm_nowait(mf, scomp, ncomp, TheRB90,Rotate90DstToSrc{},
                               Identity{});

    // The corner block not covered by either rotation.
    Box corner(-nghost, IntVect{AMREX_D_DECL(-1,-1,domain.bigEnd(2)+nghost[2])});
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(mf); mfi.isValid(); ++mfi) {
        Box const& bx = corner & mfi.fabbox();
        if (bx.ok()) {
            auto const& fab = mf.array(mfi);
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx,ncomp,i,j,k,n,
            {
                fab(i,j,k,n) = fab(-i-1,-j-1,k,n);
            });
        }
    }

#ifdef AMREX_USE_MPI
    Comm_finish(mf, scomp, ncomp, TheRB90, std::move(handler), Rotate90DstToSrc{},
                Identity{});
#else
    amrex::ignore_unused(handler);
#endif
}
569
570template <class FAB>
571std::enable_if_t<IsBaseFab<FAB>::value>
572Rotate90 (FabArray<FAB>& mf, Box const& domain)
573{
574 Rotate90(mf, 0, mf.nComp(), mf.nGrowVect(), domain);
575}
576
//! Fill ghost cells at the x-lo domain boundary by a 180-degree rotation
//! (for domains where the x-lo boundary maps onto itself rotated in y).
template <class FAB>
std::enable_if_t<IsBaseFab<FAB>::value>
Rotate180 (FabArray<FAB>& mf, int scomp, int ncomp, IntVect const& nghost, Box const& domain)
{
    BL_PROFILE("Rotate180");

    AMREX_ASSERT(domain.cellCentered());
    AMREX_ASSERT(domain.smallEnd() == 0);
    AMREX_ASSERT(domain.length(1) % 2 == 0); // rotation pairs y-halves
    AMREX_ASSERT(mf.is_cell_centered());
    AMREX_ASSERT(scomp < mf.nComp() && scomp+ncomp <= mf.nComp());
    AMREX_ASSERT(nghost.allLE(mf.nGrowVect()));

    if (nghost[0] <= 0) { return; }

    const FabArrayBase::RB180& TheRB180 = mf.getRB180(nghost, domain);

    auto handler = Comm_nowait(mf, scomp, ncomp, TheRB180,
                               Rotate180Fn{domain.length(1)}, Identity{});

#ifdef AMREX_USE_MPI
    Comm_finish(mf, scomp, ncomp, TheRB180, std::move(handler),
                Rotate180Fn{domain.length(1)}, Identity{});
#else
    amrex::ignore_unused(handler);
#endif
}
604
605template <class FAB>
606std::enable_if_t<IsBaseFab<FAB>::value>
607Rotate180 (FabArray<FAB>& mf, Box const& domain)
608{
609 Rotate180(mf, 0, mf.nComp(), mf.nGrowVect(), domain);
610}
611
//! Fill ghost cells across the polar (x) boundaries using the PolarFn
//! mapping (reflect in x, shift half the domain in y).
template <class FAB>
std::enable_if_t<IsBaseFab<FAB>::value>
FillPolar (FabArray<FAB>& mf, int scomp, int ncomp, IntVect const& nghost, Box const& domain)
{
    BL_PROFILE("FillPolar");

    AMREX_ASSERT(domain.cellCentered());
    AMREX_ASSERT(domain.smallEnd() == 0);
    AMREX_ASSERT(domain.length(1) % 2 == 0); // mapping pairs y-halves
    AMREX_ASSERT(mf.is_cell_centered());
    AMREX_ASSERT(scomp < mf.nComp() && scomp+ncomp <= mf.nComp());
    AMREX_ASSERT(nghost.allLE(mf.nGrowVect()));

    if (nghost[0] <= 0) { return; }

    const FabArrayBase::PolarB& ThePolarB = mf.getPolarB(nghost, domain);

    auto handler = Comm_nowait(mf, scomp, ncomp, ThePolarB,
                               PolarFn{.Lx = domain.length(0), .Ly = domain.length(1)},
                               Identity{});

#ifdef AMREX_USE_MPI
    Comm_finish(mf, scomp, ncomp, ThePolarB, std::move(handler),
                PolarFn{.Lx = domain.length(0), .Ly = domain.length(1)}, Identity{});
#else
    amrex::ignore_unused(handler);
#endif
}
640
641template <class FAB>
642std::enable_if_t<IsBaseFab<FAB>::value>
643FillPolar (FabArray<FAB>& mf, Box const& domain)
644{
645 FillPolar(mf, 0, mf.nComp(), mf.nGrowVect(), domain);
646}
647
//! Start a non-blocking FillBoundary using precomputed metadata cmd;
//! returns the handler to pass to FillBoundary_finish. Thin wrapper around
//! Comm_nowait with an argument-sanity assert.
template <typename FAB, typename DTOS, typename Proj>
std::enable_if_t<IsBaseFab<FAB>() &&
                 IsCallableR<Dim3,DTOS,Dim3>() &&
                 IsFabProjection<Proj,FAB>(),
                 CommHandler>
FillBoundary_nowait (FabArray<FAB>& mf, const FabArrayBase::CommMetaData& cmd,
                     int scomp, int ncomp, DTOS const& dtos, Proj const& proj)
{
    BL_PROFILE("FillBoundary_nowait(cmd)");
    AMREX_ASSERT(scomp < mf.nComp() && scomp+ncomp <= mf.nComp());
    return Comm_nowait(mf, scomp, ncomp, cmd, dtos, proj);
}
660
//! Finish a FillBoundary started by FillBoundary_nowait. Without MPI there
//! is nothing left to do.
template <typename FAB, typename DTOS, typename Proj>
std::enable_if_t<IsBaseFab<FAB>() &&
                 IsCallableR<Dim3,DTOS,Dim3>() &&
                 IsFabProjection<Proj,FAB>()>
FillBoundary_finish (CommHandler handler,
                     FabArray<FAB>& mf, const FabArrayBase::CommMetaData& cmd,
                     int scomp, int ncomp, DTOS const& dtos, Proj const& proj)
{
#ifdef AMREX_USE_MPI
    BL_PROFILE("FillBoundary_finish(cmd)");
    Comm_finish(mf, scomp, ncomp, cmd, std::move(handler), dtos, proj);
#else
    amrex::ignore_unused(handler,mf,cmd,scomp,ncomp,dtos,proj);
#endif
}
676
//! Compute matched (source box, destination box) pairs for dstbox under the
//! mapping dtos.
//!
//! If the image of dstbox crosses a domain boundary in some direction, the
//! source side splits into two intervals per crossed direction (hence up to
//! 2^AMREX_SPACEDIM pairs); each source piece is paired with the matching
//! sub-interval of dstbox, honoring the mapping's per-direction sign and
//! axis permutation. Returns an empty vector if either mapped corner of
//! dstbox falls outside domain.
template <typename DTOS>
Vector<std::pair<Box,Box>>
get_src_dst_boxes (DTOS const& dtos, Box const& dstbox, Box const& domain)
{
    Vector<std::pair<Box,Box>> r;
    IntVect mapped_smallend(dtos(amrex::lbound(dstbox)));
    IntVect mapped_bigend  (dtos(amrex::ubound(dstbox)));
    if (!domain.contains(mapped_smallend) || !domain.contains(mapped_bigend)) {
        return r;
    }

    // Per-direction orientation and axis permutation of the mapping
    // (assumed constant over dstbox; sampled at its lower corner).
    auto sign = dtos.sign(amrex::lbound(dstbox));
    auto perm = dtos.permutation(amrex::lbound(dstbox));
    auto dtype = dstbox.type();
    // Source index type is the destination type with axes permuted.
    IntVect stype{AMREX_D_DECL(dtype[perm[0]],
                               dtype[perm[1]],
                               dtype[perm[2]])};
    Array<Array<std::pair<int,int>,2>,AMREX_SPACEDIM> ends;     // src intervals
    Array<Array<std::pair<int,int>,2>,AMREX_SPACEDIM> dst_ends; // matching dst intervals
    Array<int,AMREX_SPACEDIM> nboxes;                           // 1 or 2 pieces per dir
    for (int ddim = 0; ddim < AMREX_SPACEDIM; ++ddim) {
        int sdim = perm[ddim];
        auto mm = std::minmax(mapped_smallend[sdim],mapped_bigend[sdim]);
        // If the mapped ends are ordered consistently with the mapping's
        // sign, the image is contiguous: a single interval.
        if (((sign[ddim] > 0) && (mapped_smallend[sdim] <= mapped_bigend[sdim])) ||
            ((sign[ddim] < 0) && (mapped_bigend[sdim] <= mapped_smallend[sdim])))
        {
            nboxes[sdim] = 1;
            ends[sdim][0] = mm;
            dst_ends[ddim][0] = std::make_pair(dstbox.smallEnd(ddim),
                                               dstbox.bigEnd(ddim));
        } else {
            // Image wraps around the domain boundary: two pieces, one at
            // each end of the domain in this direction.
            nboxes[sdim] = 2;
            ends[sdim][0].first  = domain.smallEnd(sdim);
            ends[sdim][0].second = mm.first;
            ends[sdim][1].first  = mm.second;
            ends[sdim][1].second = domain.bigEnd(sdim);
            int n0 = ends[sdim][0].second - ends[sdim][0].first;
            int n1 = ends[sdim][1].second - ends[sdim][1].first;
            // Match each source piece with the destination sub-interval of
            // the same length, ordered by which mapped end it contains.
            if (mm.first == mapped_smallend[sdim]) {
                dst_ends[ddim][0] = std::make_pair(dstbox.smallEnd(ddim),
                                                   dstbox.smallEnd(ddim)+n0);
                dst_ends[ddim][1] = std::make_pair(dstbox.bigEnd(ddim)-n1,
                                                   dstbox.bigEnd(ddim));
            } else {
                dst_ends[ddim][0] = std::make_pair(dstbox.bigEnd(ddim)-n0,
                                                   dstbox.bigEnd(ddim));
                dst_ends[ddim][1] = std::make_pair(dstbox.smallEnd(ddim),
                                                   dstbox.smallEnd(ddim)+n1);
            }
        }
    }

    r.reserve(AMREX_D_TERM(nboxes[0],*nboxes[1],*nboxes[2]));

    // Cartesian product over the per-direction pieces. The AMREX_D_TERM at
    // the bottom supplies the matching closing braces per dimension.
#if (AMREX_SPACEDIM == 3)
    for (int kbox = 0; kbox < nboxes[2]; ++kbox) {
#endif
#if (AMREX_SPACEDIM >=2 )
    for (int jbox = 0; jbox < nboxes[1]; ++jbox) {
#endif
    for (int ibox = 0; ibox < nboxes[0]; ++ibox)
    {
        IntVect siv(AMREX_D_DECL(ibox,jbox,kbox));
        IntVect div(AMREX_D_DECL(siv[perm[0]],siv[perm[1]],siv[perm[2]]));
        r.emplace_back(Box(IntVect(AMREX_D_DECL(ends[0][ibox].first,
                                                ends[1][jbox].first,
                                                ends[2][kbox].first)),
                           IntVect(AMREX_D_DECL(ends[0][ibox].second,
                                                ends[1][jbox].second,
                                                ends[2][kbox].second)),
                           stype),
                       Box(IntVect(AMREX_D_DECL(dst_ends[0][div[0]].first,
                                                dst_ends[1][div[1]].first,
                                                dst_ends[2][div[2]].first)),
                           IntVect(AMREX_D_DECL(dst_ends[0][div[0]].second,
                                                dst_ends[1][div[1]].second,
                                                dst_ends[2][div[2]].second)),
                           dtype));
    AMREX_D_TERM(},},})

    return r;
}
759
760template <typename DTOS>
761Box get_dst_subbox (DTOS const& dtos, std::pair<Box,Box> const& sdboxes,
762 Box const& srcsubbox)
763{
764 Box const& srcbox = sdboxes.first;
765 Box const& dstbox = sdboxes.second;
766 if (srcbox == srcsubbox) {
767 return dstbox;
768 } else {
769 auto sign = dtos.sign(amrex::lbound(dstbox));
770 auto perm = dtos.permutation(amrex::lbound(dstbox));
771 Box dstsubbox = dstbox;
772 for (int ddim = 0; ddim < AMREX_SPACEDIM; ++ddim) {
773 int sdim = perm[ddim];
774 if (sign[ddim] > 0) {
775 dstsubbox.growLo(ddim, srcbox.smallEnd(sdim)-srcsubbox.smallEnd(sdim));
776 dstsubbox.growHi(ddim, srcsubbox.bigEnd(sdim)-srcbox.bigEnd(sdim));
777 } else {
778 dstsubbox.growLo(ddim, srcsubbox.bigEnd(sdim)-srcbox.bigEnd(sdim));
779 dstsubbox.growHi(ddim, srcbox.smallEnd(sdim)-srcsubbox.smallEnd(sdim));
780 }
781 }
782 return dstsubbox;
783 }
784}
785
787namespace detail {
788 void split_boxes (BoxList& bl, Box const& domain);
789}
791
//! Build FillBoundary metadata that combines the normal (periodic/ghost)
//! exchange with the non-local mapping dtos for ghost regions outside the
//! periodically-grown domain.
//!
//! For every grid, the ghost region not covered by the grown periodic
//! domain is split along domain lines, mapped into source space with
//! get_src_dst_boxes, intersected with the BoxArray, and each intersection
//! becomes a local / send / receive tag depending on ownership.
template <typename FAB, typename DTOS>
std::enable_if_t<IsBaseFab<FAB>() && IsCallableR<Dim3,DTOS,Dim3>(),
                 FabArrayBase::CommMetaData>
makeFillBoundaryMetaData (FabArray<FAB>& mf, IntVect const& nghost,
                          Geometry const& geom, DTOS const& dtos)
{
    FabArrayBase::CommMetaData cmd;
    cmd.m_LocTags = std::make_unique<FabArrayBase::CopyComTagsContainer>();
    cmd.m_SndTags = std::make_unique<FabArrayBase::MapOfCopyComTagContainers>();
    cmd.m_RcvTags = std::make_unique<FabArrayBase::MapOfCopyComTagContainers>();

    // Normal FillBoundary part
    mf.define_fb_metadata(cmd, nghost, false, geom.periodicity(), false);

    BoxArray const& ba = mf.boxArray();
    DistributionMapping const& dm = mf.DistributionMap();
    Box dombox  = amrex::convert(geom.Domain(), ba.ixType());
    Box pdombox = amrex::convert(geom.growPeriodicDomain(nghost), ba.ixType());

    const int myproc = ParallelDescriptor::MyProc();
    const auto nboxes = static_cast<int>(ba.size());
    std::vector<std::pair<int,Box> > isects;

    for (int i = 0; i < nboxes; ++i) {
        Box const& gbx = amrex::grow(ba[i], nghost);
        // Ghost region not handled by the normal/periodic FillBoundary.
        BoxList bl = amrex::boxDiff(gbx, pdombox);
        if (bl.isEmpty()) { continue; }

        // Split along domain lines so each piece has a single-valued map.
        detail::split_boxes(bl, dombox);

        const int dst_owner = dm[i];
        for (auto const& dst_box : bl) {
            auto const& src_dst_boxes = get_src_dst_boxes(dtos, dst_box, dombox);
            for (auto const& sd_box_pair : src_dst_boxes) {
                ba.intersections(sd_box_pair.first, isects);
                for (auto const& is : isects) {
                    int const k = is.first;
                    Box const src_b = is.second;
                    int const src_owner = dm[k];
                    // Only record tags this rank participates in.
                    if (dst_owner == myproc || src_owner == myproc) {
                        Box const& dst_b = get_dst_subbox(dtos, sd_box_pair, src_b);
                        if (src_owner == dst_owner) {
                            cmd.m_LocTags->emplace_back(dst_b, src_b, i, k);
                        } else {
                            auto& tags = (dst_owner == myproc) ?
                                (*cmd.m_RcvTags)[src_owner] :
                                (*cmd.m_SndTags)[dst_owner];
                            tags.emplace_back(dst_b, src_b, i, k);
                        }
                    }
                }
            }
        }
    }

    return cmd;
}
849
//! Index mapping for a spherical grid with (theta, phi, r) ordering
//! (presumably x=theta, y=phi, z=r, per the struct name -- confirm with
//! callers). Ghost cells across the theta poles reflect in theta and shift
//! phi by half the domain; phi is periodic; crossing r=0 reflects both
//! theta and r with the phi half-shift.
struct SphThetaPhiRIndexMapping
{
    SphThetaPhiRIndexMapping (Box const& a_domain)
        : nx(a_domain.length(0)),
          ny(a_domain.length(1)),
          nz(a_domain.length(2))
    {
        AMREX_ASSERT(a_domain.smallEnd() == 0);
    }

    [[nodiscard]] AMREX_GPU_HOST_DEVICE
    Dim3 operator() (Dim3 const& ijk) const noexcept
    {
        const int i = ijk.x;
        const int j = ijk.y;
        const int k = ijk.z;
        // Classify each index as below / inside / above the domain.
        bool ilo = i < 0;
        bool ihi = i >= nx;
        bool imd = i >= 0 && i < nx;
        bool jlo = j < 0;
        bool jhi = j >= ny;
        bool jmd = j >= 0 && j < ny;
        bool klo = k < 0;
        bool kmd = k >= 0 && k < nz;
        // We do not need to do anything at the theta-lo/r-lo edge,
        // theta-hi/r-lo edge, and r > r-hi.
        if (ilo && jmd && kmd)
        {
            // across theta-lo pole: reflect theta, opposite phi
            return Dim3{.x = -1-i, .y = (j+ny/2)%ny, .z = k};
        }
        else if (ihi && jmd && kmd)
        {
            // across theta-hi pole: reflect theta, opposite phi
            return Dim3{.x = 2*nx-1-i, .y = (j+ny/2)%ny, .z = k};
        }
        else if (imd && jlo && kmd)
        {
            // phi periodic wrap (low side)
            return Dim3{.x = i, .y = j+ny, .z = k};
        }
        else if (imd && jhi && kmd)
        {
            // phi periodic wrap (high side)
            return Dim3{.x = i, .y = j-ny, .z = k};
        }
        else if (imd && jmd && klo)
        {
            // through r = 0: reflect r and theta, opposite phi
            return Dim3{.x = nx-1-i, .y = (j+ny/2)%ny, .z = -1-k};
        }
        else if (ilo && jlo && kmd)
        {
            // theta-lo/phi-lo corner: same as the theta-lo pole case
            return Dim3{.x = -1-i, .y = (j+ny/2)%ny, .z = k};
        }
        else if (ihi && jlo && kmd)
        {
            return Dim3{.x = 2*nx-1-i, .y = (j+ny/2)%ny, .z = k};
        }
        else if (ilo && jhi && kmd)
        {
            return Dim3{.x = -1-i, .y = (j+ny/2)%ny, .z = k};
        }
        else if (ihi && jhi && kmd)
        {
            return Dim3{.x = 2*nx-1-i, .y = (j+ny/2)%ny, .z = k};
        }
        else if (imd && jlo && klo)
        {
            // r-lo corners behave like the r-lo face
            return Dim3{.x = nx-1-i, .y = (j+ny/2)%ny, .z = -1-k};
        }
        else if (imd && jhi && klo)
        {
            return Dim3{.x = nx-1-i, .y = (j+ny/2)%ny, .z = -1-k};
        }
        else
        {
            // untouched regions (see note above) map to themselves
            return ijk;
        }
    }

    //! Per-direction orientation of the mapping at ijk: theta and r flip
    //! below r=0; only theta flips across the poles; identity otherwise.
    [[nodiscard]] IntVect sign (Dim3 const& ijk) const noexcept
    {
        if (ijk.z < 0) {
            return IntVect{AMREX_D_DECL(-1, 1,-1)};
        } else if (ijk.z >=0 && ijk.z < nz &&
                   (ijk.x < 0 || ijk.x >= nx)) {
            return IntVect{AMREX_D_DECL(-1, 1, 1)};
        } else {
            return IntVect{AMREX_D_DECL( 1, 1, 1)};
        }
    }

    //! This mapping never permutes axes.
    [[nodiscard]] IntVect permutation (Dim3 const& /*ijk*/) const noexcept // NOLINT(readability-convert-member-functions-to-static)
    {
        return IntVect(AMREX_D_DECL(0,1,2));
    }

private:
    int nx, ny, nz;
};
946
//! Component projection companion to SphThetaPhiRIndexMapping: flips the
//! sign of vector components that reverse orientation under the index map.
//! Component scomp is treated as the theta component and scomp+2 as the r
//! component (presumably of a velocity/vector field -- confirm with
//! callers); scomp+1 (phi) is never flipped.
struct SphThetaPhiRComponentMapping
{
    SphThetaPhiRComponentMapping (Box const& a_domain, int a_start_index)
        : nx(a_domain.length(0)),
          ny(a_domain.length(1)),
          nz(a_domain.length(2)),
          scomp(a_start_index) {}

    template <typename T>
    [[nodiscard]] AMREX_GPU_HOST_DEVICE
    T operator()(Array4<const T> const& a, Dim3 const& ijk, int n) const noexcept
    {
        const int i = ijk.x;
        const int j = ijk.y;
        const int k = ijk.z;
        auto r = a(i,j,k,n);
        if (n == scomp) {
            // theta component: unchanged only for the pure phi-periodic
            // wrap; flipped for pole and r=0 crossings.
            if ((i >= 0 && i < nx) &&
                (j < 0 || j >= ny) &&
                (k >= 0 && k < nz)) {
                return r;
            } else {
                // We do not need to worry about the theta-lo/r-lo edge,
                // theta-hi/r-lo edge, and r > r-hi.
                return -r;
            }
        } else if (n == scomp+2) {
            // r component flips when crossing r = 0.
            if (k < 0) {
                return -r;
            } else {
                return r;
            }
        } else {
            return r;
        }
    }
private:
    int nx, ny, nz;
    int scomp;
};
987
988extern template MultiBlockCommMetaData ParallelCopy(FabArray<FArrayBox>& dest, const Box& destbox,
989 const FabArray<FArrayBox>& src, int destcomp,
990 int srccomp, int numcomp, const IntVect& ngrow,
991 MultiBlockIndexMapping const&, Identity const&);
992}
993
994#endif
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define BL_ASSERT(EX)
Definition AMReX_BLassert.H:39
#define AMREX_ASSERT(EX)
Definition AMReX_BLassert.H:38
#define AMREX_NODISCARD
Definition AMReX_Extension.H:252
#define AMREX_FORCE_INLINE
Definition AMReX_Extension.H:119
#define AMREX_HOST_DEVICE_PARALLEL_FOR_4D(...)
Definition AMReX_GpuLaunchMacrosC.nolint.H:111
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
#define AMREX_GPU_HOST_DEVICE
Definition AMReX_GpuQualifiers.H:20
Array4< int const > offset
Definition AMReX_HypreMLABecLap.cpp:1139
#define AMREX_D_TERM(a, b, c)
Definition AMReX_SPACE.H:172
#define AMREX_D_DECL(a, b, c)
Definition AMReX_SPACE.H:171
virtual void free(void *pt)=0
A pure virtual function for deleting the arena pointed to by pt.
virtual void * alloc(std::size_t sz)=0
__host__ __device__ BoxND & growLo(int idir, int n_cell=1) noexcept
Grow the BoxND on the low end by n_cell cells in direction idir. NOTE: n_cell negative shrinks the Bo...
Definition AMReX_Box.H:662
__host__ __device__ BoxND & growHi(int idir, int n_cell=1) noexcept
Grow the BoxND on the high end by n_cell cells in direction idir. NOTE: n_cell negative shrinks the B...
Definition AMReX_Box.H:673
CopyComTag::CopyComTagsContainer CopyComTagsContainer
Definition AMReX_FabArrayBase.H:220
__host__ __device__ Dim3 ubound(Array4< T > const &a) noexcept
Return the inclusive upper bounds of an Array4 in Dim3 form.
Definition AMReX_Array4.H:1331
__host__ __device__ Dim3 length(Array4< T > const &a) noexcept
Return the spatial extents of an Array4 in Dim3 form.
Definition AMReX_Array4.H:1345
__host__ __device__ Dim3 lbound(Array4< T > const &a) noexcept
Return the inclusive lower bounds of an Array4 in Dim3 form.
Definition AMReX_Array4.H:1317
__host__ __device__ BoxND< dim > grow(const BoxND< dim > &b, int i) noexcept
Grow BoxND in all directions by given amount.
Definition AMReX_Box.H:1280
Arena * The_Arena()
Definition AMReX_Arena.cpp:820
Definition AMReX_NonLocalBC.cpp:39
std::enable_if_t< IsBaseFab< FAB >() &&IsCallableR< Dim3, DTOS, Dim3 >() &&IsFabProjection< Proj, FAB >()> unpack_recv_buffer_cpu(FabArray< FAB > &mf, int dcomp, int ncomp, Vector< char * > const &recv_data, Vector< std::size_t > const &recv_size, Vector< FabArrayBase::CopyComTagsContainer const * > const &recv_cctc, DTOS const &dtos=DTOS{}, Proj const &proj=Proj{}) noexcept
std::enable_if_t< IsBaseFab< FAB >::value > Rotate90(FabArray< FAB > &mf, int scomp, int ncomp, IntVect const &nghost, Box const &domain)
std::enable_if_t< IsBaseFab< FAB >::value > Rotate180(FabArray< FAB > &mf, int scomp, int ncomp, IntVect const &nghost, Box const &domain)
std::enable_if_t< IsCallableR< Dim3, DTOS, Dim3 >::value &&!IsCallableR< IndexType, DTOS, IndexType >::value, Box > Image(DTOS const &dtos, const Box &box)
Applies the Dim3 to Dim3 mapping onto Boxes but does not change the index type.
Definition AMReX_NonLocalBC.H:120
std::enable_if_t< IsBaseFab< FAB >() &&IsCallableR< Dim3, DTOS, Dim3 >(), FabArrayBase::CommMetaData > makeFillBoundaryMetaData(FabArray< FAB > &mf, IntVect const &nghost, Geometry const &geom, DTOS const &dtos)
Make metadata for FillBoundary.
std::enable_if_t< IsBaseFab< FAB >() &&IsCallableR< Dim3, DTOS, Dim3 >() &&IsFabProjection< Proj, FAB >()> local_copy_gpu(FabArray< FAB > &dest, const FabArray< FAB > &src, int dcomp, int scomp, int ncomp, FabArrayBase::CopyComTagsContainer const &local_tags, DTOS const &dtos=DTOS{}, Proj const &proj=Proj{}) noexcept
std::enable_if_t< IsBaseFab< FAB >::value > FillPolar(FabArray< FAB > &mf, int scomp, int ncomp, IntVect const &nghost, Box const &domain)
std::enable_if_t< HasInverseMemFn< DTOS >::value &&!IsCallableR< IndexType, DTOS, IndexType >::value, Box > InverseImage(DTOS const &dtos, const Box &box)
Applies the inverse Dim3 to Dim3 mapping onto Boxes without changing the index type.
Definition AMReX_NonLocalBC.H:183
std::enable_if_t< IsBaseFab< FAB >() &&IsCallableR< Dim3, DTOS, Dim3 >() &&IsFabProjection< Proj, FAB >()> local_copy_cpu(FabArray< FAB > &dest, const FabArray< FAB > &src, int dcomp, int scomp, int ncomp, FabArrayBase::CopyComTagsContainer const &local_tags, DTOS const &dtos=DTOS{}, Proj const &proj=Proj{}) noexcept
std::enable_if_t< IsBaseFab< FAB >() &&IsCallableR< Dim3, DTOS, Dim3 >() &&IsFabProjection< Proj, FAB >()> unpack_recv_buffer_gpu(FabArray< FAB > &mf, int scomp, int ncomp, Vector< char * > const &recv_data, Vector< std::size_t > const &recv_size, Vector< FabArrayBase::CopyComTagsContainer const * > const &recv_cctc, DTOS const &dtos=DTOS{}, Proj const &proj=Proj{})
int SeqNum() noexcept
Returns sequential message sequence numbers, usually used as tags for send/recv.
Definition AMReX_ParallelDescriptor.H:696
__host__ __device__ void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition AMReX.H:139
__host__ __device__ BoxND< dim > convert(const BoxND< dim > &b, const IntVectND< dim > &typ) noexcept
Return a BoxND with different type.
Definition AMReX_Box.H:1558
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition AMReX_CTOParallelForImpl.H:193
__host__ __device__ Array4< T > makeArray4(T *p, Box const &bx, int ncomp) noexcept
Definition AMReX_BaseFab.H:92
DistributionMapping const & DistributionMap(FabArrayBase const &fa)
Definition AMReX_FabArrayBase.cpp:2867
std::enable_if_t< IsFabArray< MF >::value > FillBoundary_finish(Vector< MF * > const &mf)
Wait for outstanding FillBoundary_nowait operations launched with the vector helper to complete.
Definition AMReX_FabArrayCommI.H:1113
std::enable_if_t< IsFabArray< MF >::value > FillBoundary_nowait(Vector< MF * > const &mf, Vector< int > const &scomp, Vector< int > const &ncomp, Vector< IntVect > const &nghost, Vector< Periodicity > const &period, Vector< int > const &cross={})
Launch FillBoundary_nowait across a vector of FabArrays.
Definition AMReX_FabArrayCommI.H:1067
BoxND< 3 > Box
Box is an alias for amrex::BoxND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:30
BoxList boxDiff(const Box &b1in, const Box &b2)
Returns BoxList defining the compliment of b2 in b1in.
Definition AMReX_BoxList.cpp:599
IntVectND< 3 > IntVect
IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:33
void LoopConcurrentOnCpu(Dim3 lo, Dim3 hi, F const &f) noexcept
Definition AMReX_Loop.H:388
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition AMReX.cpp:241
void ParallelCopy(MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &ng_src=IntVect(0), IntVect const &ng_dst=IntVect(0), Periodicity const &period=Periodicity::NonPeriodic())
dst = src w/ MPI communication
Definition AMReX_FabArrayUtility.H:2019
BoxArray const & boxArray(FabArrayBase const &fa)
Definition AMReX_FabArrayBase.cpp:2862