template <class T0, class T1>
struct CellStore
{
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    void operator() (T0* d, T1 s) const noexcept
    {
        *d = static_cast<T0>(s);
    }
};
template <class T0, class T1>
struct CellAdd
{
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    void operator() (T0* d, T1 s) const noexcept
    {
        *d += static_cast<T0>(s);
    }
};
template <class T0, class T1>
struct CellAtomicAdd
{
    template<class U0=T0, std::enable_if_t<amrex::HasAtomicAdd<U0>::value,int> = 0>
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    void operator() (U0* d, T1 s) const noexcept
    {
        Gpu::Atomic::AddNoRet(d, static_cast<U0>(s));
    }
};
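// The three functors above define the per-cell operation applied by the
// fab_to_fab routines below: CellStore overwrites, CellAdd accumulates, and
// CellAtomicAdd accumulates via Gpu::Atomic::AddNoRet so that concurrent
// updates of the same cell from different tags are safe. For example,
// FB_local_add_gpu further down passes detail::CellAtomicAdd<value_type,
// value_type>{} when destination regions of different tags may overlap.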
template <class T0, class T1, class F>
void
fab_to_fab (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp, int dcomp,
            int ncomp, F && f)
{
    TagVector<Array4CopyTag<T0, T1>> tv{copy_tags};

    detail::ParallelFor_doit(tv,
    [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                          Array4CopyTag<T0, T1> const& tag) noexcept
    {
        if (icell < ncells) {
            for (int n = 0; n < ncomp; ++n) {
                f(&(tag.dfab(i,j,k,n+dcomp)),
                  tag.sfab(i+tag.offset.x,j+tag.offset.y,k+tag.offset.z,n+scomp));
            }
        }
    });
}
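// This overload launches one kernel over all tags. It assumes either that the
// tags' destination regions do not overlap or that f itself tolerates
// concurrent updates of a cell (e.g. CellAtomicAdd); otherwise the masked
// overload of fab_to_fab below must be used.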
template <class TagType, class F>
void
fab_to_fab_store (Vector<TagType> const& tags, int scomp, int dcomp, int ncomp,
                  F && f)
{
    TagVector<TagType> tv{tags};

    detail::ParallelFor_doit(tv,
    [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                          TagType const& tag) noexcept
    {
        if (icell < ncells) {
            // A store only has to happen once per cell. The first thread to
            // flip the mask claims the cell; later arrivals skip it, so
            // non-atomic stores cannot tear.
            if (Gpu::Atomic::Exch(&(tag.mask(i,j,k)), 1) == 0) {
                for (int n = 0; n < ncomp; ++n) {
                    f(&(tag.dfab(i,j,k,n+dcomp)),
                      tag.sfab(i+tag.offset.x,j+tag.offset.y,k+tag.offset.z,n+scomp));
                }
            }
        }
    });
}
template <class TagType, class F>
void
fab_to_fab_other (Vector<TagType> const& tags, int scomp, int dcomp, int ncomp,
                  F && f)
{
    TagVector<TagType> tv{tags};

    detail::ParallelFor_doit(tv,
    [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                          TagType const& tag) noexcept
    {
        if (icell < ncells) {
            // Read-modify-write functors must be serialized per cell: the
            // mask cell acts as a spin lock shared by all tags touching it.
            int* m = &(tag.mask(i,j,k));
            bool to_do = true;
            while (to_do) {
                if (Gpu::Atomic::CAS(m, 0, 1) == 0) { // lock acquired
#if defined(AMREX_USE_SYCL)
                    sycl::atomic_fence(sycl::memory_order::acq_rel, sycl::memory_scope::device);
#else
                    __threadfence();
#endif
                    for (int n = 0; n < ncomp; ++n) {
                        f(&(tag.dfab(i,j,k,n+dcomp)),
                          tag.sfab(i+tag.offset.x,j+tag.offset.y,k+tag.offset.z,n+scomp));
                    }
#if defined(AMREX_USE_SYCL)
                    sycl::atomic_fence(sycl::memory_order::acq_rel, sycl::memory_scope::device);
#else
                    __threadfence();
#endif
                    Gpu::Atomic::Exch(m, 0); // release the lock
                    to_do = false;
                } else {
                    // Back off briefly before retrying.
#if defined(AMREX_USE_CUDA)
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)
                    for (int c = 0; c < 2; ++c) {}
#else
                    for (int c = 0; c < 2; ++c) {
                        __asm__ volatile("");
                    }
#endif
#elif defined(AMREX_USE_HIP)
                    __builtin_amdgcn_s_sleep(1);
#elif defined(AMREX_USE_SYCL)
                    for (int c = 0; c < 2; ++c) {
                        __asm__ volatile("");
                    }
#endif
                }
            }
        }
    });
}
template <class T0, class T1, class F>
void
fab_to_fab (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp, int dcomp,
            int ncomp, F && f, Vector<Array4Tag<int> > const& masks)
{
    using TagType = Array4MaskCopyTag<T0, T1>;
    Vector<TagType> tags;
    const int N = copy_tags.size();
    tags.reserve(N);
    for (int i = 0; i < N; ++i) {
        tags.push_back(TagType{.dfab = copy_tags[i].dfab, .sfab = copy_tags[i].sfab,
                               .mask = masks[i].dfab, .dbox = copy_tags[i].dbox,
                               .offset = copy_tags[i].offset});
    }

    if constexpr (std::is_same_v<F, CellStore<T0,T1>>)
    {
        fab_to_fab_store(tags, scomp, dcomp, ncomp, std::forward<F>(f));
    }
    else
    {
        fab_to_fab_other(tags, scomp, dcomp, ncomp, std::forward<F>(f));
    }
}
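// Dispatch note: with a mask, a plain store only has to happen once per cell
// (fab_to_fab_store claims the cell), while any read-modify-write functor
// such as CellAdd must hold the per-cell lock (fab_to_fab_other).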
template <typename T0, typename T1,
          std::enable_if_t<amrex::IsStoreAtomic<T0>::value, int> = 0>
void
fab_to_fab_atomic_cpy (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const&)
{
    // Stores of T0 are atomic, so no mask is needed.
    fab_to_fab<T0, T1>(copy_tags, scomp, dcomp, ncomp, CellStore<T0, T1>());
}
template <typename T0, typename T1,
          std::enable_if_t<!amrex::IsStoreAtomic<T0>::value, int> = 0>
void
fab_to_fab_atomic_cpy (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const& masks)
{
    fab_to_fab(copy_tags, scomp, dcomp, ncomp, CellStore<T0, T1>(), masks);
}
template <typename T0, typename T1,
          std::enable_if_t<amrex::HasAtomicAdd<T0>::value, int> = 0>
void
fab_to_fab_atomic_add (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const&)
{
    fab_to_fab(copy_tags, scomp, dcomp, ncomp, CellAtomicAdd<T0, T1>());
}
template <typename T0, typename T1,
          std::enable_if_t<!amrex::HasAtomicAdd<T0>::value, int> = 0>
void
fab_to_fab_atomic_add (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const& masks)
{
    fab_to_fab(copy_tags, scomp, dcomp, ncomp, CellAdd<T0, T1>(), masks);
}
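// The four overloads above pick the cheapest safe mechanism at compile time:
//
//   fab_to_fab_atomic_cpy: IsStoreAtomic<T0> ? plain CellStore : masked CellStore
//   fab_to_fab_atomic_add: HasAtomicAdd<T0>  ? CellAtomicAdd   : masked CellAdd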
template <typename T0, typename T1, class F>
void deterministic_fab_to_fab (Vector<Array4CopyTag<T0,T1>> const& a_tags, int scomp,
                               int dcomp, int ncomp, F const& f)
{
    if (a_tags.empty()) { return; }

    using TagType = Array4CopyTag<T0,T1>;

    // Split every tag into 64^3 destination tiles and group the pieces by
    // (destination fab, tile), so that each group can be updated by a single
    // GPU block in a fixed order.
    struct TiledTag {
        int tag_index;
        std::pair<int,Box> dindex_tilebox;
        bool operator< (TiledTag const& rhs) const noexcept {
            return this->dindex_tilebox < rhs.dindex_tilebox;
        }
        bool operator!= (TiledTag const& rhs) const noexcept {
            return this->dindex_tilebox != rhs.dindex_tilebox;
        }
    };

    Vector<TiledTag> tiled_tags;

    auto const ixtype = a_tags[0].dbox.ixType();

    constexpr int tile_size = 64;
    for (int itag = 0; itag < a_tags.size(); ++itag) {
        auto const& tag = a_tags[itag];
        auto const& dlo = tag.dbox.smallEnd();
        auto const& dhi = tag.dbox.bigEnd();
        IntVect const tlo(AMREX_D_DECL(amrex::coarsen<tile_size>(dlo[0]),
                                       amrex::coarsen<tile_size>(dlo[1]),
                                       amrex::coarsen<tile_size>(dlo[2])));
        IntVect const thi(AMREX_D_DECL(amrex::coarsen<tile_size>(dhi[0]),
                                       amrex::coarsen<tile_size>(dhi[1]),
                                       amrex::coarsen<tile_size>(dhi[2])));
#if (AMREX_SPACEDIM == 3)
        for (int kt = tlo[2]; kt <= thi[2]; ++kt) {
#endif
#if (AMREX_SPACEDIM >= 2)
        for (int jt = tlo[1]; jt <= thi[1]; ++jt) {
#endif
        for (int it = tlo[0]; it <= thi[0]; ++it)
        {
            IntVect const lo = IntVect(AMREX_D_DECL(it,jt,kt)) * tile_size;
            tiled_tags.push_back(TiledTag{
                .tag_index = itag,
                .dindex_tilebox = std::make_pair(tag.dindex,
                                                 Box(lo, lo+(tile_size-1), ixtype))});
        }
#if (AMREX_SPACEDIM >= 2)
        }
#endif
#if (AMREX_SPACEDIM == 3)
        }
#endif
    }

    std::sort(tiled_tags.begin(), tiled_tags.end());

    // h_ntags holds the start of each (dfab,tile) group in h_tags plus a final
    // sentinel, so group b spans [h_ntags[b], h_ntags[b+1]).
    Gpu::HostVector<unsigned int> h_ntags;
    Gpu::HostVector<TagType> h_tags;
    h_tags.reserve(tiled_tags.size());

    for (unsigned int itag = 0; itag < tiled_tags.size(); ++itag) {
        if (itag == 0) {
            h_ntags.push_back(0);
        } else if (tiled_tags[itag-1] != tiled_tags[itag]) {
            h_ntags.push_back(itag);
        }
        auto const& ttag = tiled_tags[itag];
        auto const& btag = a_tags[ttag.tag_index];
        h_tags.push_back(TagType{.dfab = btag.dfab, .dindex = btag.dindex, .sfab = btag.sfab,
                                 .dbox = btag.dbox & ttag.dindex_tilebox.second,
                                 .offset = btag.offset});
    }
    h_ntags.push_back((unsigned int)tiled_tags.size());

    Gpu::DeviceVector<TagType> d_tags(h_tags.size());
    Gpu::DeviceVector<unsigned int> d_ntags(h_ntags.size());
    Gpu::copyAsync(Gpu::hostToDevice, h_tags.begin(), h_tags.end(), d_tags.begin());
    Gpu::copyAsync(Gpu::hostToDevice, h_ntags.begin(), h_ntags.end(), d_ntags.begin());

    auto const* ptag = d_tags.data();
    auto const* pntags = d_ntags.data();
    auto const nblocks = int(h_ntags.size()-1);
    constexpr auto nthreads = 256;
    amrex::launch<nthreads>(nblocks, Gpu::gpuStream(),
#if defined(AMREX_USE_SYCL)
        [=] (sycl::nd_item<1> const& item)
        [[sycl::reqd_work_group_size(nthreads)]]
        {
            Dim1 blockIdx{item.get_group_linear_id()};
            Dim1 threadIdx{item.get_local_linear_id()};
#else
        [=] AMREX_GPU_DEVICE () noexcept
        {
#endif
            // One block per group; the group's tags are applied in order.
            for (unsigned int itag = pntags[blockIdx.x]; itag < pntags[blockIdx.x+1]; ++itag) {
                auto const tag = ptag[itag];
                auto ncells = int(tag.dbox.numPts());
                auto const len = amrex::length(tag.dbox);
                auto const lo  = amrex::lbound(tag.dbox);
                for (int icell = int(threadIdx.x); icell < ncells; icell += nthreads) {
                    int k =  icell /  (len.x*len.y);
                    int j = (icell - k*(len.x*len.y)) / len.x;
                    int i = (icell - k*(len.x*len.y)) - j*len.x;
                    i += lo.x;
                    j += lo.y;
                    k += lo.z;
                    for (int n = 0; n < ncomp; ++n) {
                        f(tag.dfab.ptr(i,j,k,n+dcomp),
                          tag.sfab(i + tag.offset.x,
                                   j + tag.offset.y,
                                   k + tag.offset.z, n+scomp));
                    }
                }
                if (itag+1 < pntags[blockIdx.x+1]) {
                    // Finish this tag's updates before the next tag touches
                    // the same tile.
#if defined(AMREX_USE_SYCL)
                    sycl::group_barrier(item.get_group());
#else
                    __syncthreads();
#endif
                }
            }
        });

    Gpu::streamSynchronize(); // d_tags/d_ntags go out of scope
}
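// Determinism sketch: after sorting, all tags that touch the same
// (destination fab, 64^3 tile) pair form one contiguous run. Each run is
// processed by exactly one GPU block, which applies the run's tags one after
// another with a barrier in between, so every cell is updated in the same
// order on every run of the program.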
template <typename B, typename V, typename TT,
          std::enable_if_t<amrex::HasAtomicAdd<V>::value, int> FOO = 0>
void unpack_recv_buffer_gpu_atomic_add (char* pbuffer, TagVector<TT> const& tv,
                                        int dcomp, int ncomp)
{
    detail::ParallelFor_doit(tv,
    [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                          TT const& tag) noexcept
    {
        if (icell < ncells) {
            Array4<B const> sfab{(B const*)(pbuffer+tag.poff),
                                 amrex::begin(tag.bx), amrex::end(tag.bx), ncomp};
            for (int n = 0; n < ncomp; ++n) {
                Gpu::Atomic::AddNoRet(tag.dfab.ptr(i,j,k,n+dcomp),
                                      static_cast<V>(sfab(i,j,k,n)));
            }
        }
    });
}
template <typename B, typename V, typename TT,
          std::enable_if_t<!amrex::HasAtomicAdd<V>::value, int> FOO = 0>
void unpack_recv_buffer_gpu_atomic_add (char* pbuffer, TagVector<TT> const& tv,
                                        int dcomp, int ncomp)
{
    amrex::ignore_unused(pbuffer, tv, dcomp, ncomp);
    amrex::Abort("unpack_recv_buffer_gpu: should NOT get here");
}
template <class FAB>
void
FabArray<FAB>::FB_local_copy_cpu (const FB& TheFB, int scomp, int ncomp)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    auto N_locs = static_cast<int>(LocTags.size());
    if (N_locs == 0) { return; }

    if (TheFB.m_threadsafe_loc)
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];

            BL_ASSERT(distributionMap[tag.dstIndex] == ParallelDescriptor::MyProc());
            BL_ASSERT(distributionMap[tag.srcIndex] == ParallelDescriptor::MyProc());

            const FAB* sfab = &(*this)[tag.srcIndex];
                  FAB* dfab = &(*this)[tag.dstIndex];
            dfab->template copy<RunOn::Host>(*sfab, tag.sbox, scomp, tag.dbox, scomp, ncomp);
        }
    }
    else
    {
        LayoutData<Vector<FabCopyTag<FAB> > > loc_copy_tags(boxArray(),DistributionMap());
        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];
            loc_copy_tags[tag.dstIndex].push_back
                ({this->fabPtr(tag.srcIndex), tag.dbox,
                  tag.sbox.smallEnd()-tag.dbox.smallEnd()});
        }

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
        for (MFIter mfi(*this); mfi.isValid(); ++mfi)
        {
            const auto& tags = loc_copy_tags[mfi];
            auto dfab = this->array(mfi);
            for (auto const & tag : tags)
            {
                auto const sfab = tag.sfab->array();
                const auto offset = tag.offset.dim3();
                amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
                [=] (int i, int j, int k, int n) noexcept
                {
                    dfab(i,j,k,n+scomp) = sfab(i+offset.x,j+offset.y,k+offset.z,n+scomp);
                });
            }
        }
    }
}
template <class FAB>
void
FabArray<FAB>::FB_local_add_cpu (const FB& TheFB, int scomp, int ncomp)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    auto N_locs = static_cast<int>(LocTags.size());
    if (N_locs == 0) { return; }

    LayoutData<Vector<FabCopyTag<FAB> > > loc_copy_tags(boxArray(),DistributionMap());
    // Stage every source region in a private fab first, so that the adds
    // below can be applied in a fixed order.
    std::vector<FAB> src_fabs(N_locs);
    for (int itag = 0; itag < N_locs; ++itag) {
        const CopyComTag& tag = LocTags[itag];
        src_fabs[itag].resize(tag.sbox,ncomp);
        loc_copy_tags[tag.dstIndex].push_back
            ({&(src_fabs[itag]), tag.dbox,
              tag.sbox.smallEnd()-tag.dbox.smallEnd()});
    }

#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
    for (int itag = 0; itag < N_locs; ++itag) {
        const CopyComTag& tag = LocTags[itag];
        src_fabs[itag].template copy<RunOn::Host>(this->operator[](tag.srcIndex), scomp, 0, ncomp);
    }

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        const auto& tags = loc_copy_tags[mfi];
        const auto& dfab = this->array(mfi);
        for (auto const & tag : tags)
        {
            auto const sfab = tag.sfab->array();
            const auto offset = tag.offset.dim3();
            amrex::LoopOnCpu(tag.dbox, ncomp,
            [&] (int i, int j, int k, int n) noexcept
            {
                dfab(i,j,k,n+scomp) += sfab(i+offset.x,j+offset.y,k+offset.z,n);
            });
        }
    }
}
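// Note the two-phase structure above: all sources are first copied into
// private staging fabs (in parallel), and the += into each destination fab is
// then applied tag by tag with a sequential loop, so the result does not
// depend on the OpenMP schedule.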
template <class FAB>
TagVector<Array4CopyTag<typename FabArray<FAB>::value_type>> const*
FabArray<FAB>::FB_get_local_copy_tag_vector (const FB& TheFB)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    int N_locs = LocTags.size();

    using TagType = Array4CopyTag<value_type>;
    TagVector<TagType> const* tv = nullptr;

    if (auto it = m_fb_local_copy_handler.find(TheFB.m_id);
        it != m_fb_local_copy_handler.end())
    {
        tv = it->second.get();
    }
    else
    {
        Vector<TagType> loc_copy_tags;
        loc_copy_tags.reserve(N_locs);
        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];
            int li = this->localindex(tag.dstIndex);
            loc_copy_tags.push_back
                (TagType{.dfab = this->atLocalIdx(li).array(),
                         .dindex = tag.dstIndex,
                         .sfab = this->fabPtr(tag.srcIndex)->const_array(),
                         .dbox = tag.dbox,
                         .offset = (tag.sbox.smallEnd()-tag.dbox.smallEnd()).dim3()});
        }
        auto utv = std::make_unique<TagVector<TagType>>(loc_copy_tags);
        tv = utv.get();
        m_fb_local_copy_handler[TheFB.m_id] = std::move(utv);
    }

    return tv;
}
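// The TagVector built here is cached in m_fb_local_copy_handler keyed on the
// FB's m_id, so repeated FillBoundary calls with the same communication
// pattern skip rebuilding the tag list.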
template <class FAB>
void
FabArray<FAB>::FB_local_copy_gpu (const FB& TheFB, int scomp, int ncomp)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    int N_locs = LocTags.size();
    if (N_locs == 0) { return; }

    using TagType = Array4CopyTag<value_type>;

    if (TheFB.m_threadsafe_loc)
    {
        auto* tv = FB_get_local_copy_tag_vector(TheFB);

        detail::ParallelFor_doit(*tv,
        [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                              TagType const& tag) noexcept
        {
            if (icell < ncells) {
                for (int n = 0; n < ncomp; ++n) {
                    tag.dfab(i,j,k,n+scomp) = tag.sfab(i+tag.offset.x,
                                                       j+tag.offset.y,
                                                       k+tag.offset.z,n+scomp);
                }
            }
        });
    }
    else
    {
        Vector<TagType> loc_copy_tags;
        loc_copy_tags.reserve(N_locs);

        Vector<BaseFab<int> > maskfabs(this->local_size());
        Vector<Array4Tag<int> > masks_unique;
        masks_unique.reserve(this->local_size());
        Vector<Array4Tag<int> > masks;
        masks.reserve(N_locs);

        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];
            int li = this->localindex(tag.dstIndex);
            loc_copy_tags.push_back
                (TagType{.dfab = this->atLocalIdx(li).array(),
                         .dindex = tag.dstIndex,
                         .sfab = this->fabPtr(tag.srcIndex)->const_array(),
                         .dbox = tag.dbox,
                         .offset = (tag.sbox.smallEnd()-tag.dbox.smallEnd()).dim3()});

            if (!maskfabs[li].isAllocated()) {
                maskfabs[li].resize(this->atLocalIdx(li).box());
                masks_unique.push_back(Array4Tag<int>{maskfabs[li].array()});
            }
            masks.push_back(Array4Tag<int>{maskfabs[li].array()});
        }

        // Zero the masks before they are used as per-cell locks.
        amrex::ParallelFor(masks_unique, 1,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, int, Array4Tag<int> const& msk) noexcept
        {
            msk.dfab(i,j,k) = 0;
        });

        detail::fab_to_fab_atomic_cpy<value_type, value_type>(
            loc_copy_tags, scomp, scomp, ncomp, masks);

        // The mask fabs are local; wait for the kernels to finish.
        Gpu::streamSynchronize();
    }
}
template <class FAB>
void
FabArray<FAB>::FB_local_add_gpu (const FB& TheFB, int scomp, int ncomp, bool deterministic)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    int N_locs = LocTags.size();
    if (N_locs == 0) { return; }

    using TagType = Array4CopyTag<value_type>;
    Vector<TagType> loc_copy_tags_1; // copy sources into staging fabs
    Vector<TagType> loc_copy_tags_2; // add staging fabs into destinations
    loc_copy_tags_1.reserve(N_locs);
    loc_copy_tags_2.reserve(N_locs);

    std::vector<FAB> src_fabs(N_locs);
    for (int itag = 0; itag < N_locs; ++itag) {
        const CopyComTag& tag = LocTags[itag];
        src_fabs[itag].resize(tag.sbox,ncomp);
        loc_copy_tags_1.push_back(
            TagType{.dfab = src_fabs[itag].array(), .dindex = -1,
                    .sfab = this->const_array(tag.srcIndex,scomp), .dbox = tag.sbox,
                    .offset = Dim3{.x = 0, .y = 0, .z = 0}});
        loc_copy_tags_2.push_back(
            TagType{.dfab = this->array(tag.dstIndex,scomp), .dindex = tag.dstIndex,
                    .sfab = src_fabs[itag].const_array(), .dbox = tag.dbox,
                    .offset = (tag.sbox.smallEnd()-tag.dbox.smallEnd()).dim3()});
    }

    detail::fab_to_fab(loc_copy_tags_1, 0, 0, ncomp,
                       detail::CellStore<value_type, value_type>{});

    if (deterministic) {
        detail::deterministic_fab_to_fab(loc_copy_tags_2, 0, 0, ncomp,
                                         detail::CellAdd<value_type,value_type>{});
    } else {
        detail::fab_to_fab(loc_copy_tags_2, 0, 0, ncomp,
                           detail::CellAtomicAdd<value_type, value_type>{});
    }

    // The staging fabs are local; wait for the kernels to finish.
    Gpu::streamSynchronize();
}
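// As in FB_local_add_cpu, sources are staged into temporary fabs first; the
// deterministic flag then chooses between the ordered deterministic_fab_to_fab
// add and the faster CellAtomicAdd kernel.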
template <class FAB>
void
FabArray<FAB>::CMD_local_setVal_gpu (typename FabArray<FAB>::value_type x,
                                     const CommMetaData& thecmd, int scomp, int ncomp)
{
    auto const& LocTags = *(thecmd.m_LocTags);
    int N_locs = LocTags.size();
    if (N_locs == 0) { return; }

    using TagType = Array4BoxTag<value_type>;
    Vector<TagType> loc_setval_tags;
    loc_setval_tags.reserve(N_locs);

    for (int i = 0; i < N_locs; ++i)
    {
        const CopyComTag& tag = LocTags[i];
        loc_setval_tags.push_back(TagType{.dfab = this->array(tag.dstIndex), .dbox = tag.dbox});
    }

    ParallelFor(loc_setval_tags, ncomp,
    [x,scomp] AMREX_GPU_DEVICE (int i, int j, int k, int n, TagType const& tag) noexcept
    {
        tag.dfab(i,j,k,n+scomp) = x;
    });
}
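// CMD_local_setVal_gpu and CMD_remote_setVal_gpu (below) fill the cells
// described by a CommMetaData with a fixed value x; they are used internally,
// e.g., to initialize regions that a subsequent communication step is
// expected to overwrite.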
template <class FAB>
void
FabArray<FAB>::CMD_remote_setVal_gpu (typename FabArray<FAB>::value_type x,
                                      const CommMetaData& thecmd, int scomp, int ncomp)
{
    auto const& RcvTags = *(thecmd.m_RcvTags);

    using TagType = Array4BoxTag<value_type>;
    Vector<TagType> rcv_setval_tags;
    for (auto it = RcvTags.begin(); it != RcvTags.end(); ++it) {
        for (auto const& tag: it->second) {
            rcv_setval_tags.push_back(TagType{.dfab = this->array(tag.dstIndex), .dbox = tag.dbox});
        }
    }

    if (rcv_setval_tags.empty()) { return; }

    ParallelFor(rcv_setval_tags, ncomp,
    [x,scomp] AMREX_GPU_DEVICE (int i, int j, int k, int n, TagType const& tag) noexcept
    {
        tag.dfab(i,j,k,n+scomp) = x;
    });
}
#if defined(__CUDACC__) && defined(AMREX_USE_CUDA)

template <class FAB>
void
FabArray<FAB>::FB_local_copy_cuda_graph_1 (const FB& TheFB, int scomp, int ncomp)
{
    const int N_locs = (*TheFB.m_LocTags).size();
    LayoutData<Vector<FabCopyTag<FAB> > > loc_copy_tags(boxArray(),DistributionMap());
    for (int i = 0; i < N_locs; ++i)
    {
        const CopyComTag& tag = (*TheFB.m_LocTags)[i];
        loc_copy_tags[tag.dstIndex].push_back
            ({this->fabPtr(tag.srcIndex), tag.dbox, tag.sbox.smallEnd()-tag.dbox.smallEnd()});
    }

    // Create the graph if it has not been recorded yet.
    if ( !(TheFB.m_localCopy.ready()) )
    {
        const_cast<FB&>(TheFB).m_localCopy.resize(N_locs);

        int idx = 0;
        for (MFIter mfi(*this, MFItInfo().DisableDeviceSync()); mfi.isValid(); ++mfi)
        {
            amrex::Gpu::Device::startGraphRecording( (mfi.LocalIndex() == 0),
                                                     const_cast<FB&>(TheFB).m_localCopy.getHostPtr(0),
                                                     (TheFB).m_localCopy.getDevicePtr(0),
                                                     std::size_t(sizeof(CopyMemory)*N_locs) );

            const auto& tags = loc_copy_tags[mfi];
            for (auto const & tag : tags)
            {
                const auto offset = tag.offset.dim3();
                CopyMemory* cmem = TheFB.m_localCopy.getDevicePtr(idx++);
                AMREX_HOST_DEVICE_FOR_3D (tag.dbox, i, j, k,
                {
                    // Record with placeholder arrays; setParams below fills
                    // in the real src/dst before each replay.
                    auto const dst = cmem->getDst<value_type>();
                    auto const src = cmem->getSrc<value_type>();
                    for (int n = 0; n < cmem->ncomp; ++n) {
                        dst(i,j,k,(cmem->scomp)+n) = src(i+offset.x,j+offset.y,k+offset.z,(cmem->scomp)+n);
                    }
                });
            }

            bool last_iter = mfi.LocalIndex() == (this->local_size()-1);
            cudaGraphExec_t graphExec = amrex::Gpu::Device::stopGraphRecording(last_iter);
            if (last_iter) { const_cast<FB&>(TheFB).m_localCopy.setGraph( graphExec ); }
        }
    }

    // Setup launch parameters.
    int idx = 0;
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        auto const dst_array = this->array(mfi);
        const auto& tags = loc_copy_tags[mfi];
        for (auto const & tag : tags)
        {
            const_cast<FB&>(TheFB).m_localCopy.setParams(idx++, makeCopyMemory(tag.sfab->array(),
                                                                               dst_array,
                                                                               scomp, ncomp));
        }
    }

    // Launch the graph.
    TheFB.m_localCopy.executeGraph();
}
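// CUDA-graph pattern used above and in the following functions: the kernel
// launches are recorded once with placeholder CopyMemory slots; on every
// subsequent call the slots are refreshed via setParams and the whole graph
// is replayed with executeGraph, avoiding per-kernel launch latency.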
template <class FAB>
void
FabArray<FAB>::FB_local_copy_cuda_graph_n (const FB& TheFB, int scomp, int ncomp)
{
    const int N_locs = TheFB.m_LocTags->size();

    int launches = 0; // number of local copies this rank performs
    LayoutData<Vector<FabCopyTag<FAB> > > loc_copy_tags(boxArray(),DistributionMap());
    for (int i = 0; i < N_locs; ++i)
    {
        const CopyComTag& tag = (*TheFB.m_LocTags)[i];

        BL_ASSERT(ParallelDescriptor::sameTeam(distributionMap[tag.dstIndex]));
        BL_ASSERT(ParallelDescriptor::sameTeam(distributionMap[tag.srcIndex]));

        if (distributionMap[tag.dstIndex] == ParallelDescriptor::MyProc())
        {
            loc_copy_tags[tag.dstIndex].push_back
                ({this->fabPtr(tag.srcIndex), tag.dbox, tag.sbox.smallEnd()-tag.dbox.smallEnd()});
            ++launches;
        }
    }

    // Create the graph if it has not been recorded yet.
    if ( !(TheFB.m_localCopy.ready()) )
    {
        const_cast<FB&>(TheFB).m_localCopy.resize(launches);

        int idx = 0;
        int cuda_stream = 0;
        for (MFIter mfi(*this, MFItInfo().DisableDeviceSync()); mfi.isValid(); ++mfi)
        {
            const auto& tags = loc_copy_tags[mfi];
            for (int t = 0; t<tags.size(); ++t)
            {
                Gpu::Device::setStreamIndex(cuda_stream++);
                amrex::Gpu::Device::startGraphRecording( (idx == 0),
                                                         const_cast<FB&>(TheFB).m_localCopy.getHostPtr(0),
                                                         (TheFB).m_localCopy.getDevicePtr(0),
                                                         std::size_t(sizeof(CopyMemory)*launches) );

                const auto& tag = tags[t];
                const Dim3 offset = tag.offset.dim3();

                CopyMemory* cmem = TheFB.m_localCopy.getDevicePtr(idx++);
                AMREX_HOST_DEVICE_FOR_3D (tag.dbox, i, j, k,
                {
                    auto const dst = cmem->getDst<value_type>();
                    auto const src = cmem->getSrc<value_type>();
                    for (int n = 0; n < cmem->ncomp; ++n) {
                        dst(i,j,k,(cmem->scomp)+n) = src(i+offset.x,j+offset.y,k+offset.z,(cmem->scomp)+n);
                    }
                });

                bool last_iter = idx == launches;
                cudaGraphExec_t graphExec = Gpu::Device::stopGraphRecording(last_iter);
                if (last_iter) { const_cast<FB&>(TheFB).m_localCopy.setGraph( graphExec ); }
            }
        }
    }

    // Setup launch parameters.
    int idx = 0;
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        const auto& dst_array = this->array(mfi);
        const auto& tags = loc_copy_tags[mfi];
        for (auto const & tag : tags)
        {
            const_cast<FB&>(TheFB).m_localCopy.setParams(idx++, makeCopyMemory(tag.sfab->array(),
                                                                               dst_array,
                                                                               scomp, ncomp));
        }
    }

    // Launch the graph without synchronizing.
    TheFB.m_localCopy.executeGraph(false);
}

#endif /* __CUDACC__ && AMREX_USE_CUDA */
#if defined(__CUDACC__) && defined(AMREX_USE_CUDA)

template <class FAB>
void
FabArray<FAB>::FB_pack_send_buffer_cuda_graph (const FB& TheFB, int scomp, int ncomp,
                                               Vector<char*>& send_data,
                                               Vector<std::size_t> const& send_size,
                                               Vector<typename FabArray<FAB>::CopyComTagsContainer const*> const& send_cctc)
{
    const int N_snds = send_data.size();
    if (N_snds == 0) { return; }

    if ( !(TheFB.m_copyToBuffer.ready()) )
    {
        // Set the size of the CopyMemory graph.
        int launches = 0;
        for (int send = 0; send < N_snds; ++send) {
            if (send_size[send] > 0) {
                launches += send_cctc[send]->size();
            }
        }
        const_cast<FB&>(TheFB).m_copyToBuffer.resize(launches);

        // Record the graph.
        int idx = 0;
        for (Gpu::StreamIter sit(N_snds,Gpu::StreamItInfo().DisableDeviceSync());
             sit.isValid(); ++sit)
        {
            amrex::Gpu::Device::startGraphRecording( (sit() == 0),
                                                     const_cast<FB&>(TheFB).m_copyToBuffer.getHostPtr(0),
                                                     (TheFB).m_copyToBuffer.getDevicePtr(0),
                                                     std::size_t(sizeof(CopyMemory)*launches) );

            const int j = sit();
            if (send_size[j] > 0)
            {
                auto const& cctc = *send_cctc[j];
                for (auto const& tag : cctc)
                {
                    const Box& bx = tag.sbox;
                    CopyMemory* cmem = TheFB.m_copyToBuffer.getDevicePtr(idx++);
                    AMREX_HOST_DEVICE_FOR_3D (bx, ii, jj, kk,
                    {
                        auto const pfab = cmem->getDst<value_type>();
                        auto const sfab = cmem->getSrc<value_type>();
                        for (int n = 0; n < cmem->ncomp; ++n)
                        {
                            pfab(ii,jj,kk,n) = sfab(ii,jj,kk,n+(cmem->scomp));
                        }
                    });
                }
            }

            bool last_iter = sit() == (N_snds-1);
            cudaGraphExec_t graphExec = amrex::Gpu::Device::stopGraphRecording(last_iter);
            if (last_iter) { const_cast<FB&>(TheFB).m_copyToBuffer.setGraph( graphExec ); }
        }
    }

    // Setup launch parameters.
    int idx = 0;
    for (int send = 0; send < N_snds; ++send)
    {
        const int j = send;
        if (send_size[j] > 0)
        {
            char* dptr = send_data[j];
            auto const& cctc = *send_cctc[j];
            for (auto const& tag : cctc)
            {
                const_cast<FB&>(TheFB).m_copyToBuffer.setParams(idx++, makeCopyMemory(this->array(tag.srcIndex),
                                                                                      makeArray4((value_type*)(dptr),
                                                                                                 tag.sbox, ncomp),
                                                                                      scomp, ncomp));
                dptr += (tag.sbox.numPts() * ncomp * sizeof(value_type));
            }
            BL_ASSERT(dptr <= send_data[j] + send_size[j]);
        }
    }

    // Launch the graph synchronized, so the buffer is complete before the sends are posted.
    TheFB.m_copyToBuffer.executeGraph();
}
template <class FAB>
void
FabArray<FAB>::FB_unpack_recv_buffer_cuda_graph (const FB& TheFB, int dcomp, int ncomp,
                                                 Vector<char*> const& recv_data,
                                                 Vector<std::size_t> const& recv_size,
                                                 Vector<CopyComTagsContainer const*> const& recv_cctc,
                                                 bool /*is_thread_safe*/)
{
    const int N_rcvs = recv_cctc.size();
    if (N_rcvs == 0) { return; }

    int launches = 0;
    LayoutData<Vector<VoidCopyTag> > recv_copy_tags(boxArray(),DistributionMap());
    for (int k = 0; k < N_rcvs; ++k)
    {
        if (recv_size[k] > 0)
        {
            const char* dptr = recv_data[k];
            auto const& cctc = *recv_cctc[k];
            for (auto const& tag : cctc)
            {
                recv_copy_tags[tag.dstIndex].push_back(VoidCopyTag{.p = dptr, .dbox = tag.dbox});
                dptr += tag.dbox.numPts() * ncomp * sizeof(value_type);
                ++launches;
            }
            BL_ASSERT(dptr <= recv_data[k] + recv_size[k]);
        }
    }

    if ( !(TheFB.m_copyFromBuffer.ready()) )
    {
        const_cast<FB&>(TheFB).m_copyFromBuffer.resize(launches);

        int idx = 0;
        for (MFIter mfi(*this, MFItInfo().DisableDeviceSync()); mfi.isValid(); ++mfi)
        {
            amrex::Gpu::Device::startGraphRecording( (mfi.LocalIndex() == 0),
                                                     const_cast<FB&>(TheFB).m_copyFromBuffer.getHostPtr(0),
                                                     (TheFB).m_copyFromBuffer.getDevicePtr(0),
                                                     std::size_t(sizeof(CopyMemory)*launches) );

            const auto& tags = recv_copy_tags[mfi];
            for (auto const & tag : tags)
            {
                CopyMemory* cmem = TheFB.m_copyFromBuffer.getDevicePtr(idx++);
                AMREX_HOST_DEVICE_FOR_3D (tag.dbox, i, j, k,
                {
                    auto const pfab = cmem->getSrc<value_type>();
                    auto const dfab = cmem->getDst<value_type>();
                    for (int n = 0; n < cmem->ncomp; ++n)
                    {
                        dfab(i,j,k,n+(cmem->scomp)) = pfab(i,j,k,n);
                    }
                });
            }

            bool last_iter = mfi.LocalIndex() == (this->local_size()-1);
            cudaGraphExec_t graphExec = amrex::Gpu::Device::stopGraphRecording(last_iter);
            if (last_iter) { const_cast<FB&>(TheFB).m_copyFromBuffer.setGraph( graphExec ); }
        }
    }

    // Setup launch parameters.
    int idx = 0;
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        auto dst_array = this->array(mfi);
        const auto & tags = recv_copy_tags[mfi];
        for (auto const & tag : tags)
        {
            const_cast<FB&>(TheFB).m_copyFromBuffer.setParams(idx++, makeCopyMemory(amrex::makeArray4((value_type*)(tag.p),
                                                                                                      tag.dbox, ncomp),
                                                                                    dst_array,
                                                                                    dcomp, ncomp));
        }
    }

    // Launch the graph synchronized, because the recv buffer is freed next.
    TheFB.m_copyFromBuffer.executeGraph();
}

#endif /* __CUDACC__ && AMREX_USE_CUDA */
// Array4SrcOffsetTag is an approximate name for the tag struct defined in
// AMReX_TagParallelFor.H: the source Array4 (sfab), the tag's byte offset
// into the contiguous send buffer (poff), and its box (bx).
template <class FAB>
template <typename BUF>
TagVector<Array4SrcOffsetTag<typename FabArray<FAB>::value_type>> const*
FabArray<FAB>::get_send_copy_tag_vector (Vector<char*> const& send_data,
                                         Vector<std::size_t> const& send_size,
                                         Vector<CopyComTagsContainer const*> const& send_cctc,
                                         int ncomp, std::uint64_t id) const
{
    auto kit = std::find_if(send_cctc.begin(), send_cctc.end(),
                            [] (auto const* p) { return p != nullptr; });
    if (kit == send_cctc.end()) {
        return nullptr;
    }

    using TagType = Array4SrcOffsetTag<value_type>;

    auto get_tags = [&] () -> Vector<TagType>
    {
        Vector<TagType> snd_copy_tags;
        char* pbuf = send_data[0];
        const int N_snds = send_data.size();
        for (int j = 0; j < N_snds; ++j)
        {
            if (send_size[j] > 0)
            {
                char* dptr = send_data[j];
                auto const& cctc = *send_cctc[j];
                for (auto const& tag : cctc)
                {
                    snd_copy_tags.emplace_back
                        (TagType{.sfab = this->const_array(tag.srcIndex), .poff = dptr-pbuf, .bx = tag.sbox});
                    dptr += (tag.sbox.numPts() * ncomp * sizeof(BUF));
                }
                BL_ASSERT(dptr <= send_data[j] + send_size[j]);
            }
        }
        return snd_copy_tags;
    };

    // The tag vector is cached per (id, buffer type, ncomp).
    std::tuple<std::uint64_t,std::size_t,int> key{id, sizeof(BUF), ncomp};
    TagVector<TagType> const* tv = nullptr;
    if (auto it = m_send_copy_handler.find(key); it != m_send_copy_handler.end()) {
        tv = it->second.get();
    } else {
        if (m_send_copy_handler.size() > 32) {
            // Keep the cache from growing without bound.
            m_send_copy_handler.clear();
        }
        auto snd_copy_tags = get_tags();
        auto utv = std::make_unique<TagVector<TagType>>(snd_copy_tags);
        tv = utv.get();
        m_send_copy_handler[key] = std::move(utv);
    }
    return tv;
}
template <class FAB>
template <typename BUF>
void
FabArray<FAB>::pack_send_buffer_gpu (FabArray<FAB> const& src, int scomp, int ncomp,
                                     Vector<char*> const& send_data,
                                     Vector<std::size_t> const& send_size,
                                     Vector<CopyComTagsContainer const*> const& send_cctc,
                                     std::uint64_t id)
{
    const int N_snds = send_data.size();
    if (N_snds == 0) { return; }

    auto* tv = src.template get_send_copy_tag_vector<BUF>
        (send_data, send_size, send_cctc, ncomp, id);
    if (tv == nullptr) { return; }

    char* pbuffer = send_data[0];

    using TagType = Array4SrcOffsetTag<value_type>;
    detail::ParallelFor_doit(*tv,
    [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                          TagType const& tag) noexcept
    {
        if (icell < ncells) {
            // View of this tag's slot in the contiguous send buffer.
            Array4<BUF> dfab{(BUF*)(pbuffer+tag.poff),
                             amrex::begin(tag.bx), amrex::end(tag.bx), ncomp};
            for (int n = 0; n < ncomp; ++n) {
                dfab(i,j,k,n) = (BUF)tag.sfab(i,j,k,n+scomp);
            }
        }
    });

    // Synchronize so the buffer is complete before it is handed to MPI.
    Gpu::streamSynchronize();
}
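// BUF lets the wire format differ from value_type (the cast happens in the
// kernel above), so, for example, double data can be communicated as float
// to halve the message size.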
// Array4DstOffsetTag is an approximate name for the tag struct defined in
// AMReX_TagParallelFor.H: the destination Array4 (dfab), the tag's byte
// offset into the contiguous recv buffer (poff), and its box (bx).
template <class FAB>
template <typename BUF>
TagVector<Array4DstOffsetTag<typename FabArray<FAB>::value_type>> const*
FabArray<FAB>::get_recv_copy_tag_vector (Vector<char*> const& recv_data,
                                         Vector<std::size_t> const& recv_size,
                                         Vector<CopyComTagsContainer const*> const& recv_cctc,
                                         int ncomp, std::uint64_t id)
{
    auto kit = std::find_if(recv_cctc.begin(), recv_cctc.end(),
                            [] (auto const* p) { return p != nullptr; });
    if (kit == recv_cctc.end()) {
        return nullptr;
    }

    using TagType = Array4DstOffsetTag<value_type>;

    auto get_tags = [&] () -> Vector<TagType>
    {
        Vector<TagType> recv_copy_tags;
        char* pbuf = recv_data[0];
        const int N_rcvs = recv_cctc.size();
        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                char* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    const int li = this->localindex(tag.dstIndex);
                    recv_copy_tags.emplace_back
                        (TagType{.dfab = this->atLocalIdx(li).array(), .poff = dptr-pbuf, .bx = tag.dbox});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);
                }
                BL_ASSERT(dptr <= recv_data[k] + recv_size[k]);
            }
        }
        return recv_copy_tags;
    };

    std::tuple<std::uint64_t,std::size_t,int> key{id, sizeof(BUF), ncomp};
    TagVector<TagType> const* tv = nullptr;
    if (auto it = m_recv_copy_handler.find(key); it != m_recv_copy_handler.end()) {
        tv = it->second.get();
    } else {
        if (m_recv_copy_handler.size() > 32) {
            m_recv_copy_handler.clear();
        }
        auto recv_copy_tags = get_tags();
        auto utv = std::make_unique<TagVector<TagType>>(recv_copy_tags);
        tv = utv.get();
        m_recv_copy_handler[key] = std::move(utv);
    }
    return tv;
}
template <class FAB>
template <typename BUF>
void
FabArray<FAB>::unpack_recv_buffer_gpu (FabArray<FAB>& dst, int dcomp, int ncomp,
                                       Vector<char*> const& recv_data,
                                       Vector<std::size_t> const& recv_size,
                                       Vector<CopyComTagsContainer const*> const& recv_cctc,
                                       CpOp op, bool is_thread_safe, std::uint64_t id,
                                       bool deterministic)
{
    const int N_rcvs = recv_cctc.size();
    if (N_rcvs == 0) { return; }

    // A mask is needed only when the update cannot be made safe with atomics.
    bool use_mask = false;
    if (!is_thread_safe)
    {
        if ((op == FabArrayBase::COPY && !amrex::IsStoreAtomic<value_type>::value) ||
            (op == FabArrayBase::ADD  && !amrex::HasAtomicAdd <value_type>::value))
        {
            use_mask = true;
        }
    }

    if (op == FabArrayBase::ADD && deterministic)
    {
        using TagType = Array4CopyTag<value_type, BUF>;
        Vector<TagType> tags;
        tags.reserve(N_rcvs);
        for (int k = 0; k < N_rcvs; ++k) {
            if (recv_size[k] > 0) {
                char const* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc) {
                    tags.push_back(
                        TagType{.dfab = dst.array(tag.dstIndex), .dindex = tag.dstIndex,
                                .sfab = amrex::makeArray4((BUF const*)dptr, tag.dbox, ncomp),
                                .dbox = tag.dbox,
                                .offset = Dim3{.x = 0, .y = 0, .z = 0}});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);
                }
            }
        }
        detail::deterministic_fab_to_fab<value_type,BUF>
            (tags, 0, dcomp, ncomp, detail::CellAdd<value_type,BUF>{});
    }
    else if (!use_mask)
    {
        auto* tv = dst.template get_recv_copy_tag_vector<BUF>
            (recv_data, recv_size, recv_cctc, ncomp, id);
        if (tv == nullptr) { return; }

        char* pbuffer = recv_data[0];

        using TagType = Array4DstOffsetTag<value_type>;
        if (op == FabArrayBase::COPY)
        {
            detail::ParallelFor_doit(*tv,
            [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                                  TagType const& tag) noexcept
            {
                if (icell < ncells) {
                    Array4<BUF const> sfab{(BUF const*)(pbuffer+tag.poff),
                                           amrex::begin(tag.bx), amrex::end(tag.bx), ncomp};
                    for (int n = 0; n < ncomp; ++n) {
                        tag.dfab(i,j,k,n+dcomp) = (value_type)sfab(i,j,k,n);
                    }
                }
            });
        }
        else
        {
            if (is_thread_safe) {
                detail::ParallelFor_doit(*tv,
                [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                                      TagType const& tag) noexcept
                {
                    if (icell < ncells) {
                        Array4<BUF const> sfab{(BUF const*)(pbuffer+tag.poff),
                                               amrex::begin(tag.bx), amrex::end(tag.bx), ncomp};
                        for (int n = 0; n < ncomp; ++n) {
                            tag.dfab(i,j,k,n+dcomp) += (value_type)sfab(i,j,k,n);
                        }
                    }
                });
            } else {
                detail::unpack_recv_buffer_gpu_atomic_add<BUF, value_type>
                    (pbuffer, *tv, dcomp, ncomp);
            }
        }
        Gpu::streamSynchronize();
    }
    else
    {
        char* pbuffer = recv_data[0];

        using TagType = Array4CopyTag<value_type, BUF>;
        Vector<TagType> recv_copy_tags;
        recv_copy_tags.reserve(N_rcvs);

        Vector<BaseFab<int> > maskfabs(dst.local_size());
        Vector<Array4Tag<int> > masks_unique;
        masks_unique.reserve(dst.local_size());
        Vector<Array4Tag<int> > masks;
        masks.reserve(N_rcvs);

        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                std::size_t offset = recv_data[k]-recv_data[0];
                const char* dptr = pbuffer + offset;
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    const int li = dst.localindex(tag.dstIndex);
                    recv_copy_tags.emplace_back(TagType{
                        .dfab = dst.atLocalIdx(li).array(), .dindex = tag.dstIndex,
                        .sfab = amrex::makeArray4((BUF const*)dptr, tag.dbox, ncomp),
                        .dbox = tag.dbox,
                        .offset = Dim3{.x = 0, .y = 0, .z = 0}
                    });
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);

                    if (!maskfabs[li].isAllocated()) {
                        maskfabs[li].resize(dst.atLocalIdx(li).box());
                        masks_unique.push_back(Array4Tag<int>{maskfabs[li].array()});
                    }
                    masks.push_back(Array4Tag<int>{maskfabs[li].array()});
                }
            }
        }

        // Zero the masks before they are used as per-cell locks.
        amrex::ParallelFor(masks_unique, 1,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, int, Array4Tag<int> const& msk) noexcept
        {
            msk.dfab(i,j,k) = 0;
        });

        if (op == FabArrayBase::COPY)
        {
            detail::fab_to_fab_atomic_cpy<value_type, BUF>(
                recv_copy_tags, 0, dcomp, ncomp, masks);
        }
        else
        {
            detail::fab_to_fab_atomic_add<value_type, BUF>(
                recv_copy_tags, 0, dcomp, ncomp, masks);
        }

        // The mask fabs are local; wait for the kernels to finish.
        Gpu::streamSynchronize();
    }
}
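// The three unpack paths above trade speed for guarantees: the cached-tag
// fast path when updates are thread safe or atomics suffice, the
// deterministic path when bitwise-reproducible sums are requested, and the
// mask path when neither stores nor adds of value_type are atomic.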
template <class FAB>
template <typename BUF>
void
FabArray<FAB>::pack_send_buffer_cpu (FabArray<FAB> const& src, int scomp, int ncomp,
                                     Vector<char*> const& send_data,
                                     Vector<std::size_t> const& send_size,
                                     Vector<CopyComTagsContainer const*> const& send_cctc)
{
    auto const N_snds = static_cast<int>(send_data.size());
    if (N_snds == 0) { return; }

#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
    for (int j = 0; j < N_snds; ++j)
    {
        if (send_size[j] > 0)
        {
            char* dptr = send_data[j];
            auto const& cctc = *send_cctc[j];
            for (auto const& tag : cctc)
            {
                const Box& bx = tag.sbox;
                auto const sfab = src.array(tag.srcIndex);
                auto pfab = amrex::makeArray4((BUF*)(dptr), bx, ncomp);
                amrex::LoopConcurrentOnCpu(bx, ncomp,
                [=] (int ii, int jj, int kk, int n) noexcept
                {
                    pfab(ii,jj,kk,n) = static_cast<BUF>(sfab(ii,jj,kk,n+scomp));
                });
                dptr += (bx.numPts() * ncomp * sizeof(BUF));
            }
            BL_ASSERT(dptr <= send_data[j] + send_size[j]);
        }
    }
}
template <class FAB>
template <typename BUF>
void
FabArray<FAB>::unpack_recv_buffer_cpu (FabArray<FAB>& dst, int dcomp, int ncomp,
                                       Vector<char*> const& recv_data,
                                       Vector<std::size_t> const& recv_size,
                                       Vector<CopyComTagsContainer const*> const& recv_cctc,
                                       CpOp op, bool is_thread_safe)
{
    auto const N_rcvs = static_cast<int>(recv_cctc.size());
    if (N_rcvs == 0) { return; }

    if (is_thread_safe)
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                const char* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    const Box& bx = tag.dbox;
                    FAB& dfab = dst[tag.dstIndex];
                    if (op == FabArrayBase::COPY)
                    {
                        dfab.template copyFromMem<RunOn::Host, BUF>(bx, dcomp, ncomp, dptr);
                    }
                    else
                    {
                        dfab.template addFromMem<RunOn::Host, BUF>(tag.dbox, dcomp, ncomp, dptr);
                    }
                    dptr += bx.numPts() * ncomp * sizeof(BUF);
                }
                BL_ASSERT(dptr <= recv_data[k] + recv_size[k]);
            }
        }
    }
    else
    {
        LayoutData<Vector<VoidCopyTag> > recv_copy_tags;
        recv_copy_tags.define(dst.boxArray(),dst.DistributionMap());
        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                const char* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    recv_copy_tags[tag.dstIndex].push_back(VoidCopyTag{.p = dptr, .dbox = tag.dbox});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);
                }
                BL_ASSERT(dptr <= recv_data[k] + recv_size[k]);
            }
        }

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
        for (MFIter mfi(dst); mfi.isValid(); ++mfi)
        {
            const auto& tags = recv_copy_tags[mfi];
            auto dfab = dst.array(mfi);
            for (auto const & tag : tags)
            {
                auto pfab = amrex::makeArray4((BUF const*)(tag.p), tag.dbox, ncomp);
                if (op == FabArrayBase::COPY)
                {
                    amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
                    [=] (int i, int j, int k, int n) noexcept
                    {
                        dfab(i,j,k,n+dcomp) = pfab(i,j,k,n);
                    });
                }
                else
                {
                    amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
                    [=] (int i, int j, int k, int n) noexcept
                    {
                        dfab(i,j,k,n+dcomp) += pfab(i,j,k,n);
                    });
                }
            }
        }
    }
}
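// CPU counterpart of unpack_recv_buffer_gpu: thread-safe patterns unpack
// directly with copyFromMem/addFromMem under OpenMP; otherwise tags are
// grouped per destination fab (VoidCopyTag) so that each fab is updated only
// by the thread that owns its MFIter iteration.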