Block-Structured AMR Software Framework
AMReX_CSR.H
#ifndef AMREX_CSR_H_
#define AMREX_CSR_H_
#include <AMReX_Config.H>

#include <AMReX_Gpu.H>
#include <AMReX_INT.H>
#include <AMReX_OpenMP.H>

#if defined(AMREX_USE_CUDA)
#include <cub/cub.cuh> // for Clang
#endif

#include <algorithm>
#include <climits>
#include <limits> // for std::numeric_limits
#include <type_traits>

namespace amrex {

//! Lightweight non-owning CSR view that can point to host or device buffers.
template <typename T>
struct CsrView {
    using U = std::conditional_t<std::is_const_v<T>, Long const, Long>;
    T* AMREX_RESTRICT mat = nullptr;
    U* AMREX_RESTRICT col_index = nullptr;
    U* AMREX_RESTRICT row_offset = nullptr;
    Long nnz = 0;
    Long nrows = 0;
};
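
// A minimal usage sketch (hypothetical kernel body, not part of this file):
// the entries of row r occupy the half-open range
// [row_offset[r], row_offset[r+1]), so a CSR matrix-vector product for one
// row reads
//
//     auto v = A.const_view();
//     for (Long i = v.row_offset[r]; i < v.row_offset[r+1]; ++i) {
//         y[r] += v.mat[i] * x[v.col_index[i]];
//     }
//
// where A is a CSR<T,V> and x and y are hypothetical device pointers.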

//! Owning CSR container backed by AMReX resizable vectors.
template <typename T, template <typename> class V>
struct CSR {
    V<T> mat;
    V<Long> col_index;
    V<Long> row_offset;
    Long nnz = 0;

    //! Number of logical rows represented by the CSR offset array.
    [[nodiscard]] Long nrows () const {
        return row_offset.empty() ? Long(0) : Long(row_offset.size())-1;
    }

    //! Resize the storage to accommodate num_rows rows and num_non_zeros entries.
    void resize (Long num_rows, Long num_non_zeros) {
        mat.resize(num_non_zeros);
        col_index.resize(num_non_zeros);
        row_offset.resize(num_rows+1);
        nnz = num_non_zeros;
    }

    //! Mutable view of the underlying buffers.
    [[nodiscard]] CsrView<T> view () {
        return CsrView<T>{mat.data(), col_index.data(), row_offset.data(),
                          nnz, Long(row_offset.size())-1};
    }

    //! Const view of the underlying buffers.
    [[nodiscard]] CsrView<T const> view () const {
        return CsrView<T const>{.mat = mat.data(),
                                .col_index = col_index.data(),
                                .row_offset = row_offset.data(),
                                .nnz = nnz,
                                .nrows = Long(row_offset.size())-1};
    }

    //! Convenience alias for view() const.
    [[nodiscard]] CsrView<T const> const_view () const {
        return CsrView<T const>{mat.data(), col_index.data(), row_offset.data(),
                                nnz, Long(row_offset.size())-1};
    }

    //! Sort each row by column index. Uses GPU acceleration when possible.
    void sort ();

    //! Host-only fallback that sorts column indices row by row.
    void sort_on_host ();
};
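
// A usage sketch (illustrative only; it assumes amrex::Vector is viable as
// the storage template parameter): a 2-row matrix with 4 nonzeros whose rows
// hold unsorted columns.
//
//     CSR<double, Vector> A;
//     A.resize(2, 4);                  // 2 rows, 4 nonzero entries
//     A.row_offset = {0, 2, 4};        // row r spans [row_offset[r], row_offset[r+1])
//     A.col_index  = {2, 0, 1, 0};     // within-row columns, out of order
//     A.mat        = {5., 1., 4., 3.};
//     A.sort_on_host();                // row 0 -> cols {0,2}, row 1 -> cols {0,1}
//
// Device-backed matrices would call sort() instead, which dispatches to the
// GPU implementation below.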

//! Copy CSR buffers between memory spaces asynchronously.
template <typename C, typename T, template<typename> class AD, template<typename> class AS,
          std::enable_if_t<std::is_same_v<C,Gpu::HostToDevice> ||
                           std::is_same_v<C,Gpu::DeviceToHost> ||
                           std::is_same_v<C,Gpu::DeviceToDevice>, int> = 0>
void duplicateCSR (C c, CSR<T,AD>& dst, CSR<T,AS> const& src)
{
    dst.mat.resize(src.mat.size());
    dst.col_index.resize(src.col_index.size());
    dst.row_offset.resize(src.row_offset.size());
    Gpu::copyAsync(c,
                   src.mat.begin(),
                   src.mat.end(),
                   dst.mat.begin());
    Gpu::copyAsync(c,
                   src.col_index.begin(),
                   src.col_index.end(),
                   dst.col_index.begin());
    Gpu::copyAsync(c,
                   src.row_offset.begin(),
                   src.row_offset.end(),
                   dst.row_offset.begin());
    dst.nnz = src.nnz;
}
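
// Usage sketch (it mirrors the SYCL fallback in sort() below; double and
// Gpu::PinnedVector/Gpu::DeviceVector as the storage templates are
// assumptions): stage a device matrix d_csr on the host for inspection.
//
//     CSR<double, Gpu::PinnedVector> h_csr;
//     duplicateCSR(Gpu::deviceToHost, h_csr, d_csr);
//     Gpu::streamSynchronize(); // the copies above are asynchronous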

template <typename T, template <typename> class V>
void CSR<T,V>::sort ()
{
    if (nnz <= 0) { return; }

#ifdef AMREX_USE_GPU

#if defined(AMREX_USE_CUDA) || defined(AMREX_USE_HIP)

    // The function is synchronous. If that is no longer the case, we might
    // need to update SpMatrix::define.

    constexpr int nthreads = 256;
    constexpr int nwarps_per_block = nthreads / Gpu::Device::warp_size;

    AMREX_ALWAYS_ASSERT((nrows()+nwarps_per_block-1) < Long(std::numeric_limits<int>::max()));

    auto nr = int(nrows());
    int nblocks = (nr + nwarps_per_block-1) / nwarps_per_block;
    auto const& stream = Gpu::gpuStream();

    auto* pmat = mat.data();
    auto* pcol = col_index.data();
    auto* prow = row_offset.data();

    Gpu::Buffer<int> needs_fallback({0});
    auto* d_needs_fallback = needs_fallback.data();

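    // Strategy: one warp per row. A warp first checks whether its row is
    // already sorted, sorts the row in registers when it is short enough,
    // and otherwise bumps *d_needs_fallback so the row is handled by the
    // segmented radix sort further below.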
    amrex::launch_global<nthreads><<<nblocks, nthreads, 0, stream>>>
    ([=] AMREX_GPU_DEVICE () noexcept
    {
        int wid = int(threadIdx.x)/Gpu::Device::warp_size;
        int r = int(blockIdx.x)*nwarps_per_block + wid;
        if (r >= nr) { return; }

        Long const b = prow[r];
        Long const e = prow[r+1];
        auto const len = int(e - b);

        if (len <= 1) { return; }

        int lane = int(threadIdx.x) - wid * Gpu::Device::warp_size;

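        // Warp-strided sortedness check; the warp-wide vote below combines
        // the per-lane verdicts so fully sorted rows can exit early.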
        bool sorted = true;
        for (Long i = lane + 1; i < len; i += Gpu::Device::warp_size) {
            sorted = sorted && (pcol[b+i-1] <= pcol[b+i]);
        }
#if defined(AMREX_USE_CUDA)
        if (__all_sync(0xffffffff, sorted)) { return; }
#else
        if (__all(sorted)) { return; }
#endif

        constexpr int ITEMS_PER_THREAD = AMREX_HIP_OR_CUDA(2,4);
        constexpr int ITEMS_PER_WARP = Gpu::Device::warp_size * ITEMS_PER_THREAD;

        if (len <= ITEMS_PER_WARP)
        {
#if defined(AMREX_USE_CUDA)
            using WarpSort = cub::WarpMergeSort<Long, ITEMS_PER_THREAD, Gpu::Device::warp_size, T>;
            __shared__ typename WarpSort::TempStorage temp_storage[nwarps_per_block];
#elif defined(AMREX_USE_HIP)
            using WarpSort = rocprim::warp_sort<Long, Gpu::Device::warp_size, T>;
            __shared__ typename WarpSort::storage_type temp_storage[nwarps_per_block];
#endif

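            // Load the row into registers, padding unused slots with the
            // largest key so the padding sorts to the end and is never
            // written back.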
            Long keys[ITEMS_PER_THREAD];
            T values[ITEMS_PER_THREAD];

            #pragma unroll
            for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
                int idx = lane * ITEMS_PER_THREAD + i;
                if (idx < len) {
                    keys[i] = pcol[b + idx];
                    values[i] = pmat[b + idx];
                } else {
                    keys[i] = std::numeric_limits<Long>::max();
                    values[i] = T{};
                }
            }

            AMREX_HIP_OR_CUDA(
                WarpSort{}.sort(keys, values, temp_storage[wid]),
                WarpSort(temp_storage[wid]).Sort(
                    keys, values, [](Long x, Long y) {return x < y;}));

            #pragma unroll
            for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
                int idx = lane * ITEMS_PER_THREAD + i;
                if (idx < len) {
                    pcol[b + idx] = keys[i];
                    pmat[b + idx] = values[i];
                }
            }
        } else {
            if (lane == 0) {
                Gpu::Atomic::AddNoRet(d_needs_fallback, 1);
            }
        }
    });

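    // Gpu::Buffer::copyToHost synchronizes the stream, so the flag is valid
    // once it returns.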
    auto* h_needs_fallback = needs_fallback.copyToHost();

    if (*h_needs_fallback)
    {
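        // Fallback for rows longer than ITEMS_PER_WARP: a device-wide
        // segmented radix sort over all rows. The first call only queries
        // the scratch size; the second call sorts key-value pairs into the
        // *_out buffers, which are then swapped in.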
        V<Long> col_index_out(col_index.size());
        V<T> mat_out(mat.size());
        auto* d_col_out = col_index_out.data();
        auto* d_val_out = mat_out.data();

        std::size_t temp_bytes = 0;

        AMREX_GPU_SAFE_CALL(AMREX_HIP_OR_CUDA(
            rocprim::segmented_radix_sort_pairs,
            cub::DeviceSegmentedRadixSort::SortPairs)
                (nullptr, temp_bytes, pcol, d_col_out, pmat, d_val_out,
                 nnz, nr, prow, prow+1, 0, int(sizeof(Long)*CHAR_BIT),
                 stream));

        auto* d_temp = (void*) The_Arena()->alloc(temp_bytes);

        AMREX_GPU_SAFE_CALL(AMREX_HIP_OR_CUDA(
            rocprim::segmented_radix_sort_pairs,
            cub::DeviceSegmentedRadixSort::SortPairs)
                (d_temp, temp_bytes, pcol, d_col_out, pmat, d_val_out,
                 nnz, nr, prow, prow+1, 0, int(sizeof(Long)*CHAR_BIT),
                 stream));

        std::swap(col_index, col_index_out);
        std::swap(mat, mat_out);

        Gpu::streamSynchronize();
        The_Arena()->free(d_temp);
    }

    // TODO: test both code paths by printing the matrix to verify it is sorted.

    AMREX_GPU_ERROR_CHECK();

#elif defined(AMREX_USE_SYCL)

    // xxxxx TODO SYCL: Let's not worry about performance for now.
    CSR<T,Gpu::PinnedVector> h_csr;
    duplicateCSR(Gpu::deviceToHost, h_csr, *this);
    Gpu::streamSynchronize();
    h_csr.sort_on_host();
    duplicateCSR(Gpu::hostToDevice, *this, h_csr);
    Gpu::streamSynchronize();

#endif

#else

    sort_on_host();

#endif
}

template <typename T, template <typename> class V>
void CSR<T,V>::sort_on_host ()
{
    if (nnz <= 0) { return; }

    constexpr int SMALL = 128;
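    // Rows with at most SMALL entries are insertion-sorted in fixed-size
    // stack buffers; longer rows fall back to a heap-based permutation sort.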

    Long nr = nrows();

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
    {
        V<Long> lcols;
        V<T> lvals;
        V<int> perm;

        Long scols[SMALL];
        T svals[SMALL];

#ifdef AMREX_USE_OMP
#pragma omp for
#endif
        for (Long r = 0; r < nr; ++r) {
            Long const b = row_offset[r  ];
            Long const e = row_offset[r+1];
            auto const len = int(e - b);

            if (len <= 1) { continue; }

            bool sorted = true;
            for (int i = 1; i < len; ++i) {
                if (col_index[b+i-1] > col_index[b+i]) {
                    sorted = false;
                    break;
                }
            }
            if (sorted) { continue; }

            if (len <= SMALL) {
                // Insertion sort using arrays on the stack
                for (int i = 0; i < len; ++i) {
                    scols[i] = col_index[b+i];
                    svals[i] = mat      [b+i];
                }
                for (int i = 1; i < len; ++i) {
                    auto c = scols[i];
                    auto v = svals[i];
                    auto j = i;
                    while (j > 0 && scols[j-1] > c) {
                        scols[j] = scols[j-1];
                        svals[j] = svals[j-1];
                        --j;
                    }
                    scols[j] = c;
                    svals[j] = v;
                }
                for (int i = 0; i < len; ++i) {
                    col_index[b+i] = scols[i];
                    mat      [b+i] = svals[i];
                }
            } else {
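                // Large row: sort a permutation of [0,len) by column index,
                // then gather columns and values into sorted order.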
                lcols.resize(len);
                lvals.resize(len);
                perm.resize(len);

                for (int i = 0; i < len; ++i) {
                    lcols[i] = col_index[b+i];
                    lvals[i] = mat      [b+i];
                    perm [i] = i;
                }

                std::sort(perm.begin(), perm.end(),
                          [&] (int i0, int i1) {
                              return lcols[i0] < lcols[i1];
                          });

                for (int out = 0; out < len; ++out) {
                    auto const in = perm[out];
                    col_index[b+out] = lcols[in];
                    mat      [b+out] = lvals[in];
                }
            }
        }
    }
}

}

#endif