// backend_gpu/defs.h
1#ifndef GPU_DEFS_H
2#define GPU_DEFS_H
3
4// On Puhti, use UCX_MEMTYPE_CACHE=n with
5// GPU_AWARE_MPI
6
7#include <sstream>
8#include <iostream>
9
10// Prototypes for memory pool ops
11void gpu_memory_pool_alloc(void **p, size_t req_size);
12void gpu_memory_pool_free(void *ptr);
13void gpu_memory_pool_purge();
14void gpu_memory_pool_report();
15
16////////////////////////////////////////////////////////////////////////////////////
17// some device rng headers
18////////////////////////////////////////////////////////////////////////////////////
19namespace hila {
20// double random(); // defined in random.h
21void seed_device_rng(unsigned long long seed);
22} // namespace hila
23
24namespace hila {
25void free_device_rng();
26} // namespace hila
27
28
29#ifndef HILAPP
30
31// GPU specific definitions
32
33////////////////////////////////////////////////////////////////////////////////////
34// Some cuda-specific definitions
35////////////////////////////////////////////////////////////////////////////////////
36#if defined(CUDA)
37
38#include <cuda.h>
39#include <cuda_runtime.h>
40#include <cub/cub.cuh>
41
42using gpuError = cudaError;
43#define gpuSuccess cudaSuccess
44
45/////////////////////////////////////////////
46// If gpu memory pool in use, the interface to memory
47#ifdef GPU_MEMORY_POOL
48#define gpuMalloc(a, b) gpu_memory_pool_alloc((void **)a, b)
49#define gpuFree(a) gpu_memory_pool_free(a)
50#define gpuMemPoolPurge() gpu_memory_pool_purge()
51#define gpuMemPoolReport() gpu_memory_pool_report()
52
53#else
54// here std interfaces
55
56// clang-format off
57#define gpuMemPoolPurge() do { } while (0)
58#define gpuMemPoolReport() do { } while (0)
59// clang-format on
60
61#ifdef CUDA_MALLOC_ASYNC
62#define gpuMalloc(a, b) GPU_CHECK(cudaMallocAsync(a, b, 0))
63#define gpuFree(a) GPU_CHECK(cudaFreeAsync(a, 0))
64
65#else
66#define gpuMalloc(a, b) GPU_CHECK(cudaMalloc((void **)a, b))
67#define gpuFree(a) GPU_CHECK(cudaFree(a))
68
69#endif
70
71#endif // gpu memory pool
72/////////////////////////////////////////////
73
74
75#define gpuGetLastError cudaGetLastError
76#define gpuMemcpy(a, b, c, d) GPU_CHECK(cudaMemcpy(a, b, c, d))
77#define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
78#define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost
79#define gpuMemcpyDeviceToDevice cudaMemcpyDeviceToDevice
80#define gpuDeviceSynchronize() GPU_CHECK(cudaDeviceSynchronize())
81#define gpuStreamSynchronize(a) GPU_CHECK(cudaStreamSynchronize(a))
82#define gpuMemset(a, b, c) GPU_CHECK(cudaMemset(a, b, c))
83#define gpuMemcpyToSymbol(a, b, size, c, dir) GPU_CHECK(cudaMemcpyToSymbol(a, b, size, c, dir))
84
85#define GPUTYPESTR "CUDA"
86
87#ifdef __CUDA_ARCH__
88#define _GPU_DEVICE_COMPILE_ __CUDA_ARCH__
89#endif
90
91////////////////////////////////////////////////////////////////////////////////////
92// Same for HIP
93////////////////////////////////////////////////////////////////////////////////////
94#elif defined(HIP)
95
96#include <hip/hip_runtime.h>
97#include <hiprand/hiprand.h>
98
99// #include <hipcub/hipcub.hpp>*
100
101using gpuError = hipError_t;
102#define gpuSuccess hipSuccess
103
104/////////////////////////////////////////////
105// If gpu memory pool in use, the interface to memory
106#ifdef GPU_MEMORY_POOL
107#define gpuMalloc(a, b) gpu_memory_pool_alloc((void **)a, b)
108#define gpuFree(a) gpu_memory_pool_free(a)
109#define gpuMemPoolPurge() gpu_memory_pool_purge()
110#define gpuMemPoolReport() gpu_memory_pool_report()
111
112
113#else
114// here std interfaces
115
116// clang-format off
117#define gpuMemPoolPurge() do {} while (0)
118#define gpuMemPoolReport() do {} while (0)
119// clang-format on
120
121#define gpuMalloc(a, b) GPU_CHECK(hipMalloc((void **)a, b))
122#define gpuFree(a) GPU_CHECK(hipFree(a))
123
124#endif // ifdef memory pool
125
126#define gpuGetLastError hipGetLastError
127#define gpuMemcpy(a, b, siz, d) GPU_CHECK(hipMemcpy(a, b, siz, d))
128#define gpuMemcpyHostToDevice hipMemcpyHostToDevice
129#define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost
130#define gpuMemcpyDeviceToDevice hipMemcpyDeviceToDevice
131#define gpuDeviceSynchronize() GPU_CHECK(hipDeviceSynchronize())
132#define gpuStreamSynchronize(a) GPU_CHECK(hipStreamSynchronize(a))
133#define gpuMemset(a, b, c) GPU_CHECK(hipMemset(a, b, c))
134#define gpuMemcpyToSymbol(a, b, size, c, dir) \
135 GPU_CHECK(hipMemcpyToSymbol(HIP_SYMBOL(a), b, size, c, dir))
136
137
138#define GPUTYPESTR "HIP"
139
140#ifdef __HIP_DEVICE_COMPILE__
141#define _GPU_DEVICE_COMPILE_ __HIP_DEVICE_COMPILE__
142#endif
143
144#endif
145////////////////////////////////////////////////////////////////////////////////////
146// General GPU (cuda/hip) definitions
147////////////////////////////////////////////////////////////////////////////////////
148
149
150#define GPU_CHECK(cmd) \
151 do { \
152 auto code = cmd; \
153 gpu_exit_on_error(code, #cmd, __FILE__, __LINE__); \
154 } while (0)
155
156#define check_device_error(msg) gpu_exit_on_error(msg, __FILE__, __LINE__)
157#define check_device_error_code(code, msg) gpu_exit_on_error(code, msg, __FILE__, __LINE__)
158void gpu_exit_on_error(const char *msg, const char *file, int line);
159void gpu_exit_on_error(gpuError code, const char *msg, const char *file, int line);
160
161namespace hila {
162inline void synchronize_threads() {
163 gpuDeviceSynchronize();
164}
165} // namespace hila
166
167#else // NOW HILAPP
168
169////////////////////////////////////////////////////////////////////////////////////
170// Now not cuda or hip - hilapp stage scans this section
171///////////////////////////////////////////////////////////////////////////////////
172
173
174using gpuError = int;
175
176// Define empty stubs - return 1 (true)
177// clang-format off
178#define gpuMalloc(a, b) do {} while(0)
179#define gpuFree(a) do {} while(0)
180#define gpuMemcpy(a, b, siz, d) do {} while(0)
181#define gpuMemcpyHostToDevice 1
182#define gpuMemcpyDeviceToHost 2
183#define gpuMemset(a,b,c) do {} while(0)
184#define gpuMemcpyToSymbol(a, b, size, c, dir) do {} while(0)
185
186#define gpuMemPoolPurge() do {} while(0)
187#define gpuMemPoolReport() do {} while(0)
188
189#define check_device_error(msg) do {} while(0)
190#define check_device_error_code(code, msg) do {} while(0)
191
192#define gpuStreamSynchronize(a) do {} while(0)
193#define gpuDeviceSynchronize() do {} while(0)
194
195#define gpuGetLastError cudaGetLastError
196
197
198// clang-format on
199
200
201#define GPUTYPESTR "NONE"
202
203namespace hila {
204inline void synchronize_threads() {}
205} // namespace hila
206
207#endif
208////////////////////////////////////////////////////////////////////////////////////
209
// Per-device setup, implemented in the backend sources
void initialize_gpu(int rank, int device);
void gpu_device_info();


namespace hila {

/// Implements tests for basic types, similar to
/// std::is_arithmetic, but allows the backend to add
/// its own basic types (such as AVX vectors)
template <class T>
struct is_arithmetic : std::integral_constant<bool, std::is_arithmetic<T>::value> {};

/// True when a U can be assigned to a T, following std::is_assignable
template <class T, class U>
struct is_assignable : std::integral_constant<bool, std::is_assignable<T, U>::value> {};

/// True for floating-point basic types, following std::is_floating_point
template <class T>
struct is_floating_point : std::integral_constant<bool, std::is_floating_point<T>::value> {};

} // namespace hila

232#endif