// HILA lattice framework - backend_gpu/defs.h
1#ifndef GPU_DEFS_H
2#define GPU_DEFS_H
3
4// On Puhti, use UCX_MEMTYPE_CACHE=n with
5// GPU_AWARE_MPI
6
7#include <sstream>
8#include <iostream>
9
10// Prototypes for memory pool ops
11void gpu_memory_pool_alloc(void **p, size_t req_size);
12void gpu_memory_pool_free(void *ptr);
13void gpu_memory_pool_purge();
14void gpu_memory_pool_report();
15
16////////////////////////////////////////////////////////////////////////////////////
17// some device rng headers
18////////////////////////////////////////////////////////////////////////////////////
19namespace hila {
20// double random(); // defined in random.h
21void seed_device_rng(unsigned long long seed);
22} // namespace hila
23
24namespace hila {
25void free_device_rng();
26} // namespace hila
27
28
29#ifndef HILAPP
30
31// GPU specific definitions
32
33////////////////////////////////////////////////////////////////////////////////////
34// Some cuda-specific definitions
35////////////////////////////////////////////////////////////////////////////////////
36#if defined(CUDA)
37
38#include <cuda.h>
39#include <cuda_runtime.h>
40#include <cub/cub.cuh>
41
42using gpuError = cudaError;
43#define gpuSuccess cudaSuccess
44
45/////////////////////////////////////////////
46// If gpu memory pool in use, the interface to memory
47#ifdef GPU_MEMORY_POOL
48#define gpuMalloc(a, b) gpu_memory_pool_alloc((void **)a, b)
49#define gpuFree(a) gpu_memory_pool_free(a)
50#define gpuMemPoolPurge() gpu_memory_pool_purge()
51#define gpuMemPoolReport() gpu_memory_pool_report()
52
53#else
54// here std interfaces
55
56// clang-format off
57#define gpuMemPoolPurge() do { } while (0)
58#define gpuMemPoolReport() do { } while (0)
59// clang-format on
60
61#ifdef CUDA_MALLOC_ASYNC
62#define gpuMalloc(a, b) GPU_CHECK(cudaMallocAsync(a, b, 0))
63#define gpuFree(a) GPU_CHECK(cudaFreeAsync(a, 0))
64
65#else
66#define gpuMalloc(a, b) GPU_CHECK(cudaMalloc((void **)a, b))
67#define gpuFree(a) GPU_CHECK(cudaFree(a))
68
69#endif
70
71#endif // gpu memory pool
72/////////////////////////////////////////////
73
74
75#define gpuGetLastError cudaGetLastError
76#define gpuMemcpy(a, b, c, d) GPU_CHECK(cudaMemcpy(a, b, c, d))
77#define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
78#define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost
79#define gpuMemcpyDeviceToDevice cudaMemcpyDeviceToDevice
80#define gpuDeviceSynchronize() GPU_CHECK(cudaDeviceSynchronize())
81#define gpuStreamSynchronize(a) GPU_CHECK(cudaStreamSynchronize(a))
82#define gpuMemset(a,b,c) GPU_CHECK(cudaMemset(a,b,c))
83#define gpuMemcpyToSymbol(a, b, size, c, dir) GPU_CHECK(cudaMemcpyToSymbol(a, b, size, c, dir))
84
85#define GPUTYPESTR "CUDA"
86
87#ifdef __CUDA_ARCH__
88#define _GPU_DEVICE_COMPILE_ __CUDA_ARCH__
89#endif
90
91////////////////////////////////////////////////////////////////////////////////////
92// Same for HIP
93////////////////////////////////////////////////////////////////////////////////////
94#elif defined(HIP)
95
96#include <hip/hip_runtime.h>
97#include <hiprand/hiprand.h>
98
99//#include <hipcub/hipcub.hpp>*
100
101using gpuError = hipError_t;
102#define gpuSuccess hipSuccess
103
104/////////////////////////////////////////////
105// If gpu memory pool in use, the interface to memory
106#ifdef GPU_MEMORY_POOL
107#define gpuMalloc(a, b) gpu_memory_pool_alloc((void **)a, b)
108#define gpuFree(a) gpu_memory_pool_free(a)
109#define gpuMemPoolPurge() gpu_memory_pool_purge()
110#define gpuMemPoolReport() gpu_memory_pool_report()
111
112
113#else
114// here std interfaces
115
116// clang-format off
117#define gpuMemPoolPurge() do {} while (0)
118#define gpuMemPoolReport() do {} while (0)
119// clang-format on
120
121#define gpuMalloc(a, b) GPU_CHECK(hipMalloc((void **)a, b))
122#define gpuFree(a) GPU_CHECK(hipFree(a))
123
124#endif // ifdef memory pool
125
126#define gpuGetLastError hipGetLastError
127#define gpuMemcpy(a, b, siz, d) GPU_CHECK(hipMemcpy(a, b, siz, d))
128#define gpuMemcpyHostToDevice hipMemcpyHostToDevice
129#define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost
130#define gpuMemcpyDeviceToDevice hipMemcpyDeviceToDevice
131#define gpuDeviceSynchronize() GPU_CHECK(hipDeviceSynchronize())
132#define gpuStreamSynchronize(a) GPU_CHECK(hipStreamSynchronize(a))
133#define gpuMemset(a,b,c) GPU_CHECK(hipMemset(a,b,c))
134#define gpuMemcpyToSymbol(a, b, size, c, dir) \
135 GPU_CHECK(hipMemcpyToSymbol(HIP_SYMBOL(a), b, size, c, dir))
136
137
138#define GPUTYPESTR "HIP"
139
140#ifdef __HIP_DEVICE_COMPILE__
141#define _GPU_DEVICE_COMPILE_ __HIP_DEVICE_COMPILE__
142#endif
143
144#endif
145////////////////////////////////////////////////////////////////////////////////////
146// General GPU (cuda/hip) definitions
147////////////////////////////////////////////////////////////////////////////////////
148
149
150#define GPU_CHECK(cmd) \
151 do { \
152 auto code = cmd; \
153 gpu_exit_on_error(code, #cmd, __FILE__, __LINE__); \
154 } while (0)
155
156#define check_device_error(msg) gpu_exit_on_error(msg, __FILE__, __LINE__)
157#define check_device_error_code(code, msg) \
158 gpu_exit_on_error(code, msg, __FILE__, __LINE__)
159void gpu_exit_on_error(const char *msg, const char *file, int line);
160void gpu_exit_on_error(gpuError code, const char *msg, const char *file, int line);
161
162namespace hila {
163inline void synchronize_threads() {
164 gpuDeviceSynchronize();
165}
166} // namespace hila
167
168#else // NOW HILAPP
169
170////////////////////////////////////////////////////////////////////////////////////
171// Now not cuda or hip - hilapp stage scans this section
172///////////////////////////////////////////////////////////////////////////////////
173
174
175using gpuError = int;
176
177// Define empty stubs - return 1 (true)
178// clang-format off
179#define gpuMalloc(a, b) do {} while(0)
180#define gpuFree(a) do {} while(0)
181#define gpuMemcpy(a, b, siz, d) do {} while(0)
182#define gpuMemcpyHostToDevice 1
183#define gpuMemcpyDeviceToHost 2
184#define gpuMemset(a,b,c) do {} while(0)
185#define gpuMemcpyToSymbol(a, b, size, c, dir) do {} while(0)
186
187#define gpuMemPoolPurge() do {} while(0)
188#define gpuMemPoolReport() do {} while(0)
189
190#define check_device_error(msg) do {} while(0)
191#define check_device_error_code(code, msg) do {} while(0)
192
193#define gpuStreamSynchronize(a) do {} while(0)
194#define gpuDeviceSynchronize() do {} while(0)
195
196#define gpuGetLastError cudaGetLastError
197
198
199// clang-format on
200
201
202#define GPUTYPESTR "NONE"
203
204namespace hila {
205inline void synchronize_threads() {}
206} // namespace hila
207
208#endif
209////////////////////////////////////////////////////////////////////////////////////
210
211void initialize_gpu(int rank,int device);
212void gpu_device_info();
213
214// This is not the CUDA compiler
215// Maybe hilapp?
216
217namespace hila {
218
219// Implements test for basic in types, similar to
220/// std::is_arithmetic, but allows the backend to add
221/// it's own basic tyes (such as AVX vectors)
222template <class T>
223struct is_arithmetic : std::integral_constant<bool, std::is_arithmetic<T>::value> {};
224
225template <class T, class U>
226struct is_assignable : std::integral_constant<bool, std::is_assignable<T, U>::value> {};
227
228template <class T>
229struct is_floating_point
230 : std::integral_constant<bool, std::is_floating_point<T>::value> {};
231
232} // namespace hila
233
234#endif
// (documentation cross-references, from the generated docs:)
// hila::swap for gauge fields - definition in array.h:981
// hila::free_device_rng() - frees GPU RNG state, does nothing on non-GPU archs;
// definition in hila_gpu.cpp:107