Blender V5.0
hip/queue.cpp
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
2 *
3 * SPDX-License-Identifier: Apache-2.0 */
4
#ifdef WITH_HIP

#  include "device/hip/queue.h"

#  include "device/hip/device_impl.h"
#  include "device/hip/graphics_interop.h"
#  include "device/hip/kernel.h"

#  include "util/log.h"
#  include "util/string.h"

15/* HIPDeviceQueue */
16
17HIPDeviceQueue::HIPDeviceQueue(HIPDevice *device)
18 : DeviceQueue(device), hip_device_(device), hip_stream_(nullptr)
19{
20 const HIPContextScope scope(hip_device_);
21 hip_device_assert(hip_device_, hipStreamCreateWithFlags(&hip_stream_, hipStreamNonBlocking));
22}
23
24HIPDeviceQueue::~HIPDeviceQueue()
25{
26 const HIPContextScope scope(hip_device_);
27 hipStreamDestroy(hip_stream_);
28}
29
30int HIPDeviceQueue::num_concurrent_states(const size_t state_size) const
31{
32 const int max_num_threads = hip_device_->get_num_multiprocessors() *
33 hip_device_->get_max_num_threads_per_multiprocessor();
34 int num_states = ((max_num_threads == 0) ? 65536 : max_num_threads) * 16;
35
36 const char *factor_str = getenv("CYCLES_CONCURRENT_STATES_FACTOR");
37 if (factor_str) {
38 const float factor = (float)atof(factor_str);
39 if (factor != 0.0f) {
40 num_states = max((int)(num_states * factor), 1024);
41 }
42 else {
43 LOG_TRACE << "CYCLES_CONCURRENT_STATES_FACTOR evaluated to 0";
44 }
45 }
46
47 LOG_TRACE << "GPU queue concurrent states: " << num_states << ", using up to "
49
50 return num_states;
51}
52
53int HIPDeviceQueue::num_concurrent_busy_states(const size_t /*state_size*/) const
54{
55 const int max_num_threads = hip_device_->get_num_multiprocessors() *
56 hip_device_->get_max_num_threads_per_multiprocessor();
57
58 if (max_num_threads == 0) {
59 return 65536;
60 }
61
62 return 4 * max_num_threads;
63}
64
65void HIPDeviceQueue::init_execution()
66{
67 /* Synchronize all textures and memory copies before executing task. */
68 HIPContextScope scope(hip_device_);
69 hip_device_->load_texture_info();
70 hip_device_assert(hip_device_, hipDeviceSynchronize());
71
72 debug_init_execution();
73}
74
75bool HIPDeviceQueue::enqueue(DeviceKernel kernel,
76 const int work_size,
77 const DeviceKernelArguments &args)
78{
79 if (hip_device_->have_error()) {
80 return false;
81 }
82
83 debug_enqueue_begin(kernel, work_size);
84
85 const HIPContextScope scope(hip_device_);
86
87 /* Update texture info in case memory moved to host. */
88 if (hip_device_->load_texture_info()) {
89 hip_device_assert(hip_device_, hipDeviceSynchronize());
90 if (hip_device_->have_error()) {
91 return false;
92 }
93 }
94
95 /* Compute kernel launch parameters. */
96 const HIPDeviceKernel &hip_kernel = hip_device_->kernels.get(kernel);
97 const int num_threads_per_block = hip_kernel.num_threads_per_block;
98 const int num_blocks = divide_up(work_size, num_threads_per_block);
99
100 int shared_mem_bytes = 0;
101
102 switch (kernel) {
111 /* See parall_active_index.h for why this amount of shared memory is needed. */
112 shared_mem_bytes = (num_threads_per_block + 1) * sizeof(int);
113 break;
114 default:
115 break;
116 }
117
118 /* Launch kernel. */
119 assert_success(hipModuleLaunchKernel(hip_kernel.function,
120 num_blocks,
121 1,
122 1,
123 num_threads_per_block,
124 1,
125 1,
126 shared_mem_bytes,
127 hip_stream_,
128 const_cast<void **>(args.values),
129 nullptr),
130 "enqueue");
131
132 debug_enqueue_end();
133
134 return !(hip_device_->have_error());
135}
136
137bool HIPDeviceQueue::synchronize()
138{
139 if (hip_device_->have_error()) {
140 return false;
141 }
142
143 const HIPContextScope scope(hip_device_);
144 assert_success(hipStreamSynchronize(hip_stream_), "synchronize");
145 debug_synchronize();
146
147 return !(hip_device_->have_error());
148}
149
150void HIPDeviceQueue::zero_to_device(device_memory &mem)
151{
152 assert(mem.type != MEM_GLOBAL && mem.type != MEM_TEXTURE);
153
154 if (mem.memory_size() == 0) {
155 return;
156 }
157
158 /* Allocate on demand. */
159 if (mem.device_pointer == 0) {
160 hip_device_->mem_alloc(mem);
161 }
162
163 /* Zero memory on device. */
164 assert(mem.device_pointer != 0);
165
166 const HIPContextScope scope(hip_device_);
167 assert_success(
168 hipMemsetD8Async((hipDeviceptr_t)mem.device_pointer, 0, mem.memory_size(), hip_stream_),
169 "zero_to_device");
170}
171
172void HIPDeviceQueue::copy_to_device(device_memory &mem)
173{
174 assert(mem.type != MEM_GLOBAL && mem.type != MEM_TEXTURE);
175
176 if (mem.memory_size() == 0) {
177 return;
178 }
179
180 /* Allocate on demand. */
181 if (mem.device_pointer == 0) {
182 hip_device_->mem_alloc(mem);
183 }
184
185 assert(mem.device_pointer != 0);
186 assert(mem.host_pointer != nullptr);
187
188 /* Copy memory to device. */
189 const HIPContextScope scope(hip_device_);
190 assert_success(
191 hipMemcpyHtoDAsync(
192 (hipDeviceptr_t)mem.device_pointer, mem.host_pointer, mem.memory_size(), hip_stream_),
193 "copy_to_device");
194}
195
196void HIPDeviceQueue::copy_from_device(device_memory &mem)
197{
198 assert(mem.type != MEM_GLOBAL && mem.type != MEM_TEXTURE);
199
200 if (mem.memory_size() == 0) {
201 return;
202 }
203
204 assert(mem.device_pointer != 0);
205 assert(mem.host_pointer != nullptr);
206
207 /* Copy memory from device. */
208 const HIPContextScope scope(hip_device_);
209 assert_success(
210 hipMemcpyDtoHAsync(
211 mem.host_pointer, (hipDeviceptr_t)mem.device_pointer, mem.memory_size(), hip_stream_),
212 "copy_from_device");
213}
214
215void HIPDeviceQueue::assert_success(hipError_t result, const char *operation)
216{
217 if (result != hipSuccess) {
218 const char *name = hipewErrorString(result);
219 hip_device_->set_error(
220 string_printf("%s in HIP queue %s (%s)", name, operation, debug_active_kernels().c_str()));
221 }
222}
223
224unique_ptr<DeviceGraphicsInterop> HIPDeviceQueue::graphics_interop_create()
225{
226 return make_unique<HIPDeviceGraphicsInterop>(this);
227}
228
230
231#endif /* WITH_HIP */
nullptr float
@ MEM_TEXTURE
#define CCL_NAMESPACE_END
#define assert(assertion)
const int num_states
ccl_gpu_kernel_postfix const ccl_global int ccl_global float const int work_size
DeviceKernel
@ DEVICE_KERNEL_INTEGRATOR_QUEUED_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_QUEUED_SHADOW_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_TERMINATED_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_SORTED_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_TERMINATED_SHADOW_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_ACTIVE_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_COMPACT_PATHS_ARRAY
#define LOG_TRACE
Definition log.h:108
const char * name
string string_human_readable_size(size_t size)
Definition string.cpp:257
CCL_NAMESPACE_BEGIN string string_printf(const char *format,...)
Definition string.cpp:23
void * values[MAX_ARGS]
max
Definition text_draw.cc:251
ccl_device_inline size_t divide_up(const size_t x, const size_t y)
Definition types_base.h:52