Blender V5.0
hip/device_impl.cpp
/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
 *
 * SPDX-License-Identifier: Apache-2.0 */

#ifdef WITH_HIP

#  include <cstdio>
#  include <cstdlib>
#  include <cstring>
#  include <iomanip>

#  include "device/hip/device_impl.h"

#  include "util/debug.h"
#  include "util/log.h"
#  include "util/md5.h"
#  include "util/path.h"
#  include "util/string.h"
#  include "util/system.h"
#  include "util/time.h"
#  include "util/types.h"

#  ifdef _WIN32
#    include "util/windows.h"
#  endif

#  include "device/hip/queue.h"

#  include "kernel/device/hip/globals.h"

CCL_NAMESPACE_BEGIN

class HIPDevice;

bool HIPDevice::have_precompiled_kernels()
{
  string fatbins_path = path_get("lib");
  return path_exists(fatbins_path);
}

BVHLayoutMask HIPDevice::get_bvh_layout_mask(uint /*kernel_features*/) const
{
  return BVH_LAYOUT_BVH2;
}

void HIPDevice::set_error(const string &error)
{
  Device::set_error(error);

  if (first_error) {
    LOG_ERROR << "Refer to the Cycles GPU rendering documentation for possible solutions:\n"
                 "https://docs.blender.org/manual/en/latest/render/cycles/gpu_rendering.html\n";
    first_error = false;
  }
}

HIPDevice::HIPDevice(const DeviceInfo &info, Stats &stats, Profiler &profiler, bool headless)
    : GPUDevice(info, stats, profiler, headless)
{
  /* Verify that base class types can be used with specific backend types. */
  static_assert(sizeof(texMemObject) == sizeof(hipTextureObject_t));
  static_assert(sizeof(arrayMemObject) == sizeof(hArray));

  first_error = true;

  hipDevId = info.num;
  hipDevice = 0;
  hipContext = nullptr;

  hipModule = nullptr;

  need_texture_info = false;

  pitch_alignment = 0;

  /* Initialize HIP. */
  hipError_t result = hipInit(0);
  if (result != hipSuccess) {
    set_error(string_printf("Failed to initialize HIP runtime (%s)", hipewErrorString(result)));
    return;
  }

  /* Setup device and context. */
  result = hipDeviceGet(&hipDevice, hipDevId);
  if (result != hipSuccess) {
    set_error(string_printf("Failed to get HIP device handle from ordinal (%s)",
                            hipewErrorString(result)));
    return;
  }

  /* hipDeviceMapHost for mapping host memory when out of device memory.
   * hipDeviceLmemResizeToMax for reserving local memory ahead of render,
   * so we can predict which memory to map to host. */
  int value;
  hip_assert(hipDeviceGetAttribute(&value, hipDeviceAttributeCanMapHostMemory, hipDevice));
  can_map_host = value != 0;

  hip_assert(
      hipDeviceGetAttribute(&pitch_alignment, hipDeviceAttributeTexturePitchAlignment, hipDevice));

  unsigned int ctx_flags = hipDeviceLmemResizeToMax;
  if (can_map_host) {
    ctx_flags |= hipDeviceMapHost;
    init_host_memory();
  }

  /* Create context. */
  result = hipCtxCreate(&hipContext, ctx_flags, hipDevice);

  if (result != hipSuccess) {
    set_error(string_printf("Failed to create HIP context (%s)", hipewErrorString(result)));
    return;
  }

  int major, minor;
  hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, hipDevId);
  hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, hipDevId);
  hipDevArchitecture = major * 100 + minor * 10;
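  /* Example: a gfx1030 (RDNA 2) device reports compute capability 10.3, so
   * hipDevArchitecture = 10 * 100 + 3 * 10 = 1030. */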

  /* Get HIP runtime version, needed for memory types. */
  hip_assert(hipRuntimeGetVersion(&hipRuntimeVersion));

  /* Pop context set by hipCtxCreate. */
  hipCtxPopCurrent(nullptr);
}

HIPDevice::~HIPDevice()
{
  texture_info.free();
  if (hipModule) {
    hip_assert(hipModuleUnload(hipModule));
  }
  hip_assert(hipCtxDestroy(hipContext));
}

bool HIPDevice::support_device(const uint /*kernel_features*/)
{
  if (hipSupportsDevice(hipDevId)) {
    return true;
  }
  /* We only support Navi and above. */
  hipDeviceProp_t props;
  hipGetDeviceProperties(&props, hipDevId);

  set_error(string_printf("HIP backend requires AMD RDNA graphics card or up, but found %s.",
                          props.name));
  return false;
}

bool HIPDevice::check_peer_access(Device *peer_device)
{
  if (peer_device == this) {
    return false;
  }
  if (peer_device->info.type != DEVICE_HIP && peer_device->info.type != DEVICE_OPTIX) {
    return false;
  }

  HIPDevice *const peer_device_hip = static_cast<HIPDevice *>(peer_device);

  int can_access = 0;
  hip_assert(hipDeviceCanAccessPeer(&can_access, hipDevice, peer_device_hip->hipDevice));
  if (can_access == 0) {
    return false;
  }

  /* Ensure array access over the link is possible as well (for 3D textures). */
  hip_assert(hipDeviceGetP2PAttribute(
      &can_access, hipDevP2PAttrHipArrayAccessSupported, hipDevice, peer_device_hip->hipDevice));
  if (can_access == 0) {
    return false;
  }

  /* Enable peer access in both directions. */
  {
    const HIPContextScope scope(this);
    hipError_t result = hipCtxEnablePeerAccess(peer_device_hip->hipContext, 0);
    if (result != hipSuccess) {
      set_error(string_printf("Failed to enable peer access on HIP context (%s)",
                              hipewErrorString(result)));
      return false;
    }
  }
  {
    const HIPContextScope scope(peer_device_hip);
    hipError_t result = hipCtxEnablePeerAccess(hipContext, 0);
    if (result != hipSuccess) {
      set_error(string_printf("Failed to enable peer access on HIP context (%s)",
                              hipewErrorString(result)));
      return false;
    }
  }

  return true;
}

bool HIPDevice::use_adaptive_compilation()
{
  return DebugFlags().hip.adaptive_compile;
}

/* Common HIPCC flags which stay the same regardless of shading model or
 * kernel sources MD5, and depend only on compiler or compilation settings.
 */
string HIPDevice::compile_kernel_get_common_cflags(const uint kernel_features)
{
  const int machine = system_cpu_bits();
  const string source_path = path_get("source");
  const string include_path = source_path;
  string cflags = string_printf(
      "-m%d "
      "-DHIPCC "
      "-I\"%s\"",
      machine,
      include_path.c_str());
  if (use_adaptive_compilation()) {
    cflags += " -D__KERNEL_FEATURES__=" + to_string(kernel_features);
  }

  const char *extra_cflags = getenv("CYCLES_HIP_EXTRA_CFLAGS");
  if (extra_cflags) {
    cflags += string(" ") + string(extra_cflags);
  }

#  ifdef WITH_NANOVDB
  cflags += " -DWITH_NANOVDB";
#  endif

#  ifdef WITH_CYCLES_DEBUG
  cflags += " -DWITH_CYCLES_DEBUG";
#  endif

  return cflags;
}
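/* Illustrative output on a 64-bit build with sources under a hypothetical
 * /opt/blender/source directory:
 *   -m64 -DHIPCC -I"/opt/blender/source"
 * with -D__KERNEL_FEATURES__=<mask> appended for adaptive compilation and any
 * CYCLES_HIP_EXTRA_CFLAGS appended verbatim. */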

string HIPDevice::compile_kernel(const uint kernel_features, const char *name, const char *base)
{
  /* Compute kernel name. */
  int major, minor;
  hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, hipDevId);
  hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, hipDevId);
  const std::string arch = hipDeviceArch(hipDevId);

  /* Attempt to use kernel provided with Blender. */
  if (!use_adaptive_compilation()) {
    const string fatbin = path_get(string_printf("lib/%s_%s.fatbin.zst", name, arch.c_str()));
    LOG_INFO << "Testing for pre-compiled kernel " << fatbin << ".";
    if (path_exists(fatbin)) {
      LOG_INFO << "Using precompiled kernel.";
      return fatbin;
    }
  }

  /* Try to use locally compiled kernel. */
  string source_path = path_get("source");
  const string source_md5 = path_files_md5_hash(source_path);

  /* We include cflags in the MD5, so that changing the HIP toolkit or other
   * compiler command line arguments makes sure the fatbin gets re-built.
   */
  string common_cflags = compile_kernel_get_common_cflags(kernel_features);
  const string kernel_md5 = util_md5_string(source_md5 + common_cflags);
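  /* As a result, the cached fatbin name below changes whenever either the
   * kernel sources or the compiler flags change, so a stale binary is never
   * picked up. */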

  const char *const kernel_ext = "genco";
  std::string options = "-Wno-parentheses-equality -Wno-unused-value -ffast-math -std=c++17";

#  ifndef NDEBUG
  options.append(" -save-temps");
#  endif
  if (major == 9 && minor == 0) {
    /* Reduce optimization level on VEGA GPUs to avoid some rendering artifacts. */
    options.append(" -O1");
  }
  options.append(" --offload-arch=").append(arch);

  const string include_path = source_path;
  const string fatbin_file = string_printf(
      "cycles_%s_%s_%s", name, arch.c_str(), kernel_md5.c_str());
  const string fatbin = path_cache_get(path_join("kernels", fatbin_file));
  LOG_INFO << "Testing for locally compiled kernel " << fatbin << ".";
  if (path_exists(fatbin)) {
    LOG_INFO << "Using locally compiled kernel.";
    return fatbin;
  }

#  ifdef _WIN32
  if (!use_adaptive_compilation() && have_precompiled_kernels()) {
    if (!hipSupportsDevice(hipDevId)) {
      set_error(
          string_printf("HIP backend requires compute capability 10.1 or up, but found %d.%d. "
                        "Your GPU is not supported.",
                        major,
                        minor));
    }
    else {
      set_error(
          string_printf("HIP binary kernel for this graphics card compute "
                        "capability (%d.%d) not found.",
                        major,
                        minor));
    }
    return string();
  }
#  endif

  /* Compile. */
  const char *const hipcc = hipewCompilerPath();
  if (hipcc == nullptr) {
    set_error(
        "HIP hipcc compiler not found. "
        "Install HIP toolkit in default location.");
    return string();
  }

  const int hipcc_hip_version = hipewCompilerVersion();
  LOG_INFO << "Found hipcc " << hipcc << ", HIP version " << hipcc_hip_version << ".";

  double starttime = time_dt();

  path_create_directories(fatbin);

  source_path = path_join(path_join(source_path, "kernel"),
                          path_join("device", path_join(base, string_printf("%s.cpp", name))));

  string command = string_printf("%s %s -I \"%s\" --%s \"%s\" -o \"%s\" %s",
                                 hipcc,
                                 options.c_str(),
                                 include_path.c_str(),
                                 kernel_ext,
                                 source_path.c_str(),
                                 fatbin.c_str(),
                                 common_cflags.c_str());

  LOG_INFO_IMPORTANT << "Compiling " << ((use_adaptive_compilation()) ? "adaptive " : "")
                     << "HIP kernel ... " << command;

#  ifdef _WIN32
  command = "call " + command;
#  endif
  if (system(command.c_str()) != 0) {
    set_error(
        "Failed to execute compilation command, "
        "see console for details.");
    return string();
  }

  /* Verify if compilation succeeded. */
  if (!path_exists(fatbin)) {
    set_error(
        "HIP kernel compilation failed, "
        "see console for details.");
    return string();
  }

  LOG_INFO_IMPORTANT << "Kernel compilation finished in " << std::fixed << std::setprecision(2)
                     << time_dt() - starttime << "s";

  return fatbin;
}

bool HIPDevice::load_kernels(const uint kernel_features)
{
  /* TODO(sergey): Support kernels re-load for HIP devices adaptive compile.
   *
   * Currently re-loading kernels will invalidate memory pointers.
   */
  if (hipModule) {
    if (use_adaptive_compilation()) {
      LOG_INFO << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
    }
    return true;
  }

  /* Check if HIP initialization succeeded. */
  if (hipContext == nullptr) {
    return false;
  }

  /* Check if GPU is supported. */
  if (!support_device(kernel_features)) {
    return false;
  }

  /* Get kernel. */
  const char *kernel_name = "kernel";
  string fatbin = compile_kernel(kernel_features, kernel_name);
  if (fatbin.empty()) {
    return false;
  }

  /* Open module. */
  HIPContextScope scope(this);

  string fatbin_data;
  hipError_t result;

  if (path_read_compressed_text(fatbin, fatbin_data)) {
    result = hipModuleLoadData(&hipModule, fatbin_data.c_str());
  }
  else {
    result = hipErrorFileNotFound;
  }

  if (result != hipSuccess) {
    set_error(string_printf(
        "Failed to load HIP kernel from '%s' (%s)", fatbin.c_str(), hipewErrorString(result)));
  }

  if (result == hipSuccess) {
    kernels.load(this);
    reserve_local_memory(kernel_features);
  }

  return (result == hipSuccess);
}

void HIPDevice::reserve_local_memory(const uint kernel_features)
{
  /* Together with hipDeviceLmemResizeToMax, this reserves local memory
   * needed for kernel launches, so that we can reliably figure out when
   * to allocate scene data in mapped host memory. */
  size_t total = 0, free_before = 0, free_after = 0;

  {
    HIPContextScope scope(this);
    hipMemGetInfo(&free_before, &total);
  }

  {
    /* Use the biggest kernel for estimation. */
    const DeviceKernel test_kernel = (kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) ?
                                         DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE :
                                     (kernel_features & KERNEL_FEATURE_MNEE) ?
                                         DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_MNEE :
                                         DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE;

    /* Launch kernel, using just 1 block appears sufficient to reserve memory for all
     * multiprocessors. It would be good to do this in parallel for the multi GPU case
     * still to make it faster. */
    HIPDeviceQueue queue(this);

    device_ptr d_path_index = 0;
    device_ptr d_render_buffer = 0;
    int d_work_size = 0;
    DeviceKernelArguments args(&d_path_index, &d_render_buffer, &d_work_size);

    queue.init_execution();
    queue.enqueue(test_kernel, 1, args);
    queue.synchronize();
  }

  {
    HIPContextScope scope(this);
    hipMemGetInfo(&free_after, &total);
  }

  LOG_INFO << "Local memory reserved " << string_human_readable_number(free_before - free_after)
           << " bytes. (" << string_human_readable_size(free_before - free_after) << ")";

#  if 0
  /* For testing mapped host memory, fill up device memory. */
  const size_t keep_mb = 1024;

  while (free_after > keep_mb * 1024 * 1024LL) {
    hipDeviceptr_t tmp;
    hip_assert(hipMalloc(&tmp, 10 * 1024 * 1024LL));
    hipMemGetInfo(&free_after, &total);
  }
#  endif
}

void HIPDevice::get_device_memory_info(size_t &total, size_t &free)
{
  HIPContextScope scope(this);

  hipMemGetInfo(&free, &total);
}

bool HIPDevice::alloc_device(void *&device_pointer, const size_t size)
{
  HIPContextScope scope(this);

  hipError_t mem_alloc_result = hipMalloc((hipDeviceptr_t *)&device_pointer, size);
  return mem_alloc_result == hipSuccess;
}

void HIPDevice::free_device(void *device_pointer)
{
  HIPContextScope scope(this);

  hip_assert(hipFree((hipDeviceptr_t)device_pointer));
}

bool HIPDevice::shared_alloc(void *&shared_pointer, const size_t size)
{
  HIPContextScope scope(this);

  hipError_t mem_alloc_result = hipHostMalloc(
      &shared_pointer, size, hipHostMallocMapped | hipHostMallocWriteCombined);

  return mem_alloc_result == hipSuccess;
}

void HIPDevice::shared_free(void *shared_pointer)
{
  HIPContextScope scope(this);

  hipHostFree(shared_pointer);
}

void *HIPDevice::shared_to_device_pointer(const void *shared_pointer)
{
  HIPContextScope scope(this);
  void *device_pointer = nullptr;
  hip_assert(
      hipHostGetDevicePointer((hipDeviceptr_t *)&device_pointer, (void *)shared_pointer, 0));
  return device_pointer;
}
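/* For mapped host allocations the device-side address may differ from the host
 * address, hence the explicit hipHostGetDevicePointer() lookup above. */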

void HIPDevice::copy_host_to_device(void *device_pointer, void *host_pointer, const size_t size)
{
  const HIPContextScope scope(this);

  hip_assert(hipMemcpyHtoD((hipDeviceptr_t)device_pointer, host_pointer, size));
}

void HIPDevice::mem_alloc(device_memory &mem)
{
  if (mem.type == MEM_TEXTURE) {
    assert(!"mem_alloc not supported for textures.");
  }
  else if (mem.type == MEM_GLOBAL) {
    assert(!"mem_alloc not supported for global memory.");
  }
  else {
    generic_alloc(mem);
  }
}

void HIPDevice::mem_copy_to(device_memory &mem)
{
  if (mem.type == MEM_GLOBAL) {
    global_copy_to(mem);
  }
  else if (mem.type == MEM_TEXTURE) {
    tex_copy_to((device_texture &)mem);
  }
  else {
    if (!mem.device_pointer) {
      generic_alloc(mem);
      generic_copy_to(mem);
    }
    else if (mem.is_resident(this)) {
      generic_copy_to(mem);
    }
  }
}

void HIPDevice::mem_move_to_host(device_memory &mem)
{
  if (mem.type == MEM_GLOBAL) {
    global_free(mem);
    global_alloc(mem);
  }
  else if (mem.type == MEM_TEXTURE) {
    tex_free((device_texture &)mem);
    tex_alloc((device_texture &)mem);
  }
  else {
    assert(!"mem_move_to_host only supported for texture and global memory");
  }
}

void HIPDevice::mem_copy_from(
    device_memory &mem, const size_t y, size_t w, const size_t h, size_t elem)
{
  if (mem.type == MEM_TEXTURE || mem.type == MEM_GLOBAL) {
    assert(!"mem_copy_from not supported for textures.");
  }
  else if (mem.host_pointer) {
    const size_t size = elem * w * h;
    const size_t offset = elem * y * w;
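    /* Example: w = 1024, h = 2, elem = 4 and y = 8 give an 8192-byte copy
     * starting at byte offset 32768 in both buffers. */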

    if (mem.device_pointer) {
      const HIPContextScope scope(this);
      hip_assert(hipMemcpyDtoH(
          (char *)mem.host_pointer + offset, (hipDeviceptr_t)mem.device_pointer + offset, size));
    }
    else {
      memset((char *)mem.host_pointer + offset, 0, size);
    }
  }
}

void HIPDevice::mem_zero(device_memory &mem)
{
  if (!mem.device_pointer) {
    mem_alloc(mem);
  }
  if (!mem.device_pointer) {
    return;
  }

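  /* If the host pointer aliases a shared allocation, the host-side memset in
   * the else-branch below also clears the device view; otherwise the device
   * memory is cleared directly. */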
  if (!(mem.is_shared(this) && mem.host_pointer == mem.shared_pointer)) {
    const HIPContextScope scope(this);
    hip_assert(hipMemsetD8((hipDeviceptr_t)mem.device_pointer, 0, mem.memory_size()));
  }
  else if (mem.host_pointer) {
    memset(mem.host_pointer, 0, mem.memory_size());
  }
}

void HIPDevice::mem_free(device_memory &mem)
{
  if (mem.type == MEM_GLOBAL) {
    global_free(mem);
  }
  else if (mem.type == MEM_TEXTURE) {
    tex_free((device_texture &)mem);
  }
  else {
    generic_free(mem);
  }
}

device_ptr HIPDevice::mem_alloc_sub_ptr(device_memory &mem, const size_t offset, size_t /*size*/)
{
  return (device_ptr)(((char *)mem.device_pointer) + mem.memory_elements_size(offset));
}
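/* Presumably memory_elements_size() converts the element offset to bytes, so
 * e.g. a sub-pointer at element offset 10 into a 16-byte-per-element buffer
 * would advance the device pointer by 160 bytes. */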

void HIPDevice::const_copy_to(const char *name, void *host, const size_t size)
{
  HIPContextScope scope(this);
  hipDeviceptr_t mem;
  size_t bytes;

  hip_assert(hipModuleGetGlobal(&mem, &bytes, hipModule, "kernel_params"));
  assert(bytes == sizeof(KernelParamsHIP));

  /* Update data storage pointers in launch parameters. */
#  define KERNEL_DATA_ARRAY(data_type, data_name) \
    if (strcmp(name, #data_name) == 0) { \
      hip_assert(hipMemcpyHtoD(mem + offsetof(KernelParamsHIP, data_name), host, size)); \
      return; \
    }
  KERNEL_DATA_ARRAY(KernelData, data)
  KERNEL_DATA_ARRAY(IntegratorStateGPU, integrator_state)
#  include "kernel/data_arrays.h"
#  undef KERNEL_DATA_ARRAY
}
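/* The data_arrays.h include above expands KERNEL_DATA_ARRAY once per array, so
 * the function body reduces to a chain of strcmp() tests, roughly:
 *   if (strcmp(name, "data") == 0) { ...copy into kernel_params.data...; return; }
 *   if (strcmp(name, "integrator_state") == 0) { ...; return; }
 *   ...one test per entry in kernel/data_arrays.h. */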

void HIPDevice::global_alloc(device_memory &mem)
{
  if (mem.is_resident(this)) {
    generic_alloc(mem);
    generic_copy_to(mem);
  }

  const_copy_to(mem.name, &mem.device_pointer, sizeof(mem.device_pointer));
}

void HIPDevice::global_copy_to(device_memory &mem)
{
  if (!mem.device_pointer) {
    generic_alloc(mem);
    generic_copy_to(mem);
  }
  else if (mem.is_resident(this)) {
    generic_copy_to(mem);
  }

  const_copy_to(mem.name, &mem.device_pointer, sizeof(mem.device_pointer));
}

void HIPDevice::global_free(device_memory &mem)
{
  if (mem.is_resident(this) && mem.device_pointer) {
    generic_free(mem);
  }
}

static size_t tex_src_pitch(const device_texture &mem)
{
  return mem.data_width * datatype_size(mem.data_type) * mem.data_elements;
}

static hip_Memcpy2D tex_2d_copy_param(const device_texture &mem, const int pitch_alignment)
{
  /* 2D texture using pitch aligned linear memory. */
  const size_t src_pitch = tex_src_pitch(mem);
  const size_t dst_pitch = align_up(src_pitch, pitch_alignment);

  hip_Memcpy2D param;
  memset(&param, 0, sizeof(param));
  param.dstMemoryType = hipMemoryTypeDevice;
  param.dstDevice = mem.device_pointer;
  param.dstPitch = dst_pitch;
  param.srcMemoryType = hipMemoryTypeHost;
  param.srcHost = mem.host_pointer;
  param.srcPitch = src_pitch;
  param.WidthInBytes = param.srcPitch;
  param.Height = mem.data_height;

  return param;
}
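/* Example: a 1000-pixel-wide half4 texture has a source pitch of
 * 1000 * 2 * 4 = 8000 bytes; align_up() rounds the destination pitch up to the
 * next multiple of the device's texture pitch alignment queried at startup. */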

void HIPDevice::tex_alloc(device_texture &mem)
{
  HIPContextScope scope(this);

  hipTextureAddressMode address_mode = hipAddressModeWrap;
  switch (mem.info.extension) {
    case EXTENSION_REPEAT:
      address_mode = hipAddressModeWrap;
      break;
    case EXTENSION_EXTEND:
      address_mode = hipAddressModeClamp;
      break;
    case EXTENSION_CLIP:
      address_mode = hipAddressModeBorder;
      break;
    case EXTENSION_MIRROR:
      address_mode = hipAddressModeMirror;
      break;
    default:
      assert(0);
      break;
  }

  hipTextureFilterMode filter_mode;
  if (mem.info.interpolation == INTERPOLATION_CLOSEST) {
    filter_mode = hipFilterModePoint;
  }
  else {
    filter_mode = hipFilterModeLinear;
  }

  /* Image Texture Storage */
  hipArray_Format format;
  switch (mem.data_type) {
    case TYPE_UCHAR:
      format = HIP_AD_FORMAT_UNSIGNED_INT8;
      break;
    case TYPE_UINT16:
      format = HIP_AD_FORMAT_UNSIGNED_INT16;
      break;
    case TYPE_UINT:
      format = HIP_AD_FORMAT_UNSIGNED_INT32;
      break;
    case TYPE_INT:
      format = HIP_AD_FORMAT_SIGNED_INT32;
      break;
    case TYPE_FLOAT:
      format = HIP_AD_FORMAT_FLOAT;
      break;
    case TYPE_HALF:
      format = HIP_AD_FORMAT_HALF;
      break;
    default:
      assert(0);
      return;
  }

  Mem *cmem = nullptr;

  if (!mem.is_resident(this)) {
    thread_scoped_lock lock(device_mem_map_mutex);
    cmem = &device_mem_map[&mem];
    cmem->texobject = 0;
  }
  else if (mem.data_height > 0) {
    /* 2D texture, using pitch aligned linear memory. */
    const size_t dst_pitch = align_up(tex_src_pitch(mem), pitch_alignment);
    const size_t dst_size = dst_pitch * mem.data_height;

    cmem = generic_alloc(mem, dst_size - mem.memory_size());
    if (!cmem) {
      return;
    }

    const hip_Memcpy2D param = tex_2d_copy_param(mem, pitch_alignment);
    hip_assert(hipDrvMemcpy2DUnaligned(&param));
  }
  else {
    /* 1D texture, using linear memory. */
    cmem = generic_alloc(mem);
    if (!cmem) {
      return;
    }

    hip_assert(hipMemcpyHtoD(mem.device_pointer, mem.host_pointer, mem.memory_size()));
  }

  /* Set mapping and tag that we need to (re-)upload to device. */
  TextureInfo tex_info = mem.info;

  if (!is_nanovdb_type(mem.info.data_type)) {
    /* Bindless textures. */
    hipResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));

    if (mem.data_height > 0) {
      const size_t dst_pitch = align_up(tex_src_pitch(mem), pitch_alignment);

      resDesc.resType = hipResourceTypePitch2D;
      resDesc.res.pitch2D.devPtr = mem.device_pointer;
      resDesc.res.pitch2D.format = format;
      resDesc.res.pitch2D.numChannels = mem.data_elements;
      resDesc.res.pitch2D.height = mem.data_height;
      resDesc.res.pitch2D.width = mem.data_width;
      resDesc.res.pitch2D.pitchInBytes = dst_pitch;
    }
    else {
      resDesc.resType = hipResourceTypeLinear;
      resDesc.res.linear.devPtr = mem.device_pointer;
      resDesc.res.linear.format = format;
      resDesc.res.linear.numChannels = mem.data_elements;
      resDesc.res.linear.sizeInBytes = mem.device_size;
    }

    hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = address_mode;
    texDesc.addressMode[1] = address_mode;
    texDesc.addressMode[2] = address_mode;
    texDesc.filterMode = filter_mode;
    texDesc.flags = HIP_TRSF_NORMALIZED_COORDINATES;

    thread_scoped_lock lock(device_mem_map_mutex);
    cmem = &device_mem_map[&mem];

    if (hipTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, nullptr) != hipSuccess) {
      set_error(
          "Failed to create texture. Maximum GPU texture size or available GPU memory was likely "
          "exceeded.");
    }

    tex_info.data = (uint64_t)cmem->texobject;
  }
  else {
    tex_info.data = (uint64_t)mem.device_pointer;
  }

  {
    /* Update texture info. */
    thread_scoped_lock lock(texture_info_mutex);
    const uint slot = mem.slot;
    if (slot >= texture_info.size()) {
      /* Allocate some slots in advance, to reduce amount of re-allocations. */
      texture_info.resize(slot + 128);
    }
    texture_info[slot] = tex_info;
    need_texture_info = true;
  }
}

void HIPDevice::tex_copy_to(device_texture &mem)
{
  if (!mem.device_pointer) {
    /* Not yet allocated on device. */
    tex_alloc(mem);
  }
  else if (!mem.is_resident(this)) {
    /* Peering with another device, may still need to create texture info and object. */
    bool texture_allocated = false;
    {
      thread_scoped_lock lock(texture_info_mutex);
      texture_allocated = mem.slot < texture_info.size() && texture_info[mem.slot].data != 0;
    }
    if (!texture_allocated) {
      tex_alloc(mem);
    }
  }
  else {
    /* Resident and fully allocated, only copy. */
    if (mem.data_height > 0) {
      HIPContextScope scope(this);
      const hip_Memcpy2D param = tex_2d_copy_param(mem, pitch_alignment);
      hip_assert(hipDrvMemcpy2DUnaligned(&param));
    }
    else {
      generic_copy_to(mem);
    }
  }
}

void HIPDevice::tex_free(device_texture &mem)
{
  HIPContextScope scope(this);
  thread_scoped_lock lock(device_mem_map_mutex);

  /* Check if the memory was allocated for this device. */
  auto it = device_mem_map.find(&mem);
  if (it == device_mem_map.end()) {
    return;
  }

  const Mem &cmem = it->second;

  /* Always clear texture info and texture object, regardless of residency. */
  {
    thread_scoped_lock lock(texture_info_mutex);
    texture_info[mem.slot] = TextureInfo();
  }

  if (cmem.texobject) {
    /* Free bindless texture. */
    hipTexObjectDestroy(cmem.texobject);
  }

  if (!mem.is_resident(this)) {
    /* Do not free memory here, since it was allocated on a different device. */
    device_mem_map.erase(device_mem_map.find(&mem));
  }
  else if (cmem.array) {
    /* Free array. */
    hipArrayDestroy(reinterpret_cast<hArray>(cmem.array));
    stats.mem_free(mem.device_size);
    mem.device_pointer = 0;
    mem.device_size = 0;

    device_mem_map.erase(device_mem_map.find(&mem));
  }
  else {
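    /* Presumably generic_free() takes device_mem_map_mutex itself, so release
     * our lock first to avoid self-deadlock. */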
    lock.unlock();
    generic_free(mem);
  }
}

unique_ptr<DeviceQueue> HIPDevice::gpu_queue_create()
{
  return make_unique<HIPDeviceQueue>(this);
}

bool HIPDevice::should_use_graphics_interop(const GraphicsInteropDevice &interop_device,
                                            const bool log)
{
  if (headless) {
    /* Avoid any call which might involve interaction with a graphics backend when we know that
     * we don't have active graphics context. This avoids potential crash in the driver. */
    return false;
  }

  HIPContextScope scope(this);

  switch (interop_device.type) {
    case GraphicsInteropDevice::OPENGL: {
      /* Disable graphics interop for now, because of driver bug in 21.40. See #92972.
       * Also missing Vulkan support which is needed now. */
      return false;

      /* Check whether this device is part of OpenGL context.
       *
       * Using HIP device for graphics interoperability which is not part of the OpenGL context is
       * possible, but from the empiric measurements with CUDA it can be considerably slower than
       * using naive pixels copy. */
      int num_all_devices = 0;
      hip_assert(hipGetDeviceCount(&num_all_devices));

      if (num_all_devices == 0) {
        return false;
      }

      vector<hipDevice_t> gl_devices(num_all_devices);
      uint num_gl_devices = 0;
      hipGLGetDevices(&num_gl_devices, gl_devices.data(), num_all_devices, hipGLDeviceListAll);

      bool found = false;
      for (hipDevice_t gl_device : gl_devices) {
        if (gl_device == hipDevice) {
          found = true;
          break;
        }
      }

      if (log) {
        if (found) {
          LOG_INFO << "Graphics interop: found matching OpenGL device for HIP";
        }
        else {
          LOG_INFO << "Graphics interop: no matching OpenGL device for HIP";
        }
      }

      return found;
    }
    case GraphicsInteropDevice::VULKAN:
    case GraphicsInteropDevice::METAL:
    case GraphicsInteropDevice::NONE:
      /* TODO: Implement Vulkan support. */
      return false;
  }

  return false;
}

int HIPDevice::get_num_multiprocessors()
{
  return get_device_default_attribute(hipDeviceAttributeMultiprocessorCount, 0);
}

int HIPDevice::get_max_num_threads_per_multiprocessor()
{
  return get_device_default_attribute(hipDeviceAttributeMaxThreadsPerMultiProcessor, 0);
}

bool HIPDevice::get_device_attribute(hipDeviceAttribute_t attribute, int *value)
{
  HIPContextScope scope(this);

  return hipDeviceGetAttribute(value, attribute, hipDevice) == hipSuccess;
}

int HIPDevice::get_device_default_attribute(hipDeviceAttribute_t attribute,
                                            const int default_value)
{
  int value = 0;
  if (!get_device_attribute(attribute, &value)) {
    return default_value;
  }
  return value;
}

CCL_NAMESPACE_END

#endif