device/device.cpp

/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
 *
 * SPDX-License-Identifier: Apache-2.0 */

#include <cstdlib>
#include <cstring>

#include "bvh/bvh2.h"

#include "device/device.h"
#include "device/queue.h"

#include "device/cpu/device.h"
#include "device/cpu/kernel.h"
#include "device/cuda/device.h"
#include "device/dummy/device.h"
#include "device/hip/device.h"
#include "device/metal/device.h"
#include "device/multi/device.h"
#include "device/oneapi/device.h"
#include "device/optix/device.h"

#ifdef WITH_HIPRT
#  include <hiprtew.h>
#endif

#include "util/log.h"
#include "util/math.h"
#include "util/string.h"
#include "util/system.h"
#include "util/task.h"
#include "util/types.h"
#include "util/vector.h"

CCL_NAMESPACE_BEGIN

bool Device::need_types_update = true;
bool Device::need_devices_update = true;
thread_mutex Device::device_mutex;
vector<DeviceInfo> Device::cuda_devices;
vector<DeviceInfo> Device::optix_devices;
vector<DeviceInfo> Device::cpu_devices;
vector<DeviceInfo> Device::hip_devices;
vector<DeviceInfo> Device::metal_devices;
vector<DeviceInfo> Device::oneapi_devices;
uint Device::devices_initialized_mask = 0;

/* Device */

Device::~Device() noexcept(false) = default;

void Device::set_error(const string &error)
{
  if (!have_error()) {
    error_msg = error;
  }
  LOG_ERROR << error;
  fflush(stderr);
}

void Device::build_bvh(BVH *bvh, Progress &progress, bool refit)
{
  assert(bvh->params.bvh_layout == BVH_LAYOUT_BVH2);

  BVH2 *const bvh2 = static_cast<BVH2 *>(bvh);
  if (refit) {
    bvh2->refit(progress);
  }
  else {
    bvh2->build(progress, &stats);
  }
}

unique_ptr<Device> Device::create(const DeviceInfo &info,
                                  Stats &stats,
                                  Profiler &profiler,
                                  bool headless)
{
  if (!info.multi_devices.empty()) {
    /* Always create a multi device when info contains multiple devices.
     * This is done so that the type can still be e.g. DEVICE_CPU to indicate
     * that it is a homogeneous collection of devices, which simplifies checks. */
    return device_multi_create(info, stats, profiler, headless);
  }

  unique_ptr<Device> device;

  switch (info.type) {
    case DEVICE_CPU:
      device = device_cpu_create(info, stats, profiler, headless);
      break;
#ifdef WITH_CUDA
    case DEVICE_CUDA:
      if (device_cuda_init()) {
        device = device_cuda_create(info, stats, profiler, headless);
      }
      break;
#endif
#ifdef WITH_OPTIX
    case DEVICE_OPTIX:
      if (device_optix_init()) {
        device = device_optix_create(info, stats, profiler, headless);
      }
      break;
#endif

#ifdef WITH_HIP
    case DEVICE_HIP:
      if (device_hip_init()) {
        device = device_hip_create(info, stats, profiler, headless);
      }
      break;
#endif

#ifdef WITH_METAL
    case DEVICE_METAL:
      if (device_metal_init()) {
        device = device_metal_create(info, stats, profiler, headless);
      }
      break;
#endif

#ifdef WITH_ONEAPI
    case DEVICE_ONEAPI:
      device = device_oneapi_create(info, stats, profiler, headless);
      break;
#endif

    default:
      break;
  }

  if (device == nullptr) {
    device = device_dummy_create(info, stats, profiler, headless);
  }

  return device;
}
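
/* Usage sketch (illustrative, not part of the original file): create() never
 * returns null; when the requested backend is unavailable it falls back to
 * the dummy device, whose error message explains why.
 *
 *   unique_ptr<Device> device = Device::create(info, stats, profiler, true);
 *   if (device->have_error()) {
 *     LOG_ERROR << device->error_msg;
 *   }
 */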

DeviceType Device::type_from_string(const char *name)
{
  if (strcmp(name, "CPU") == 0) {
    return DEVICE_CPU;
  }
  if (strcmp(name, "CUDA") == 0) {
    return DEVICE_CUDA;
  }
  if (strcmp(name, "OPTIX") == 0) {
    return DEVICE_OPTIX;
  }
  if (strcmp(name, "MULTI") == 0) {
    return DEVICE_MULTI;
  }
  if (strcmp(name, "HIP") == 0) {
    return DEVICE_HIP;
  }
  if (strcmp(name, "METAL") == 0) {
    return DEVICE_METAL;
  }
  if (strcmp(name, "ONEAPI") == 0) {
    return DEVICE_ONEAPI;
  }
  if (strcmp(name, "HIPRT") == 0) {
    return DEVICE_HIPRT;
  }

  return DEVICE_NONE;
}

string Device::string_from_type(DeviceType type)
{
  if (type == DEVICE_CPU) {
    return "CPU";
  }
  if (type == DEVICE_CUDA) {
    return "CUDA";
  }
  if (type == DEVICE_OPTIX) {
    return "OPTIX";
  }
  if (type == DEVICE_MULTI) {
    return "MULTI";
  }
  if (type == DEVICE_HIP) {
    return "HIP";
  }
  if (type == DEVICE_METAL) {
    return "METAL";
  }
  if (type == DEVICE_ONEAPI) {
    return "ONEAPI";
  }
  if (type == DEVICE_HIPRT) {
    return "HIPRT";
  }

  return "";
}
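
/* Illustrative round trip between the two helpers above:
 *
 *   Device::type_from_string("CUDA") == DEVICE_CUDA
 *   Device::string_from_type(DEVICE_CUDA) == "CUDA"
 *   Device::type_from_string("unknown") == DEVICE_NONE
 */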

vector<DeviceType> Device::available_types()
{
  vector<DeviceType> types;
  types.push_back(DEVICE_CPU);
#ifdef WITH_CUDA
  types.push_back(DEVICE_CUDA);
#endif
#ifdef WITH_OPTIX
  types.push_back(DEVICE_OPTIX);
#endif
#ifdef WITH_HIP
  types.push_back(DEVICE_HIP);
#endif
#ifdef WITH_METAL
  types.push_back(DEVICE_METAL);
#endif
#ifdef WITH_ONEAPI
  types.push_back(DEVICE_ONEAPI);
#endif
#ifdef WITH_HIPRT
  if (hiprtewInit()) {
    types.push_back(DEVICE_HIPRT);
  }
#endif
  return types;
}

vector<DeviceInfo> Device::available_devices(const uint mask)
{
  /* Lazy initialize devices. On some platforms OpenCL or CUDA drivers can
   * be broken and cause crashes when only trying to get device info, so
   * we don't want to do any initialization until the user chooses to. */
  const thread_scoped_lock lock(device_mutex);
  vector<DeviceInfo> devices;

#if defined(WITH_CUDA) || defined(WITH_OPTIX)
  if (mask & (DEVICE_MASK_CUDA | DEVICE_MASK_OPTIX)) {
    if (!(devices_initialized_mask & DEVICE_MASK_CUDA)) {
      if (device_cuda_init()) {
        device_cuda_info(cuda_devices);
      }
      devices_initialized_mask |= DEVICE_MASK_CUDA;
    }
    if (mask & DEVICE_MASK_CUDA) {
      for (DeviceInfo &info : cuda_devices) {
        devices.push_back(info);
      }
    }
  }
#endif

#ifdef WITH_OPTIX
  if (mask & DEVICE_MASK_OPTIX) {
    if (!(devices_initialized_mask & DEVICE_MASK_OPTIX)) {
      if (device_optix_init()) {
        device_optix_info(cuda_devices, optix_devices);
      }
      devices_initialized_mask |= DEVICE_MASK_OPTIX;
    }
    for (DeviceInfo &info : optix_devices) {
      devices.push_back(info);
    }
  }
#endif

#ifdef WITH_HIP
  if (mask & DEVICE_MASK_HIP) {
    if (!(devices_initialized_mask & DEVICE_MASK_HIP)) {
      if (device_hip_init()) {
        device_hip_info(hip_devices);
      }
      devices_initialized_mask |= DEVICE_MASK_HIP;
    }
    for (DeviceInfo &info : hip_devices) {
      devices.push_back(info);
    }
  }
#endif

#ifdef WITH_ONEAPI
  if (mask & DEVICE_MASK_ONEAPI) {
    if (!(devices_initialized_mask & DEVICE_MASK_ONEAPI)) {
      if (device_oneapi_init()) {
        device_oneapi_info(oneapi_devices);
      }
      devices_initialized_mask |= DEVICE_MASK_ONEAPI;
    }
    for (DeviceInfo &info : oneapi_devices) {
      devices.push_back(info);
    }
  }
#endif

  if (mask & DEVICE_MASK_CPU) {
    if (!(devices_initialized_mask & DEVICE_MASK_CPU)) {
      device_cpu_info(cpu_devices);
      devices_initialized_mask |= DEVICE_MASK_CPU;
    }
    for (const DeviceInfo &info : cpu_devices) {
      devices.push_back(info);
    }
  }

#ifdef WITH_METAL
  if (mask & DEVICE_MASK_METAL) {
    if (!(devices_initialized_mask & DEVICE_MASK_METAL)) {
      if (device_metal_init()) {
        device_metal_info(metal_devices);
      }
      devices_initialized_mask |= DEVICE_MASK_METAL;
    }
    for (const DeviceInfo &info : metal_devices) {
      devices.push_back(info);
    }
  }
#endif

  return devices;
}
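
/* Usage sketch (illustrative): enumerate only CPU and CUDA devices.
 *
 *   const uint mask = DEVICE_MASK_CPU | DEVICE_MASK_CUDA;
 *   for (const DeviceInfo &info : Device::available_devices(mask)) {
 *     LOG_INFO << info.description << " (" << info.id << ")";
 *   }
 */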

DeviceInfo Device::dummy_device(const string &error_msg)
{
  DeviceInfo info;
  info.type = DEVICE_DUMMY;
  info.error_msg = error_msg;
  return info;
}

string Device::device_capabilities(const uint mask)
{
  const thread_scoped_lock lock(device_mutex);
  string capabilities;

  if (mask & DEVICE_MASK_CPU) {
    capabilities += "\nCPU device capabilities: ";
    capabilities += device_cpu_capabilities() + "\n";
  }

#ifdef WITH_CUDA
  if (mask & DEVICE_MASK_CUDA) {
    if (device_cuda_init()) {
      const string device_capabilities = device_cuda_capabilities();
      if (!device_capabilities.empty()) {
        capabilities += "\nCUDA device capabilities:\n";
        capabilities += device_capabilities;
      }
    }
  }
#endif

#ifdef WITH_HIP
  if (mask & DEVICE_MASK_HIP) {
    if (device_hip_init()) {
      const string device_capabilities = device_hip_capabilities();
      if (!device_capabilities.empty()) {
        capabilities += "\nHIP device capabilities:\n";
        capabilities += device_capabilities;
      }
    }
  }
#endif

#ifdef WITH_ONEAPI
  if (mask & DEVICE_MASK_ONEAPI) {
    if (device_oneapi_init()) {
      const string device_capabilities = device_oneapi_capabilities();
      if (!device_capabilities.empty()) {
        capabilities += "\noneAPI device capabilities:\n";
        capabilities += device_capabilities;
      }
    }
  }
#endif

#ifdef WITH_METAL
  if (mask & DEVICE_MASK_METAL) {
    if (device_metal_init()) {
      const string device_capabilities = device_metal_capabilities();
      if (!device_capabilities.empty()) {
        capabilities += "\nMetal device capabilities:\n";
        capabilities += device_capabilities;
      }
    }
  }
#endif

  return capabilities;
}

DeviceInfo Device::get_multi_device(const vector<DeviceInfo> &subdevices,
                                    const int threads,
                                    bool background)
{
  assert(!subdevices.empty());

  if (subdevices.size() == 1) {
    /* No multi device needed. */
    return subdevices.front();
  }

  DeviceInfo info;
  info.type = DEVICE_NONE;
  info.id = "MULTI";
  info.description = "Multi Device";
  info.num = 0;

  info.has_nanovdb = true;
  info.has_mnee = true;
  info.has_osl = true;
  info.has_guiding = true;
  info.has_profiling = true;
  info.has_peer_memory = false;
  info.use_hardware_raytracing = false;
  info.denoisers = DENOISER_ALL;

  for (const DeviceInfo &device : subdevices) {
    /* Ensure CPU device does not slow down GPU. */
    if (device.type == DEVICE_CPU && subdevices.size() > 1) {
      if (background) {
        const int orig_cpu_threads = (threads) ? threads : TaskScheduler::max_concurrency();
        const int cpu_threads = max(orig_cpu_threads - (subdevices.size() - 1), size_t(0));

        LOG_INFO << "CPU render threads reduced from " << orig_cpu_threads << " to "
                 << cpu_threads << ", to dedicate to GPU.";

        if (cpu_threads >= 1) {
          DeviceInfo cpu_device = device;
          cpu_device.cpu_threads = cpu_threads;
          info.multi_devices.push_back(cpu_device);
        }
        else {
          continue;
        }
      }
      else {
        LOG_INFO << "CPU render threads disabled for interactive render.";
        continue;
      }
    }
    else {
      info.multi_devices.push_back(device);
    }

    /* Create unique ID for this combination of devices. */
    info.id += device.id;

    /* Set device type to MULTI if subdevices are not of a common type. */
    if (info.type == DEVICE_NONE) {
      info.type = device.type;
    }
    else if (device.type != info.type) {
      info.type = DEVICE_MULTI;
    }

    /* Accumulate device info. */
    info.has_nanovdb &= device.has_nanovdb;
    info.has_mnee &= device.has_mnee;
    info.has_osl &= device.has_osl;
    info.has_guiding &= device.has_guiding;
    info.has_profiling &= device.has_profiling;
    info.has_peer_memory |= device.has_peer_memory;
    info.use_hardware_raytracing |= device.use_hardware_raytracing;
    info.denoisers &= device.denoisers;
  }

  return info;
}
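
/* Usage sketch (illustrative; gpu_info and cpu_info are hypothetical):
 * combining one GPU and the CPU for a background render. With threads == 0
 * and 16 hardware threads, the CPU subdevice keeps 16 - (2 - 1) = 15 render
 * threads.
 *
 *   DeviceInfo multi = Device::get_multi_device({gpu_info, cpu_info}, 0, true);
 *   // multi.type == DEVICE_MULTI, since the subdevice types differ.
 */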

void Device::tag_update()
{
  free_memory();
}

void Device::free_memory()
{
  devices_initialized_mask = 0;
  cuda_devices.free_memory();
  optix_devices.free_memory();
  hip_devices.free_memory();
  oneapi_devices.free_memory();
  cpu_devices.free_memory();
  metal_devices.free_memory();
}

unique_ptr<DeviceQueue> Device::gpu_queue_create()
{
  LOG_FATAL << "Device does not support queues.";
  return nullptr;
}

const CPUKernels &Device::get_cpu_kernels()
{
  /* Initialize CPU kernels once and reuse. */
  static const CPUKernels kernels;
  return kernels;
}

void Device::get_cpu_kernel_thread_globals(
    vector<ThreadKernelGlobalsCPU> & /*kernel_thread_globals*/)
{
  LOG_FATAL << "Device does not support CPU kernels.";
}

OSLGlobals *Device::get_cpu_osl_memory()
{
  return nullptr;
}

void *Device::get_guiding_device() const
{
  LOG_ERROR << "Request guiding field from a device which does not support it.";
  return nullptr;
}

void *Device::host_alloc(const MemoryType /*type*/, const size_t size)
{
  return util_aligned_malloc(size, MIN_ALIGNMENT_DEVICE_MEMORY);
}

void Device::host_free(const MemoryType /*type*/, void *host_pointer, const size_t size)
{
  util_aligned_free(host_pointer, size);
}

GPUDevice::~GPUDevice() noexcept(false) = default;

bool GPUDevice::load_texture_info()
{
  /* Note texture_info is never host mapped, and load_texture_info() should only
   * be called right before kernel enqueue when all memory operations have completed. */
  if (need_texture_info) {
    texture_info.copy_to_device();
    need_texture_info = false;
    return true;
  }
  return false;
}

void GPUDevice::init_host_memory(const size_t preferred_texture_headroom,
                                 const size_t preferred_working_headroom)
{
  /* Limit amount of host mapped memory, because allocating too much can
   * cause system instability. Leave at least half or 4 GB of system
   * memory free, whichever is smaller. */
  const size_t default_limit = 4 * 1024 * 1024 * 1024LL;
  const size_t system_ram = system_physical_ram();

  if (system_ram > 0) {
    if (system_ram / 2 > default_limit) {
      map_host_limit = system_ram - default_limit;
    }
    else {
      map_host_limit = system_ram / 2;
    }
  }
  else {
    LOG_WARNING << "Mapped host memory disabled, failed to get system RAM";
    map_host_limit = 0;
  }

  /* Amount of device memory to keep free after texture memory
   * and working memory allocations respectively. The texture headroom is
   * set higher than the working one, so that allocating textures still
   * leaves space for working memory. */
  device_working_headroom = preferred_working_headroom > 0 ? preferred_working_headroom :
                                                             32 * 1024 * 1024LL;  // 32MB
  device_texture_headroom = preferred_texture_headroom > 0 ? preferred_texture_headroom :
                                                             128 * 1024 * 1024LL;  // 128MB

  LOG_INFO << "Mapped host memory limit set to " << string_human_readable_number(map_host_limit)
           << " bytes. (" << string_human_readable_size(map_host_limit) << ")";
}
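
/* Worked example (illustrative): with 32 GB of system RAM, half (16 GB) is
 * larger than the 4 GB default limit, so map_host_limit = 32 - 4 = 28 GB.
 * With 6 GB of RAM, half (3 GB) is smaller, so map_host_limit = 3 GB. */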

void GPUDevice::move_textures_to_host(size_t size, const size_t headroom, const bool for_texture)
{
  static thread_mutex move_mutex;
  const thread_scoped_lock move_lock(move_mutex);

  /* Check if there is enough space. Done within the mutex lock so that multiple
   * calling threads take into account memory freed by another thread. */
  size_t total = 0;
  size_t free = 0;
  get_device_memory_info(total, free);
  if (size + headroom < free) {
    return;
  }

  while (size > 0) {
    /* Find suitable memory allocation to move. */
    device_memory *max_mem = nullptr;
    size_t max_size = 0;
    bool max_is_image = false;

    thread_scoped_lock lock(device_mem_map_mutex);
    for (MemMap::value_type &pair : device_mem_map) {
      device_memory &mem = *pair.first;
      Mem *cmem = &pair.second;

      /* Can only move textures allocated on this device (and not those from peer devices).
       * And need to ignore memory that is already on the host. */
      if (!mem.is_resident(this) || mem.is_shared(this)) {
        continue;
      }

      const bool is_texture = (mem.type == MEM_TEXTURE || mem.type == MEM_GLOBAL) &&
                              (&mem != &texture_info);
      const bool is_image = is_texture && (mem.data_height > 1);

      /* Can't move this type of memory. */
      if (!is_texture || cmem->array) {
        continue;
      }

      /* For other textures, only move image textures. */
      if (for_texture && !is_image) {
        continue;
      }

      /* Try to move largest allocation, prefer moving images. */
      if (is_image > max_is_image || (is_image == max_is_image && mem.device_size > max_size)) {
        max_is_image = is_image;
        max_size = mem.device_size;
        max_mem = &mem;
      }
    }
    lock.unlock();

    /* Move to host memory. This part is mutex protected since
     * multiple backend devices could be moving the memory. The
     * first one will do it, and the rest will adopt the pointer. */
    if (max_mem) {
      LOG_DEBUG << "Move memory from device to host: " << max_mem->name;

      /* Potentially need to call back into multi device, so pointer mapping
       * and peer devices are updated. This is also necessary since the device
       * pointer may just be a key here, so cannot be accessed and freed directly.
       * Unfortunately it does mean that memory is reallocated on all other
       * devices as well, which is potentially dangerous when still in use (since
       * a thread rendering on another device would only be caught in this mutex
       * if it happens to do an allocation at the same time as well). */
      max_mem->move_to_host = true;
      max_mem->device_move_to_host();
      max_mem->move_to_host = false;
      size = (max_size >= size) ? 0 : size - max_size;

      /* Tag texture info update for new pointers. */
      need_texture_info = true;
    }
    else {
      break;
    }
  }
}
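
/* Worked example (illustrative): if 512 MB must be freed and the largest
 * movable image texture is 300 MB, that texture is moved first; the loop
 * then continues with the remaining 212 MB until enough allocations have
 * been moved, or none are left to move. */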

GPUDevice::Mem *GPUDevice::generic_alloc(device_memory &mem, const size_t pitch_padding)
{
  void *device_pointer = nullptr;
  const size_t size = mem.memory_size() + pitch_padding;

  bool mem_alloc_result = false;
  const char *status = "";

  /* First try allocating in device memory, respecting headroom. We make
   * an exception for texture info. It is small and frequently accessed,
   * so treat it as working memory.
   *
   * If there is not enough room for working memory, we will try to move
   * textures to host memory, assuming the performance impact would have
   * been worse for working memory. */
  const bool is_texture = (mem.type == MEM_TEXTURE || mem.type == MEM_GLOBAL) &&
                          (&mem != &texture_info);
  const bool is_image = is_texture && (mem.data_height > 1);

  const size_t headroom = (is_texture) ? device_texture_headroom : device_working_headroom;

  /* Move textures to host memory if needed. */
  if (!mem.move_to_host && !is_image && can_map_host) {
    move_textures_to_host(size, headroom, is_texture);
  }

  size_t total = 0;
  size_t free = 0;
  get_device_memory_info(total, free);

  /* Allocate in device memory. */
  if ((!mem.move_to_host && (size + headroom) < free) || (mem.type == MEM_DEVICE_ONLY)) {
    mem_alloc_result = alloc_device(device_pointer, size);
    if (mem_alloc_result) {
      device_mem_in_use += size;
      status = " in device memory";
    }
  }

  /* Fall back to mapped host memory if needed and possible. */

  void *shared_pointer = nullptr;

  if (!mem_alloc_result && can_map_host && mem.type != MEM_DEVICE_ONLY) {
    if (mem.shared_pointer) {
      /* Another device already allocated host memory. */
      mem_alloc_result = true;
      shared_pointer = mem.shared_pointer;
    }
    else if (map_host_used + size < map_host_limit) {
      /* Allocate host memory ourselves. */
      mem_alloc_result = shared_alloc(shared_pointer, size);

      assert((mem_alloc_result && shared_pointer != nullptr) ||
             (!mem_alloc_result && shared_pointer == nullptr));
    }

    if (mem_alloc_result) {
      device_pointer = shared_to_device_pointer(shared_pointer);
      map_host_used += size;
      status = " in host memory";
    }
  }

  if (!mem_alloc_result) {
    if (mem.type == MEM_DEVICE_ONLY) {
      status = " failed, out of device memory";
      set_error("System is out of GPU memory");
    }
    else {
      status = " failed, out of device and host memory";
      set_error("System is out of GPU and shared host memory");
    }
  }

  if (mem.name) {
    LOG_DEBUG << "Buffer allocate: " << mem.name << ", "
              << string_human_readable_number(mem.memory_size()) << " bytes. ("
              << string_human_readable_size(mem.memory_size()) << ")" << status;
  }

  mem.device_pointer = (device_ptr)device_pointer;
  mem.device_size = size;
  stats.mem_alloc(size);

  if (!mem.device_pointer) {
    return nullptr;
  }

  /* Insert into map of allocations. */
  const thread_scoped_lock lock(device_mem_map_mutex);
  Mem *cmem = &device_mem_map[&mem];
  if (shared_pointer != nullptr) {
    /* Replace host pointer with our host allocation. Only works if
     * memory layout is the same and has no pitch padding. Also
     * does not work if we move textures to host during a render,
     * since other devices might be using the memory. */
    if (!mem.move_to_host && pitch_padding == 0 && mem.host_pointer &&
        mem.host_pointer != shared_pointer)
    {
      memcpy(shared_pointer, mem.host_pointer, size);
      host_free(mem.type, mem.host_pointer, mem.memory_size());
      mem.host_pointer = shared_pointer;
    }
    mem.shared_pointer = shared_pointer;
    mem.shared_counter++;
  }

  return cmem;
}
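
/* Worked example (illustrative): allocating a 1 GiB texture with the default
 * 128 MiB texture headroom lands in device memory only when more than
 * 1.125 GiB is free; otherwise textures are moved to host first, and if
 * device memory is still short, the allocation falls back to mapped host
 * memory while map_host_used + size stays below map_host_limit. */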

void GPUDevice::generic_free(device_memory &mem)
{
  if (!(mem.device_pointer && mem.is_resident(this))) {
    return;
  }

  /* Host pointer should already have been freed at this point. If not we might
   * end up freeing shared memory and can't recover original host memory. */
  assert(mem.host_pointer == nullptr || mem.move_to_host);

  const thread_scoped_lock lock(device_mem_map_mutex);
  DCHECK(device_mem_map.find(&mem) != device_mem_map.end());

  /* For host mapped memory, reference counting is used to safely free it. */
  if (mem.is_shared(this)) {
    assert(mem.shared_counter > 0);
    if (--mem.shared_counter == 0) {
      if (mem.host_pointer == mem.shared_pointer) {
        /* Safely move the device-side data back to the host before it is freed.
         * We should actually never reach this code as it is inefficient, but
         * better than to crash if there is a bug. */
        assert(!"GPU device should not copy memory back to host");
        const size_t size = mem.memory_size();
        mem.host_pointer = mem.host_alloc(size);
        memcpy(mem.host_pointer, mem.shared_pointer, size);
      }
      shared_free(mem.shared_pointer);
      mem.shared_pointer = nullptr;
    }
    map_host_used -= mem.device_size;
  }
  else {
    /* Free device memory. */
    free_device((void *)mem.device_pointer);
    device_mem_in_use -= mem.device_size;
  }

  stats.mem_free(mem.device_size);
  mem.device_pointer = 0;
  mem.device_size = 0;

  device_mem_map.erase(device_mem_map.find(&mem));
}

void GPUDevice::generic_copy_to(device_memory &mem)
{
  if (!mem.host_pointer || !mem.device_pointer) {
    return;
  }

  /* If not host mapped, the current device only uses device memory allocated by the
   * backend device allocation, regardless of mem.host_pointer and mem.shared_pointer,
   * and should copy data from mem.host_pointer. */
  if (!(mem.is_shared(this) && mem.host_pointer == mem.shared_pointer)) {
    copy_host_to_device((void *)mem.device_pointer, mem.host_pointer, mem.memory_size());
  }
}

bool GPUDevice::is_shared(const void *shared_pointer,
                          const device_ptr device_pointer,
                          Device * /*sub_device*/)
{
  return (shared_pointer && device_pointer &&
          (device_ptr)shared_to_device_pointer(shared_pointer) == device_pointer);
}

/* DeviceInfo */