22# include <TargetConditionals.h>
23# include <crt_externs.h>
// NOTE(review): this whole chunk is garbled extraction residue -- the original
// file's line numbers (e.g. "30", "34") are fused onto the code text, and many
// interior lines are missing (embedded numbering jumps 37 -> 44, etc.).
// Code below is kept byte-identical; verify everything against the canonical
// source file before acting on any comment here.

// Registry of live MetalDevice instances keyed by device_id. Populated in the
// constructor and erased in MetalDevice::cancel(); used so background
// compilation blocks can check whether their device still exists.
30std::map<int, MetalDevice *> MetalDevice::active_device_ids;
// Looks up a device by ID in active_device_ids; returns it when found.
// Call sites pass a `lock` argument too, so the (truncated) parameter list
// presumably includes a lock guarding the registry -- TODO confirm.
34MetalDevice *MetalDevice::get_device_by_ID(
const int ID,
37 auto it = active_device_ids.find(
ID);
38 if (it != active_device_ids.end()) {
// A device counts as cancelled once it has been removed from the registry
// (i.e. the lookup returns nullptr).
44bool MetalDevice::is_device_cancelled(
const int ID)
47 return get_device_by_ID(
ID,
lock) ==
nullptr;
// Records an error message for this device. A function-local static mutex
// serializes concurrent error reporting (set_error can be hit from background
// compile threads); on error, a pointer to the Cycles GPU-rendering docs is
// logged. (Fragmentary: lines between "58" and "63" are missing here.)
55void MetalDevice::set_error(
const string &
error)
57 static std::mutex s_error_mutex;
58 std::lock_guard<std::mutex>
lock(s_error_mutex);
63 LOG_ERROR <<
"Refer to the Cycles GPU rendering documentation for possible solutions:\n"
64 "https://docs.blender.org/manual/en/latest/render/cycles/gpu_rendering.html\n";
70 :
Device(info, stats, profiler, headless), texture_info(this,
"texture_info",
MEM_GLOBAL)
77 static int existing_devices_counter = 1;
78 device_id = existing_devices_counter++;
79 active_device_ids[device_id] =
this;
85 auto usable_devices = MetalInfo::get_usable_devices();
86 assert(mtlDevId < usable_devices.size());
87 mtlDevice = usable_devices[mtlDevId];
88 metal_printf(
"Creating new Cycles Metal device: %s", info.
description.c_str());
91 metal_gpu_address_helper_init(mtlDevice);
95 if (@available(macOS 13.3, *)) {
96 [mtlDevice setShouldMaximizeConcurrentCompilation:YES];
99 max_threads_per_threadgroup = 512;
102 if (
const char *metalrt = getenv(
"CYCLES_METALRT")) {
103 use_metalrt = (atoi(metalrt) != 0);
106 if (
const char *
str = getenv(
"CYCLES_METALRT_EXTENDED_LIMITS")) {
107 use_metalrt_extended_limits = (atoi(
str) != 0);
110# if defined(MAC_OS_VERSION_15_0)
113 if (use_metalrt && [mtlDevice supportsFamily:MTLGPUFamilyApple9]) {
115 if (@available(macos 15.6, *)) {
121 if (getenv(
"CYCLES_DEBUG_METAL_CAPTURE_KERNEL")) {
122 capture_enabled =
true;
128 if (
auto str = getenv(
"CYCLES_METAL_PROFILING")) {
129 if (atoi(
str) && [mtlDevice supportsCounterSampling:MTLCounterSamplingPointAtStageBoundary])
131 NSArray<id<MTLCounterSet>> *counterSets = [mtlDevice counterSets];
133 NSError *
error = nil;
134 MTLCounterSampleBufferDescriptor *desc = [[MTLCounterSampleBufferDescriptor alloc]
init];
135 [desc setStorageMode:MTLStorageModeShared];
136 [desc setLabel:
@"CounterSampleBuffer"];
137 [desc setSampleCount:MAX_SAMPLE_BUFFER_LENGTH];
138 [desc setCounterSet:counterSets[0]];
139 mtlCounterSampleBuffer = [mtlDevice newCounterSampleBufferWithDescriptor:desc
141 [mtlCounterSampleBuffer retain];
148 kernel_specialization_level = PSO_GENERIC;
152 kernel_specialization_level = PSO_SPECIALIZED_INTERSECT;
155 kernel_specialization_level = PSO_SPECIALIZED_SHADE;
159 if (
auto *envstr = getenv(
"CYCLES_METAL_SPECIALIZATION_LEVEL")) {
160 kernel_specialization_level = (MetalPipelineType)atoi(envstr);
162 metal_printf(
"kernel_specialization_level = %s",
163 kernel_type_as_string(
164 (MetalPipelineType)
min((
int)kernel_specialization_level, (
int)PSO_NUM - 1)));
166 texture_bindings = [mtlDevice newBufferWithLength:8192
options:MTLResourceStorageModeShared];
167 stats.
mem_alloc(texture_bindings.allocatedSize);
170 options:MTLResourceStorageModeShared];
180 mtlComputeCommandQueue = [mtlDevice newCommandQueue];
183 mtlGeneralCommandQueue = [mtlDevice newCommandQueue];
// Destructor (fragmentary). Releases per-slot textures, flushes the delayed
// free list, and releases the launch-params buffer, texture-bindings buffer,
// both command queues, and the optional counter sample buffer.
187MetalDevice::~MetalDevice()
197 for (
int res = 0; res < texture_info.size(); res++) {
// NOTE(review): relies on messaging nil being a no-op for empty slots.
198 [texture_slot_map[res] release];
199 texture_slot_map[res] = nil;
203 flush_delayed_free_list();
206 [launch_params_buffer release];
208 stats.
mem_free(texture_bindings.allocatedSize);
209 [texture_bindings release];
211 [mtlComputeCommandQueue release];
212 [mtlGeneralCommandQueue release];
213 if (mtlCounterSampleBuffer) {
214 [mtlCounterSampleBuffer release];
// Capability predicates (bodies missing in this fragment):
//  - support_device: whether the requested kernel features are supported
//  - check_peer_access: peer memory access between devices (unnamed param)
//  - use_adaptive_compilation / use_local_atomic_sort: compile-mode queries
//    consumed by preprocess_source() below.
221bool MetalDevice::support_device(
const uint )
226bool MetalDevice::check_peer_access(
Device * )
233bool MetalDevice::use_adaptive_compilation()
238bool MetalDevice::use_local_atomic_sort()
const
// Builds the per-pipeline-type preprocessor define block that is prepended to
// the kernel source, and (for specialized PSOs) patches kernel_data member
// accesses so specialized members become compile-time constants. Returns an
// MD5 of the defines (per the md5.append at the end -- return statement is
// missing in this fragment).
243string MetalDevice::preprocess_source(MetalPipelineType pso_type,
244 const uint kernel_features,
247 string global_defines;
248 if (use_adaptive_compilation()) {
249 global_defines +=
"#define __KERNEL_FEATURES__ " +
to_string(kernel_features) +
"\n";
252 if (use_local_atomic_sort()) {
253 global_defines +=
"#define __KERNEL_LOCAL_ATOMIC_SORT__\n";
// MetalRT-related defines (enclosing conditions are missing here).
257 global_defines +=
"#define __METALRT__\n";
259 global_defines +=
"#define __METALRT_MOTION__\n";
261 if (use_metalrt_extended_limits) {
262 global_defines +=
"#define __METALRT_EXTENDED_LIMITS__\n";
266# ifdef WITH_CYCLES_DEBUG
267 global_defines +=
"#define WITH_CYCLES_DEBUG\n";
270 global_defines +=
"#define __KERNEL_METAL_APPLE__\n";
271 if (@available(macos 14.0, *)) {
273 global_defines +=
"#define __METAL_GLOBAL_BUILTINS__\n";
// NanoVDB only for the generic PSO or when the scene actually uses it.
278 if ((pso_type == PSO_GENERIC || using_nanovdb) &&
DebugFlags().metal.use_nanovdb) {
279 global_defines +=
"#define WITH_NANOVDB\n";
// Bake the macOS major version into the kernel defines.
283 NSProcessInfo *processInfo = [NSProcessInfo processInfo];
284 NSOperatingSystemVersion macos_ver = [processInfo operatingSystemVersion];
285 global_defines +=
"#define __KERNEL_METAL_MACOS__ " +
to_string(macos_ver.majorVersion) +
"\n";
288 global_defines +=
"#define __KERNEL_METAL_TARGET_CPU_ARM64__\n";
// Specialization: rewrite "kernel_data.<parent>." accesses in-place (same
// length, so offsets are preserved) so specialized members resolve to baked
// constants; non-specialized members are renamed to "__unused_" variants.
295 if (pso_type != PSO_GENERIC) {
297 const double starttime =
time_dt();
299# define KERNEL_STRUCT_BEGIN(name, parent) \
300 string_replace_same_length(*source, "kernel_data." #parent ".", "kernel_data_" #parent "_");
302 bool next_member_is_specialized =
true;
304# define KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE next_member_is_specialized = false;
306# define KERNEL_STRUCT_MEMBER(parent, _type, name) \
307 if (!next_member_is_specialized) { \
309 *source, "kernel_data_" #parent "_" #name, "kernel_data." #parent ".__unused_" #name); \
310 next_member_is_specialized = true; \
315# undef KERNEL_STRUCT_MEMBER
316# undef KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE
317# undef KERNEL_STRUCT_BEGIN
321 *source,
"kernel_data.kernel_features",
"kernel_data_kernel_features");
323 metal_printf(
"KernelData patching took %.1f ms", (
time_dt() - starttime) * 1000.0);
331 global_defines +=
"#define __KERNEL_USE_DATA_CONSTANTS__\n";
335 *source = global_defines + *source;
339 md5.
append(global_defines);
// Regenerates the kernel source string for the given pipeline type: resets it
// to the top-level metal kernel include, then runs preprocess_source() and
// caches the resulting defines-MD5 per pipeline type.
343void MetalDevice::make_source(MetalPipelineType pso_type,
const uint kernel_features)
345 string &source = this->source[pso_type];
346 source =
"\n#include \"kernel/device/metal/kernel.metal\"\n";
353 global_defines_md5[pso_type] = preprocess_source(pso_type, kernel_features, &source);
// Accumulates requested kernel features, validates device support, and kicks
// off background compilation of the generic PSO when needed. The block
// captures this->device_id by value (not `this`) so the async compile can
// safely detect a destroyed device via the registry.
356bool MetalDevice::load_kernels(
const uint _kernel_features)
359 kernel_features |= _kernel_features;
362 if (!support_device(kernel_features)) {
373 refresh_source_and_kernels_md5(PSO_GENERIC);
374 if (MetalDeviceKernels::should_load_kernels(
this, PSO_GENERIC)) {
377 int this_device_id = this->device_id;
378 auto compile_kernels_fn = ^() {
379 compile_and_load(this_device_id, PSO_GENERIC);
382 dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
// Recomputes the kernels MD5 for a pipeline type: regenerates source when the
// global defines changed, then hashes the specialized constant values (for
// non-generic PSOs) together with the source text.
389void MetalDevice::refresh_source_and_kernels_md5(MetalPipelineType pso_type)
391 string defines_md5 = preprocess_source(pso_type, kernel_features);
394 if (global_defines_md5[pso_type] != defines_md5) {
395 make_source(pso_type, kernel_features);
398 string constant_values;
399 if (pso_type != PSO_GENERIC) {
400 bool next_member_is_specialized =
true;
402# define KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE next_member_is_specialized = false;
// Serialize each specialized KernelData member as "parent.name=value" so the
// hash changes whenever a baked constant changes.
406# define KERNEL_STRUCT_MEMBER(parent, _type, name) \
407 if (next_member_is_specialized) { \
408 constant_values += string(#parent "." #name "=") + \
409 to_string(_type(launch_params->data.parent.name)) + "\n"; \
412 next_member_is_specialized = true; \
417# undef KERNEL_STRUCT_MEMBER
418# undef KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE
422 md5.
append(constant_values);
423 md5.
append(source[pso_type]);
427 kernels_md5[pso_type] = md5.
get_hex();
// Compiles the Metal library for one pipeline type. Runs on a background
// dispatch queue; identifies the device via its registry ID (not a pointer)
// and re-checks existence after the blocking front-end compile, since the
// device may be torn down meanwhile.
430void MetalDevice::compile_and_load(
const int device_id, MetalPipelineType pso_type)
436 id<MTLDevice> mtlDevice;
444 MetalDevice *instance = get_device_by_ID(device_id,
lock);
446 metal_printf(
"Ignoring %s compilation request - device no longer exists",
447 kernel_type_as_string(pso_type));
451 if (!MetalDeviceKernels::should_load_kernels(instance, pso_type)) {
// FIXME(review): typo in the log string -- "Ignoreing" should be "Ignoring"
// (cannot be changed in a comments-only edit; fix in a code change).
454 metal_printf(
"Ignoreing %s compilation request - kernels already requested",
455 kernel_type_as_string(pso_type));
// Snapshot what we need, then compile without holding the registry lock.
459 mtlDevice = instance->mtlDevice;
460 source = instance->source[pso_type];
466 MTLCompileOptions *
options = [[MTLCompileOptions alloc]
init];
// Pick the newest Metal language version the SDK/OS combination offers.
469 if (@available(macos 12.0, *)) {
470 options.languageVersion = MTLLanguageVersion2_4;
472# if defined(MAC_OS_VERSION_13_0)
473 if (@available(macos 13.0, *)) {
474 options.languageVersion = MTLLanguageVersion3_0;
477# if defined(MAC_OS_VERSION_14_0)
478 if (@available(macos 14.0, *)) {
479 options.languageVersion = MTLLanguageVersion3_1;
482# if defined(MAC_OS_VERSION_15_0)
483 if (@available(macos 15.0, *)) {
484 options.languageVersion = MTLLanguageVersion3_2;
485 if (
const char *loglevel = getenv(
"MTL_LOG_LEVEL")) {
486 if (strcmp(loglevel,
"MTLLogLevelDebug") == 0) {
493 if (getenv(
"CYCLES_METAL_PROFILING") || getenv(
"CYCLES_METAL_DEBUG")) {
500 NSError *
error =
nullptr;
// Front-end (source -> library) compile; blocking.
501 id<MTLLibrary> mtlLibrary = [mtlDevice newLibraryWithSource:@(source.c_str())
505 metal_printf(
"Front-end compilation finished in %.1f seconds (%s)",
507 kernel_type_as_string(pso_type));
// Serialize PSO builds when profiling or warming up benchmarks, so timings
// are not skewed by overlapping compilations.
511 bool blocking_pso_build = (getenv(
"CYCLES_METAL_PROFILING") ||
512 MetalDeviceKernels::is_benchmark_warmup());
513 if (blocking_pso_build) {
514 MetalDeviceKernels::wait_for_all();
// Re-acquire the device by ID -- it may have been destroyed during compile.
522 if (MetalDevice *instance = get_device_by_ID(device_id,
lock)) {
526 << [[
error localizedDescription] UTF8String];
529 instance->mtlLibrary[pso_type] = mtlLibrary;
532 MetalDeviceKernels::load(instance, pso_type);
535 NSString *err = [
error localizedDescription];
536 instance->set_error(
string_printf(
"Failed to compile library:\n%s", [err UTF8String]));
541 if (starttime && blocking_pso_build) {
542 MetalDeviceKernels::wait_for_all();
544 metal_printf(
"Back-end compilation finished in %.1f seconds (%s)",
546 kernel_type_as_string(pso_type));
// is_texture: predicate over a TextureInfo (body missing in this fragment).
551bool MetalDevice::is_texture(
const TextureInfo &tex)
// load_texture_info: intentionally empty for this backend.
556void MetalDevice::load_texture_info() {}
// erase_allocation fragment: removes a device_memory entry from the
// metal_mem_map, clearing its slot in the launch-params pointer table first
// so stale GPU addresses are not left behind.
564 auto it = metal_mem_map.find(&mem);
565 if (it != metal_mem_map.end()) {
566 MetalMem *mmem = it->second.get();
569 if (mmem->pointer_index >= 0) {
571 pointers[mmem->pointer_index] = 0;
573 metal_mem_map.erase(it);
// True when tracked memory usage exceeds the device's recommended maximum
// working-set size minus a safety margin. NOTE(review): the subtraction is
// unsigned; a margin larger than the recommended size would wrap -- presumably
// never the case in practice, but worth confirming.
577bool MetalDevice::max_working_set_exceeded(
const size_t safety_margin)
const
581 size_t available = [mtlDevice recommendedMaxWorkingSetSize] - safety_margin;
582 return (stats.
mem_used > available);
// Allocates an MTLBuffer for a device_memory block (fragmentary). Chooses
// shared vs private storage (condition missing here), registers the result in
// metal_mem_map under the recursive map mutex, records a host-visible pointer
// for shared buffers, and reports OOM via set_error when the working set is
// exceeded.
585MetalDevice::MetalMem *MetalDevice::generic_alloc(
device_memory &mem)
592 id<MTLBuffer> metal_buffer = nil;
593 MTLResourceOptions
options = MTLResourceStorageModeShared;
597 options = MTLResourceStorageModePrivate;
603 set_error(
"System is out of GPU memory");
// Label the buffer with the allocation name for GPU debugging tools.
617 metal_buffer.label = [NSString stringWithFormat:
@"%s", mem.
name];
619 std::lock_guard<std::recursive_mutex>
lock(metal_mem_map_mutex);
621 assert(metal_mem_map.count(&mem) == 0);
625 mmem->mtlBuffer = metal_buffer;
// Only shared (CPU-visible) buffers expose a host pointer via -contents.
628 if (
options != MTLResourceStorageModePrivate) {
629 mmem->hostPtr = [metal_buffer contents];
632 mmem->hostPtr =
nullptr;
639 if (metal_buffer.storageMode == MTLStorageModeShared) {
651 MetalMem *mmem_ptr = mmem.get();
652 metal_mem_map[&mem] = std::move(mmem);
654 if (max_working_set_exceeded()) {
655 set_error(
"System is out of GPU memory");
// generic_free fragment: under the map mutex, either releases the MTLBuffer
// immediately or (for some condition missing here) pushes it onto the delayed
// free list to be released later; asserts against device-to-host copy-back,
// then erases the map entry.
678 std::lock_guard<std::recursive_mutex>
lock(metal_mem_map_mutex);
679 MetalMem &mmem = *metal_mem_map.at(&mem);
680 size_t size = mmem.size;
682 bool free_mtlBuffer =
true;
688 free_mtlBuffer =
false;
692 if (free_mtlBuffer) {
697 assert(!
"Metal device should not copy memory back to host");
// Deferred release: buffer may still be referenced by in-flight command
// buffers, so it goes on the delayed free list instead.
705 delayed_free_list.push_back(mmem.mtlBuffer);
706 mmem.mtlBuffer = nil;
709 erase_allocation(mem);
// Fragments of the mem_* entry points: textures are rejected in mem_alloc,
// several copy paths simply delegate to generic_copy_to, moving memory back
// to the host is unsupported, and mem_copy_from is declared with unnamed
// parameters (body not visible -- likely a no-op or unsupported on Metal).
715 assert(!
"mem_alloc not supported for textures.");
736 generic_copy_to(mem);
741 generic_copy_to(mem);
747 generic_copy_to(mem);
755 assert(!
"Metal does not support mem_move_to_host");
758void MetalDevice::mem_copy_from(
device_memory &,
const size_t,
size_t,
const size_t,
size_t)
// Marks this device as cancelled by removing it from the registry; background
// compile jobs detect this via get_device_by_ID returning nullptr.
794void MetalDevice::cancel()
800 active_device_ids.erase(device_id);
// Reports readiness plus a human-readable status string: fails fast on a
// recorded error, reports generic-kernel load progress, then describes any
// in-flight or completed specialization.
805bool MetalDevice::is_ready(
string &
status)
const
807 if (!error_msg.empty()) {
812 int num_loaded = MetalDeviceKernels::get_loaded_kernel_count(
this, PSO_GENERIC);
814 status =
string_printf(
"%d / %d render kernels loaded (may take a few minutes the first time)",
820 if (
int num_requests = MetalDeviceKernels::num_incomplete_specialization_requests()) {
823 else if (kernel_specialization_level == PSO_SPECIALIZED_INTERSECT) {
824 status =
"Using optimized intersection kernels";
826 else if (kernel_specialization_level == PSO_SPECIALIZED_SHADE) {
827 status =
"Using optimized kernels";
830 metal_printf(
"MetalDevice::is_ready(...) --> true");
// Enables MetalRT extended acceleration-structure limits when the scene
// exceeds the standard caps (2^28 primitives / 2^24 instances). Returns true
// when the setting changed, so callers know kernels need recompiling.
// NOTE(review): the caps are `int` while the counts are size_t -- the
// comparison promotes correctly here, but typed size_t constants would be
// cleaner.
834bool MetalDevice::set_bvh_limits(
size_t instance_count,
size_t max_prim_count)
842 const int standard_limits_max_prim_count = (1 << 28);
843 const int standard_limits_max_instance_count = (1 << 24);
845 bool using_metalrt_extended_limits_before = use_metalrt_extended_limits;
849 if (instance_count > standard_limits_max_instance_count ||
850 max_prim_count > standard_limits_max_prim_count)
852 use_metalrt_extended_limits =
true;
853 metal_printf(
"Enabling MetalRT extended limits (max_prim_count = %zu, instance_count = %zu)",
859 return using_metalrt_extended_limits_before != use_metalrt_extended_limits;
// Requests kernel specialization for the current scene. Level may be capped
// (condition missing here) to PSO_SPECIALIZED_INTERSECT. Specialization runs
// in the background by default, but synchronously when profiling or during
// benchmark warmup; duplicate in-flight requests are dropped. The block
// captures device_id by value so it survives device destruction.
862void MetalDevice::optimize_for_scene(
Scene *scene)
864 MetalPipelineType specialization_level = kernel_specialization_level;
868 specialization_level = (MetalPipelineType)
min(specialization_level, PSO_SPECIALIZED_INTERSECT);
873 int this_device_id = this->device_id;
874 auto specialize_kernels_fn = ^() {
// Compile every level up to the requested one, in order.
875 for (
int level = 1; level <= int(specialization_level); level++) {
876 compile_and_load(this_device_id, MetalPipelineType(level));
881 bool specialize_in_background =
true;
884 if (getenv(
"CYCLES_METAL_PROFILING") !=
nullptr) {
885 specialize_in_background =
false;
889 if (MetalDeviceKernels::is_benchmark_warmup()) {
890 specialize_in_background =
false;
893 if (specialize_in_background) {
894 if (MetalDeviceKernels::num_incomplete_specialization_requests() == 0) {
895 dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
896 specialize_kernels_fn);
899 metal_printf(
"\"optimize_for_scene\" request already in flight - dropping request");
903 specialize_kernels_fn();
// Copies named constant data into the launch params. "data" updates
// KernelData and refreshes specialization hashes; "integrator_state" and the
// KERNEL_DATA_ARRAY-generated names update GPU pointer tables by resolving
// MetalMem handles to GPU addresses.
907void MetalDevice::const_copy_to(
const char *
name,
void *host,
const size_t size)
909 if (strcmp(
name,
"data") == 0) {
911 memcpy((uint8_t *)&launch_params->data, host,
sizeof(KernelData));
// Kernel constants may have changed -- refresh MD5s for all active
// specialization levels.
914 for (
int level = 1; level <= int(kernel_specialization_level); level++) {
915 refresh_source_and_kernels_md5(MetalPipelineType(level));
// Helper: treat `data` as an array of MetalMem* and write each buffer's GPU
// address into the launch-params pointer table at the given byte offset.
920 auto update_launch_pointers = [&](
size_t offset,
void *
data,
const size_t pointers_size) {
923 MetalMem **mmem = (MetalMem **)
data;
924 int pointer_count = pointers_size /
sizeof(
device_ptr);
925 int pointer_index = offset /
sizeof(
device_ptr);
926 for (
int i = 0;
i < pointer_count;
i++) {
// Remember each buffer's slot so erase_allocation can zero it later.
929 mmem[
i]->pointer_index = pointer_index +
i;
930 if (mmem[
i]->mtlBuffer) {
931 if (@available(macOS 13.0, *)) {
932 addresses[
i] = metal_gpuAddress(mmem[
i]->mtlBuffer);
940 if (strcmp(
name,
"integrator_state") == 0) {
943 update_launch_pointers(
// Non-pointer tail of the integrator state is copied verbatim.
948 memcpy((uint8_t *)&launch_params->integrator_state + pointer_block_size,
949 (uint8_t *)host + pointer_block_size,
952# define KERNEL_DATA_ARRAY(data_type, tex_name) \
953 else if (strcmp(name, #tex_name) == 0) { \
954 update_launch_pointers(offsetof(KernelParamsMetal, tex_name), host, size); \
957# undef KERNEL_DATA_ARRAY
// tex_alloc and its buffer-backed fallback (heavily fragmented). Visible
// behavior: nanovdb/atlas-style textures fall back to plain buffers; true
// 2D/3D textures pick an MTLPixelFormat from (data type x channel count),
// allocate an MTLTexture, upload host data, grow texture_info /
// texture_slot_map / texture_bindings as slots are added, and pack
// slot + sampler into texture_info[slot].data.
964 generic_copy_to(mem);
979 MetalDevice::MetalMem *mmem = generic_alloc(mem);
980 generic_copy_to(mem);
// Grow slot tables in 128-entry chunks to limit reallocation churn.
984 if (slot >= texture_info.size()) {
987 texture_info.resize(
round_up(slot + 1, 128));
988 texture_slot_map.resize(
round_up(slot + 1, 128));
991 texture_info[slot] = mem.
info;
992 texture_slot_map[slot] = mmem->mtlBuffer;
995 using_nanovdb =
true;
1008 "Texture exceeds maximum allowed size of 16384 x 16384 (requested: %zu x %zu)",
// Per-datatype format tables: index 2 (3 channels) is invalid -- Metal has
// no packed 3-channel formats for these types.
1028 MTLPixelFormat formats[] = {MTLPixelFormatR8Unorm,
1029 MTLPixelFormatRG8Unorm,
1030 MTLPixelFormatInvalid,
1031 MTLPixelFormatRGBA8Unorm};
1035 MTLPixelFormat formats[] = {MTLPixelFormatR16Unorm,
1036 MTLPixelFormatRG16Unorm,
1037 MTLPixelFormatInvalid,
1038 MTLPixelFormatRGBA16Unorm};
1042 MTLPixelFormat formats[] = {MTLPixelFormatR32Uint,
1043 MTLPixelFormatRG32Uint,
1044 MTLPixelFormatInvalid,
1045 MTLPixelFormatRGBA32Uint};
1049 MTLPixelFormat formats[] = {MTLPixelFormatR32Sint,
1050 MTLPixelFormatRG32Sint,
1051 MTLPixelFormatInvalid,
1052 MTLPixelFormatRGBA32Sint};
1056 MTLPixelFormat formats[] = {MTLPixelFormatR32Float,
1057 MTLPixelFormatRG32Float,
1058 MTLPixelFormatInvalid,
1059 MTLPixelFormatRGBA32Float};
1063 MTLPixelFormat formats[] = {MTLPixelFormatR16Float,
1064 MTLPixelFormatRG16Float,
1065 MTLPixelFormatInvalid,
1066 MTLPixelFormatRGBA16Float};
1076 id<MTLTexture> mtlTexture = nil;
1081 MTLTextureDescriptor *desc;
1083 desc = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:
format
1088 desc.storageMode = MTLStorageModeShared;
1089 desc.usage = MTLTextureUsageShaderRead;
// Keep linear layout so the CPU-side upload below matches row pitch.
1094 desc.allowGPUOptimizedContents =
false;
1100 mtlTexture = [mtlDevice newTextureWithDescriptor:desc];
1102 set_error(
"System is out of GPU memory");
1108 withBytes:mem.host_pointer
1109 bytesPerRow:src_pitch];
1113 tex_alloc_as_buffer(mem);
1121 std::lock_guard<std::recursive_mutex>
lock(metal_mem_map_mutex);
1124 mmem->mtlTexture = mtlTexture;
1125 metal_mem_map[&mem] = std::move(mmem);
// NOTE(review): this growth path uses `slot + 128` rather than
// round_up(slot + 1, 128) as above -- presumably equivalent enough, but the
// inconsistency is worth confirming against the canonical file.
1129 if (slot >= texture_info.size()) {
1132 texture_info.resize(slot + 128);
1133 texture_slot_map.resize(slot + 128);
1135 ssize_t min_buffer_length =
sizeof(
void *) * texture_info.size();
// Grow the bindings buffer; the old one is freed via the delayed free list
// because in-flight work may still reference it.
1136 if (!texture_bindings || (texture_bindings.length < min_buffer_length)) {
1137 if (texture_bindings) {
1138 delayed_free_list.push_back(texture_bindings);
1139 stats.
mem_free(texture_bindings.allocatedSize);
1141 texture_bindings = [mtlDevice newBufferWithLength:min_buffer_length
1142 options:MTLResourceStorageModeShared];
1144 stats.
mem_alloc(texture_bindings.allocatedSize);
1149 texture_slot_map[slot] = mtlTexture;
1150 texture_info[slot] = mem.
info;
// Pack slot index (low 32 bits) and sampler index (high 32 bits).
1151 texture_info[slot].
data =
uint64_t(slot) | (sampler_index << 32);
1153 if (max_working_set_exceeded()) {
1154 set_error(
"System is out of GPU memory");
// tex_copy_to / tex_free fragments: re-upload host pixels into the cached
// MTLTexture (or delegate buffer-backed textures to generic_copy_to); on
// free, push the MTLTexture onto the delayed free list, erase the map entry,
// and clear the slot.
1165 id<MTLTexture> mtlTexture;
1167 std::lock_guard<std::recursive_mutex>
lock(metal_mem_map_mutex);
1168 mtlTexture = metal_mem_map.at(&mem)->mtlTexture;
1172 withBytes:mem.host_pointer
1173 bytesPerRow:src_pitch];
1176 generic_copy_to(mem);
1183 int slot = mem.
slot;
1187 else if (metal_mem_map.count(&mem)) {
1188 std::lock_guard<std::recursive_mutex>
lock(metal_mem_map_mutex);
1189 MetalMem &mmem = *metal_mem_map.at(&mem);
1192 delayed_free_list.push_back(mmem.mtlTexture);
1193 mmem.mtlTexture = nil;
1194 erase_allocation(mem);
1196 texture_slot_map[slot] = nil;
// Queue factory + device_ptr-to-MTLBuffer accessor fragments, followed by
// flush_delayed_free_list, which (under the map mutex) releases every
// deferred Metal object and clears the list. Should only be called once
// in-flight GPU work that may reference those objects has completed --
// TODO confirm the missing guard lines.
1201 return make_unique<MetalDeviceQueue>(
this);
1213 return ((MetalMem *)
ptr)->mtlBuffer;
1216void MetalDevice::flush_delayed_free_list()
1221 std::lock_guard<std::recursive_mutex>
lock(metal_mem_map_mutex);
1222 for (
auto &it : delayed_free_list) {
1225 delayed_free_list.clear();
// build_bvh fragment: configures the BVHMetal with motion-blur / point-cloud
// / extended-limit flags, builds (or refits) it on the general command queue,
// adopts the result via update_bvh, and reports OOM afterwards.
1236 BVHMetal *bvh_metal =
static_cast<BVHMetal *
>(bvh);
1237 bvh_metal->motion_blur = motion_blur;
1238 bvh_metal->use_pcmi = use_pcmi;
1239 bvh_metal->extended_limits = use_metalrt_extended_limits;
1240 if (bvh_metal->build(progress, mtlDevice, mtlGeneralCommandQueue,
refit)) {
1243 update_bvh(bvh_metal);
1247 if (max_working_set_exceeded()) {
1248 set_error(
"System is out of GPU memory");
// Releases all BVH resources: each unique BLAS (release call missing in this
// fragment), the BLAS pointer buffer, and the top-level acceleration
// structure.
1253void MetalDevice::free_bvh()
1255 for (id<MTLAccelerationStructure> &blas : unique_blas_array) {
1258 unique_blas_array.clear();
1262 [blas_buffer release];
1267 [accel_struct release];
// Adopts a freshly built BVH: takes over the TLAS and BLAS arrays (retaining
// each object -- manual retain/release, no ARC), then allocates a shared
// buffer to hold the BLAS pointer table for the kernels.
// NOTE(review): the old accel_struct/blas_buffer are presumably released in
// missing lines before this fragment -- verify no leak in the full file.
1272void MetalDevice::update_bvh(BVHMetal *bvh_metal)
1280 accel_struct = bvh_metal->accel_struct;
1281 unique_blas_array = bvh_metal->unique_blas_array;
1282 blas_array = bvh_metal->blas_array;
1284 [accel_struct retain];
1285 for (id<MTLAccelerationStructure> &blas : unique_blas_array) {
1291 blas_buffer = [mtlDevice newBufferWithLength:buffer_size
options:MTLResourceStorageModeShared];
1292 stats.
mem_alloc(blas_buffer.allocatedSize);
// NOTE(review): everything below is NOT part of this translation unit. It is
// symbol-index / hover residue from the extraction tool -- stray declarations
// from unrelated files (Bullet btDbvt, BMesh, Cycles utility headers). It
// should be deleted when the file is restored from the canonical source;
// kept byte-identical here because this edit may not remove content.
BMesh const char void * data
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
void refit(btStridingMeshInterface *triangles, const btVector3 &aabbMin, const btVector3 &aabbMax)
KernelOptimizationLevel kernel_optimization_level
bool use_hardware_raytracing
virtual void build_bvh(BVH *bvh, Progress &progress, bool refit)
virtual void set_error(const string &error)
void append(const uint8_t *data, const int nbytes)
void mem_alloc(const size_t size)
void mem_free(const size_t size)
bool is_resident(Device *sub_device) const
void * host_alloc(const size_t size)
device_ptr device_pointer
static constexpr size_t datatype_size(DataType datatype)
CCL_NAMESPACE_BEGIN struct Options options
DebugFlags & DebugFlags()
#define KERNEL_FEATURE_OBJECT_MOTION
#define CCL_NAMESPACE_END
@ KERNEL_OPTIMIZATION_LEVEL_OFF
@ KERNEL_OPTIMIZATION_LEVEL_FULL
@ KERNEL_OPTIMIZATION_LEVEL_INTERSECT
static const char * to_string(const Interpolation &interp)
#define assert(assertion)
static void error(const char *str)
static void init(bNodeTree *, bNode *node)
string path_cache_get(const string &sub)
string path_source_replace_includes(const string &source, const string &path)
string path_get(const string &sub)
bool path_write_text(const string &path, string &text)
string string_human_readable_size(size_t size)
string string_human_readable_number(size_t num)
CCL_NAMESPACE_BEGIN string string_printf(const char *format,...)
void string_replace_same_length(string &haystack, const string &needle, const string &other)
ccl_device_inline bool is_nanovdb_type(int type)
std::unique_lock< std::mutex > thread_scoped_lock
CCL_NAMESPACE_BEGIN double time_dt()
ccl_device_inline size_t round_up(const size_t x, const size_t multiple)