Blender V4.3
vk_backend.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2022 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
9#include <sstream>
10
11#include "GHOST_C-api.h"
12
13#include "BLI_threads.h"
14
15#include "CLG_log.h"
16
19
20#include "vk_batch.hh"
21#include "vk_context.hh"
22#include "vk_drawlist.hh"
23#include "vk_fence.hh"
24#include "vk_framebuffer.hh"
25#include "vk_index_buffer.hh"
26#include "vk_pixel_buffer.hh"
27#include "vk_query.hh"
28#include "vk_shader.hh"
29#include "vk_state_manager.hh"
30#include "vk_storage_buffer.hh"
31#include "vk_texture.hh"
32#include "vk_uniform_buffer.hh"
33#include "vk_vertex_buffer.hh"
34
35#include "vk_backend.hh"
36
37static CLG_LogRef LOG = {"gpu.vulkan"};
38
39namespace blender::gpu {
/* Sentinel "capability" name appended by #missing_capabilities_get when the installed driver is
 * on the known-to-crash list; it surfaces in the user-visible missing-features message.
 * NOTE(review): "instable" is a typo for "unstable", but the literal is runtime-visible text and
 * is kept unchanged here. */
static const char *KNOWN_CRASHING_DRIVER = "instable driver";
41
/**
 * Determine which of the backend's minimum Vulkan requirements are not met by the given physical
 * device.
 *
 * Checks, in order: required device features, required device extensions and drivers that are
 * known to crash. An empty result means the device qualifies.
 *
 * \param vk_physical_device: The physical device to inspect.
 * \return Human readable names of the missing capabilities; empty when the device is usable.
 */
static Vector<StringRefNull> missing_capabilities_get(VkPhysicalDevice vk_physical_device)
{
  Vector<StringRefNull> missing_capabilities;
  /* Check device features. */
  VkPhysicalDeviceFeatures2 features = {};
  VkPhysicalDeviceDynamicRenderingFeatures dynamic_rendering = {};

  features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
  dynamic_rendering.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES;
  /* Chain the dynamic-rendering struct so a single query fills both structures. */
  features.pNext = &dynamic_rendering;

  vkGetPhysicalDeviceFeatures2(vk_physical_device, &features);
#ifndef __APPLE__
  /* These two features are not required on Apple; presumably unavailable there (MoltenVK) --
   * TODO confirm. */
  if (features.features.geometryShader == VK_FALSE) {
    missing_capabilities.append("geometry shaders");
  }
  if (features.features.logicOp == VK_FALSE) {
    missing_capabilities.append("logical operations");
  }
#endif
  if (features.features.dualSrcBlend == VK_FALSE) {
    missing_capabilities.append("dual source blending");
  }
  if (features.features.imageCubeArray == VK_FALSE) {
    missing_capabilities.append("image cube array");
  }
  if (features.features.multiDrawIndirect == VK_FALSE) {
    missing_capabilities.append("multi draw indirect");
  }
  if (features.features.multiViewport == VK_FALSE) {
    missing_capabilities.append("multi viewport");
  }
  if (features.features.shaderClipDistance == VK_FALSE) {
    missing_capabilities.append("shader clip distance");
  }
  if (features.features.drawIndirectFirstInstance == VK_FALSE) {
    missing_capabilities.append("draw indirect first instance");
  }
  if (features.features.fragmentStoresAndAtomics == VK_FALSE) {
    missing_capabilities.append("fragment stores and atomics");
  }
  if (dynamic_rendering.dynamicRendering == VK_FALSE) {
    missing_capabilities.append("dynamic rendering");
  }

  /* Check device extensions. */
  /* Standard Vulkan two-call idiom: first query the count, then fill the array. */
  uint32_t vk_extension_count;
  vkEnumerateDeviceExtensionProperties(vk_physical_device, nullptr, &vk_extension_count, nullptr);

  Array<VkExtensionProperties> vk_extensions(vk_extension_count);
  vkEnumerateDeviceExtensionProperties(
      vk_physical_device, nullptr, &vk_extension_count, vk_extensions.data());
  /* Collect names into a set for the membership tests below. */
  Set<StringRefNull> extensions;
  for (VkExtensionProperties &vk_extension : vk_extensions) {
    extensions.add(vk_extension.extensionName);
  }

  if (!extensions.contains(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
    missing_capabilities.append(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
  }
  if (!extensions.contains(VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME)) {
    missing_capabilities.append(VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME);
  }

  /* Check for known faulty drivers. */
  /* Driver identification lives in VkPhysicalDeviceDriverProperties, chained onto the
   * properties2 query. */
  VkPhysicalDeviceProperties2 vk_physical_device_properties = {};
  VkPhysicalDeviceDriverProperties vk_physical_device_driver_properties = {};
  vk_physical_device_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
  vk_physical_device_driver_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES;
  vk_physical_device_properties.pNext = &vk_physical_device_driver_properties;
  vkGetPhysicalDeviceProperties2(vk_physical_device, &vk_physical_device_properties);

  /* Check for drivers that are known to crash. */

  /* Intel IRIS on 10th gen CPU crashes due to issues when using dynamic rendering. It seems like
   * when vkCmdBeginRendering is called some requirements need to be met, that can only be met when
   * actually calling a vkCmdDraw command. As driver versions are not easy accessible we check
   * against the latest conformance test version.
   *
   * This should be revisited when dynamic rendering is fully optional.
   */
  /* Pack the four conformance-version components into one integer (re-using the
   * VK_MAKE_API_VERSION bit layout) so the comparison below is a single `<`. */
  uint32_t conformance_version = VK_MAKE_API_VERSION(
      vk_physical_device_driver_properties.conformanceVersion.major,
      vk_physical_device_driver_properties.conformanceVersion.minor,
      vk_physical_device_driver_properties.conformanceVersion.subminor,
      vk_physical_device_driver_properties.conformanceVersion.patch);
  if (vk_physical_device_driver_properties.driverID == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS &&
      vk_physical_device_properties.properties.deviceType ==
          VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU &&
      conformance_version < VK_MAKE_API_VERSION(1, 3, 2, 0))
  {
    missing_capabilities.append(KNOWN_CRASHING_DRIVER);
  }

  return missing_capabilities;
}
138
140{
142
143 /* Initialize an vulkan 1.2 instance. */
144 VkApplicationInfo vk_application_info = {VK_STRUCTURE_TYPE_APPLICATION_INFO};
145 vk_application_info.pApplicationName = "Blender";
146 vk_application_info.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
147 vk_application_info.pEngineName = "Blender";
148 vk_application_info.engineVersion = VK_MAKE_VERSION(1, 0, 0);
149 vk_application_info.apiVersion = VK_API_VERSION_1_2;
150
151 VkInstanceCreateInfo vk_instance_info = {VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO};
152 vk_instance_info.pApplicationInfo = &vk_application_info;
153
154 VkInstance vk_instance = VK_NULL_HANDLE;
155 vkCreateInstance(&vk_instance_info, nullptr, &vk_instance);
156 if (vk_instance == VK_NULL_HANDLE) {
157 CLOG_ERROR(&LOG, "Unable to initialize a Vulkan 1.2 instance.");
158 return false;
159 }
160
161 // go over all the devices
162 uint32_t physical_devices_count = 0;
163 vkEnumeratePhysicalDevices(vk_instance, &physical_devices_count, nullptr);
164 Array<VkPhysicalDevice> vk_physical_devices(physical_devices_count);
165 vkEnumeratePhysicalDevices(vk_instance, &physical_devices_count, vk_physical_devices.data());
166
167 for (VkPhysicalDevice vk_physical_device : vk_physical_devices) {
168 Vector<StringRefNull> missing_capabilities = missing_capabilities_get(vk_physical_device);
169
170 VkPhysicalDeviceProperties vk_properties = {};
171 vkGetPhysicalDeviceProperties(vk_physical_device, &vk_properties);
172
173 /* Report result. */
174 if (missing_capabilities.is_empty()) {
175 /* This device meets minimum requirements. */
176 CLOG_INFO(&LOG,
177 2,
178 "Device [%s] supports minimum requirements. Skip checking other GPUs. Another GPU "
179 "can still be selected during auto-detection.",
180 vk_properties.deviceName);
181
182 vkDestroyInstance(vk_instance, nullptr);
183 return true;
184 }
185
186 std::stringstream ss;
187 ss << "Device [" << vk_properties.deviceName
188 << "] does not meet minimum requirements. Missing features are [";
189 for (StringRefNull &feature : missing_capabilities) {
190 ss << feature << ", ";
191 }
192 ss.seekp(-2, std::ios_base::end);
193 ss << "]";
194 CLOG_WARN(&LOG, "%s", ss.str().c_str());
195 }
196
197 /* No device found meeting the minimum requirements. */
198
199 vkDestroyInstance(vk_instance, nullptr);
201 "No Vulkan device found that meets the minimum requirements. "
202 "Updating GPU driver can improve compatibility.");
203 return false;
204}
205
207{
208#ifdef _WIN32
209 return GPU_OS_WIN;
210#elif defined(__APPLE__)
211 return GPU_OS_MAC;
212#else
213 return GPU_OS_UNIX;
214#endif
215}
216
217void VKBackend::platform_init()
218{
224 "",
225 "",
226 "",
228
229 /* Query for all compatible devices */
230 VkApplicationInfo vk_application_info = {VK_STRUCTURE_TYPE_APPLICATION_INFO};
231 vk_application_info.pApplicationName = "Blender";
232 vk_application_info.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
233 vk_application_info.pEngineName = "Blender";
234 vk_application_info.engineVersion = VK_MAKE_VERSION(1, 0, 0);
235 vk_application_info.apiVersion = VK_API_VERSION_1_2;
236
237 VkInstanceCreateInfo vk_instance_info = {VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO};
238 vk_instance_info.pApplicationInfo = &vk_application_info;
239
240 VkInstance vk_instance = VK_NULL_HANDLE;
241 vkCreateInstance(&vk_instance_info, nullptr, &vk_instance);
242 BLI_assert(vk_instance != VK_NULL_HANDLE);
243
244 uint32_t physical_devices_count = 0;
245 vkEnumeratePhysicalDevices(vk_instance, &physical_devices_count, nullptr);
246 Array<VkPhysicalDevice> vk_physical_devices(physical_devices_count);
247 vkEnumeratePhysicalDevices(vk_instance, &physical_devices_count, vk_physical_devices.data());
248 int index = 0;
249 for (VkPhysicalDevice vk_physical_device : vk_physical_devices) {
250 if (missing_capabilities_get(vk_physical_device).is_empty()) {
251 VkPhysicalDeviceProperties vk_properties = {};
252 vkGetPhysicalDeviceProperties(vk_physical_device, &vk_properties);
253 std::stringstream identifier;
254 identifier << std::hex << vk_properties.vendorID << "/" << vk_properties.deviceID << "/"
255 << index;
256 GPG.devices.append({identifier.str(),
257 index,
258 vk_properties.vendorID,
259 vk_properties.deviceID,
260 std::string(vk_properties.deviceName)});
261 }
262 index++;
263 }
264 vkDestroyInstance(vk_instance, nullptr);
265 std::sort(GPG.devices.begin(), GPG.devices.end(), [&](const GPUDevice &a, const GPUDevice &b) {
266 if (a.name == b.name) {
267 return a.index < b.index;
268 }
269 return a.name < b.name;
270 });
271}
272
273void VKBackend::platform_init(const VKDevice &device)
274{
275 const VkPhysicalDeviceProperties &properties = device.physical_device_properties_get();
276
277 eGPUDeviceType device_type = device.device_type();
278 eGPUDriverType driver = device.driver_type();
281
282 std::string vendor_name = device.vendor_name();
283 std::string driver_version = device.driver_version();
284
285 /* GPG has already been initialized, but without a specific device. Calling init twice will
286 * clear the list of devices. Making a copy of the device list and set it after initialization to
287 * make sure the list isn't destroyed at this moment, but only when the backend is destroyed. */
288 Vector<GPUDevice> devices = GPG.devices;
289 GPG.init(device_type,
290 os,
291 driver,
292 support_level,
294 vendor_name.c_str(),
295 properties.deviceName,
296 driver_version.c_str(),
298 GPG.devices = devices;
299
300 CLOG_INFO(&LOG,
301 0,
302 "Using vendor [%s] device [%s] driver version [%s].",
303 vendor_name.c_str(),
304 device.vk_physical_device_properties_.deviceName,
305 driver_version.c_str());
306}
307
308void VKBackend::detect_workarounds(VKDevice &device)
309{
310 VKWorkarounds workarounds;
311
312 if (G.debug & G_DEBUG_GPU_FORCE_WORKAROUNDS) {
313 printf("\n");
314 printf("VK: Forcing workaround usage and disabling features and extensions.\n");
315 printf(" Vendor: %s\n", device.vendor_name().c_str());
316 printf(" Device: %s\n", device.physical_device_properties_get().deviceName);
317 printf(" Driver: %s\n", device.driver_version().c_str());
318 /* Force workarounds. */
319 workarounds.not_aligned_pixel_formats = true;
320 workarounds.shader_output_layer = true;
321 workarounds.shader_output_viewport_index = true;
322 workarounds.vertex_formats.r8g8b8 = true;
323 workarounds.fragment_shader_barycentric = true;
324 workarounds.dynamic_rendering_unused_attachments = true;
325
326 device.workarounds_ = workarounds;
327 return;
328 }
329
330 workarounds.shader_output_layer =
331 !device.physical_device_vulkan_12_features_get().shaderOutputLayer;
332 workarounds.shader_output_viewport_index =
333 !device.physical_device_vulkan_12_features_get().shaderOutputViewportIndex;
334
335 /* AMD GPUs don't support texture formats that use are aligned to 24 or 48 bits. */
338 {
339 workarounds.not_aligned_pixel_formats = true;
340 }
341
342 VkFormatProperties format_properties = {};
343 vkGetPhysicalDeviceFormatProperties(
344 device.physical_device_get(), VK_FORMAT_R8G8B8_UNORM, &format_properties);
345 workarounds.vertex_formats.r8g8b8 = (format_properties.bufferFeatures &
346 VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0;
347
349 VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME);
350
352 VK_EXT_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_EXTENSION_NAME);
353
354 device.workarounds_ = workarounds;
355}
356
357void VKBackend::platform_exit()
358{
359 GPG.clear();
360 VKDevice &device = VKBackend::get().device;
361 if (device.is_initialized()) {
362 device.deinit();
363 }
364}
365
/* Intentionally a no-op for the Vulkan backend. */
void VKBackend::delete_resources() {}
367
368void VKBackend::samplers_update()
369{
370 VKDevice &device = VKBackend::get().device;
371 if (device.is_initialized()) {
372 device.reinit();
373 }
374}
375
376void VKBackend::compute_dispatch(int groups_x_len, int groups_y_len, int groups_z_len)
377{
378 VKContext &context = *VKContext::get();
379 render_graph::VKResourceAccessInfo &resources = context.reset_and_get_access_info();
380 render_graph::VKDispatchNode::CreateInfo dispatch_info(resources);
381 context.update_pipeline_data(dispatch_info.dispatch_node.pipeline_data);
382 dispatch_info.dispatch_node.group_count_x = groups_x_len;
383 dispatch_info.dispatch_node.group_count_y = groups_y_len;
384 dispatch_info.dispatch_node.group_count_z = groups_z_len;
385 context.render_graph.add_node(dispatch_info);
386}
387
388void VKBackend::compute_dispatch_indirect(StorageBuf *indirect_buf)
389{
390 BLI_assert(indirect_buf);
391 VKContext &context = *VKContext::get();
392 VKStorageBuffer &indirect_buffer = *unwrap(indirect_buf);
393 render_graph::VKResourceAccessInfo &resources = context.reset_and_get_access_info();
394 render_graph::VKDispatchIndirectNode::CreateInfo dispatch_indirect_info(resources);
395 context.update_pipeline_data(dispatch_indirect_info.dispatch_indirect_node.pipeline_data);
396 dispatch_indirect_info.dispatch_indirect_node.buffer = indirect_buffer.vk_handle();
397 dispatch_indirect_info.dispatch_indirect_node.offset = 0;
398 context.render_graph.add_node(dispatch_indirect_info);
399}
400
/**
 * Create a new GPU context for the given GHOST window or off-screen GHOST context.
 *
 * The device is initialized lazily from the first context that is created, so it must be set up
 * before the #VKContext constructor runs (the constructor receives `device.resources`).
 * Ownership of the returned context is transferred to the caller.
 *
 * \param ghost_window: Window to create the context for; may be null for off-screen rendering.
 * \param ghost_context: Off-screen GHOST context; must be null when `ghost_window` is given.
 */
Context *VKBackend::context_alloc(void *ghost_window, void *ghost_context)
{
  if (ghost_window) {
    /* A window owns its drawing context: only one of the two arguments may be provided. */
    BLI_assert(ghost_context == nullptr);
    ghost_context = GHOST_GetDrawingContext((GHOST_WindowHandle)ghost_window);
  }

  BLI_assert(ghost_context != nullptr);
  if (!device.is_initialized()) {
    device.init(ghost_context);
  }

  VKContext *context = new VKContext(ghost_window, ghost_context, device.resources);
  device.context_register(*context);
  /* Hook the context into GHOST's swap-buffers step. */
  GHOST_SetVulkanSwapBuffersCallbacks((GHOST_ContextHandle)ghost_context,
                                      VKContext::swap_buffers_pre_callback,
                                      VKContext::swap_buffers_post_callback);
  return context;
}
420
/* -------------------------------------------------------------------- */
/** \name Resource allocation
 *
 * Factory methods creating the Vulkan implementation of each GPU resource type.
 * Ownership of each returned resource is transferred to the caller.
 * \{ */

Batch *VKBackend::batch_alloc()
{
  return new VKBatch();
}

DrawList *VKBackend::drawlist_alloc(int list_length)
{
  return new VKDrawList(list_length);
}

Fence *VKBackend::fence_alloc()
{
  return new VKFence();
}

FrameBuffer *VKBackend::framebuffer_alloc(const char *name)
{
  return new VKFrameBuffer(name);
}

IndexBuf *VKBackend::indexbuf_alloc()
{
  return new VKIndexBuffer();
}

PixelBuffer *VKBackend::pixelbuf_alloc(size_t size)
{
  return new VKPixelBuffer(size);
}

QueryPool *VKBackend::querypool_alloc()
{
  return new VKQueryPool();
}

Shader *VKBackend::shader_alloc(const char *name)
{
  return new VKShader(name);
}

Texture *VKBackend::texture_alloc(const char *name)
{
  return new VKTexture(name);
}

UniformBuf *VKBackend::uniformbuf_alloc(size_t size, const char *name)
{
  return new VKUniformBuffer(size, name);
}

StorageBuf *VKBackend::storagebuf_alloc(size_t size, GPUUsageType usage, const char *name)
{
  return new VKStorageBuffer(size, usage, name);
}

VertBuf *VKBackend::vertbuf_alloc()
{
  return new VKVertexBuffer();
}

/** \} */
480
481void VKBackend::render_begin()
482{
483 VKThreadData &thread_data = device.current_thread_data();
484 BLI_assert_msg(thread_data.rendering_depth >= 0, "Unbalanced `GPU_render_begin/end`");
485 thread_data.rendering_depth += 1;
486}
487
488void VKBackend::render_end()
489{
490 VKThreadData &thread_data = device.current_thread_data();
491 thread_data.rendering_depth -= 1;
492 BLI_assert_msg(thread_data.rendering_depth >= 0, "Unbalanced `GPU_render_begin/end`");
493 if (G.background) {
494 /* Garbage collection when performing background rendering. In this case the rendering is
495 * already 'thread-safe'. We move the resources to the device discard list and we destroy it
496 * the next frame. */
497 if (thread_data.rendering_depth == 0) {
498 VKResourcePool &resource_pool = thread_data.resource_pool_get();
499 device.orphaned_data.destroy_discarded_resources(device);
500 device.orphaned_data.move_data(resource_pool.discard_pool);
501 resource_pool.reset();
502 }
503 }
504
505 else if (!BLI_thread_is_main()) {
506 /* Foreground rendering using a worker/render thread. In this case we move the resources to the
507 * device discard list and it will be cleared by the main thread. */
508 if (thread_data.rendering_depth == 0) {
509 VKResourcePool &resource_pool = thread_data.resource_pool_get();
510 device.orphaned_data.move_data(resource_pool.discard_pool);
511 resource_pool.reset();
512 }
513 }
514}
515
/* Intentionally a no-op: the Vulkan backend performs no per-step housekeeping here. */
void VKBackend::render_step() {}
517
/**
 * Fill the global GPU capabilities (`GCaps`) from the device limits and finish by detecting
 * driver workarounds.
 *
 * NOTE(review): several assignment heads in this listing were lost during extraction — the
 * dangling argument lines below each gap marker. They are flagged rather than guessed; restore
 * them from the upstream source before building.
 */
void VKBackend::capabilities_init(VKDevice &device)
{
  const VkPhysicalDeviceProperties &properties = device.physical_device_properties_get();
  const VkPhysicalDeviceLimits &limits = properties.limits;

  /* Reset all capabilities from previous context. */
  GCaps = {};
  /* NOTE(review): assignment line(s) lost in extraction before the next line. */
      VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME);
  /* NOTE(review): assignment line lost in extraction before the next line. */
      device.physical_device_vulkan_11_features_get().shaderDrawParameters;

  /* Clamp the unsigned Vulkan limits to INT_MAX as the capabilities are stored signed. */
  GCaps.max_texture_size = max_ii(limits.maxImageDimension1D, limits.maxImageDimension2D);
  GCaps.max_texture_3d_size = min_uu(limits.maxImageDimension3D, INT_MAX);
  GCaps.max_texture_layers = min_uu(limits.maxImageArrayLayers, INT_MAX);
  GCaps.max_textures = min_uu(limits.maxDescriptorSetSampledImages, INT_MAX);
  /* NOTE(review): assignment line lost in extraction before the next line. */
      limits.maxPerStageDescriptorSampledImages, INT_MAX);
  GCaps.max_samplers = min_uu(limits.maxSamplerAllocationCount, INT_MAX);
  GCaps.max_images = min_uu(limits.maxPerStageDescriptorStorageImages, INT_MAX);
  for (int i = 0; i < 3; i++) {
    GCaps.max_work_group_count[i] = min_uu(limits.maxComputeWorkGroupCount[i], INT_MAX);
    GCaps.max_work_group_size[i] = min_uu(limits.maxComputeWorkGroupSize[i], INT_MAX);
  }
  /* NOTE(review): assignment line lost in extraction before the next line. */
      limits.maxPerStageDescriptorUniformBuffers, INT_MAX);
  GCaps.max_batch_indices = min_uu(limits.maxDrawIndirectCount, INT_MAX);
  GCaps.max_batch_vertices = min_uu(limits.maxDrawIndexedIndexValue, INT_MAX);
  GCaps.max_vertex_attribs = min_uu(limits.maxVertexInputAttributes, INT_MAX);
  GCaps.max_varying_floats = min_uu(limits.maxVertexOutputComponents, INT_MAX);
  /* NOTE(review): assignment line lost in extraction before the next line. */
      limits.maxPerStageDescriptorStorageBuffers, INT_MAX);
  GCaps.max_storage_buffer_size = size_t(limits.maxStorageBufferRange);

  /* NOTE(review): additional line(s) lost in extraction here. */

  detect_workarounds(device);
}
559
560} // namespace blender::gpu
@ G_DEBUG_GPU_FORCE_WORKAROUNDS
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
MINLINE uint min_uu(uint a, uint b)
MINLINE int max_ii(int a, int b)
int BLI_system_thread_count(void)
Definition threads.cc:253
int BLI_thread_is_main(void)
Definition threads.cc:179
#define CLOG_ERROR(clg_ref,...)
Definition CLG_log.h:182
#define CLOG_WARN(clg_ref,...)
Definition CLG_log.h:181
void CLG_logref_init(CLG_LogRef *clg_ref)
Definition clog.c:764
#define CLOG_INFO(clg_ref, level,...)
Definition CLG_log.h:179
GHOST C-API function and type declarations.
GHOST_ContextHandle GHOST_GetDrawingContext(GHOST_WindowHandle windowhandle)
eGPUDriverType
@ GPU_DRIVER_ANY
@ GPU_ARCHITECTURE_IMR
eGPUSupportLevel
@ GPU_SUPPORT_LEVEL_SUPPORTED
eGPUOSType
@ GPU_OS_WIN
@ GPU_OS_UNIX
@ GPU_OS_ANY
@ GPU_OS_MAC
eGPUDeviceType
@ GPU_DEVICE_ATI
@ GPU_DEVICE_ANY
@ GPU_DEVICE_APPLE
bool GPU_type_matches(eGPUDeviceType device, eGPUOSType os, eGPUDriverType driver)
const T * data() const
Definition BLI_array.hh:301
bool contains(const Key &key) const
Definition BLI_set.hh:291
bool add(const Key &key)
Definition BLI_set.hh:248
void append(const T &value)
bool is_empty() const
void init(eGPUDeviceType gpu_device, eGPUOSType os_type, eGPUDriverType driver_type, eGPUSupportLevel gpu_support_level, eGPUBackendType backend, const char *vendor_str, const char *renderer_str, const char *version_str, GPUArchitectureType arch_type)
static bool is_supported()
bool supports_extension(const char *extension_name) const
Definition vk_device.cc:176
const VkPhysicalDeviceProperties & physical_device_properties_get() const
Definition vk_device.hh:199
std::string vendor_name() const
Definition vk_device.cc:324
std::string driver_version() const
Definition vk_device.cc:351
eGPUDriverType driver_type() const
Definition vk_device.cc:298
bool is_initialized() const
Definition vk_device.cc:70
VkPhysicalDevice physical_device_get() const
Definition vk_device.hh:194
const VkPhysicalDeviceVulkan12Features & physical_device_vulkan_12_features_get() const
Definition vk_device.hh:214
const VkPhysicalDeviceVulkan11Features & physical_device_vulkan_11_features_get() const
Definition vk_device.hh:209
eGPUDeviceType device_type() const
Definition vk_device.cc:266
VKResourcePool & resource_pool_get()
Definition vk_device.hh:105
local_group_size(16, 16) .push_constant(Type b
#define printf
#define LOG(severity)
Definition log.h:33
#define G(x, y, z)
GPUPlatformGlobal GPG
static eGPUOSType determine_os_type()
static CLG_LogRef LOG
GPUCapabilities GCaps
VKBatch * unwrap(Batch *batch)
Definition vk_batch.hh:34
static const char * KNOWN_CRASHING_DRIVER
Definition vk_backend.cc:40
static Vector< StringRefNull > missing_capabilities_get(VkPhysicalDevice vk_physical_device)
Definition vk_backend.cc:42
unsigned int uint32_t
Definition stdint.h:80
std::string name
struct blender::gpu::VKWorkarounds::@669 vertex_formats
static CLG_LogRef LOG
Definition vk_backend.cc:37