Blender V4.5
vk_context.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2022 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include "GPU_debug.hh"
10
11#include "vk_backend.hh"
12#include "vk_context.hh"
13#include "vk_debug.hh"
14#include "vk_framebuffer.hh"
15#include "vk_immediate.hh"
16#include "vk_shader.hh"
18#include "vk_state_manager.hh"
19#include "vk_texture.hh"
20
21#include "GHOST_C-api.h"
22
23namespace blender::gpu {
24
/* Construct a context for an optional GHOST window (swap-chain rendering) and
 * GHOST context. Creates the immediate-mode wrapper and the default back/front
 * framebuffers.
 * NOTE(review): internal lines 30 and 35 are missing from this extraction
 * (numbering jumps 29->31 and 34->36) — confirm against the upstream file. */
25VKContext::VKContext(void *ghost_window, void *ghost_context)
26{
27 ghost_window_ = ghost_window;
28 ghost_context_ = ghost_context;
29
31 imm = new VKImmediate();
32
33 back_left = new VKFrameBuffer("back_left");
34 front_left = new VKFrameBuffer("front_left");
36}
37
/* Destructor: detaches and frees the swap-chain surface texture, deletes the
 * immediate-mode wrapper, unregisters this context from the device, and flushes
 * pending frame timings.
 * NOTE(review): the signature line (internal line 38) and internal line 46 are
 * missing from this extraction — confirm against the upstream file. */
39{
 /* Release the surface texture shared by the back/front framebuffers. */
40 if (surface_texture_) {
41 back_left->attachment_remove(GPU_FB_COLOR_ATTACHMENT0);
42 front_left->attachment_remove(GPU_FB_COLOR_ATTACHMENT0);
43 GPU_texture_free(surface_texture_);
44 surface_texture_ = nullptr;
45 }
47 delete imm;
48 imm = nullptr;
49 VKDevice &device = VKBackend::get().device;
50 device.context_unregister(*this);
51
52 this->process_frame_timings();
53}
54
/* Synchronize the back-buffer with the GHOST swap-chain: when the surface
 * format or extent changed, recreate the surface texture and re-attach it to
 * the back/front framebuffers. Optionally cycles to the next per-thread
 * resource pool (done once per frame after a swap).
 * NOTE(review): internal lines 72-73 (body of the `reset_framebuffer` branch),
 * 85 (texture-usage argument) and 90 (front_left attachment_set call start)
 * are missing from this extraction — confirm against the upstream file. */
55void VKContext::sync_backbuffer(bool cycle_resource_pool)
56{
57 if (ghost_window_) {
58 GHOST_VulkanSwapChainData swap_chain_data = {};
59 GHOST_GetVulkanSwapChainFormat((GHOST_WindowHandle)ghost_window_, &swap_chain_data);
60 VKThreadData &thread_data = thread_data_.value().get();
61 if (cycle_resource_pool) {
62 thread_data.resource_pool_next();
63 }
64
 /* Detect surface format / extent changes that require a new surface texture. */
65 const bool reset_framebuffer = swap_chain_format_.format !=
66 swap_chain_data.surface_format.format ||
67 swap_chain_format_.colorSpace !=
68 swap_chain_data.surface_format.colorSpace ||
69 vk_extent_.width != swap_chain_data.extent.width ||
70 vk_extent_.height != swap_chain_data.extent.height;
71 if (reset_framebuffer) {
74 }
75 if (surface_texture_) {
76 GPU_texture_free(surface_texture_);
77 surface_texture_ = nullptr;
78 }
79 surface_texture_ = GPU_texture_create_2d(
80 "back-left",
81 swap_chain_data.extent.width,
82 swap_chain_data.extent.height,
83 1,
84 to_gpu_format(swap_chain_data.surface_format.format),
86 nullptr);
87
88 back_left->attachment_set(GPU_FB_COLOR_ATTACHMENT0,
89 GPU_ATTACHMENT_TEXTURE(surface_texture_));
91 GPU_ATTACHMENT_TEXTURE(surface_texture_));
92
93 back_left->bind(false);
94
 /* Remember the format/extent so the next sync can detect changes. */
95 swap_chain_format_ = swap_chain_data.surface_format;
96 vk_extent_ = swap_chain_data.extent;
97 }
98 }
99}
100
/* Bind this context to the calling thread: attach the device's per-thread
 * data, lazily create a render graph (replaying the debug-group stack into
 * it), resync the back-buffer and activate immediate mode.
 * NOTE(review): the signature line (internal line 101, `void
 * VKContext::activate()` per the declaration index) and the second argument of
 * the debug_group_begin call (internal line 119) are missing from this
 * extraction — confirm against the upstream file. */
102{
103 /* Make sure no other context is already bound to this thread. */
104 BLI_assert(is_active_ == false);
105
106 VKDevice &device = VKBackend::get().device;
107 VKThreadData &thread_data = device.current_thread_data();
108 thread_data_ = std::reference_wrapper<VKThreadData>(thread_data);
109
110 if (!render_graph_.has_value()) {
111 render_graph_ = std::reference_wrapper<render_graph::VKRenderGraph>(
112 *device.render_graph_new());
113 /* Recreate the debug group stack for the new graph.
114 * Note: there is no associated `debug_group_end` as the graph groups
115 * are implicitly closed on submission. */
116 for (const StringRef &group : debug_stack) {
117 std::string str_group = group;
118 render_graph_.value().get().debug_group_begin(str_group.c_str(),
120 }
121 }
122
123 is_active_ = true;
124
125 sync_backbuffer(false);
126
127 immActivate();
128}
129
/* Unbind this context from the calling thread.
 * NOTE(review): the signature line (internal line 130, `void
 * VKContext::deactivate()` per the declaration index) and internal lines
 * 132-133 (presumably the immDeactivate() call) are missing from this
 * extraction — confirm against the upstream file. */
131{
134 thread_data_.reset();
135
136 is_active_ = false;
137}
138
140
/* NOTE(review): the signature line (internal line 141) is missing from this
 * extraction. The declaration index lists both `flush()` and `end_frame()`
 * overrides for this class; presumably this body belongs to one of them —
 * confirm against the upstream file. */
142{
143 this->process_frame_timings();
144}
145
/* Submit the current render graph to the device and replace it with a fresh
 * one, replaying the debug-group stack into the new graph. Returns the
 * timeline value of the submission.
 * NOTE(review): several internal lines are missing from this extraction:
 * 146-149/151 (signature: `TimelineValue VKContext::flush_render_graph(
 * RenderGraphFlushFlags flags, ...)` per the declaration index), 157 (the
 * condition guarding the framebuffer block), 164, 166, 170, 172 (additional
 * render_graph_submit arguments, incl. the discard pool and
 * wait-for-completion flag per the declaration index) and 178 — confirm
 * against the upstream file. */
150
152 VkPipelineStageFlags wait_dst_stage_mask,
153 VkSemaphore wait_semaphore,
154 VkSemaphore signal_semaphore,
155 VkFence signal_fence)
156{
 /* End any in-flight rendering before submission. */
158 VKFrameBuffer &framebuffer = *active_framebuffer_get();
159 if (framebuffer.is_rendering()) {
160 framebuffer.rendering_end(*this);
161 }
162 }
163 VKDevice &device = VKBackend::get().device;
165 if (!device.extensions_get().descriptor_buffer) {
167 }
168 TimelineValue timeline = device.render_graph_submit(
169 &render_graph_.value().get(),
171 bool(flags & RenderGraphFlushFlags::SUBMIT),
173 wait_dst_stage_mask,
174 wait_semaphore,
175 signal_semaphore,
176 signal_fence);
177 render_graph_.reset();
179 render_graph_ = std::reference_wrapper<render_graph::VKRenderGraph>(
180 *device.render_graph_new());
181 /* Recreate the debug group stack for the new graph.
182 * Note: there is no associated `debug_group_end` as the graph groups
183 * are implicitly closed on submission. */
184 for (const StringRef &group : debug_stack) {
185 std::string str_group = group;
186 render_graph_.value().get().debug_group_begin(str_group.c_str(),
188 }
189 }
190 return timeline;
191}
192
194
195void VKContext::memory_statistics_get(int *r_total_mem_kb, int *r_free_mem_kb)
196{
197 const VKDevice &device = VKBackend::get().device;
198 device.memory_statistics_get(r_total_mem_kb, r_free_mem_kb);
199}
200
201/* -------------------------------------------------------------------- */
204
206{
207 return thread_data_.value().get().resource_pool_get().descriptor_pools;
208}
209
211{
212 return thread_data_.value().get().resource_pool_get().descriptor_set;
213}
214
216{
217 return *static_cast<VKStateManager *>(state_manager);
218}
219
224
229
231
232/* -------------------------------------------------------------------- */
235
/* Make the given framebuffer the active one, refreshing its size/sRGB state
 * and resetting its rendering state.
 * NOTE(review): the signature line (internal line 236, `void
 * VKContext::activate_framebuffer(VKFrameBuffer &framebuffer)` per the
 * declaration index) and internal lines 238-239 (presumably deactivation of a
 * previously active framebuffer) are missing from this extraction — confirm
 * against the upstream file. */
237{
240 }
241
242 BLI_assert(active_fb == nullptr);
243 active_fb = &framebuffer;
244 framebuffer.update_size();
245 framebuffer.update_srgb();
246 framebuffer.rendering_reset();
247}
248
253
255{
256 return active_framebuffer_get() != nullptr;
257}
258
/* Unbind the active framebuffer, ending its render pass if one is recording.
 * NOTE(review): the signature line (internal line 259) is missing from this
 * extraction; presumably `void VKContext::deactivate_framebuffer()` — confirm
 * against the upstream file. */
260{
261 VKFrameBuffer *framebuffer = active_framebuffer_get();
262 BLI_assert(framebuffer != nullptr);
263 if (framebuffer->is_rendering()) {
264 framebuffer->rendering_end(*this);
265 }
266 active_fb = nullptr;
267}
268
/* End rendering on the active framebuffer, if any, without unbinding it.
 * NOTE(review): the signature line (internal line 269) is missing from this
 * extraction — confirm the function name against the upstream file. */
270{
271 VKFrameBuffer *framebuffer = active_framebuffer_get();
272 if (framebuffer) {
273 framebuffer->rendering_end(*this);
274 }
275}
276
278
279/* -------------------------------------------------------------------- */
282
285 render_graph::VKPipelineData &r_pipeline_data)
286{
287 VKShader &vk_shader = unwrap(*shader);
288 VKFrameBuffer &framebuffer = *active_framebuffer_get();
289
290 /* Override size of point shader when GPU_point size < 0 */
291 const float point_size = state_manager_get().mutable_state.point_size;
292 if (primitive == GPU_PRIM_POINTS && point_size < 0.0) {
293 GPU_shader_uniform_1f(wrap(shader), "size", -point_size);
294 }
295
296 update_pipeline_data(vk_shader,
298 primitive, vao, state_manager_get(), framebuffer, constants_state_),
299 r_pipeline_data);
300}
301
/* Fill `r_pipeline_data` for a compute dispatch: ensures the compute pipeline
 * for the bound shader and forwards to the shared update_pipeline_data()
 * overload.
 * NOTE(review): internal lines 302 (signature) and 305 (the
 * `update_pipeline_data(` call start) are missing from this extraction —
 * confirm against the upstream file. */
303{
304 VKShader &vk_shader = unwrap(*shader);
306 vk_shader, vk_shader.ensure_and_get_compute_pipeline(constants_state_), r_pipeline_data);
307}
308
/* Shared implementation filling `r_pipeline_data` from a shader and an
 * already-ensured pipeline: pipeline handles, push constants (when the shader
 * stores them as real Vulkan push constants) and the descriptor set.
 * NOTE(review): internal lines 309 (first signature line, presumably
 * `void VKContext::update_pipeline_data(VKShader &vk_shader,`) and 320
 * (presumably `vk_shader.interface_get().push_constants_layout_get();`) are
 * missing from this extraction — confirm against the upstream file. */
310 VkPipeline vk_pipeline,
311 render_graph::VKPipelineData &r_pipeline_data)
312{
313 r_pipeline_data.vk_pipeline_layout = vk_shader.vk_pipeline_layout;
314 r_pipeline_data.vk_pipeline = vk_pipeline;
315
316 /* Update push constants. */
317 r_pipeline_data.push_constants_data = nullptr;
318 r_pipeline_data.push_constants_size = 0;
319 const VKPushConstants::Layout &push_constants_layout =
321 if (push_constants_layout.storage_type_get() == VKPushConstants::StorageType::PUSH_CONSTANTS) {
322 r_pipeline_data.push_constants_size = push_constants_layout.size_in_bytes();
323 r_pipeline_data.push_constants_data = vk_shader.push_constants.data();
324 }
325
326 /* Update descriptor set. */
327 r_pipeline_data.vk_descriptor_set = VK_NULL_HANDLE;
328 r_pipeline_data.descriptor_buffer_device_address = 0;
329 r_pipeline_data.descriptor_buffer_offset = 0;
330 if (vk_shader.has_descriptor_set()) {
331 VKDescriptorSetTracker &descriptor_set = descriptor_set_get();
332 descriptor_set.update_descriptor_set(*this, access_info_, r_pipeline_data);
333 }
334}
335
337{
338 access_info_.reset();
339 return access_info_;
340}
341
343
344/* -------------------------------------------------------------------- */
347
348void VKContext::swap_buffers_pre_callback(const GHOST_VulkanSwapChainData *swap_chain_data)
349{
350 VKContext *context = VKContext::get();
351 BLI_assert(context);
352 context->swap_buffers_pre_handler(*swap_chain_data);
353}
354
356{
357 VKContext *context = VKContext::get();
358 BLI_assert(context);
359 context->swap_buffers_post_handler();
360}
361
/* Blit the active framebuffer's color attachment into the swap-chain image
 * (flipped vertically via swapped dst offsets), transition the image to
 * PRESENT_SRC and submit the render graph synchronized with the swap-chain
 * semaphores/fence.
 * NOTE(review): internal lines 369 (blit-node declaration), 396, 400-401
 * (synchronization-node declaration) and 406 (flush_render_graph call start)
 * are missing from this extraction — confirm against the upstream file. */
362void VKContext::swap_buffers_pre_handler(const GHOST_VulkanSwapChainData &swap_chain_data)
363{
364 GPU_debug_group_begin("BackBuffer.Blit");
365
366 VKFrameBuffer &framebuffer = *unwrap(active_fb);
367 VKTexture *color_attachment = unwrap(unwrap(framebuffer.color_tex(0)));
368
370 blit_image.src_image = color_attachment->vk_image_handle();
371 blit_image.dst_image = swap_chain_data.image;
372 blit_image.filter = VK_FILTER_LINEAR;
373
374 VkImageBlit &region = blit_image.region;
375 region.srcOffsets[0] = {0, 0, 0};
376 region.srcOffsets[1] = {color_attachment->width_get(), color_attachment->height_get(), 1};
377 region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
378 region.srcSubresource.mipLevel = 0;
379 region.srcSubresource.baseArrayLayer = 0;
380 region.srcSubresource.layerCount = 1;
381
 /* Destination offsets are top/bottom swapped to flip the image vertically. */
382 region.dstOffsets[0] = {0, int32_t(swap_chain_data.extent.height), 0};
383 region.dstOffsets[1] = {int32_t(swap_chain_data.extent.width), 0, 1};
384 region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
385 region.dstSubresource.mipLevel = 0;
386 region.dstSubresource.baseArrayLayer = 0;
387 region.dstSubresource.layerCount = 1;
388
389 /* Swap chain commands are CPU synchronized at this moment, allowing to temporary add the swap
390 * chain image as device resources. When we move towards GPU swap chain synchronization we need
391 * to keep track of the swap chain image between frames. */
392 VKDevice &device = VKBackend::get().device;
393 device.resources.add_image(swap_chain_data.image, 1, "SwapchainImage");
394
395 framebuffer.rendering_end(*this);
397
398 render_graph::VKRenderGraph &render_graph = this->render_graph();
399 render_graph.add_node(blit_image);
 /* Transition the swap-chain image to the presentable layout. */
402 synchronization.vk_image = swap_chain_data.image;
403 synchronization.vk_image_layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
404 synchronization.vk_image_aspect = VK_IMAGE_ASPECT_COLOR_BIT;
405 render_graph.add_node(synchronization);
407 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
408 swap_chain_data.acquire_semaphore,
409 swap_chain_data.present_semaphore,
410 swap_chain_data.submission_fence);
411
412 device.resources.remove_image(swap_chain_data.image);
413#if 0
414 device.debug_print();
415#endif
416}
417
418void VKContext::swap_buffers_post_handler()
419{
420 sync_backbuffer(true);
421}
422
/* Store the specialization constants to use for subsequent pipeline creation.
 * NOTE(review): internal lines 423 (first signature line, `void
 * VKContext::specialization_constants_set(` per the declaration index) and
 * 427 (the else-branch of the ternary, i.e. the fallback value when
 * `constants_state` is null) are missing from this extraction — confirm
 * against the upstream file. */
424 const shader::SpecializationConstants *constants_state)
425{
426 constants_state_ = (constants_state != nullptr) ? *constants_state :
428}
429
431
432/* -------------------------------------------------------------------- */
435
436void VKContext::openxr_acquire_framebuffer_image_callback(GHOST_VulkanOpenXRData *openxr_data)
437{
438 VKContext *context = VKContext::get();
439 BLI_assert(context);
440 context->openxr_acquire_framebuffer_image_handler(*openxr_data);
441}
442
443void VKContext::openxr_release_framebuffer_image_callback(GHOST_VulkanOpenXRData *openxr_data)
444{
445 VKContext *context = VKContext::get();
446 BLI_assert(context);
447 context->openxr_release_framebuffer_image_handler(*openxr_data);
448}
449
/* Hand the active framebuffer's color attachment to the OpenXR layer, either
 * by reading pixels back to the CPU or by exporting the image memory as an FD
 * (POSIX) / NT handle (Win32). Exports are cached: memory is only re-exported
 * when the VkImage handle changed since the last acquire.
 * NOTE(review): internal lines 459 (declaration of `data_format`, presumably
 * initialized to GPU_DATA_HALF_FLOAT given the GPU_RGBA8 -> GPU_DATA_UBYTE
 * override below), 470-472 and 487-489 (presumably comments/#ifdef guards in
 * the FD and Win32 cases) are missing from this extraction — confirm against
 * the upstream file. */
450void VKContext::openxr_acquire_framebuffer_image_handler(GHOST_VulkanOpenXRData &openxr_data)
451{
452 VKFrameBuffer &framebuffer = *unwrap(active_fb);
453 VKTexture *color_attachment = unwrap(unwrap(framebuffer.color_tex(0)));
454 openxr_data.extent.width = color_attachment->width_get();
455 openxr_data.extent.height = color_attachment->height_get();
456
457 /* Determine the data format for data transfer. */
458 const eGPUTextureFormat device_format = color_attachment->device_format_get();
460 if (ELEM(device_format, GPU_RGBA8)) {
461 data_format = GPU_DATA_UBYTE;
462 }
463
464 switch (openxr_data.data_transfer_mode) {
465 case GHOST_kVulkanXRModeCPU:
466 openxr_data.cpu.image_data = color_attachment->read(0, data_format);
467 break;
468
469 case GHOST_kVulkanXRModeFD: {
 /* Only re-export when the underlying VkImage changed. */
473 if (openxr_data.gpu.vk_image_blender != color_attachment->vk_image_handle()) {
474 VKMemoryExport exported_memory = color_attachment->export_memory(
475 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT);
476 openxr_data.gpu.image_handle = exported_memory.handle;
477 openxr_data.gpu.new_handle = true;
478 openxr_data.gpu.image_format = to_vk_format(color_attachment->device_format_get());
479 openxr_data.gpu.memory_size = exported_memory.memory_size;
480 openxr_data.gpu.memory_offset = exported_memory.memory_offset;
481 openxr_data.gpu.vk_image_blender = color_attachment->vk_image_handle();
482 }
483 break;
484 }
485
486 case GHOST_kVulkanXRModeWin32: {
 /* Only re-export when the underlying VkImage changed. */
490 if (openxr_data.gpu.vk_image_blender != color_attachment->vk_image_handle()) {
491 VKMemoryExport exported_memory = color_attachment->export_memory(
492 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT);
493 openxr_data.gpu.image_handle = exported_memory.handle;
494 openxr_data.gpu.new_handle = true;
495 openxr_data.gpu.image_format = to_vk_format(color_attachment->device_format_get());
496 openxr_data.gpu.memory_size = exported_memory.memory_size;
497 openxr_data.gpu.memory_offset = exported_memory.memory_offset;
498 openxr_data.gpu.vk_image_blender = color_attachment->vk_image_handle();
499 }
500 break;
501 }
502 }
503}
504
505void VKContext::openxr_release_framebuffer_image_handler(GHOST_VulkanOpenXRData &openxr_data)
506{
507 switch (openxr_data.data_transfer_mode) {
508 case GHOST_kVulkanXRModeCPU:
509 MEM_freeN(openxr_data.cpu.image_data);
510 openxr_data.cpu.image_data = nullptr;
511 break;
512
513 case GHOST_kVulkanXRModeFD:
514 /* Nothing to do as import of the handle by the XrInstance removes the ownership of the
515 * handle. Ref
516 * https://registry.khronos.org/vulkan/specs/latest/man/html/VK_KHR_external_memory_fd.html#_issues
517 */
518 break;
519
520 case GHOST_kVulkanXRModeWin32:
521#ifdef _WIN32
522 if (openxr_data.gpu.new_handle) {
523 /* Exported handle isn't consumed during import and should be freed after use. */
524 CloseHandle(HANDLE(openxr_data.gpu.image_handle));
525 openxr_data.gpu.image_handle = 0;
526 }
527#endif
528 break;
529 }
530}
531
533
534} // namespace blender::gpu
#define BLI_assert(a)
Definition BLI_assert.h:46
#define ELEM(...)
GHOST C-API function and type declarations.
void GPU_debug_group_end()
Definition gpu_debug.cc:33
void GPU_debug_group_begin(const char *name)
Definition gpu_debug.cc:22
#define GPU_ATTACHMENT_TEXTURE(_texture)
GPUPrimType
@ GPU_PRIM_POINTS
void GPU_shader_uniform_1f(GPUShader *sh, const char *name, float value)
GPUTexture * GPU_texture_create_2d(const char *name, int width, int height, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data)
void GPU_texture_free(GPUTexture *texture)
eGPUDataFormat
@ GPU_DATA_HALF_FLOAT
@ GPU_DATA_UBYTE
@ GPU_TEXTURE_USAGE_ATTACHMENT
eGPUTextureFormat
@ GPU_RGBA8
GPUTexture * color_tex(int slot) const
static VKBackend & get()
Definition vk_backend.hh:91
void debug_unbind_all_ssbo() override
void sync_backbuffer(bool cycle_resource_pool)
Definition vk_context.cc:55
render_graph::VKResourceAccessInfo & reset_and_get_access_info()
void finish() override
static void openxr_acquire_framebuffer_image_callback(GHOST_VulkanOpenXRData *data)
void specialization_constants_set(const shader::SpecializationConstants *constants_state)
void deactivate() override
void memory_statistics_get(int *r_total_mem_kb, int *r_free_mem_kb) override
VKContext(void *ghost_window, void *ghost_context)
Definition vk_context.cc:25
VKDiscardPool discard_pool
Definition vk_context.hh:74
static void openxr_release_framebuffer_image_callback(GHOST_VulkanOpenXRData *data)
void flush() override
static void swap_buffers_post_callback()
VKFrameBuffer * active_framebuffer_get() const
void activate() override
const render_graph::VKRenderGraph & render_graph() const
Definition vk_context.hh:76
bool has_active_framebuffer() const
void debug_unbind_all_ubo() override
static void swap_buffers_pre_callback(const GHOST_VulkanSwapChainData *data)
void activate_framebuffer(VKFrameBuffer &framebuffer)
VKStateManager & state_manager_get() const
VKDescriptorSetTracker & descriptor_set_get()
void update_pipeline_data(render_graph::VKPipelineData &r_pipeline_data)
static VKContext * get()
void begin_frame() override
VKDescriptorPools & descriptor_pools_get()
void end_frame() override
TimelineValue flush_render_graph(RenderGraphFlushFlags flags, VkPipelineStageFlags wait_dst_stage_mask=VK_PIPELINE_STAGE_NONE, VkSemaphore wait_semaphore=VK_NULL_HANDLE, VkSemaphore signal_semaphore=VK_NULL_HANDLE, VkFence signal_fence=VK_NULL_HANDLE)
void discard(VKContext &vk_context)
render_graph::VKResourceStateTracker resources
Definition vk_device.hh:242
const VKExtensions & extensions_get() const
Definition vk_device.hh:396
render_graph::VKRenderGraph * render_graph_new()
VKThreadData & current_thread_data()
Definition vk_device.cc:520
TimelineValue render_graph_submit(render_graph::VKRenderGraph *render_graph, VKDiscardPool &context_discard_pool, bool submit_to_device, bool wait_for_completion, VkPipelineStageFlags wait_dst_stage_mask, VkSemaphore wait_semaphore, VkSemaphore signal_semaphore, VkFence signal_fence)
void context_unregister(VKContext &context)
Definition vk_device.cc:541
void memory_statistics_get(int *r_total_mem_kb, int *r_free_mem_kb) const
Definition vk_device.cc:563
void rendering_end(VKContext &context)
const VKPushConstants::Layout & push_constants_layout_get() const
VkPipeline ensure_and_get_graphics_pipeline(GPUPrimType primitive, VKVertexAttributeObject &vao, VKStateManager &state_manager, VKFrameBuffer &framebuffer, shader::SpecializationConstants &constants_state)
VkPipeline ensure_and_get_compute_pipeline(const shader::SpecializationConstants &constants_state)
VKPushConstants push_constants
Definition vk_shader.hh:55
bool has_descriptor_set() const
Definition vk_shader.hh:108
VkPipelineLayout vk_pipeline_layout
Definition vk_shader.hh:54
const VKShaderInterface & interface_get() const
VKMemoryExport export_memory(VkExternalMemoryHandleTypeFlagBits handle_type)
eGPUTextureFormat device_format_get() const
VkImage vk_image_handle() const
void * read(int mip, eGPUDataFormat format) override
void add_image(VkImage vk_image, uint32_t layer_count, const char *name=nullptr)
@ GPU_FB_COLOR_ATTACHMENT0
void immDeactivate()
void immActivate()
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
static ColorTheme4f get_debug_group_color(StringRefNull name)
uint64_t TimelineValue
Definition vk_common.hh:36
static Context * unwrap(GPUContext *ctx)
static GPUContext * wrap(Context *ctx)
eGPUTextureFormat to_gpu_format(const VkFormat format)
Definition vk_common.cc:118
VkFormat to_vk_format(const eGPUTextureFormat format)
Definition vk_common.cc:131