Blender V5.0
vk_context.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2022 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include "DNA_userdef_types.h"
10
11#include "GPU_debug.hh"
12
14
15#include "vk_backend.hh"
16#include "vk_context.hh"
17#include "vk_debug.hh"
18#include "vk_framebuffer.hh"
19#include "vk_immediate.hh"
20#include "vk_shader.hh"
22#include "vk_state_manager.hh"
23#include "vk_texture.hh"
24
25#include "GHOST_C-api.h"
26
27namespace blender::gpu {
28
/* NOTE(review): garbled doxygen extraction — original source lines 34 and 39 are missing from
 * this view (presumably additional member setup / device registration); confirm against the
 * real vk_context.cc before relying on this block. */
29VKContext::VKContext(void *ghost_window, void *ghost_context)
30{
 /* Keep the GHOST handles; `ghost_window_` may be null for off-screen contexts (see the
  * window check in the back-buffer sync code below). */
 31 ghost_window_ = ghost_window;
 32 ghost_context_ = ghost_context;
 33
 /* Immediate-mode drawing helper owned by this context (freed in the destructor). */
 35 imm = new VKImmediate();
 36
 /* Window framebuffers. Their color attachment is created lazily once the swap-chain
  * format/extent is known. */
 37 back_left = new VKFrameBuffer("back_left");
 38 front_left = new VKFrameBuffer("front_left");
 40}
41
/* NOTE(review): destructor body — the signature line (original line 42) and line 50 are missing
 * from this extraction. Releases the swap-chain surface texture, the immediate-mode helper, and
 * unregisters this context from the device. */
43{
 /* Detach and free the swap-chain surface texture if one was created. */
 44 if (surface_texture_) {
 45 back_left->attachment_remove(GPU_FB_COLOR_ATTACHMENT0);
 46 front_left->attachment_remove(GPU_FB_COLOR_ATTACHMENT0);
 47 GPU_texture_free(surface_texture_);
 48 surface_texture_ = nullptr;
 49 }
 51 delete imm;
 52 imm = nullptr;
 /* Unregister so the device no longer tracks this context. */
 53 VKDevice &device = VKBackend::get().device;
 54 device.context_unregister(*this);
 55
 /* Flush any pending frame-timing queries before the context goes away. */
 56 this->process_frame_timings();
 57}
58
/* NOTE(review): garbled extraction — the function signature (original line ~59) and body lines
 * 72-73, 88 and 93 are missing from this view. Presumably this is the back-buffer/swap-chain
 * sync routine; confirm against the real vk_context.cc. */
60{
 /* Only windowed contexts have a swap-chain to sync with. */
 61 if (ghost_window_) {
 62 GHOST_VulkanSwapChainData swap_chain_data = {};
 63 GHOST_GetVulkanSwapChainFormat((GHOST_WindowHandle)ghost_window_, &swap_chain_data);
 64
 /* Rebuild is needed when the surface format, color space or extent changed since the last
  * sync. */
 65 const bool reset_framebuffer = swap_chain_format_.format !=
 66 swap_chain_data.surface_format.format ||
 67 swap_chain_format_.colorSpace !=
 68 swap_chain_data.surface_format.colorSpace ||
 69 vk_extent_.width != swap_chain_data.extent.width ||
 70 vk_extent_.height != swap_chain_data.extent.height;
 71 if (reset_framebuffer) {
 74 }
 /* Free the previous surface texture before creating one matching the new extent/format. */
 75 if (surface_texture_) {
 76 GPU_texture_free(surface_texture_);
 77 surface_texture_ = nullptr;
 78 }
 /* Clamp to at least 1x1: a minimized window can report a zero-sized extent. */
 79 vk_extent_ = swap_chain_data.extent;
 80 vk_extent_.width = max_uu(vk_extent_.width, 1u);
 81 vk_extent_.height = max_uu(vk_extent_.height, 1u);
 82 surface_texture_ = GPU_texture_create_2d(
 83 "back-left",
 84 vk_extent_.width,
 85 vk_extent_.height,
 86 1,
 87 to_gpu_format(swap_chain_data.surface_format.format),
 89 nullptr);
 90
 /* Attach the new surface texture as color attachment 0 of the window framebuffers. */
 91 back_left->attachment_set(GPU_FB_COLOR_ATTACHMENT0,
 92 GPU_ATTACHMENT_TEXTURE(surface_texture_));
 94 GPU_ATTACHMENT_TEXTURE(surface_texture_));
 95
 96 back_left->bind(false);
 97
 98 swap_chain_format_ = swap_chain_data.surface_format;
 /* HDR viewport support requires a 16F float swap-chain in one of the listed color spaces. */
 99 GCaps.hdr_viewport_support = (swap_chain_format_.format == VK_FORMAT_R16G16B16A16_SFLOAT) &&
 100 ELEM(swap_chain_format_.colorSpace,
 101 VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT,
 102 VK_COLOR_SPACE_SRGB_NONLINEAR_KHR);
 103 }
 104 }
 105}
106
/* NOTE(review): `VKContext::activate` body — the signature line (original line 107), the second
 * argument of `debug_group_begin` (line 125) and line 131 are missing from this extraction. */
108{
 109 /* Make sure no other context is already bound to this thread. */
 110 BLI_assert(is_active_ == false);
 111
 /* Bind this context to the device's per-thread data. */
 112 VKDevice &device = VKBackend::get().device;
 113 VKThreadData &thread_data = device.current_thread_data();
 114 thread_data_ = std::reference_wrapper<VKThreadData>(thread_data);
 115
 /* Lazily create a render graph for this context when none is attached yet. */
 116 if (!render_graph_.has_value()) {
 117 render_graph_ = std::reference_wrapper<render_graph::VKRenderGraph>(
 118 *device.render_graph_new());
 119 /* Recreate the debug group stack for the new graph.
 120 * Note: there is no associated `debug_group_end` as the graph groups
 121 * are implicitly closed on submission. */
 122 for (const StringRef &group : debug_stack) {
 123 std::string str_group = group;
 124 render_graph_.value().get().debug_group_begin(str_group.c_str(),
 126 }
 127 }
 128
 129 is_active_ = true;
 130
 132
 /* Re-activate immediate-mode drawing for this context. */
 133 immActivate();
 134}
135
/* NOTE(review): two fragments from a garbled extraction. The first body is presumably
 * `VKContext::deactivate` (signature line 136 and body lines 138-139 missing — likely the
 * immediate-mode deactivation); the second is a frame-lifecycle hook that only forwards to
 * `process_frame_timings` (its signature line 147 is missing — begin_frame/end_frame per the
 * class interface; confirm against the real vk_context.cc). */
137{
 /* Drop the per-thread binding established in `activate`. */
 140 thread_data_.reset();
 141
 142 is_active_ = false;
 143}
 144
 146
148{
 149 this->process_frame_timings();
 150}
151
156
/* NOTE(review): `VKContext::flush_render_graph` — the first signature line (original line 157,
 * carrying the `RenderGraphFlushFlags flags` parameter) and body lines 163, 170, 173, 175, 182
 * and 191 are missing from this extraction. Per the class interface it submits the current
 * render graph and returns the resulting timeline value. */
 158 VkPipelineStageFlags wait_dst_stage_mask,
 159 VkSemaphore wait_semaphore,
 160 VkSemaphore signal_semaphore,
 161 VkFence signal_fence)
162{
 /* End any in-progress rendering pass before the graph is submitted. */
 164 VKFrameBuffer &framebuffer = *active_framebuffer_get();
 165 if (framebuffer.is_rendering()) {
 166 framebuffer.rendering_end(*this);
 167 }
 168 }
 169 VKDevice &device = VKBackend::get().device;
 /* Hand the graph to the device for submission; the returned timeline value can be waited on. */
 171 TimelineValue timeline = device.render_graph_submit(
 172 &render_graph_.value().get(),
 174 bool(flags & RenderGraphFlushFlags::SUBMIT),
 176 wait_dst_stage_mask,
 177 wait_semaphore,
 178 signal_semaphore,
 179 signal_fence);
 /* The submitted graph is consumed; drop our reference and the streaming buffers tied to it. */
 180 render_graph_.reset();
 181 streaming_buffers_.clear();
 183 render_graph_ = std::reference_wrapper<render_graph::VKRenderGraph>(
 184 *device.render_graph_new());
 185 /* Recreate the debug group stack for the new graph.
 186 * Note: there is no associated `debug_group_end` as the graph groups
 187 * are implicitly closed on submission. */
 188 for (const StringRef &group : debug_stack) {
 189 std::string str_group = group;
 190 render_graph_.value().get().debug_group_begin(str_group.c_str(),
 192 }
 193 }
 194 return timeline;
 195}
196
198
199void VKContext::memory_statistics_get(int *r_total_mem_kb, int *r_free_mem_kb)
200{
201 const VKDevice &device = VKBackend::get().device;
202 device.memory_statistics_get(r_total_mem_kb, r_free_mem_kb);
203}
204
205/* -------------------------------------------------------------------- */
 208
/* NOTE(review): three accessor bodies whose signature lines are missing from this extraction.
 * Per the class interface they are `descriptor_pools_get`, `descriptor_set_get` and
 * `state_manager_get`; original lines 224-234 are also collapsed here (likely the
 * `debug_unbind_all_ubo`/`debug_unbind_all_ssbo` overrides — confirm against the real file). */
210{
 /* Descriptor pools live in the per-thread data bound during `activate`. */
 211 return thread_data_.value().get().descriptor_pools;
 212}
 213
215{
 216 return thread_data_.value().get().descriptor_set;
 217}
 218
220{
 /* The generic `state_manager` is always a `VKStateManager` in this backend. */
 221 return *static_cast<VKStateManager *>(state_manager);
 222}
 223
 228
 233
 235
236/* -------------------------------------------------------------------- */
239
/* NOTE(review): framebuffer activation/deactivation section — the signature lines and a few
 * body lines (original 240, 242-243, 253-258, 263, 273) are missing from this extraction. The
 * bodies match `activate_framebuffer`, `has_active_framebuffer`, the framebuffer deactivation,
 * and `rendering_end` from the class interface; confirm against the real vk_context.cc. */
241{
 244 }
 245
 /* Only one framebuffer can be active at a time. */
 246 BLI_assert(active_fb == nullptr);
 247 active_fb = &framebuffer;
 /* Refresh cached size/sRGB state and reset any previous rendering state. */
 248 framebuffer.update_size();
 249 framebuffer.update_srgb();
 250 framebuffer.rendering_reset();
 251}
 252
 257
259{
 260 return active_framebuffer_get() != nullptr;
 261}
 262
264{
 /* End the rendering pass (if any) before the framebuffer is released. */
 265 VKFrameBuffer *framebuffer = active_framebuffer_get();
 266 BLI_assert(framebuffer != nullptr);
 267 if (framebuffer->is_rendering()) {
 268 framebuffer->rendering_end(*this);
 269 }
 270 active_fb = nullptr;
 271}
 272
274{
 /* Best-effort: end rendering on the active framebuffer when there is one. */
 275 VKFrameBuffer *framebuffer = active_framebuffer_get();
 276 if (framebuffer) {
 277 framebuffer->rendering_end(*this);
 278 }
 279}
280
282
283/* -------------------------------------------------------------------- */
286
/* NOTE(review): pipeline-data update for graphics (first body) and compute (second body) —
 * both signature lines and several argument lines (original 287-289, 302-306, 311, 319, 324,
 * 327) are missing from this extraction. */
290{
 291 VKShader &vk_shader = unwrap(*shader);
 292 VKFrameBuffer &framebuffer = *active_framebuffer_get();
 293
 294 /* Override size of point shader when GPU_point size < 0 */
 295 const float point_size = state_manager_get().mutable_state.point_size;
 296 if (primitive == GPU_PRIM_POINTS && point_size < 0.0) {
 297 GPU_shader_uniform_1f(shader, "size", -point_size);
 298 }
 299
 300 /* Dynamic state line width */
 301 const bool is_line_primitive = ELEM(primitive,
 307
 308 if (is_line_primitive) {
 /* Wide lines need the `wide_lines` device extension; fall back to 1.0 otherwise. */
 309 const bool supports_wide_lines = VKBackend::get().device.extensions_get().wide_lines;
 310 r_pipeline_data.line_width = supports_wide_lines ?
 312 1.0f;
 313 }
 314 else {
 315 r_pipeline_data.line_width.reset();
 316 }
 317
 /* Fill the shared pipeline data using the graphics pipeline for this draw state. */
 318 update_pipeline_data(vk_shader,
 320 primitive, vao, state_manager_get(), framebuffer, constants_state_),
 321 r_pipeline_data.pipeline_data);
 322}
 323
/* Compute variant: resolve the compute pipeline for the current specialization constants. */
325{
 326 VKShader &vk_shader = unwrap(*shader);
 328 vk_shader, vk_shader.ensure_and_get_compute_pipeline(constants_state_), r_pipeline_data);
 329}
330
/* NOTE(review): core `update_pipeline_data` — the first signature line (original line 331,
 * carrying the `VKShader &vk_shader` parameter) and line 342 are missing from this extraction.
 * Fills `r_pipeline_data` with pipeline handle/layout, push constants and descriptor set. */
 332 VkPipeline vk_pipeline,
 333 render_graph::VKPipelineData &r_pipeline_data)
334{
 335 r_pipeline_data.vk_pipeline_layout = vk_shader.vk_pipeline_layout;
 336 r_pipeline_data.vk_pipeline = vk_pipeline;
 337
 338 /* Update push constants. */
 339 r_pipeline_data.push_constants_data = nullptr;
 340 r_pipeline_data.push_constants_size = 0;
 341 const VKPushConstants::Layout &push_constants_layout =
 /* Only attach push-constant data when the shader actually stores them as push constants. */
 343 if (push_constants_layout.storage_type_get() == VKPushConstants::StorageType::PUSH_CONSTANTS) {
 344 r_pipeline_data.push_constants_size = push_constants_layout.size_in_bytes();
 345 r_pipeline_data.push_constants_data = vk_shader.push_constants.data();
 346 }
 347
 348 /* Update descriptor set. */
 349 r_pipeline_data.vk_descriptor_set = VK_NULL_HANDLE;
 350 r_pipeline_data.descriptor_buffer_device_address = 0;
 351 r_pipeline_data.descriptor_buffer_offset = 0;
 352 if (vk_shader.has_descriptor_set()) {
 353 VKDescriptorSetTracker &descriptor_set = descriptor_set_get();
 354 descriptor_set.update_descriptor_set(*this, access_info_, r_pipeline_data);
 355 }
 356}
357
/* NOTE(review): `reset_and_get_access_info` body — signature line (original 358) missing from
 * this extraction. Clears the accumulated resource access info and returns it for reuse. */
359{
 360 access_info_.reset();
 361 return access_info_;
 362}
363
365
366/* -------------------------------------------------------------------- */
369
/* NOTE(review): `swap_buffer_acquired_callback` body — the static-function signature line
 * (original 370) is missing from this extraction. Forwards the GHOST event to the context
 * currently bound to this thread. */
371{
 372 VKContext *context = VKContext::get();
 373 BLI_assert(context);
 374 context->swap_buffer_acquired_handler();
 375}
376
377void VKContext::swap_buffer_draw_callback(const GHOST_VulkanSwapChainData *swap_chain_data)
378{
379 VKContext *context = VKContext::get();
380 BLI_assert(context);
381 context->swap_buffer_draw_handler(*swap_chain_data);
382}
383
/* NOTE(review): the single body line (original line 386) is missing from this extraction, so
 * the handler's actual action cannot be documented from this view — confirm against the real
 * vk_context.cc. */
384void VKContext::swap_buffer_acquired_handler()
385{
 387}
388
/* NOTE(review): garbled extraction — original lines 397, 414, 424, 447, 452 and 454 are
 * missing (including the declarations of the blit/synchronization node structs and the
 * `flush_render_graph` call line). Copies the rendered back-buffer into the swap-chain image,
 * either via a blit shader (extended-sRGB color space) or a plain image blit. */
389void VKContext::swap_buffer_draw_handler(const GHOST_VulkanSwapChainData &swap_chain_data)
390{
 391 const bool do_blit_to_swapchain = swap_chain_data.image != VK_NULL_HANDLE;
 /* Extended-sRGB swap-chains go through a compute shader so SDR content can be scaled. */
 392 const bool use_shader = swap_chain_data.surface_format.colorSpace ==
 393 VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT;
 394
 395 /* When swapchain is invalid/minimized we only flush the render graph to free GPU resources. */
 396 if (!do_blit_to_swapchain) {
 398 return;
 399 }
 400
 401 VKDevice &device = VKBackend::get().device;
 402 render_graph::VKRenderGraph &render_graph = this->render_graph();
 /* Close the active rendering pass; the color attachment becomes the blit source. */
 403 VKFrameBuffer &framebuffer = *unwrap(active_fb);
 404 framebuffer.rendering_end(*this);
 405 VKTexture *color_attachment = unwrap(unwrap(framebuffer.color_tex(0)));
 /* Register the swap-chain image so the render graph can track its resource state. */
 406 device.resources.add_swapchain_image(swap_chain_data.image, "SwapchainImage");
 407
 408 GPU_debug_group_begin("BackBuffer.Blit");
 409 if (use_shader) {
 /* Wrap the swap-chain image in a temporary texture and run the back-buffer blit shader. */
 410 VKTexture swap_chain_texture("swap_chain_texture");
 411 swap_chain_texture.init_swapchain(swap_chain_data.image,
 412 to_gpu_format(swap_chain_data.surface_format.format));
 413 Shader *shader = device.vk_backbuffer_blit_sh_get();
 415 GPU_shader_uniform_1f(shader, "sdr_scale", swap_chain_data.sdr_scale);
 416 VKStateManager &state_manager = state_manager_get();
 417 state_manager.image_bind(color_attachment, 0);
 418 state_manager.image_bind(&swap_chain_texture, 1);
 /* One 16x16 workgroup per tile of the swap-chain extent. */
 419 int2 dispatch_size = math::divide_ceil(
 420 int2(swap_chain_data.extent.width, swap_chain_data.extent.height), int2(16));
 421 VKBackend::get().compute_dispatch(UNPACK2(dispatch_size), 1);
 422 }
 423 else {
 425 blit_image.src_image = color_attachment->vk_image_handle();
 426 blit_image.dst_image = swap_chain_data.image;
 427 blit_image.filter = VK_FILTER_LINEAR;
 428
 429 VkImageBlit &region = blit_image.region;
 430 region.srcOffsets[0] = {0, 0, 0};
 431 region.srcOffsets[1] = {color_attachment->width_get(), color_attachment->height_get(), 1};
 432 region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
 433 region.srcSubresource.mipLevel = 0;
 434 region.srcSubresource.baseArrayLayer = 0;
 435 region.srcSubresource.layerCount = 1;
 436
 /* Destination offsets are vertically swapped, flipping the image during the blit. */
 437 region.dstOffsets[0] = {0, int32_t(swap_chain_data.extent.height), 0};
 438 region.dstOffsets[1] = {int32_t(swap_chain_data.extent.width), 0, 1};
 439 region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
 440 region.dstSubresource.mipLevel = 0;
 441 region.dstSubresource.baseArrayLayer = 0;
 442 region.dstSubresource.layerCount = 1;
 443
 444 render_graph.add_node(blit_image);
 445 }
 446
 /* Transition the swap-chain image to PRESENT_SRC so it can be handed to the presentation
  * engine. */
 448 synchronization.vk_image = swap_chain_data.image;
 449 synchronization.vk_image_layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
 450 synchronization.vk_image_aspect = VK_IMAGE_ASPECT_COLOR_BIT;
 451 render_graph.add_node(synchronization);
 453
 /* Submit, waiting on the acquire semaphore and signaling present semaphore + fence. */
 455 VK_PIPELINE_STAGE_TRANSFER_BIT,
 456 swap_chain_data.acquire_semaphore,
 457 swap_chain_data.present_semaphore,
 458 swap_chain_data.submission_fence);
 459
 /* The swap-chain image is external; stop tracking it after submission. */
 460 device.resources.remove_image(swap_chain_data.image);
461#if 0
 462 device.debug_print();
463#endif
464}
465
/* NOTE(review): `specialization_constants_set` — the first signature line (original 466) and
 * the fallback value of the conditional (line 470, presumably the shader's default
 * specialization constants) are missing from this extraction. Stores the given constants, or a
 * default when null is passed. */
 467 const shader::SpecializationConstants *constants_state)
468{
 469 constants_state_ = (constants_state != nullptr) ? *constants_state :
 471}
472
473std::unique_ptr<VKStreamingBuffer> &VKContext::get_or_create_streaming_buffer(
474 VKBuffer &buffer, VkDeviceSize min_offset_alignment)
475{
476 for (std::unique_ptr<VKStreamingBuffer> &streaming_buffer : streaming_buffers_) {
477 if (streaming_buffer->vk_buffer_dst() == buffer.vk_handle()) {
478 return streaming_buffer;
479 }
480 }
481
482 streaming_buffers_.append(std::make_unique<VKStreamingBuffer>(buffer, min_offset_alignment));
483 return streaming_buffers_.last();
484}
485
487
488/* -------------------------------------------------------------------- */
491
492void VKContext::openxr_acquire_framebuffer_image_callback(GHOST_VulkanOpenXRData *openxr_data)
493{
494 VKContext *context = VKContext::get();
495 BLI_assert(context);
496 context->openxr_acquire_framebuffer_image_handler(*openxr_data);
497}
498
499void VKContext::openxr_release_framebuffer_image_callback(GHOST_VulkanOpenXRData *openxr_data)
500{
501 VKContext *context = VKContext::get();
502 BLI_assert(context);
503 context->openxr_release_framebuffer_image_handler(*openxr_data);
504}
505
/* NOTE(review): garbled extraction — original lines 515 (the `data_format` declaration and its
 * default value), 526-528 and 543-545 are missing from this view; confirm against the real
 * vk_context.cc. Publishes the active framebuffer's color attachment to the XR session, either
 * as CPU pixel data or as exported GPU memory (FD / Win32 handle). */
506void VKContext::openxr_acquire_framebuffer_image_handler(GHOST_VulkanOpenXRData &openxr_data)
507{
 508 VKFrameBuffer &framebuffer = *unwrap(active_fb);
 509 VKTexture *color_attachment = unwrap(unwrap(framebuffer.color_tex(0)));
 510 openxr_data.extent.width = color_attachment->width_get();
 511 openxr_data.extent.height = color_attachment->height_get();
 512
 513 /* Determine the data format for data transfer. */
 514 const TextureFormat device_format = color_attachment->device_format_get();
 516 if (ELEM(device_format, TextureFormat::UNORM_8_8_8_8)) {
 517 data_format = GPU_DATA_UBYTE;
 518 }
 519
 520 switch (openxr_data.data_transfer_mode) {
 /* CPU path: read the attachment's pixels back; freed in the release handler. */
 521 case GHOST_kVulkanXRModeCPU:
 522 openxr_data.cpu.image_data = color_attachment->read(0, data_format);
 523 break;
 524
 /* GPU path (POSIX): export the attachment's memory as an opaque FD, but only when the
  * image differs from the one already shared with the XR session. */
 525 case GHOST_kVulkanXRModeFD: {
 529 if (openxr_data.gpu.vk_image_blender != color_attachment->vk_image_handle()) {
 530 VKMemoryExport exported_memory = color_attachment->export_memory(
 531 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT);
 532 openxr_data.gpu.image_handle = exported_memory.handle;
 533 openxr_data.gpu.new_handle = true;
 534 openxr_data.gpu.image_format = to_vk_format(color_attachment->device_format_get());
 535 openxr_data.gpu.memory_size = exported_memory.memory_size;
 536 openxr_data.gpu.memory_offset = exported_memory.memory_offset;
 537 openxr_data.gpu.vk_image_blender = color_attachment->vk_image_handle();
 538 }
 539 break;
 540 }
 541
 /* GPU path (Windows): same as the FD path but with an opaque Win32 handle. */
 542 case GHOST_kVulkanXRModeWin32: {
 546 if (openxr_data.gpu.vk_image_blender != color_attachment->vk_image_handle()) {
 547 VKMemoryExport exported_memory = color_attachment->export_memory(
 548 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT);
 549 openxr_data.gpu.image_handle = exported_memory.handle;
 550 openxr_data.gpu.new_handle = true;
 551 openxr_data.gpu.image_format = to_vk_format(color_attachment->device_format_get());
 552 openxr_data.gpu.memory_size = exported_memory.memory_size;
 553 openxr_data.gpu.memory_offset = exported_memory.memory_offset;
 554 openxr_data.gpu.vk_image_blender = color_attachment->vk_image_handle();
 555 }
 556 break;
 557 }
 558 }
 559}
560
561void VKContext::openxr_release_framebuffer_image_handler(GHOST_VulkanOpenXRData &openxr_data)
562{
563 switch (openxr_data.data_transfer_mode) {
564 case GHOST_kVulkanXRModeCPU:
565 MEM_freeN(openxr_data.cpu.image_data);
566 openxr_data.cpu.image_data = nullptr;
567 break;
568
569 case GHOST_kVulkanXRModeFD:
570 /* Nothing to do as import of the handle by the XrInstance removes the ownership of the
571 * handle. Ref
572 * https://registry.khronos.org/vulkan/specs/latest/man/html/VK_KHR_external_memory_fd.html#_issues
573 */
574 break;
575
576 case GHOST_kVulkanXRModeWin32:
577#ifdef _WIN32
578 if (openxr_data.gpu.new_handle) {
579 /* Exported handle isn't consumed during import and should be freed after use. */
580 CloseHandle(HANDLE(openxr_data.gpu.image_handle));
581 openxr_data.gpu.image_handle = 0;
582 }
583#endif
584 break;
585 }
586}
587
589
590} // namespace blender::gpu
#define BLI_assert(a)
Definition BLI_assert.h:46
MINLINE uint max_uu(uint a, uint b)
#define UNPACK2(a)
#define ELEM(...)
GHOST C-API function and type declarations.
void GPU_debug_group_end()
Definition gpu_debug.cc:33
void GPU_debug_group_begin(const char *name)
Definition gpu_debug.cc:22
#define GPU_ATTACHMENT_TEXTURE(_texture)
GPUPrimType
@ GPU_PRIM_LINE_LOOP
@ GPU_PRIM_LINE_STRIP_ADJ
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
@ GPU_PRIM_LINES_ADJ
@ GPU_PRIM_LINE_STRIP
void GPU_shader_uniform_1f(blender::gpu::Shader *sh, const char *name, float value)
void GPU_shader_bind(blender::gpu::Shader *shader, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
eGPUDataFormat
@ GPU_DATA_HALF_FLOAT
@ GPU_DATA_UBYTE
@ GPU_TEXTURE_USAGE_SHADER_READ
@ GPU_TEXTURE_USAGE_ATTACHMENT
blender::gpu::Texture * GPU_texture_create_2d(const char *name, int width, int height, int mip_len, blender::gpu::TextureFormat format, eGPUTextureUsage usage, const float *data)
void GPU_texture_free(blender::gpu::Texture *texture)
blender::gpu::Texture * color_tex(int slot) const
virtual void image_bind(Texture *tex, int unit)=0
static VKBackend & get()
Definition vk_backend.hh:91
void compute_dispatch(int groups_x_len, int groups_y_len, int groups_z_len) override
VkBuffer vk_handle() const
Definition vk_buffer.hh:101
void debug_unbind_all_ssbo() override
render_graph::VKResourceAccessInfo & reset_and_get_access_info()
void finish() override
static void openxr_acquire_framebuffer_image_callback(GHOST_VulkanOpenXRData *data)
void specialization_constants_set(const shader::SpecializationConstants *constants_state)
void deactivate() override
void memory_statistics_get(int *r_total_mem_kb, int *r_free_mem_kb) override
VKContext(void *ghost_window, void *ghost_context)
Definition vk_context.cc:29
VKDiscardPool discard_pool
Definition vk_context.hh:77
static void swap_buffer_acquired_callback()
static void openxr_release_framebuffer_image_callback(GHOST_VulkanOpenXRData *data)
static void swap_buffer_draw_callback(const GHOST_VulkanSwapChainData *data)
void flush() override
VKFrameBuffer * active_framebuffer_get() const
void activate() override
const render_graph::VKRenderGraph & render_graph() const
Definition vk_context.hh:79
bool has_active_framebuffer() const
void debug_unbind_all_ubo() override
void activate_framebuffer(VKFrameBuffer &framebuffer)
VKStateManager & state_manager_get() const
VKDescriptorSetTracker & descriptor_set_get()
std::unique_ptr< VKStreamingBuffer > & get_or_create_streaming_buffer(VKBuffer &buffer, VkDeviceSize min_offset_alignment)
void update_pipeline_data(render_graph::VKPipelineData &r_pipeline_data)
static VKContext * get()
void begin_frame() override
VKDescriptorPools & descriptor_pools_get()
void end_frame() override
TimelineValue flush_render_graph(RenderGraphFlushFlags flags, VkPipelineStageFlags wait_dst_stage_mask=VK_PIPELINE_STAGE_NONE, VkSemaphore wait_semaphore=VK_NULL_HANDLE, VkSemaphore signal_semaphore=VK_NULL_HANDLE, VkFence signal_fence=VK_NULL_HANDLE)
const VKExtensions & extensions_get() const
Definition vk_device.hh:371
render_graph::VKRenderGraph * render_graph_new()
VKThreadData & current_thread_data()
Definition vk_device.cc:461
TimelineValue render_graph_submit(render_graph::VKRenderGraph *render_graph, VKDiscardPool &context_discard_pool, bool submit_to_device, bool wait_for_completion, VkPipelineStageFlags wait_dst_stage_mask, VkSemaphore wait_semaphore, VkSemaphore signal_semaphore, VkFence signal_fence)
void context_unregister(VKContext &context)
Definition vk_device.cc:482
void memory_statistics_get(int *r_total_mem_kb, int *r_free_mem_kb) const
Definition vk_device.cc:505
void rendering_end(VKContext &context)
const VKPushConstants::Layout & push_constants_layout_get() const
VkPipeline ensure_and_get_graphics_pipeline(GPUPrimType primitive, VKVertexAttributeObject &vao, VKStateManager &state_manager, VKFrameBuffer &framebuffer, shader::SpecializationConstants &constants_state)
VkPipeline ensure_and_get_compute_pipeline(const shader::SpecializationConstants &constants_state)
VKPushConstants push_constants
Definition vk_shader.hh:55
bool has_descriptor_set() const
Definition vk_shader.hh:107
VkPipelineLayout vk_pipeline_layout
Definition vk_shader.hh:54
const VKShaderInterface & interface_get() const
VKMemoryExport export_memory(VkExternalMemoryHandleTypeFlagBits handle_type)
TextureFormat device_format_get() const
VkImage vk_image_handle() const
void * read(int mip, eGPUDataFormat format) override
@ GPU_FB_COLOR_ATTACHMENT0
void immDeactivate()
void immActivate()
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
static ColorTheme4f get_debug_group_color(StringRefNull name)
uint64_t TimelineValue
Definition vk_common.hh:36
static Context * unwrap(GPUContext *ctx)
VkFormat to_vk_format(const TextureFormat format)
Definition vk_common.cc:136
GPUCapabilities GCaps
TextureFormat to_gpu_format(const VkFormat format)
Definition vk_common.cc:120
VecBase< T, Size > divide_ceil(const VecBase< T, Size > &a, const VecBase< T, Size > &b)
VecBase< int32_t, 2 > int2