Blender V4.3
vk_texture.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2022 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
9#include "GPU_capabilities.hh"
10
11#include "vk_texture.hh"
12
13#include "vk_buffer.hh"
14#include "vk_context.hh"
15#include "vk_data_conversion.hh"
16#include "vk_framebuffer.hh"
17#include "vk_memory.hh"
18#include "vk_pixel_buffer.hh"
19#include "vk_shader.hh"
21#include "vk_state_manager.hh"
22#include "vk_vertex_buffer.hh"
23
24#include "BLI_math_vector.hh"
25
26#include "BKE_global.hh"
27
28namespace blender::gpu {
29
30static VkImageAspectFlags to_vk_image_aspect_single_bit(const VkImageAspectFlags format,
31 bool stencil)
32{
33 switch (format) {
34 case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
35 return (stencil) ? VK_IMAGE_ASPECT_STENCIL_BIT : VK_IMAGE_ASPECT_DEPTH_BIT;
36 default:
37 break;
38 }
39 return format;
40}
41
/* Destructor body (the signature line is not visible in this extracted view).
 * If this texture owns a VkImage with a backing VMA allocation, both are handed
 * to the device discard pool and the handles are cleared. */
{
  if (vk_image_ != VK_NULL_HANDLE && allocation_ != VK_NULL_HANDLE) {
    VKDevice &device = VKBackend::get().device;
    /* Deferred destruction via the discard pool — presumably so the image can
     * outlive in-flight GPU work; confirm against VKDiscardPool. */
    device.discard_pool_for_current_thread().discard_image(vk_image_, allocation_);
    vk_image_ = VK_NULL_HANDLE;
    allocation_ = VK_NULL_HANDLE;
  }
}
51
52void VKTexture::init(VkImage vk_image, VkImageLayout layout, eGPUTextureFormat texture_format)
53{
54 vk_image_ = vk_image;
55 current_layout_ = layout;
56 format_ = texture_format;
57 device_format_ = texture_format;
58}
59
/* VKTexture::generate_mipmap body. NOTE(review): the signature line, the
 * compressed-format `if` condition and the `update_mipmaps` node declaration
 * are missing from this extracted view; the remaining code is kept as-is. */
{
  BLI_assert(!is_texture_view());
  /* Nothing to generate when there is only the base level. */
  if (mipmaps_ <= 1) {
    return;
  }
  /* Allow users to provide mipmaps stored in compressed textures.
   * Skip generating mipmaps to avoid overriding the existing ones. */
  return;
  }

  VKContext &context = *VKContext::get();
  update_mipmaps.vk_image = vk_image_handle();
  update_mipmaps.l0_size = int3(1);
  mip_size_get(0, update_mipmaps.l0_size);
  /* Array layers are not spatial dimensions: collapse them out of the
   * level-0 size for array texture types. */
  if (ELEM(this->type_get(), GPU_TEXTURE_1D_ARRAY)) {
    update_mipmaps.l0_size.y = 1;
    update_mipmaps.l0_size.z = 1;
  }
  else if (ELEM(this->type_get(), GPU_TEXTURE_2D_ARRAY)) {
    update_mipmaps.l0_size.z = 1;
  }
  update_mipmaps.vk_image_aspect = to_vk_image_aspect_flag_bits(device_format_);
  update_mipmaps.mipmaps = mipmaps_;
  update_mipmaps.layer_count = vk_layer_count(1);
  context.render_graph.add_node(update_mipmaps);
}
89
/**
 * Record a whole-image copy from this texture into \a dst_texture for the
 * given aspects (mip level 0, all layers).
 *
 * NOTE(review): the declaration line of `copy_image` is missing from this
 * extracted view — presumably a render-graph copy-image CreateInfo; confirm
 * against the repository.
 */
void VKTexture::copy_to(VKTexture &dst_texture, VkImageAspectFlags vk_image_aspect)
{
  copy_image.node_data.src_image = vk_image_handle();
  copy_image.node_data.dst_image = dst_texture.vk_image_handle();
  copy_image.node_data.region.srcSubresource.aspectMask = vk_image_aspect;
  copy_image.node_data.region.srcSubresource.mipLevel = 0;
  copy_image.node_data.region.srcSubresource.layerCount = vk_layer_count(1);
  copy_image.node_data.region.dstSubresource.aspectMask = vk_image_aspect;
  copy_image.node_data.region.dstSubresource.mipLevel = 0;
  copy_image.node_data.region.dstSubresource.layerCount = vk_layer_count(1);
  /* Extent of mip level 0: the whole image is copied. */
  copy_image.node_data.region.extent = vk_extent_3d(0);

  VKContext &context = *VKContext::get();
  context.render_graph.add_node(copy_image);
}
107
/* VKTexture::copy_to(Texture *tex) override body (signature line missing from
 * this extracted view). Validates that source and destination match, then
 * copies every aspect of the image. */
{
  VKTexture *dst = unwrap(tex);
  VKTexture *src = this;
  BLI_assert(dst);
  /* Copies are only valid between textures of identical size and device format. */
  BLI_assert(src->w_ == dst->w_ && src->h_ == dst->h_ && src->d_ == dst->d_);
  BLI_assert(src->device_format_ == dst->device_format_);
  BLI_assert(!is_texture_view());

  copy_to(*dst, to_vk_image_aspect_flag_bits(device_format_));
}
120
/* VKTexture::clear body. NOTE(review): the signature line and the declaration
 * of `clear_color_image` are missing from this extracted view. Records a
 * clear-color node covering this texture's layer and mip ranges. */
{
  clear_color_image.vk_clear_color_value = to_vk_clear_color_value(format, data);
  clear_color_image.vk_image = vk_image_handle();
  clear_color_image.vk_image_subresource_range.aspectMask = to_vk_image_aspect_flag_bits(
      device_format_);

  /* Clear all layers and mip levels this texture (or view) exposes. */
  IndexRange layers = layer_range();
  clear_color_image.vk_image_subresource_range.baseArrayLayer = layers.start();
  clear_color_image.vk_image_subresource_range.layerCount = layers.size();
  IndexRange levels = mip_map_range();
  clear_color_image.vk_image_subresource_range.baseMipLevel = levels.start();
  clear_color_image.vk_image_subresource_range.levelCount = levels.size();

  VKContext &context = *VKContext::get();

  context.render_graph.add_node(clear_color_image);
}
140
/* VKTexture::clear_depth_stencil. NOTE(review): the first signature line and
 * part of the requested-aspect computation are missing from this extracted
 * view. Clears the requested depth/stencil aspects of the whole image; exits
 * early when the texture has none of the requested aspects. */
                                   float clear_depth,
                                   uint clear_stencil)
{
  VkImageAspectFlags vk_image_aspect_device = to_vk_image_aspect_flag_bits(device_format_get());
  VkImageAspectFlags vk_image_aspect = to_vk_image_aspect_flag_bits(
      vk_image_aspect_device;
  if (vk_image_aspect == VK_IMAGE_ASPECT_NONE) {
    /* Early exit: texture doesn't have any aspect that needs to be cleared. */
    return;
  }

  render_graph::VKClearDepthStencilImageNode::CreateInfo clear_depth_stencil_image = {};
  clear_depth_stencil_image.node_data.vk_image = vk_image_handle();
  clear_depth_stencil_image.vk_image_aspects = vk_image_aspect_device;
  clear_depth_stencil_image.node_data.vk_clear_depth_stencil_value.depth = clear_depth;
  clear_depth_stencil_image.node_data.vk_clear_depth_stencil_value.stencil = clear_stencil;
  clear_depth_stencil_image.node_data.vk_image_subresource_range.aspectMask = vk_image_aspect;
  /* Every layer and every mip level is cleared. */
  clear_depth_stencil_image.node_data.vk_image_subresource_range.layerCount =
      VK_REMAINING_ARRAY_LAYERS;
  clear_depth_stencil_image.node_data.vk_image_subresource_range.levelCount =
      VK_REMAINING_MIP_LEVELS;

  VKContext &context = *VKContext::get();
  context.render_graph.add_node(clear_depth_stencil_image);
}
169
170void VKTexture::swizzle_set(const char swizzle_mask[4])
171{
172 memcpy(image_view_info_.swizzle, swizzle_mask, 4);
173}
174
/* VKTexture::mip_range_set body (signature line missing from this extracted
 * view): stores the inclusive mip range later reported by mip_map_range(). */
{
  mip_min_ = min;
  mip_max_ = max;
}
180
/* VKTexture::read_sub. NOTE(review): the first signature line, the declaration
 * of `copy_image_to_buffer` and the `convert_device_to_host(` call line are
 * missing from this extracted view. Copies the requested region into a staging
 * buffer, submits for read-back and converts the pixels into `r_data`. */
    int mip, eGPUDataFormat format, const int region[6], const IndexRange layers, void *r_data)
{
  /* `region` holds {x_min, y_min, z_min, x_max, y_max, z_max}. */
  const int3 extent = int3(region[3] - region[0], region[4] - region[1], region[5] - region[2]);
  size_t sample_len = extent.x * extent.y * extent.z * layers.size();

  /* Vulkan images cannot be directly mapped to host memory and requires a staging buffer. */
  VKBuffer staging_buffer;
  size_t device_memory_size = sample_len * to_bytesize(device_format_);
  staging_buffer.create(device_memory_size, GPU_USAGE_DYNAMIC, VK_BUFFER_USAGE_TRANSFER_DST_BIT);

  copy_image_to_buffer.src_image = vk_image_handle();
  copy_image_to_buffer.dst_buffer = staging_buffer.vk_handle();
  copy_image_to_buffer.region.imageOffset.x = region[0];
  copy_image_to_buffer.region.imageOffset.y = region[1];
  copy_image_to_buffer.region.imageOffset.z = region[2];
  copy_image_to_buffer.region.imageExtent.width = extent.x;
  copy_image_to_buffer.region.imageExtent.height = extent.y;
  copy_image_to_buffer.region.imageExtent.depth = extent.z;
  /* Depth/stencil images can only be copied one aspect at a time; read the
   * depth aspect here. */
  copy_image_to_buffer.region.imageSubresource.aspectMask = to_vk_image_aspect_single_bit(
      to_vk_image_aspect_flag_bits(device_format_), false);
  copy_image_to_buffer.region.imageSubresource.mipLevel = mip;
  copy_image_to_buffer.region.imageSubresource.baseArrayLayer = layers.start();
  copy_image_to_buffer.region.imageSubresource.layerCount = layers.size();

  VKContext &context = *VKContext::get();
  /* Stop any active rendering before recording the transfer, then submit and
   * wait so the staging buffer is readable. */
  context.rendering_end();
  context.render_graph.add_node(copy_image_to_buffer);
  context.descriptor_set_get().upload_descriptor_sets();
  context.render_graph.submit_buffer_for_read(staging_buffer.vk_handle());

      r_data, staging_buffer.mapped_memory_get(), sample_len, format, format_, device_format_);
}
216
/* VKTexture::read body (signature line missing from this extracted view):
 * allocates host memory sized for the requested mip level and fills it via
 * read_sub(). The caller owns (and must free) the returned buffer. */
{

  int mip_size[3] = {1, 1, 1};
  VkImageType vk_image_type = to_vk_image_type(type_);
  mip_size_get(mip, mip_size);
  /* Collapse dimensions that don't exist for this image type. */
  switch (vk_image_type) {
    case VK_IMAGE_TYPE_1D: {
      mip_size[1] = 1;
      mip_size[2] = 1;
    } break;
    case VK_IMAGE_TYPE_2D: {
      mip_size[2] = 1;
    } break;
    case VK_IMAGE_TYPE_3D:
    default:
      break;
  }

  /* Guard against a zero depth so the sample count below stays valid. */
  if (mip_size[2] == 0) {
    mip_size[2] = 1;
  }
  IndexRange layers = IndexRange(layer_offset_, vk_layer_count(1));
  size_t sample_len = mip_size[0] * mip_size[1] * mip_size[2] * layers.size();
  size_t host_memory_size = sample_len * to_bytesize(format_, format);

  void *data = MEM_mallocN(host_memory_size, __func__);
  /* Read the full extent of the mip level. */
  int region[6] = {0, 0, 0, mip_size[0], mip_size[1], mip_size[2]};
  read_sub(mip, format, region, layers, data);
  return data;
}
249
/* VKTexture::update_sub. NOTE(review): the first signature line, one `if`
 * condition (presumably the 2D/2D-array case — confirm against the repository),
 * the `convert_host_to_device(` call line and the declaration of
 * `copy_buffer_to_image` are missing from this extracted view. Converts the
 * host data into a staging buffer and records a buffer-to-image copy. */
    int mip, int offset_[3], int extent_[3], eGPUDataFormat format, const void *data)
{
  BLI_assert(!is_texture_view());

  const bool is_compressed = (format_flag_ & GPU_FORMAT_COMPRESSED);

  int3 extent = int3(extent_[0], max_ii(extent_[1], 1), max_ii(extent_[2], 1));
  int3 offset = int3(offset_[0], offset_[1], offset_[2]);
  int layers = 1;
  int start_layer = 0;
  /* For layered types the layer dimension is carried in y (1D) or z; move it
   * into the layer count/offset and collapse the spatial extent. */
  if (type_ & GPU_TEXTURE_1D) {
    layers = extent.y;
    start_layer = offset.y;
    extent.y = 1;
    extent.z = 1;
    offset.y = 0;
    offset.z = 0;
  }
    layers = extent.z;
    start_layer = offset.z;
    extent.z = 1;
    offset.z = 0;
  }

  /* Vulkan images cannot be directly mapped to host memory and requires a staging buffer. */
  VKContext &context = *VKContext::get();
  size_t sample_len = size_t(extent.x) * extent.y * extent.z * layers;
  size_t device_memory_size = sample_len * to_bytesize(device_format_);

  if (is_compressed) {
    BLI_assert_msg(extent.z == 1, "Compressed 3D textures are not supported");
    /* Block-compressed formats are stored as 4x4 blocks; size the staging
     * buffer by block count instead of per-texel. */
    size_t block_size = to_block_size(device_format_);
    size_t blocks_x = divide_ceil_u(extent.x, 4);
    size_t blocks_y = divide_ceil_u(extent.y, 4);
    device_memory_size = blocks_x * blocks_y * block_size;
    /* `convert_buffer` later on will use `sample_len * to_bytesize(device_format_)`
     * as total memory size calculation. Make that work for compressed case. */
    sample_len = device_memory_size / to_bytesize(device_format_);
  }

  VKBuffer staging_buffer;
  staging_buffer.create(device_memory_size, GPU_USAGE_DYNAMIC, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
      staging_buffer.mapped_memory_get(), data, sample_len, format, format_, device_format_);

  copy_buffer_to_image.src_buffer = staging_buffer.vk_handle();
  copy_buffer_to_image.dst_image = vk_image_handle();
  copy_buffer_to_image.region.imageExtent.width = extent.x;
  copy_buffer_to_image.region.imageExtent.height = extent.y;
  copy_buffer_to_image.region.imageExtent.depth = extent.z;
  /* Honor GPU_unpack_row_length for sub-rectangle uploads. */
  copy_buffer_to_image.region.bufferRowLength =
      context.state_manager_get().texture_unpack_row_length_get();
  copy_buffer_to_image.region.imageOffset.x = offset.x;
  copy_buffer_to_image.region.imageOffset.y = offset.y;
  copy_buffer_to_image.region.imageOffset.z = offset.z;
  copy_buffer_to_image.region.imageSubresource.aspectMask = to_vk_image_aspect_single_bit(
      to_vk_image_aspect_flag_bits(device_format_), false);
  copy_buffer_to_image.region.imageSubresource.mipLevel = mip;
  copy_buffer_to_image.region.imageSubresource.baseArrayLayer = start_layer;
  copy_buffer_to_image.region.imageSubresource.layerCount = layers;

  context.render_graph.add_node(copy_buffer_to_image);
}
316
/* Pixel-buffer variant of update_sub. NOTE(review): the eGPUDataFormat
 * parameter line is missing from this extracted view. Maps the pixel buffer
 * and forwards to the pointer-based update_sub() for mip level 0. */
void VKTexture::update_sub(int offset_[3],
                           int extent_[3],
                           GPUPixelBuffer *pixbuf)
{
  VKPixelBuffer &pixel_buffer = *unwrap(unwrap(pixbuf));
  update_sub(0, offset_, extent_, format, pixel_buffer.map());
}
325
/* VKTexture::gl_bindcode_get body (signature line missing from this extracted
 * view): GL bind-codes have no Vulkan equivalent, so 0 is always returned. */
{
  /* TODO(fclem): Legacy. Should be removed at some point. */

  return 0;
}
332
/* VKTexture::init_internal body (signature line missing from this extracted
 * view): chooses a device-supported format for the requested one, allocates
 * the image and initializes the full mip range. Returns false on failure. */
{
  const VKDevice &device = VKBackend::get().device;
  const VKWorkarounds &workarounds = device.workarounds_get();
  device_format_ = format_;
  /* Widen 24-bit depth formats on devices flagged with the
   * not_aligned_pixel_formats workaround. */
  if (device_format_ == GPU_DEPTH_COMPONENT24 && workarounds.not_aligned_pixel_formats) {
    device_format_ = GPU_DEPTH_COMPONENT32F;
  }
  if (device_format_ == GPU_DEPTH24_STENCIL8 && workarounds.not_aligned_pixel_formats) {
    device_format_ = GPU_DEPTH32F_STENCIL8;
  }
  /* R16G16F16 formats are typically not supported (<1%) but R16G16B16A16 is
   * typically supported (+90%). */
  if (device_format_ == GPU_RGB16F) {
    device_format_ = GPU_RGBA16F;
  }
  if (device_format_ == GPU_RGB32F) {
    device_format_ = GPU_RGBA32F;
  }

  if (!allocate()) {
    return false;
  }
  this->mip_range_set(0, mipmaps_ - 1);

  return true;
}
360
/* VKTexture::init_internal(VertBuf...) body (signature line missing from this
 * extracted view): initializes a buffer-backed texture by recording the source
 * vertex buffer; no image allocation happens here. */
{
  BLI_assert(source_buffer_ == nullptr);
  device_format_ = format_;
  source_buffer_ = unwrap(vbo);
  return true;
}
368
369bool VKTexture::init_internal(GPUTexture *src, int mip_offset, int layer_offset, bool use_stencil)
370{
371 BLI_assert(source_texture_ == nullptr);
372 BLI_assert(src);
373
374 VKTexture *texture = unwrap(unwrap(src));
375 source_texture_ = texture;
376 device_format_ = texture->device_format_;
377 mip_min_ = mip_offset;
378 mip_max_ = mip_offset;
379 layer_offset_ = layer_offset;
380 use_stencil_ = use_stencil;
381
382 return true;
383}
384
385bool VKTexture::is_texture_view() const
386{
387 return source_texture_ != nullptr;
388}
389
390static VkImageUsageFlags to_vk_image_usage(const eGPUTextureUsage usage,
391 const eGPUTextureFormatFlag format_flag)
392{
393 VkImageUsageFlags result = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
394 VK_IMAGE_USAGE_SAMPLED_BIT;
395 if (usage & GPU_TEXTURE_USAGE_SHADER_READ) {
396 result |= VK_IMAGE_USAGE_STORAGE_BIT;
397 }
398 if (usage & GPU_TEXTURE_USAGE_SHADER_WRITE) {
399 result |= VK_IMAGE_USAGE_STORAGE_BIT;
400 }
401 if (usage & GPU_TEXTURE_USAGE_ATTACHMENT) {
402 if (format_flag & GPU_FORMAT_COMPRESSED) {
403 /* These formats aren't supported as an attachment. When using GPU_TEXTURE_USAGE_DEFAULT they
404 * are still being evaluated to be attachable. So we need to skip them. */
405 }
406 else {
407 if (format_flag & (GPU_FORMAT_DEPTH | GPU_FORMAT_STENCIL)) {
408 result |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
409 }
410 else {
411 result |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
412 }
413 }
414 }
415 if (usage & GPU_TEXTURE_USAGE_HOST_READ) {
416 result |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
417 }
418
419 /* Disable some usages based on the given format flag to support more devices. */
420 if (format_flag & GPU_FORMAT_SRGB) {
421 /* NVIDIA devices don't create SRGB textures when it storage bit is set. */
422 result &= ~VK_IMAGE_USAGE_STORAGE_BIT;
423 }
424 if (format_flag & (GPU_FORMAT_DEPTH | GPU_FORMAT_STENCIL)) {
425 /* NVIDIA devices don't create depth textures when it storage bit is set. */
426 result &= ~VK_IMAGE_USAGE_STORAGE_BIT;
427 }
428
429 return result;
430}
431
432static VkImageCreateFlags to_vk_image_create(const eGPUTextureType texture_type,
433 const eGPUTextureFormatFlag format_flag,
434 const eGPUTextureUsage usage)
435{
436 VkImageCreateFlags result = 0;
437
438 if (ELEM(texture_type, GPU_TEXTURE_CUBE, GPU_TEXTURE_CUBE_ARRAY)) {
439 result |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
440 }
441
442 /* sRGB textures needs to be mutable as they can be used as non-sRGB frame-buffer attachments. */
443 if (usage & GPU_TEXTURE_USAGE_ATTACHMENT && format_flag & GPU_FORMAT_SRGB) {
444 result |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
445 }
446
447 return result;
448}
449
/**
 * Allocate the VkImage + VMA memory for this (non-view) texture.
 *
 * NOTE(review): the declaration of `limit`, the `image_info.usage` assignment
 * and one argument line of `device.resources.add_image()` are missing from
 * this extracted view. Returns false when the extent exceeds the device limit
 * or creation fails.
 */
bool VKTexture::allocate()
{
  BLI_assert(vk_image_ == VK_NULL_HANDLE);
  BLI_assert(!is_texture_view());

  VkExtent3D vk_extent = vk_extent_3d(0);
  /* NOTE(review): `vk_extent.depth` is compared twice while `vk_extent.width`
   * is never checked — looks like a bug; confirm against the repository. */
  if (vk_extent.depth > limit || vk_extent.height > limit || vk_extent.depth > limit) {
    return false;
  }

  VKDevice &device = VKBackend::get().device;
  VkImageCreateInfo image_info = {};
  image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
  image_info.flags = to_vk_image_create(type_, format_flag_, usage_get());
  image_info.imageType = to_vk_image_type(type_);
  image_info.extent = vk_extent;
  image_info.mipLevels = max_ii(mipmaps_, 1);
  image_info.arrayLayers = vk_layer_count(1);
  image_info.format = to_vk_format(device_format_);
  /* Some platforms (NVIDIA) requires that attached textures are always tiled optimal.
   *
   * As image data are always accessed via an staging buffer we can enable optimal tiling for all
   * texture. Tilings based on actual usages should be done in `VKFramebuffer`.
   */
  image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
  image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
  image_info.samples = VK_SAMPLE_COUNT_1_BIT;

  VkResult result;
  /* In debug builds, verify up-front that the device supports this exact image
   * configuration so failures are reported instead of silently crashing. */
  if (G.debug & G_DEBUG_GPU) {
    VkImageFormatProperties image_format = {};
    result = vkGetPhysicalDeviceImageFormatProperties(device.physical_device_get(),
                                                      image_info.format,
                                                      image_info.imageType,
                                                      image_info.tiling,
                                                      image_info.usage,
                                                      image_info.flags,
                                                      &image_format);
    if (result != VK_SUCCESS) {
      printf("Image type not supported on device.\n");
      return false;
    }
  }

  VmaAllocationCreateInfo allocCreateInfo = {};
  allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
  allocCreateInfo.priority = 1.0f;
  result = vmaCreateImage(device.mem_allocator_get(),
                          &image_info,
                          &allocCreateInfo,
                          &vk_image_,
                          &allocation_,
                          nullptr);
  if (result != VK_SUCCESS) {
    return false;
  }
  debug::object_label(vk_image_, name_);

  /* Register the new image with the render-graph resource tracker. */
  device.resources.add_image(vk_image_,
                             image_info.arrayLayers,
                             VK_IMAGE_LAYOUT_UNDEFINED,
                             name_);

  return result == VK_SUCCESS;
}
519
520/* -------------------------------------------------------------------- */
524IndexRange VKTexture::mip_map_range() const
525{
526 return IndexRange(mip_min_, mip_max_ - mip_min_ + 1);
527}
528
529IndexRange VKTexture::layer_range() const
530{
531 if (is_texture_view()) {
532 return IndexRange(layer_offset_, layer_count());
533 }
534 else {
535 return IndexRange(
536 0, ELEM(type_, GPU_TEXTURE_CUBE, GPU_TEXTURE_CUBE_ARRAY) ? d_ : VK_REMAINING_ARRAY_LAYERS);
537 }
538}
539
/**
 * Layer count to use in Vulkan structures; \a non_layered_value is returned for
 * non-layered texture types. NOTE(review): one branch line of the trailing
 * ternary (presumably the array-texture case) is missing from this extracted
 * view — confirm against the repository.
 */
int VKTexture::vk_layer_count(int non_layered_value) const
{
  if (is_texture_view()) {
    return layer_count();
  }
  /* Cube maps carry their layer count in `d_`. */
  return type_ == GPU_TEXTURE_CUBE ? d_ :
                                     non_layered_value;
}
549
/**
 * Extent of the given mip level as a VkExtent3D.
 * NOTE(review): the two `if` condition lines that collapse unused dimensions
 * per texture type are missing from this extracted view.
 */
VkExtent3D VKTexture::vk_extent_3d(int mip_level) const
{
  int extent[3] = {1, 1, 1};
  mip_size_get(mip_level, extent);
    extent[2] = 1;
  }
    extent[1] = 1;
    extent[2] = 1;
  }

  VkExtent3D result{uint32_t(extent[0]), uint32_t(extent[1]), uint32_t(extent[2])};
  return result;
}
565
/* VKTexture::image_view_get(const VKImageViewInfo &) body (signature line
 * missing from this extracted view): returns a cached image view matching
 * `info`, creating and caching a new one when none exists. */
{
  if (is_texture_view()) {
    /* TODO: API should be improved as we don't support image view specialization.
     * In the current API this is still possible to setup when using attachments. */
    return image_view_get(info.arrayed);
  }
  /* Linear search through this texture's view cache. */
  for (const VKImageView &image_view : image_views_) {
    if (image_view.info == info) {
      return image_view;
    }
  }

  image_views_.append(VKImageView(*this, info, name_));
  return image_views_.last();
}
582
/* VKTexture::image_view_get(arrayed) body. NOTE(review): the signature line
 * and the slice() argument line are missing from this extracted view. Fills
 * image_view_info_ from the current texture state and resolves the view,
 * delegating to the source texture when this is a view. */
{
  image_view_info_.mip_range = mip_map_range();
  image_view_info_.use_srgb = true;
  image_view_info_.use_stencil = use_stencil_;
  image_view_info_.arrayed = arrayed;
  image_view_info_.layer_range = layer_range();
  /* Non-arrayed views expose a narrowed layer slice. */
  if (arrayed == VKImageViewArrayed::NOT_ARRAYED) {
    image_view_info_.layer_range = image_view_info_.layer_range.slice(
  }

  if (is_texture_view()) {
    return source_texture_->image_view_get(image_view_info_);
  }
  return image_view_get(image_view_info_);
}
600
603} // namespace blender::gpu
@ G_DEBUG_GPU
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
MINLINE uint divide_ceil_u(uint a, uint b)
MINLINE int max_ii(int a, int b)
unsigned int uint
#define UNUSED_VARS_NDEBUG(...)
#define ELEM(...)
int GPU_max_texture_3d_size()
int GPU_max_texture_size()
eGPUFrameBufferBits
@ GPU_DEPTH_BIT
@ GPU_STENCIL_BIT
eGPUDataFormat
eGPUTextureUsage
@ GPU_TEXTURE_USAGE_SHADER_READ
@ GPU_TEXTURE_USAGE_SHADER_WRITE
@ GPU_TEXTURE_USAGE_HOST_READ
@ GPU_TEXTURE_USAGE_ATTACHMENT
eGPUTextureFormat
@ GPU_DEPTH32F_STENCIL8
@ GPU_DEPTH24_STENCIL8
@ GPU_RGB32F
@ GPU_DEPTH_COMPONENT24
@ GPU_RGB16F
@ GPU_DEPTH_COMPONENT32F
@ GPU_USAGE_DYNAMIC
constexpr int64_t size() const
constexpr int64_t start() const
constexpr IndexRange slice(int64_t start, int64_t size) const
eGPUTextureFormatFlag format_flag_
eGPUTextureUsage gpu_image_usage_flags_
eGPUTextureUsage usage_get() const
char name_[DEBUG_NAME_LEN]
eGPUTextureType type_get() const
void mip_size_get(int mip, int r_size[3]) const
static VKBackend & get()
Definition vk_backend.hh:92
bool create(size_t size, GPUUsageType usage, VkBufferUsageFlags buffer_usage, bool is_host_visible=true)
Definition vk_buffer.cc:53
VkBuffer vk_handle() const
Definition vk_buffer.hh:69
void * mapped_memory_get() const
Definition vk_buffer.cc:151
static VKContext * get()
Definition vk_context.hh:97
VKDiscardPool & discard_pool_for_current_thread()
Definition vk_device.cc:399
const VKWorkarounds & workarounds_get() const
Definition vk_device.hh:286
void discard_image(VkImage vk_image, VmaAllocation vma_allocation)
void init(VkImage vk_image, VkImageLayout layout, eGPUTextureFormat texture_format)
Definition vk_texture.cc:52
uint gl_bindcode_get() const override
void copy_to(Texture *tex) override
void clear(eGPUDataFormat format, const void *data) override
virtual ~VKTexture() override
Definition vk_texture.cc:42
void generate_mipmap() override
Definition vk_texture.cc:60
eGPUTextureFormat device_format_get() const
bool init_internal() override
void update_sub(int mip, int offset[3], int extent[3], eGPUDataFormat format, const void *data) override
VkImage vk_image_handle() const
Definition vk_texture.hh:99
const VKImageView & image_view_get(const VKImageViewInfo &info)
void swizzle_set(const char swizzle_mask[4]) override
void clear_depth_stencil(const eGPUFrameBufferBits buffer, float clear_depth, uint clear_stencil)
void mip_range_set(int min, int max) override
void read_sub(int mip, eGPUDataFormat format, const int region[6], IndexRange layers, void *r_data)
void * read(int mip, eGPUDataFormat format) override
additional_info("compositor_sum_float_shared") .push_constant(Type additional_info("compositor_sum_float_shared") .push_constant(Type GPU_RGBA32F
local_group_size(16, 16) .push_constant(Type texture
#define printf
format
void *(* MEM_mallocN)(size_t len, const char *str)
Definition mallocn.cc:44
#define G(x, y, z)
void object_label(GLenum type, GLuint object, const char *name)
Definition gl_debug.cc:344
static Context * unwrap(GPUContext *ctx)
void convert_host_to_device(void *dst_buffer, const void *src_buffer, size_t buffer_size, eGPUDataFormat host_format, eGPUTextureFormat host_texture_format, eGPUTextureFormat device_format)
size_t to_block_size(eGPUTextureFormat data_type)
static VkImageAspectFlags to_vk_image_aspect_single_bit(const VkImageAspectFlags format, bool stencil)
Definition vk_texture.cc:30
void convert_device_to_host(void *dst_buffer, const void *src_buffer, size_t buffer_size, eGPUDataFormat host_format, eGPUTextureFormat host_texture_format, eGPUTextureFormat device_format)
size_t to_bytesize(GPUIndexBufType type)
VkImageType to_vk_image_type(const eGPUTextureType type)
Definition vk_common.cc:682
VkFormat to_vk_format(const eGPUTextureFormat format)
Definition vk_common.cc:131
VkClearColorValue to_vk_clear_color_value(const eGPUDataFormat format, const void *data)
Definition vk_common.cc:805
static VkImageCreateFlags to_vk_image_create(const eGPUTextureType texture_type, const eGPUTextureFormatFlag format_flag, const eGPUTextureUsage usage)
VkImageAspectFlags to_vk_image_aspect_flag_bits(const eGPUTextureFormat format)
Definition vk_common.cc:14
static VkImageUsageFlags to_vk_image_usage(const eGPUTextureUsage usage, const eGPUTextureFormatFlag format_flag)
VecBase< int32_t, 3 > int3
#define min(a, b)
Definition sort.c:32
unsigned int uint32_t
Definition stdint.h:80
float max
char * buffers[2]