case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
  return (stencil) ? VK_IMAGE_ASPECT_STENCIL_BIT : VK_IMAGE_ASPECT_DEPTH_BIT;
if (vk_image_ != VK_NULL_HANDLE && allocation_ != VK_NULL_HANDLE) {
  vk_image_ = VK_NULL_HANDLE;
  allocation_ = VK_NULL_HANDLE;
context.render_graph().add_node(update_mipmaps);
copy_image.node_data.region.srcSubresource.aspectMask = vk_image_aspect;
copy_image.node_data.region.srcSubresource.layerCount = vk_layer_count(1);
copy_image.node_data.region.dstSubresource.aspectMask = vk_image_aspect;
copy_image.node_data.region.dstSubresource.layerCount = vk_layer_count(1);
context.render_graph().add_node(copy_image);
BLI_assert(src->device_format_ == dst->device_format_);
float clear_depth = 0.0f;
    TextureFormat::SFLOAT_32_DEPTH_UINT_8,
    TextureFormat::SFLOAT_32_DEPTH_UINT_8);
context.render_graph().add_node(clear_color_image);
                                     std::optional<int> layer)
    vk_image_aspect_device;
if (vk_image_aspect == VK_IMAGE_ASPECT_NONE) {
    VK_REMAINING_ARRAY_LAYERS;
if (layer.has_value()) {
    VK_REMAINING_MIP_LEVELS;
context.render_graph().add_node(clear_depth_stencil_image);
memcpy(swizzle_, swizzle_mask, 4);
const int3 offset = int3(region[0], region[1], region[2]);
const int3 extent = int3(region[3] - region[0], region[4] - region[1], region[5] - region[2]);
const VkDeviceSize sample_bytesize = to_bytesize(device_format_);
const uint64_t x_bytesize = sample_bytesize * extent.x;
const uint64_t xy_bytesize = x_bytesize * extent.y;
const uint64_t xyz_bytesize = xy_bytesize * extent.z;
const uint64_t xyzl_bytesize = xyz_bytesize * layers.size();
constexpr uint64_t max_transferbuffer_bytesize = 2ul * 1024ul * 1024ul * 1024ul;
               "Transfer buffer should at least fit all pixels of a single row.");
if (xyzl_bytesize <= max_transferbuffer_bytesize) {
  transfer_regions.append(full_transfer_region);
for (int layer : layers) {
  if (xyz_bytesize <= max_transferbuffer_bytesize) {
  if (xy_bytesize <= max_transferbuffer_bytesize) {
    int64_t xy_in_single_transfer = max_transferbuffer_bytesize / xy_bytesize;
    int depths_added = 0;
    while (depths_added < extent.z) {
      int3 offset_region(offset.x, offset.y, offset.z + depths_added);
      int3 extent_region(
          extent.x, extent.y, min_ii(xy_in_single_transfer, extent.z - depths_added));
      transfer_regions.append({offset_region, extent_region, IndexRange(layer, 1)});
      depths_added += extent_region.z;
  int64_t x_in_single_transfer = max_transferbuffer_bytesize / x_bytesize;
  for (int z = 0; z < extent.z; z++) {
    while (rows_added < extent.y) {
      int3 offset_region(offset.x, offset.y + rows_added, offset.z + z);
      int3 extent_region(extent.x, min_ii(x_in_single_transfer, extent.y - rows_added), 1);
      transfer_regions.append({offset_region, extent_region, IndexRange(layer, 1)});
      rows_added += extent_region.y;
context.rendering_end();
VKBuffer &staging_buffer = staging_buffers[index];
size_t device_memory_size = sample_len * to_bytesize(device_format_);
staging_buffer.create(device_memory_size,
                      VK_BUFFER_USAGE_TRANSFER_DST_BIT,
                      VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
                      VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
                          VMA_ALLOCATION_CREATE_MAPPED_BIT,
node_data.region.imageOffset.x = transfer_region.offset.x;
node_data.region.imageOffset.y = transfer_region.offset.y;
node_data.region.imageOffset.z = transfer_region.offset.z;
node_data.region.imageExtent.width = transfer_region.extent.x;
node_data.region.imageExtent.height = transfer_region.extent.y;
node_data.region.imageExtent.depth = transfer_region.extent.z;
node_data.region.imageSubresource.mipLevel = mip;
node_data.region.imageSubresource.baseArrayLayer = transfer_region.layers.start();
node_data.region.imageSubresource.layerCount = transfer_region.layers.size();
context.render_graph().add_node(copy_image_to_buffer);
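/* After the copy nodes have executed, the requested pixels are read back from the mapped
 * staging buffers into the output buffer. */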
const VKBuffer &staging_buffer = staging_buffers[index];
size_t data_offset = full_transfer_region.result_offset(transfer_region.offset,
int mip_size[3] = {1, 1, 1};
switch (vk_image_type) {
  case VK_IMAGE_TYPE_1D: {
  case VK_IMAGE_TYPE_2D: {
  case VK_IMAGE_TYPE_3D:
if (mip_size[2] == 0) {
size_t sample_len = mip_size[0] * mip_size[1] * mip_size[2] * layers.size();
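/* Read back the full mip level: the region covers [0, 0, 0] .. mip_size. */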
int region[6] = {0, 0, 0, mip_size[0], mip_size[1], mip_size[2]};
int3 offset = int3(offset_[0], offset_[1], offset_[2]);
start_layer = offset.y;
start_layer = offset.z;
size_t sample_len = size_t(extent.x) * extent.y * extent.z * layers;
size_t device_memory_size = sample_len * to_bytesize(device_format_);
BLI_assert_msg(extent.z == 1, "Compressed 3D textures are not supported");
device_memory_size = blocks_x * blocks_y * block_size;
sample_len = device_memory_size / to_bytesize(device_format_);
VkBuffer vk_buffer = VK_NULL_HANDLE;
staging_buffer.create(device_memory_size,
                      VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                      VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
                      VMA_ALLOCATION_CREATE_MAPPED_BIT |
                          VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
const uint texture_unpack_row_length =
    context.state_manager_get().texture_unpack_row_length_get();
if (ELEM(texture_unpack_row_length, 0, extent.x)) {
               "Compressed data with texture_unpack_row_length != 0 is not supported.");
               "3D texture data with texture_unpack_row_length != 0 is not supported.");
size_t dst_row_stride = extent.x * to_bytesize(device_format_);
const uint8_t *src_ptr = static_cast<const uint8_t *>(data);
for (int y = 0; y < extent.y; y++) {
  src_ptr += src_row_stride;
  dst_ptr += dst_row_stride;
node_data.region.imageExtent.width = extent.x;
node_data.region.imageExtent.height = extent.y;
node_data.region.imageExtent.depth = extent.z;
node_data.region.imageOffset.x = offset.x;
node_data.region.imageOffset.y = offset.y;
node_data.region.imageOffset.z = offset.z;
node_data.region.imageSubresource.mipLevel = mip;
node_data.region.imageSubresource.baseArrayLayer = start_layer;
node_data.region.imageSubresource.layerCount = layers;
context.render_graph().add_node(copy_buffer_to_image);
                                     GPUPixelBuffer *pixbuf)
480 "Can only import external memory when usage flag contains GPU_TEXTURE_USAGE_MEMORY_EXPORT.");
482 "Cannot export memory when the texture is not backed by any device memory.");
484 "Requested to export memory, but isn't supported by the device");
if (handle_type == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT) {
  VkMemoryGetFdInfoKHR vk_memory_get_fd_info = {VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
                                                allocation_info_.deviceMemory,
  return {uint64_t(fd_handle), allocation_info_.size, allocation_info_.offset};
if (handle_type == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT) {
  VkMemoryGetWin32HandleInfoKHR vk_memory_get_win32_handle_info = {
      VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR,
      allocation_info_.deviceMemory,
  HANDLE win32_handle = nullptr;
      device.vk_handle(), &vk_memory_get_win32_handle_info, &win32_handle);
  return {uint64_t(win32_handle), allocation_info_.size, allocation_info_.offset};
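/* 3-component float formats have poor device support in Vulkan, so promote them to their
 * 4-component equivalents. */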
if (device_format_ == TextureFormat::SFLOAT_16_16_16) {
  device_format_ = TextureFormat::SFLOAT_16_16_16_16;
if (device_format_ == TextureFormat::SFLOAT_32_32_32) {
  device_format_ = TextureFormat::SFLOAT_32_32_32_32;
source_buffer_ = unwrap(vbo);
device_format_ = texture->device_format_;
layer_offset_ = layer_offset;
use_stencil_ = use_stencil;
vk_image_ = vk_image;
bool VKTexture::is_texture_view() const
{
  return source_texture_ != nullptr;
}
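/* Every texture can be sampled and act as a transfer source/destination; additional usage
 * bits are added based on the requested GPU usage flags. */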
VkImageUsageFlags result = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                           VK_IMAGE_USAGE_SAMPLED_BIT;
result |= VK_IMAGE_USAGE_STORAGE_BIT;
result |= VK_IMAGE_USAGE_STORAGE_BIT;
result |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
result |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
if (supports_local_read) {
  result |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
result |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
result &= ~VK_IMAGE_USAGE_STORAGE_BIT;
result &= ~VK_IMAGE_USAGE_STORAGE_BIT;
VkImageCreateFlags result = 0;
result |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
result |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
bool VKTexture::allocate()
VkExtent3D vk_extent = vk_extent_3d(0);
if (vk_extent.width > limit || vk_extent.height > limit || vk_extent.depth > limit) {
VkImageCreateInfo image_info = {};
image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_info.extent = vk_extent;
image_info.arrayLayers = vk_layer_count(1);
image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_info.samples = VK_SAMPLE_COUNT_1_BIT;
VkImageFormatProperties image_format = {};
result = vkGetPhysicalDeviceImageFormatProperties(device.physical_device_get(),
                                                  image_info.imageType,
if (result != VK_SUCCESS) {
  printf("Image type not supported on device.\n");
VkExternalMemoryImageCreateInfo external_memory_create_info = {
    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, nullptr, 0};
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
image_info.pNext = &external_memory_create_info;
allocCreateInfo.pool = device.vma_pools.external_memory_image.pool;
result = vmaCreateImage(device.mem_allocator_get(),
if (result != VK_SUCCESS) {
const bool use_subresource_tracking = image_info.arrayLayers > 1 || image_info.mipLevels > 1;
device.resources.add_image(vk_image_, use_subresource_tracking, name_);
return result == VK_SUCCESS;
IndexRange VKTexture::mip_map_range() const
IndexRange VKTexture::layer_range() const
if (is_texture_view()) {
int VKTexture::vk_layer_count(int non_layered_value) const
if (is_texture_view()) {
VkExtent3D VKTexture::vk_extent_3d(int mip_level) const
int extent[3] = {1, 1, 1};
if (is_texture_view()) {
for (const VKImageView &image_view : image_views_) {
  if (image_view.info == info) {
return image_views_.last();
image_view_info_.mip_range = mip_map_range();
image_view_info_.use_srgb = true;
image_view_info_.use_stencil = use_stencil_;
image_view_info_.arrayed = arrayed;
image_view_info_.layer_range = layer_range();
image_view_info_.layer_range = image_view_info_.layer_range.slice(
image_view_info_.swizzle[0] = 'r';
image_view_info_.swizzle[1] = 'g';
image_view_info_.swizzle[2] = 'b';
image_view_info_.swizzle[3] = 'a';
image_view_info_.swizzle[0] = swizzle_[0];
image_view_info_.swizzle[1] = swizzle_[1];
image_view_info_.swizzle[2] = swizzle_[2];
image_view_info_.swizzle[3] = swizzle_[3];
if (is_texture_view()) {
  return source_texture_->image_view_get(image_view_info_);