Blender V5.0 — Doxygen-generated source listing
gl_texture.cc (OpenGL backend texture implementation)
Go to the documentation of this file. (NOTE: this text export fused original line numbers into the code and dropped hyperlinked lines; consult the real source file before editing.)
1/* SPDX-FileCopyrightText: 2020 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include <string>
10
11#include "BLI_assert.h"
12#include "BLI_math_half.hh"
13#include "BLI_string.h"
14
15#include "DNA_userdef_types.h"
16
17#include "GPU_capabilities.hh"
18#include "GPU_framebuffer.hh"
19#include "GPU_platform.hh"
20
21#include "GPU_vertex_buffer.hh" /* TODO: should be `gl_vertex_buffer.hh`. */
22#include "gl_backend.hh"
23#include "gl_debug.hh"
24#include "gl_state.hh"
25
26#include "gl_texture.hh"
27
28namespace blender::gpu {
29
30/* -------------------------------------------------------------------- */
33
35{
36 BLI_assert(GLContext::get() != nullptr);
37
38 glGenTextures(1, &tex_id_);
39}
40
42{
43 if (framebuffer_) {
44 GPU_framebuffer_free(framebuffer_);
45 }
47 if (ctx != nullptr && is_bound_) {
48 /* This avoid errors when the texture is still inside the bound texture array. */
49 ctx->state_manager->texture_unbind(this);
50 ctx->state_manager->image_unbind(this);
51 }
53}
54
56{
57 target_ = to_gl_target(type_);
58
59 /* We need to bind once to define the texture type. */
61
62 if (!this->proxy_check(0)) {
63 return false;
64 }
65
66 GLenum internal_format = to_gl_internal_format(format_);
67 const bool is_cubemap = bool(type_ == GPU_TEXTURE_CUBE);
68 const int dimensions = (is_cubemap) ? 2 : this->dimensions_count();
69
70 switch (dimensions) {
71 default:
72 case 1:
73 glTexStorage1D(target_, mipmaps_, internal_format, w_);
74 break;
75 case 2:
76 glTexStorage2D(target_, mipmaps_, internal_format, w_, h_);
77 break;
78 case 3:
79 glTexStorage3D(target_, mipmaps_, internal_format, w_, h_, d_);
80 break;
81 }
82 this->mip_range_set(0, mipmaps_ - 1);
83
84 /* Avoid issue with formats not supporting filtering. Nearest by default. */
86 glTextureParameteri(tex_id_, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
87 }
88 else {
89 glTexParameteri(target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
90 }
91
92 debug::object_label(GL_TEXTURE, tex_id_, name_);
93 return true;
94}
95
97{
98 GLVertBuf *gl_vbo = static_cast<GLVertBuf *>(vbo);
99 target_ = to_gl_target(type_);
100
101 /* We need to bind once to define the texture type. */
103
104 GLenum internal_format = to_gl_internal_format(format_);
105
107 glTextureBuffer(tex_id_, internal_format, gl_vbo->vbo_id_);
108 }
109 else {
110 glTexBuffer(target_, internal_format, gl_vbo->vbo_id_);
111 }
112
113 debug::object_label(GL_TEXTURE, tex_id_, name_);
114
115 return true;
116}
117
119 int mip_offset,
120 int layer_offset,
121 bool use_stencil)
122{
123 const GLTexture *gl_src = static_cast<const GLTexture *>(src);
124 GLenum internal_format = to_gl_internal_format(format_);
125 target_ = to_gl_target(type_);
126
127 glTextureView(tex_id_,
128 target_,
129 gl_src->tex_id_,
130 internal_format,
131 mip_offset,
132 mipmaps_,
133 layer_offset,
134 this->layer_count());
135
136 debug::object_label(GL_TEXTURE, tex_id_, name_);
137
138 /* Stencil view support. */
139 if (ELEM(format_, TextureFormat::SFLOAT_32_DEPTH_UINT_8)) {
140 stencil_texture_mode_set(use_stencil);
141 }
142
143 return true;
144}
145
147
148/* -------------------------------------------------------------------- */
151
/* Upload a sub-region of this texture using ARB_direct_state_access entry
 * points (glTextureSubImage* / glCompressedTextureSubImage*), i.e. without
 * binding the texture to a texture unit first.
 *
 * NOTE(review): this is a Doxygen text listing; original line numbers are
 * fused into the code and the branch condition before the compressed path
 * (original line 155 — presumably a compressed-format check on `format_flag_`)
 * was dropped by the extraction. Confirm against the real gl_texture.cc. */
152void GLTexture::update_sub_direct_state_access(
153    int mip, int offset[3], int extent[3], GLenum format, GLenum type, const void *data)
154{
/* Compressed upload path: `size` is the byte size of the update region,
 * computed as the number of 4x4 blocks times the per-block byte size. */
156 size_t size = ((extent[0] + 3) / 4) * ((extent[1] + 3) / 4) * to_block_size(format_);
157 switch (this->dimensions_count()) {
158 default:
159 case 1:
160 glCompressedTextureSubImage1D(tex_id_, mip, offset[0], extent[0], format, size, data);
161 break;
162 case 2:
163 glCompressedTextureSubImage2D(
164 tex_id_, mip, UNPACK2(offset), UNPACK2(extent), format, size, data);
165 break;
166 case 3:
167 glCompressedTextureSubImage3D(
168 tex_id_, mip, UNPACK3(offset), UNPACK3(extent), format, size, data);
169 break;
170 }
171 }
/* Uncompressed upload path: client data described by `format` + `type`. */
172 else {
173 switch (this->dimensions_count()) {
174 default:
175 case 1:
176 glTextureSubImage1D(tex_id_, mip, offset[0], extent[0], format, type, data);
177 break;
178 case 2:
179 glTextureSubImage2D(tex_id_, mip, UNPACK2(offset), UNPACK2(extent), format, type, data);
180 break;
181 case 3:
182 glTextureSubImage3D(tex_id_, mip, UNPACK3(offset), UNPACK3(extent), format, type, data);
183 break;
184 }
185 }
186
/* Texture now holds data; relevant e.g. for mipmap-generation decisions. */
187 has_pixels_ = true;
188}
189
191 int mip, int offset[3], int extent[3], eGPUDataFormat type, const void *data)
192{
194 BLI_assert(data != nullptr);
195
196 if (mip >= mipmaps_) {
197 debug::raise_gl_error("Updating a miplvl on a texture too small to have this many levels.");
198 return;
199 }
200
201 /* If `texture_unpack_row_length` is 0, rows are sequentially stored. Otherwise we unpack data
202 * into a staging block, so the half conversion below doesn't happen on the full input. */
203 const uint texture_unpack_row_length =
205 const bool do_texture_unpack = !ELEM(texture_unpack_row_length, 0, extent[0]);
206
207 /* Unpack `data` if `texture_unpack_row_length` is set. */
208 std::unique_ptr<uint8_t, MEM_freeN_smart_ptr_deleter> unpack_buffer = nullptr;
209 if (do_texture_unpack) {
211 "Compressed data with texture_unpack_row_length != 0 is not supported.");
212 BLI_assert_msg(extent[2] <= 1,
213 "3D texture data with texture_unpack_row_length != 0 is not supported.");
214
215 size_t src_row_stride = texture_unpack_row_length * to_bytesize(format_, type);
216 size_t dst_row_stride = max_ii(extent[0], 1) * to_bytesize(format_, type);
217 size_t dst_total_count = dst_row_stride * max_ii(extent[1], 1) * max_ii(extent[2], 1);
218
219 /* Allocate buffer to size necessary for gather */
220 unpack_buffer.reset((uint8_t *)MEM_mallocN_aligned(dst_total_count, 128, __func__));
221
222 /* Strided loop; we advance source and destination pointers separately during a gather. */
223 const uint8_t *src_ptr = static_cast<const uint8_t *>(data);
224 uint8_t *dst_ptr = unpack_buffer.get();
225 for (int y = 0; y < max_ii(extent[1], 1); ++y) {
226 std::memcpy(dst_ptr, src_ptr, dst_row_stride);
227 src_ptr += src_row_stride;
228 dst_ptr += dst_row_stride;
229 }
230
231 /* Replace the 'data' ptr with `unpack_buffer`,
232 * which has lifetime in the function scope. */
233 data = unpack_buffer.get();
234 }
235
236 /* If `data` is float and target storage is half, convert to half */
237 std::unique_ptr<uint16_t, MEM_freeN_smart_ptr_deleter> clamped_half_buffer = nullptr;
238 if (type == GPU_DATA_FLOAT && is_half_float(format_)) {
239 size_t dst_pixel_count = max_ii(extent[0], 1) * max_ii(extent[1], 1) * max_ii(extent[2], 1);
240 size_t dst_total_count = to_component_len(format_) * dst_pixel_count;
241
242 /* Allocate buffer to size necessary for conversion.. */
243 clamped_half_buffer.reset(
244 (uint16_t *)MEM_mallocN_aligned(sizeof(uint16_t) * dst_total_count, 128, __func__));
245
246 Span<float> src(static_cast<const float *>(data), dst_total_count);
247 MutableSpan<uint16_t> dst(static_cast<uint16_t *>(clamped_half_buffer.get()), dst_total_count);
248
249 constexpr int64_t chunk_size = 4 * 1024 * 1024;
250 threading::parallel_for(IndexRange(dst_total_count), chunk_size, [&](const IndexRange range) {
251 /* Doing float to half conversion manually to avoid implementation specific behavior
252 * regarding Inf and NaNs. Use make finite version to avoid unexpected black pixels on
253 * certain implementation. For platform parity we clamp these infinite values to finite
254 * values. */
256 src.slice(range).data(), dst.slice(range).data(), range.size());
257 });
258
259 /* Replace the 'data' ptr with `clamped_half_buffer`,
260 * which has lifetime in the function scope. */
261 data = clamped_half_buffer.get();
262 type = GPU_DATA_HALF_FLOAT;
263
264 /* If the `data` ptr was previously replaced by `unpack_buffer`,
265 * clear `unpack_buffer` as it is no longer necessary. */
266 if (do_texture_unpack) {
267 unpack_buffer.reset(nullptr);
268 }
269 }
270
271 const int dimensions = this->dimensions_count();
272 GLenum gl_format = to_gl_data_format(format_);
273 GLenum gl_type = to_gl(type);
274
275 /* Some drivers have issues with cubemap & glTextureSubImage3D even if it is correct. */
277 this->update_sub_direct_state_access(mip, offset, extent, gl_format, gl_type, data);
278 return;
279 }
280
282 if (type_ == GPU_TEXTURE_CUBE) {
283 for (int i = 0; i < extent[2]; i++) {
284 GLenum target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + offset[2] + i;
285 glTexSubImage2D(target, mip, UNPACK2(offset), UNPACK2(extent), gl_format, gl_type, data);
286 }
287 }
289 size_t size = ((extent[0] + 3) / 4) * ((extent[1] + 3) / 4) * to_block_size(format_);
290 switch (dimensions) {
291 default:
292 case 1:
293 glCompressedTexSubImage1D(target_, mip, offset[0], extent[0], gl_format, size, data);
294 break;
295 case 2:
296 glCompressedTexSubImage2D(
297 target_, mip, UNPACK2(offset), UNPACK2(extent), gl_format, size, data);
298 break;
299 case 3:
300 glCompressedTexSubImage3D(
301 target_, mip, UNPACK3(offset), UNPACK3(extent), gl_format, size, data);
302 break;
303 }
304 }
305 else {
306 switch (dimensions) {
307 default:
308 case 1:
309 glTexSubImage1D(target_, mip, offset[0], extent[0], gl_format, gl_type, data);
310 break;
311 case 2:
312 glTexSubImage2D(target_, mip, UNPACK2(offset), UNPACK2(extent), gl_format, gl_type, data);
313 break;
314 case 3:
315 glTexSubImage3D(target_, mip, UNPACK3(offset), UNPACK3(extent), gl_format, gl_type, data);
316 break;
317 }
318 }
319
320 has_pixels_ = true;
321}
322
/* Upload texture data sourced from a GPU pixel buffer (PBO) instead of client
 * memory: the buffer is bound as GL_PIXEL_UNPACK_BUFFER and the data pointer
 * passed to glTexSubImage* is nullptr, so GL reads from the bound buffer.
 * Only mip level 0 is updated.
 *
 * NOTE(review): the Doxygen extraction dropped original lines here, including
 * line 325 (the `format` parameter declaration referenced by `to_gl(format)`
 * below) and line 337 (the temporary-bind call that the comment above it
 * refers to). Confirm against the real source file. */
323void GLTexture::update_sub(int offset[3],
324 int extent[3],
326 GPUPixelBuffer *pixbuf)
327{
 /* Update texture from pixel buffer. */
330 BLI_assert(pixbuf != nullptr);
331
332 const int dimensions = this->dimensions_count();
333 GLenum gl_format = to_gl_data_format(format_);
334 GLenum gl_type = to_gl(format);
335
336 /* Temporarily Bind texture. */
338
339 /* Bind pixel buffer for source data. */
340 GLint pix_buf_handle = (GLint)GPU_pixel_buffer_get_native_handle(pixbuf).handle;
341 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pix_buf_handle);
342
/* nullptr data pointer => source is the bound GL_PIXEL_UNPACK_BUFFER. */
343 switch (dimensions) {
344 default:
345 case 1:
346 glTexSubImage1D(target_, 0, offset[0], extent[0], gl_format, gl_type, nullptr);
347 break;
348 case 2:
349 glTexSubImage2D(target_, 0, UNPACK2(offset), UNPACK2(extent), gl_format, gl_type, nullptr);
350 break;
351 case 3:
352 glTexSubImage3D(target_, 0, UNPACK3(offset), UNPACK3(extent), gl_format, gl_type, nullptr);
353 break;
354 }
355
/* Restore the default unpack binding so later uploads read client memory. */
356 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
357}
358
360{
361 /* Allow users to provide mipmaps stored in compressed textures.
362 * Skip generating mipmaps to avoid overriding the existing ones. */
364 return;
365 }
366
367 /* Some drivers have bugs when using #glGenerateMipmap with depth textures (see #56789).
368 * In this case we just create a complete texture with mipmaps manually without
369 * down-sampling. You must initialize the texture levels using other methods. */
371 return;
372 }
373
375 /* Broken glGenerateMipmap, don't call it and render without mipmaps.
376 * If no top level pixels have been filled in, the levels will get filled by
377 * other means and there is no need to disable mipmapping. */
378 if (has_pixels_) {
379 this->mip_range_set(0, 0);
380 }
381 return;
382 }
383
384 /* Down-sample from mip 0 using implementation. */
386 glGenerateTextureMipmap(tex_id_);
387 }
388 else {
390 glGenerateMipmap(target_);
391 }
392}
393
/* Clear the whole texture by attaching it to a (lazily created) framebuffer
 * and clearing that attachment, rather than using glClearTexImage (see the
 * in-body note on why). The previously active framebuffer is restored after.
 *
 * NOTE(review): the extraction dropped original lines 396 and 405; `prev_fb`
 * used below has no visible declaration — presumably
 * `GPU_framebuffer_active_get()` saved before rebinding. Confirm against the
 * real source. */
394void GLTexture::clear(eGPUDataFormat data_format, const void *data)
395{
397
398 /* Note: do not use glClearTexImage, even if it is available (via
399 * extension or GL 4.4). It causes GL framebuffer binding to be
400 * way slower at least on some drivers (e.g. Win10 / NV RTX 3080,
401 * but also reportedly others), as if glClearTexImage causes
402 * "pixel data" to exist which is then uploaded CPU -> GPU at bind
403 * time. */
404
406
407 FrameBuffer *fb = this->framebuffer_get();
408 fb->bind(true);
409 fb->clear_attachment(this->attachment_type(0), data_format, data);
410
/* Restore whatever framebuffer was bound before the clear. */
411 GPU_framebuffer_bind(prev_fb);
412}
413
415{
416 GLTexture *dst = static_cast<GLTexture *>(dst_);
417 GLTexture *src = this;
418
419 BLI_assert((dst->w_ == src->w_) && (dst->h_ == src->h_) && (dst->d_ == src->d_));
420 BLI_assert(dst->format_ == src->format_);
421 BLI_assert(dst->type_ == src->type_);
422
423 int mip = 0;
424 /* NOTE: mip_size_get() won't override any dimension that is equal to 0. */
425 int extent[3] = {1, 1, 1};
426 this->mip_size_get(mip, extent);
427 glCopyImageSubData(
428 src->tex_id_, target_, mip, 0, 0, 0, dst->tex_id_, target_, mip, 0, 0, 0, UNPACK3(extent));
429
430 has_pixels_ = true;
431}
432
/* Read back one mip level of the texture into newly allocated host memory.
 * Returns a MEM_mallocN'd buffer the caller must free; layout/format is
 * determined by `format_` and the requested `type` data format.
 *
 * NOTE(review): the extraction dropped original lines 435, 437, 455 and 459 —
 * including the `if (` condition that selects the glGetTextureImage (DSA)
 * branch over the bound-texture glGetTexImage fallback. Confirm against the
 * real source. */
433void *GLTexture::read(int mip, eGPUDataFormat type)
434{
436 BLI_assert(mip <= mipmaps_ || mip == 0);
438
439 /* NOTE: mip_size_get() won't override any dimension that is equal to 0. */
440 int extent[3] = {1, 1, 1};
441 this->mip_size_get(mip, extent);
442
/* Total byte size of the requested mip in the requested data format. */
443 size_t sample_len = extent[0] * extent[1] * extent[2];
444 size_t sample_size = to_bytesize(format_, type);
445 size_t texture_size = sample_len * sample_size;
446
447 /* AMD Pro driver have a bug that write 8 bytes past buffer size
448 * if the texture is big. (see #66573) */
449 void *data = MEM_mallocN(texture_size + 8, "GPU_texture_read");
450
/* Depth+stencil is read back as depth only; the stencil part is dropped. */
451 GLenum gl_format = to_gl_data_format(
452 format_ == TextureFormat::SFLOAT_32_DEPTH_UINT_8 ? TextureFormat::SFLOAT_32_DEPTH : format_);
453 GLenum gl_type = to_gl(type);
454
456 glGetTextureImage(tex_id_, mip, gl_format, gl_type, texture_size, data);
457 }
458 else {
/* Cubemaps have no single glGetTexImage target: read the six faces
 * one by one into consecutive sixths of the buffer. */
460 if (type_ == GPU_TEXTURE_CUBE) {
461 size_t cube_face_size = texture_size / 6;
462 char *pdata = (char *)data;
463 for (int i = 0; i < 6; i++, pdata += cube_face_size) {
464 glGetTexImage(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, mip, gl_format, gl_type, pdata);
465 }
466 }
467 else {
468 glGetTexImage(target_, mip, gl_format, gl_type, data);
469 }
470 }
471 return data;
472}
473
475
476/* -------------------------------------------------------------------- */
479
/* Apply a per-channel swizzle mask (e.g. "rgba", "rrr1") to the texture via
 * GL_TEXTURE_SWIZZLE_RGBA; characters are translated by swizzle_to_gl().
 *
 * NOTE(review): the extraction dropped original lines 486 and 490 — the `if (`
 * condition selecting the DSA glTextureParameteriv path and presumably a
 * temporary bind before the non-DSA fallback. Confirm against the real
 * source. */
480void GLTexture::swizzle_set(const char swizzle[4])
481{
482 GLint gl_swizzle[4] = {(GLint)swizzle_to_gl(swizzle[0]),
483 (GLint)swizzle_to_gl(swizzle[1]),
484 (GLint)swizzle_to_gl(swizzle[2]),
485 (GLint)swizzle_to_gl(swizzle[3])};
487 glTextureParameteriv(tex_id_, GL_TEXTURE_SWIZZLE_RGBA, gl_swizzle);
488 }
489 else {
491 glTexParameteriv(target_, GL_TEXTURE_SWIZZLE_RGBA, gl_swizzle);
492 }
493}
494
/* Select which aspect of a packed depth-stencil texture is sampled:
 * GL_STENCIL_INDEX when `use_stencil` is true, GL_DEPTH_COMPONENT otherwise
 * (GL_DEPTH_STENCIL_TEXTURE_MODE).
 *
 * NOTE(review): the extraction dropped original lines 497, 499 and 503 —
 * likely an assert on stencil-texturing support and the `if (` condition
 * selecting the DSA path. Confirm against the real source. */
495void GLTexture::stencil_texture_mode_set(bool use_stencil)
496{
498 GLint value = use_stencil ? GL_STENCIL_INDEX : GL_DEPTH_COMPONENT;
500 glTextureParameteri(tex_id_, GL_DEPTH_STENCIL_TEXTURE_MODE, value);
501 }
502 else {
504 glTexParameteri(target_, GL_DEPTH_STENCIL_TEXTURE_MODE, value);
505 }
506}
507
509{
511 mip_min_ = min;
512 mip_max_ = max;
514 glTextureParameteri(tex_id_, GL_TEXTURE_BASE_LEVEL, min);
515 glTextureParameteri(tex_id_, GL_TEXTURE_MAX_LEVEL, max);
516 }
517 else {
519 glTexParameteri(target_, GL_TEXTURE_BASE_LEVEL, min);
520 glTexParameteri(target_, GL_TEXTURE_MAX_LEVEL, max);
521 }
522}
523
/* Lazily create and cache a framebuffer that has this texture as its sole
 * attachment (used e.g. by clear()). `has_pixels_` is set because the texture
 * can now be written through this framebuffer.
 *
 * NOTE(review): the extraction dropped original line 529 (between the early
 * return and the create call) — possibly an assert restricting which texture
 * types may take this path. Confirm against the real source. */
524FrameBuffer *GLTexture::framebuffer_get()
525{
526 if (framebuffer_) {
527 return framebuffer_;
528 }
530 framebuffer_ = GPU_framebuffer_create(name_);
531 framebuffer_->attachment_set(this->attachment_type(0), GPU_ATTACHMENT_TEXTURE(this));
532 has_pixels_ = true;
533 return framebuffer_;
534}
535
537
538/* -------------------------------------------------------------------- */
541
/* Translate a GPUSamplerExtendMode into the matching GL wrap-mode enum.
 *
 * NOTE(review): all `case` labels (original lines 546, 548, 550, 552) and the
 * statement before the default return (555, presumably BLI_assert_unreachable)
 * were dropped by the extraction; the returns visibly pair as EXTEND ->
 * GL_CLAMP_TO_EDGE, REPEAT -> GL_REPEAT, MIRRORED_REPEAT ->
 * GL_MIRRORED_REPEAT, CLAMP_TO_BORDER -> GL_CLAMP_TO_BORDER, but confirm
 * against the real source. */
543static inline GLenum to_gl(GPUSamplerExtendMode extend_mode)
544{
545 switch (extend_mode) {
547 return GL_CLAMP_TO_EDGE;
549 return GL_REPEAT;
551 return GL_MIRRORED_REPEAT;
553 return GL_CLAMP_TO_BORDER;
554 default:
556 return GL_CLAMP_TO_EDGE;
557 }
558}
559
560GLuint GLTexture::samplers_state_cache_[GPU_SAMPLER_EXTEND_MODES_COUNT]
563GLuint GLTexture::custom_samplers_state_cache_[GPU_SAMPLER_CUSTOM_TYPES_COUNT] = {};
564
566{
567 glGenSamplers(samplers_state_cache_count_, &samplers_state_cache_[0][0][0]);
568
569 for (int extend_yz_i = 0; extend_yz_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_yz_i++) {
570 const GPUSamplerExtendMode extend_yz = static_cast<GPUSamplerExtendMode>(extend_yz_i);
571 const GLenum extend_t = to_gl(extend_yz);
572
573 for (int extend_x_i = 0; extend_x_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_x_i++) {
574 const GPUSamplerExtendMode extend_x = static_cast<GPUSamplerExtendMode>(extend_x_i);
575 const GLenum extend_s = to_gl(extend_x);
576
577 for (int filtering_i = 0; filtering_i < GPU_SAMPLER_FILTERING_TYPES_COUNT; filtering_i++) {
578 const GPUSamplerFiltering filtering = GPUSamplerFiltering(filtering_i);
579
580 const GLenum mag_filter = (filtering & GPU_SAMPLER_FILTERING_LINEAR) ? GL_LINEAR :
581 GL_NEAREST;
582 const GLenum linear_min_filter = (filtering & GPU_SAMPLER_FILTERING_MIPMAP) ?
583 GL_LINEAR_MIPMAP_LINEAR :
584 GL_LINEAR;
585 const GLenum nearest_min_filter = (filtering & GPU_SAMPLER_FILTERING_MIPMAP) ?
586 GL_NEAREST_MIPMAP_LINEAR :
587 GL_NEAREST;
588 const GLenum min_filter = (filtering & GPU_SAMPLER_FILTERING_LINEAR) ? linear_min_filter :
589 nearest_min_filter;
590
591 GLuint sampler = samplers_state_cache_[extend_yz_i][extend_x_i][filtering_i];
592 glSamplerParameteri(sampler, GL_TEXTURE_WRAP_S, extend_s);
593 glSamplerParameteri(sampler, GL_TEXTURE_WRAP_T, extend_t);
594 glSamplerParameteri(sampler, GL_TEXTURE_WRAP_R, extend_t);
595 glSamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, min_filter);
596 glSamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, mag_filter);
597
598 /* Other states are left to default:
599 * - GL_TEXTURE_BORDER_COLOR is {0, 0, 0, 0}.
600 * - GL_TEXTURE_MIN_LOD is -1000.
601 * - GL_TEXTURE_MAX_LOD is 1000.
602 * - GL_TEXTURE_LOD_BIAS is 0.0f.
603 */
604
605 const GPUSamplerState sampler_state = {filtering, extend_x, extend_yz};
606 const std::string sampler_name = sampler_state.to_string();
607 debug::object_label(GL_SAMPLER, sampler, sampler_name.c_str());
608 }
609 }
610 }
612
613 glGenSamplers(GPU_SAMPLER_CUSTOM_TYPES_COUNT, custom_samplers_state_cache_);
614
615 /* Compare sampler for depth textures. */
616 GLuint compare_sampler = custom_samplers_state_cache_[GPU_SAMPLER_CUSTOM_COMPARE];
617 glSamplerParameteri(compare_sampler, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
618 glSamplerParameteri(compare_sampler, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
619 glSamplerParameteri(compare_sampler, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
620 glSamplerParameteri(compare_sampler, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
621 glSamplerParameteri(compare_sampler, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
622 glSamplerParameteri(compare_sampler, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
623 glSamplerParameteri(compare_sampler, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
624
625 debug::object_label(GL_SAMPLER, compare_sampler, "compare");
626
627 /* Custom sampler for icons. The icon texture is sampled within the shader using a -0.5f LOD
628 * bias. */
629 GLuint icon_sampler = custom_samplers_state_cache_[GPU_SAMPLER_CUSTOM_ICON];
630 glSamplerParameteri(icon_sampler, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
631 glSamplerParameteri(icon_sampler, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
632
633 debug::object_label(GL_SAMPLER, icon_sampler, "icons");
634}
635
637{
639 return;
640 }
641
642 float max_anisotropy = 1.0f;
643 glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &max_anisotropy);
644
645 const float anisotropic_filter = min_ff(max_anisotropy, U.anisotropic_filter);
646
647 for (int extend_yz_i = 0; extend_yz_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_yz_i++) {
648 for (int extend_x_i = 0; extend_x_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_x_i++) {
649 for (int filtering_i = 0; filtering_i < GPU_SAMPLER_FILTERING_TYPES_COUNT; filtering_i++) {
650 const GPUSamplerFiltering filtering = GPUSamplerFiltering(filtering_i);
651
652 if ((filtering & GPU_SAMPLER_FILTERING_ANISOTROPIC) &&
653 (filtering & GPU_SAMPLER_FILTERING_MIPMAP))
654 {
655 glSamplerParameterf(samplers_state_cache_[extend_yz_i][extend_x_i][filtering_i],
656 GL_TEXTURE_MAX_ANISOTROPY_EXT,
657 anisotropic_filter);
658 }
659 }
660 }
661 }
662}
663
665{
666 glDeleteSamplers(samplers_state_cache_count_, &samplers_state_cache_[0][0][0]);
667 glDeleteSamplers(GPU_SAMPLER_CUSTOM_TYPES_COUNT, custom_samplers_state_cache_);
668}
669
671{
672 /* Internal sampler states are signal values and do not correspond to actual samplers. */
674
676 return custom_samplers_state_cache_[sampler_state.custom_type];
677 }
678
679 return samplers_state_cache_[sampler_state.extend_yz][sampler_state.extend_x]
680 [sampler_state.filtering];
681}
682
684
685/* -------------------------------------------------------------------- */
690
/* Check whether a texture of this size/format/type is supported by the
 * implementation: first against the queried hard limits (max sizes, max
 * layers), then — on drivers where it is trustworthy — via a GL proxy-texture
 * allocation, reading back GL_TEXTURE_WIDTH to see if the proxy "took".
 *
 * NOTE(review): the extraction dropped original lines 730-731 (the condition
 * guarding the early `return true` driver-workaround block, per its comment a
 * GPU_type_matches() check for the affected AMD/Nvidia-macOS drivers) and
 * line 749 (the compressed-format condition before the glCompressedTexImage*
 * branch). Confirm against the real source. */
691bool GLTexture::proxy_check(int mip)
692{
693 /* NOTE: This only checks if this mipmap is valid / supported.
694 * TODO(fclem): make the check cover the whole mipmap chain. */
695
696 /* Manual validation first, since some implementation have issues with proxy creation. */
697 int max_size = GPU_max_texture_size();
698 int max_3d_size = GPU_max_texture_3d_size();
699 int max_cube_size = GLContext::max_cubemap_size;
700 int size[3] = {1, 1, 1};
701 this->mip_size_get(mip, size);
702
703 if (type_ & GPU_TEXTURE_ARRAY) {
704 if (this->layer_count() > GPU_max_texture_layers()) {
705 return false;
706 }
707 }
708
/* Per-type dimension limits; array bit is masked out so e.g. a 2D array is
 * checked against the 2D limit. */
709 if (type_ == GPU_TEXTURE_3D) {
710 if (size[0] > max_3d_size || size[1] > max_3d_size || size[2] > max_3d_size) {
711 return false;
712 }
713 }
714 else if ((type_ & ~GPU_TEXTURE_ARRAY) == GPU_TEXTURE_2D) {
715 if (size[0] > max_size || size[1] > max_size) {
716 return false;
717 }
718 }
719 else if ((type_ & ~GPU_TEXTURE_ARRAY) == GPU_TEXTURE_1D) {
720 if (size[0] > max_size) {
721 return false;
722 }
723 }
724 else if ((type_ & ~GPU_TEXTURE_ARRAY) == GPU_TEXTURE_CUBE) {
725 if (size[0] > max_cube_size) {
726 return false;
727 }
728 }
729
732 {
733 /* Some AMD drivers have a faulty `GL_PROXY_TEXTURE_..` check.
734 * (see #55888, #56185, #59351).
735 * Checking with `GL_PROXY_TEXTURE_..` doesn't prevent `Out Of Memory` issue,
736 * it just states that the OGL implementation can support the texture.
737 * So we already manually check the maximum size and maximum number of layers.
738 * Same thing happens on Nvidia/macOS 10.15 (#78175). */
739 return true;
740 }
741
/* Proxy-texture check: allocate against the proxy target and see below
 * whether the implementation accepted it. */
742 GLenum gl_proxy = to_gl_proxy(type_);
743 GLenum internal_format = to_gl_internal_format(format_);
744 GLenum gl_format = to_gl_data_format(format_);
745 GLenum gl_type = to_gl(to_texture_data_format(format_));
746 /* Small exception. */
747 int dimensions = (type_ == GPU_TEXTURE_CUBE) ? 2 : this->dimensions_count();
748
750 size_t img_size = ((size[0] + 3) / 4) * ((size[1] + 3) / 4) * to_block_size(format_);
751 switch (dimensions) {
752 default:
753 case 1:
754 glCompressedTexImage1D(gl_proxy, mip, size[0], 0, gl_format, img_size, nullptr);
755 break;
756 case 2:
757 glCompressedTexImage2D(gl_proxy, mip, UNPACK2(size), 0, gl_format, img_size, nullptr);
758 break;
759 case 3:
760 glCompressedTexImage3D(gl_proxy, mip, UNPACK3(size), 0, gl_format, img_size, nullptr);
761 break;
762 }
763 }
764 else {
765 switch (dimensions) {
766 default:
767 case 1:
768 glTexImage1D(gl_proxy, mip, internal_format, size[0], 0, gl_format, gl_type, nullptr);
769 break;
770 case 2:
771 glTexImage2D(
772 gl_proxy, mip, internal_format, UNPACK2(size), 0, gl_format, gl_type, nullptr);
773 break;
774 case 3:
775 glTexImage3D(
776 gl_proxy, mip, internal_format, UNPACK3(size), 0, gl_format, gl_type, nullptr);
777 break;
778 }
779 }
780
/* A proxy allocation that failed reports 0 for GL_TEXTURE_WIDTH. */
781 int width = 0;
782 glGetTexLevelParameteriv(gl_proxy, 0, GL_TEXTURE_WIDTH, &width);
783 return (width > 0);
784}
785
787
789{
790 /* Do not check if using compute shader. */
791 GLShader *sh = dynamic_cast<GLShader *>(Context::get()->shader);
792 if (sh && sh->is_compute()) {
793 return;
794 }
796 for (int i = 0; i < ARRAY_SIZE(fb_); i++) {
797 if (fb_[i] == fb) {
799 GPUAttachment attachment = fb->attachments_[type];
800 /* Check for when texture is used with texture barrier. */
801 GPUAttachment attachment_read = fb->tmp_detached_[type];
802 if (attachment.mip <= mip_max_ && attachment.mip >= mip_min_ &&
803 attachment_read.tex == nullptr)
804 {
805 char msg[256];
806 SNPRINTF(msg,
807 "Feedback loop: Trying to bind a texture (%s) with mip range %d-%d but mip %d is "
808 "attached to the active framebuffer (%s)",
809 name_,
810 mip_min_,
811 mip_max_,
812 attachment.mip,
813 fb->name_);
815 }
816 return;
817 }
818 }
819}
820
821/* -------------------------------------------------------------------- */
824
826{
827 glGenBuffers(1, &gl_id_);
828 BLI_assert(gl_id_);
829
830 if (!gl_id_) {
831 return;
832 }
833
834 /* Ensure size is non-zero for pixel buffer backing storage creation. */
835 size = max_ii(size, 32);
836
837 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, gl_id_);
838 glBufferData(GL_PIXEL_UNPACK_BUFFER, size, nullptr, GL_DYNAMIC_DRAW);
839 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
840}
841
843{
844 if (!gl_id_) {
845 return;
846 }
847 glDeleteBuffers(1, &gl_id_);
848}
849
851{
852 if (!gl_id_) {
853 BLI_assert(false);
854 return nullptr;
855 }
856
857 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, gl_id_);
858 void *ptr = glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
860 return ptr;
861}
862
864{
865 glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
866 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
867}
868
870{
871 GPUPixelBufferNativeHandle native_handle;
872 native_handle.handle = int64_t(gl_id_);
873 native_handle.size = size_;
874 return native_handle;
875}
876
878{
879 return size_;
880}
881
883} // namespace blender::gpu
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:53
MINLINE float min_ff(float a, float b)
MINLINE int max_ii(int a, int b)
#define SNPRINTF(dst, format,...)
Definition BLI_string.h:604
unsigned int uint
#define UNPACK2(a)
#define ARRAY_SIZE(arr)
#define UNPACK3(a)
#define ELEM(...)
int GPU_max_texture_3d_size()
int GPU_max_texture_layers()
int GPU_max_texture_size()
blender::gpu::FrameBuffer * GPU_framebuffer_create(const char *name)
#define GPU_ATTACHMENT_TEXTURE(_texture)
void GPU_framebuffer_free(blender::gpu::FrameBuffer *fb)
void GPU_framebuffer_bind(blender::gpu::FrameBuffer *fb)
blender::gpu::FrameBuffer * GPU_framebuffer_active_get()
@ GPU_DEVICE_ATI
bool GPU_type_matches(GPUDeviceType device, GPUOSType os, GPUDriverType driver)
@ GPU_DRIVER_ANY
@ GPU_DRIVER_OFFICIAL
@ GPU_OS_WIN
@ GPU_OS_UNIX
@ GPU_SAMPLER_CUSTOM_ICON
@ GPU_SAMPLER_CUSTOM_COMPARE
GPUPixelBufferNativeHandle GPU_pixel_buffer_get_native_handle(GPUPixelBuffer *pixel_buf)
@ GPU_SAMPLER_STATE_TYPE_CUSTOM
@ GPU_SAMPLER_STATE_TYPE_INTERNAL
static const int GPU_SAMPLER_FILTERING_TYPES_COUNT
eGPUDataFormat
@ GPU_DATA_HALF_FLOAT
@ GPU_DATA_FLOAT
GPUSamplerExtendMode
@ GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT
@ GPU_SAMPLER_EXTEND_MODE_REPEAT
@ GPU_SAMPLER_EXTEND_MODE_EXTEND
@ GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER
#define GPU_SAMPLER_CUSTOM_TYPES_COUNT
GPUSamplerFiltering
@ GPU_SAMPLER_FILTERING_MIPMAP
@ GPU_SAMPLER_FILTERING_ANISOTROPIC
@ GPU_SAMPLER_FILTERING_LINEAR
#define GPU_SAMPLER_EXTEND_MODES_COUNT
#define U
BMesh const char void * data
long long int int64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
constexpr int64_t size() const
constexpr MutableSpan slice(const int64_t start, const int64_t size) const
Definition BLI_span.hh:573
constexpr T * data() const
Definition BLI_span.hh:539
constexpr Span slice(int64_t start, int64_t size) const
Definition BLI_span.hh:137
constexpr const T * data() const
Definition BLI_span.hh:215
static Context * get()
void attachment_set(GPUAttachmentType type, const GPUAttachment &new_attachment)
static bool stencil_texturing_support
Definition gl_context.hh:68
static GLStateManager * state_manager_active_get()
static bool direct_state_access_support
Definition gl_context.hh:61
static GLContext * get()
static bool texture_filter_anisotropic_support
Definition gl_context.hh:70
static void texture_free(GLuint tex_id)
static GLint max_cubemap_size
Definition gl_context.hh:54
static bool generate_mipmap_workaround
Definition gl_context.hh:76
GPUPixelBufferNativeHandle get_native_handle() override
size_t get_size() override
bool is_compute() const
Definition gl_shader.hh:160
void texture_bind_temp(GLTexture *tex)
Definition gl_state.cc:485
uint texture_unpack_row_length_get() const
Definition gl_state.cc:558
GLTexture(const char *name)
Definition gl_texture.cc:34
void copy_to(Texture *dst) override
void mip_range_set(int min, int max) override
friend class GLFrameBuffer
Definition gl_texture.hh:22
static void samplers_init()
static GLuint get_sampler(const GPUSamplerState &sampler_state)
static void samplers_update()
void * read(int mip, eGPUDataFormat type) override
static void samplers_free()
bool init_internal() override
Definition gl_texture.cc:55
void generate_mipmap() override
void update_sub(int mip, int offset[3], int extent[3], eGPUDataFormat type, const void *data) override
void clear(eGPUDataFormat format, const void *data) override
void swizzle_set(const char swizzle_mask[4]) override
virtual void texture_unbind(Texture *tex)=0
virtual void image_unbind(Texture *tex)=0
char name_[DEBUG_NAME_LEN]
FrameBuffer * fb_[GPU_TEX_MAX_FBO_ATTACHED]
GPUAttachmentType fb_attachment_[GPU_TEX_MAX_FBO_ATTACHED]
GPUAttachmentType attachment_type(int slot) const
void mip_size_get(int mip, int r_size[3]) const
Texture(const char *name)
GPUTextureFormatFlag format_flag_
BLI_INLINE float fb(float length, float L)
format
void * MEM_mallocN(size_t len, const char *str)
Definition mallocn.cc:128
void * MEM_mallocN_aligned(size_t len, size_t alignment, const char *str)
Definition mallocn.cc:138
void raise_gl_error(const char *info)
Definition gl_debug.cc:276
void object_label(GLenum type, GLuint object, const char *name)
Definition gl_debug.cc:329
GLenum to_gl_internal_format(TextureFormat format)
GLenum to_gl_target(GPUTextureType type)
bool is_half_float(TextureFormat format)
eGPUDataFormat to_texture_data_format(TextureFormat tex_format)
GLenum to_gl_data_format(TextureFormat format)
GLenum to_gl_proxy(GPUTextureType type)
size_t to_block_size(TextureFormat data_type)
int to_bytesize(const DataFormat format)
GLenum swizzle_to_gl(const char swizzle)
int to_component_len(TextureFormat format)
constexpr bool validate_data_format(TextureFormat tex_format, eGPUDataFormat data_format)
static GLenum to_gl(const GPUAttachmentType type)
void float_to_half_make_finite_array(const float *src, uint16_t *dst, size_t length)
Definition math_half.cc:274
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
const char * name
#define min(a, b)
Definition sort.cc:36
blender::gpu::Texture * tex
i
Definition text_draw.cc:230
max
Definition text_draw.cc:251
PointerRNA * ptr
Definition wm_files.cc:4238