Blender V4.3
DRW_gpu_wrapper.hh
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2022 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#pragma once
6
60#include "DRW_render.hh"
61
62#include "MEM_guardedalloc.h"
63
64#include "draw_manager_c.hh"
65#include "draw_texture_pool.hh"
66
67#include "BKE_global.hh"
68
70#include "BLI_span.hh"
71#include "BLI_utildefines.h"
72#include "BLI_utility_mixins.hh"
73#include "BLI_vector.hh"
74
75#include "GPU_framebuffer.hh"
76#include "GPU_storage_buffer.hh"
77#include "GPU_texture.hh"
78#include "GPU_uniform_buffer.hh"
79
80namespace blender::draw {
81
82/* -------------------------------------------------------------------- */
86namespace detail {
87
/* Host-side staging storage shared by the UBO/SSBO wrappers below. `data_` is the
 * CPU copy of the buffer contents; every host accessor is compiled out for
 * device-only buffers via `BLI_STATIC_ASSERT(!device_only, "")`.
 * NOTE(doc-extraction): this file is a lossy Doxygen dump. The `int64_t len`
 * template parameter, the `class DataBuffer {` declaration line and the `len_`
 * member declaration were dropped by the export — confirm against the upstream
 * DRW_gpu_wrapper.hh before editing. */
88 template<
90 typename T,
94 bool device_only>
96 protected:
97 T *data_ = nullptr;
99
/* Buffers are bound as multiples of float4 on the GPU side, hence the 16-byte
 * total-size requirement. */
100 BLI_STATIC_ASSERT(((sizeof(T) * len) % 16) == 0,
101 "Buffer size need to be aligned to size of float4.");
102
103 public:
/* Bounds-checked read-only element access (host-visible buffers only). */
108 const T &operator[](int64_t index) const
109 {
110 BLI_STATIC_ASSERT(!device_only, "");
111 BLI_assert(index >= 0);
112 BLI_assert(index < len_);
113 return data_[index];
114 }
115
/* Mutable element access. NOTE(doc-extraction): the signature line (presumably
 * `T &operator[](int64_t index)`, mirroring the const overload above) was lost
 * by the export — TODO confirm upstream. */
117 {
118 BLI_STATIC_ASSERT(!device_only, "");
119 BLI_assert(index >= 0);
120 BLI_assert(index < len_);
121 return data_[index];
122 }
123
/* Raw pointer access to the host copy. */
127 const T *data() const
128 {
129 BLI_STATIC_ASSERT(!device_only, "");
130 return data_;
131 }
132 T *data()
133 {
134 BLI_STATIC_ASSERT(!device_only, "");
135 return data_;
136 }
137
/* Iterator support so the buffer can be used in range-for loops. */
141 const T *begin() const
142 {
143 BLI_STATIC_ASSERT(!device_only, "");
144 return data_;
145 }
146 const T *end() const
147 {
148 BLI_STATIC_ASSERT(!device_only, "");
149 return data_ + len_;
150 }
151
152 T *begin()
153 {
154 BLI_STATIC_ASSERT(!device_only, "");
155 return data_;
156 }
157 T *end()
158 {
159 BLI_STATIC_ASSERT(!device_only, "");
160 return data_ + len_;
161 }
162
/* Implicit conversion to a read-only span over the host copy. */
163 operator Span<T>() const
164 {
165 BLI_STATIC_ASSERT(!device_only, "");
166 return Span<T>(data_, len_);
167 }
168};
169
/* Common implementation for uniform-buffer (UBO) wrappers: owns a GPUUniformBuf
 * sized for `len` elements of T. The host copy lives in the DataBuffer base.
 * Note the base is instantiated with `device_only = false`: UBOs always keep a
 * host-visible copy here. */
170 template<typename T, int64_t len, bool device_only>
171 class UniformCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable {
172 protected:
173 GPUUniformBuf *ubo_;
174
/* Debug builds name the UBO after the element type for easier GPU debugging. */
175#ifndef NDEBUG
176 const char *name_ = typeid(T).name();
177#else
178 const char *name_ = "UniformBuffer";
179#endif
180
181 public:
/* Creates the GPU-side UBO immediately; `name` overrides the default label. */
182 UniformCommon(const char *name = nullptr)
183 {
184 if (name) {
185 name_ = name;
186 }
187 ubo_ = GPU_uniformbuf_create_ex(sizeof(T) * len, nullptr, name_);
188 }
189
/* NOTE(doc-extraction): the destructor was dropped by the export here —
 * presumably it calls GPU_uniformbuf_free(ubo_); verify upstream. */
194
/* Uploads the host copy to the GPU. NOTE(doc-extraction): the signature line
 * (presumably `void push_update()`) was lost by the export. */
196 {
197 GPU_uniformbuf_update(ubo_, this->data_);
198 }
199
200 /* To be able to use it with DRW_shgroup_*_ref(). */
201 operator GPUUniformBuf *() const
202 {
203 return ubo_;
204 }
205
206 /* To be able to use it with DRW_shgroup_*_ref(). */
207 GPUUniformBuf **operator&()
208 {
209 return &ubo_;
210 }
211};
212
/* Common implementation for storage-buffer (SSBO) wrappers: owns a
 * GPUStorageBuf sized for `len` elements of T. Unlike UBOs, device-only SSBOs
 * are supported via the usage flag below. */
213 template<typename T, int64_t len, bool device_only>
214 class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable {
215 protected:
216 GPUStorageBuf *ssbo_;
217
/* Debug builds name the SSBO after the element type for easier GPU debugging. */
218#ifndef NDEBUG
219 const char *name_ = typeid(T).name();
220#else
221 const char *name_ = "StorageBuffer";
222#endif
223
224 public:
/* Creates the GPU-side SSBO immediately; device-only buffers never receive host
 * uploads, so they are created with GPU_USAGE_DEVICE_ONLY. */
225 StorageCommon(const char *name = nullptr)
226 {
227 if (name) {
228 name_ = name;
229 }
230 this->len_ = len;
231 constexpr GPUUsageType usage = device_only ? GPU_USAGE_DEVICE_ONLY : GPU_USAGE_DYNAMIC;
232 ssbo_ = GPU_storagebuf_create_ex(sizeof(T) * this->len_, nullptr, usage, this->name_);
233 }
234
/* NOTE(doc-extraction): the destructor was dropped by the export here —
 * presumably it calls GPU_storagebuf_free(ssbo_); verify upstream. */
239
/* Uploads the host copy to the GPU; forbidden for device-only buffers.
 * NOTE(doc-extraction): the signature line (presumably `void push_update()`)
 * was lost by the export. */
241 {
242 BLI_assert(device_only == false);
243 GPU_storagebuf_update(ssbo_, this->data_);
244 }
245
/* NOTE(doc-extraction): two methods were dropped by the export at original
 * lines 245-255 — likely thin wrappers over GPU_storagebuf_clear_to_zero() and
 * GPU_storagebuf_sync_to_host() (both appear in this file's symbol index);
 * verify upstream. */
250
255
/* Reads the GPU buffer contents back into the host copy (blocking readback). */
256 void read()
257 {
258 GPU_storagebuf_read(ssbo_, this->data_);
259 }
260
261 operator GPUStorageBuf *() const
262 {
263 return ssbo_;
264 }
265 /* To be able to use it with DRW_shgroup_*_ref(). */
266 GPUStorageBuf **operator&()
267 {
268 return &ssbo_;
269 }
270};
271
272} // namespace detail
273
276/* -------------------------------------------------------------------- */
/* Fixed-length array UBO: GPU buffer plus a 16-byte-aligned host staging
 * allocation. NOTE(doc-extraction): the `int64_t len` template parameter line
 * was dropped by the export. */
280 template<
282 typename T,
286 /* TODO(@fclem): Currently unsupported. */
287 /* bool device_only = false */>
288 class UniformArrayBuffer : public detail::UniformCommon<T, len, false> {
289 public:
290 UniformArrayBuffer(const char *name = nullptr) : detail::UniformCommon<T, len, false>(name)
291 {
292 /* TODO(@fclem): We should map memory instead. */
293 this->data_ = (T *)MEM_mallocN_aligned(len * sizeof(T), 16, this->name_);
294 }
/* Destructor: releases the host staging allocation (the GPU buffer is freed by
 * the base class). NOTE(doc-extraction): the `~UniformArrayBuffer()` signature
 * line was lost by the export. */
296 {
297 MEM_freeN(this->data_);
298 }
299};
300
/* Single-struct UBO: inherits from T so the struct's fields are directly
 * accessible on the wrapper itself; `data_` simply points at `this`, so no
 * separate host allocation is needed. */
301 template<
303 typename T
305 /* TODO(@fclem): Currently unsupported. */
306 /* bool device_only = false */>
307 class UniformBuffer : public T, public detail::UniformCommon<T, 1, false> {
308 public:
309 UniformBuffer(const char *name = nullptr) : detail::UniformCommon<T, 1, false>(name)
310 {
311 /* TODO(@fclem): How could we map this? */
312 this->data_ = static_cast<T *>(this);
313 }
314
/* Copy-assign the wrapped struct's contents from a plain T. NOTE(doc-extraction):
 * the signature line (`UniformBuffer<T> &operator=(const T &other)` per this
 * file's symbol index) was lost by the export. */
316 {
317 *static_cast<T *>(this) = other;
318 return *this;
319 }
320};
321
324/* -------------------------------------------------------------------- */
/* Growable array SSBO. The default length rounds 512 bytes up to a whole number
 * of T elements. Host staging memory is 16-byte aligned (matching the float4
 * alignment requirement asserted in DataBuffer). */
328 template<
330 typename T,
332 int64_t len = (512u + (sizeof(T) - 1)) / sizeof(T),
334 bool device_only = false>
335 class StorageArrayBuffer : public detail::StorageCommon<T, len, device_only> {
336 public:
337 StorageArrayBuffer(const char *name = nullptr) : detail::StorageCommon<T, len, device_only>(name)
338 {
339 /* TODO(@fclem): We should map memory instead. */
340 this->data_ = (T *)MEM_mallocN_aligned(len * sizeof(T), 16, this->name_);
341 }
/* Destructor: frees the host staging allocation. NOTE(doc-extraction): the
 * `~StorageArrayBuffer()` signature line was lost by the export. */
343 {
344 MEM_freeN(this->data_);
345 }
346
347 /* Resize to \a new_size elements. */
348 void resize(int64_t new_size)
349 {
350 BLI_assert(new_size > 0);
351 if (new_size != this->len_) {
352 /* Manual realloc since MEM_reallocN_aligned does not exists. */
353 T *new_data_ = (T *)MEM_mallocN_aligned(new_size * sizeof(T), 16, this->name_);
354 memcpy(new_data_, this->data_, min_uu(this->len_, new_size) * sizeof(T));
355 MEM_freeN(this->data_);
356 this->data_ = new_data_;
/* NOTE(doc-extraction): original line 357 was dropped by the export — upstream
 * presumably frees the old `ssbo_` here (GPU_storagebuf_free) before the new
 * one is created below; without it this would leak the GPU buffer. Verify. */
358
359 this->len_ = new_size;
360 constexpr GPUUsageType usage = device_only ? GPU_USAGE_DEVICE_ONLY : GPU_USAGE_DYNAMIC;
361 this->ssbo_ = GPU_storagebuf_create_ex(sizeof(T) * this->len_, nullptr, usage, this->name_);
362 }
363 }
364
365 /* Resize on access. */
/* Grows the buffer to the next power of two that fits `index`, then returns a
 * mutable reference. NOTE(doc-extraction): the signature line (presumably
 * `T &get_or_resize(int64_t index)`) was lost by the export. */
367 {
368 BLI_assert(index >= 0);
369 if (index >= this->len_) {
370 size_t size = power_of_2_max_u(index + 1);
371 this->resize(size);
372 }
373 return this->data_[index];
374 }
375
376 /*
377 * Ensure the allocated size is not much larger than the currently required size,
378 * using the same heuristic as `get_or_resize`.
379 */
/* NOTE(doc-extraction): signature line lost; the symbol index gives it as
 * `void trim_to_next_power_of_2(int64_t required_size)`. */
381 {
382 /* Don't go below the size used at creation. */
383 required_size = std::max(required_size, len);
384 size_t target_size = power_of_2_max_u(required_size);
385 if (this->len_ > target_size) {
386 this->resize(target_size);
387 }
388 }
389
/* Allocated length in elements (not a logical item count — see
 * StorageVectorBuffer for that). */
390 int64_t size() const
391 {
392 return this->len_;
393 }
394
/* Mutable span over the host copy. NOTE(doc-extraction): signature line lost;
 * the symbol index gives it as `MutableSpan<T> as_span() const`. */
396 {
397 return {this->data_, this->len_};
398 }
399
/* Swap all state (host pointer, GPU handle, length, label) between two buffers.
 * NOTE(doc-extraction): signature line lost; the symbol index gives it as
 * `static void swap(StorageArrayBuffer &a, StorageArrayBuffer &b)`. */
401 {
402 std::swap(a.data_, b.data_);
403 std::swap(a.ssbo_, b.ssbo_);
404 std::swap(a.len_, b.len_);
405 std::swap(a.name_, b.name_);
406 }
407};
408
/* Vector-like SSBO: a StorageArrayBuffer plus a logical item count, supporting
 * append/extend with power-of-two growth. Never device-only (items are written
 * on the host). */
409 template<
411 typename T,
413 int64_t len = (512u + (sizeof(T) - 1)) / sizeof(T)>
414 class StorageVectorBuffer : public StorageArrayBuffer<T, len, false> {
415 private:
416 /* Number of items, not the allocated length. */
417 int64_t item_len_ = 0;
418
419 public:
420 StorageVectorBuffer(const char *name = nullptr) : StorageArrayBuffer<T, len, false>(name){};
/* NOTE(doc-extraction): a line was dropped by the export here — presumably the
 * defaulted destructor declaration; verify upstream. */
422
/* Reset the logical item count. NOTE: does not call destructors of the stored
 * items and does not shrink the allocation. */
426 void clear()
427 {
428 item_len_ = 0;
429 }
430
/* Clears and also trims the allocation down to the power-of-two heuristic.
 * NOTE(doc-extraction): the signature line (presumably `void clear_and_trim()`)
 * was lost by the export. */
436 {
437 this->trim_to_next_power_of_2(item_len_);
438 clear();
439 }
440
/* Append by copy / by move; both funnel into append_as below. */
447 void append(const T &value)
448 {
449 this->append_as(value);
450 }
451 void append(T &&value)
452 {
453 this->append_as(std::move(value));
454 }
/* Emplace-style append: grows to the next power of two when full, then
 * placement-news the item in the host staging memory. */
455 template<typename... ForwardT> void append_as(ForwardT &&...value)
456 {
457 if (item_len_ >= this->len_) {
458 size_t size = power_of_2_max_u(item_len_ + 1);
459 this->resize(size);
460 }
461 T *ptr = &this->data_[item_len_++];
462 new (ptr) T(std::forward<ForwardT>(value)...);
463 }
464
465 void extend(const Span<T> values)
466 {
467 /* TODO(fclem): Optimize to a single memcpy. */
468 for (auto v : values) {
469 this->append(v);
470 }
471 }
472
/* Logical item count (shadows StorageArrayBuffer::size, which returns the
 * allocated length). */
473 int64_t size() const
474 {
475 return item_len_;
476 }
477
478 bool is_empty() const
479 {
480 return this->size() == 0;
481 }
482
483 /* Avoid confusion with the other clear. */
484 void clear_to_zero() = delete;
485
/* Swap, including the logical item count. NOTE(doc-extraction): the signature
 * line and the base-class swap call (original lines 486 and 488) were lost by
 * the export; the symbol index gives the signature as
 * `static void swap(StorageVectorBuffer &a, StorageVectorBuffer &b)`. */
487 {
489 std::swap(a.item_len_, b.item_len_);
490 }
491};
492
/* Single-struct SSBO: inherits from T so the struct's fields are directly
 * accessible; `data_` points at `this` (no separate host allocation). */
493 template<
495 typename T,
497 bool device_only = false>
498 class StorageBuffer : public T, public detail::StorageCommon<T, 1, device_only> {
499 public:
500 StorageBuffer(const char *name = nullptr) : detail::StorageCommon<T, 1, device_only>(name)
501 {
502 /* TODO(@fclem): How could we map this? */
503 this->data_ = static_cast<T *>(this);
504 }
505
/* Copy-assign the wrapped struct's contents from a plain T. NOTE(doc-extraction):
 * the signature line (`StorageBuffer<T> &operator=(const T &other)` per this
 * file's symbol index) was lost by the export. */
507 {
508 *static_cast<T *>(this) = other;
509 return *this;
510 }
511
/* Swap helper. NOTE(doc-extraction): the signature line was lost; the symbol
 * index gives it as `static void swap(StorageBuffer<T> &a, StorageBuffer<T> &b)`.
 * NOTE(review): `static_cast<T>(a)` below would swap *temporaries*, not the
 * objects — the export most likely ate the reference: upstream presumably reads
 * `static_cast<T &>(a)` / `static_cast<T &>(b)`. Verify before relying on it. */
513 {
514 /* Swap content, but not `data_` pointers since they point to `this`. */
515 SWAP(T, static_cast<T>(a), static_cast<T>(b));
516 std::swap(a.ssbo_, b.ssbo_);
517 }
518};
519
522/* -------------------------------------------------------------------- */
/* RAII owner of a GPUTexture plus lazily created views (mip views, layer views,
 * stencil view, layer-range view). Move-only.
 * NOTE(doc-extraction): the `class Texture ... {` declaration line and the
 * `Vector<GPUTexture *, 0> mip_views_;` / `layer_views_;` member declarations
 * (original lines 530-531, still visible in this file's symbol index) were
 * dropped by the export, along with many signature/body lines marked below. */
527 protected:
528 GPUTexture *tx_ = nullptr;
529 GPUTexture *stencil_view_ = nullptr;
532 GPUTexture *layer_range_view_ = nullptr;
533 const char *name_;
534
535 public:
536 Texture(const char *name = "gpu::Texture") : name_(name) {}
537
/* The constructors below all forward to create(); each is missing its
 * `eGPUTextureFormat format,` parameter line (dropped by the export — the
 * symbol index shows the full signatures). 1D variant: */
538 Texture(const char *name,
540 eGPUTextureUsage usage,
541 int extent,
542 const float *data = nullptr,
543 bool cubemap = false,
544 int mip_len = 1)
545 : name_(name)
546 {
547 tx_ = create(extent, 0, 0, mip_len, format, usage, data, false, cubemap);
548 }
549
/* 1D array (or cube-array) variant. */
550 Texture(const char *name,
552 eGPUTextureUsage usage,
553 int extent,
554 int layers,
555 const float *data = nullptr,
556 bool cubemap = false,
557 int mip_len = 1)
558 : name_(name)
559 {
560 tx_ = create(extent, layers, 0, mip_len, format, usage, data, true, cubemap);
561 }
562
/* 2D variant. */
563 Texture(const char *name,
565 eGPUTextureUsage usage,
566 int2 extent,
567 const float *data = nullptr,
568 int mip_len = 1)
569 : name_(name)
570 {
571 tx_ = create(UNPACK2(extent), 0, mip_len, format, usage, data, false, false);
572 }
573
/* 2D array variant. */
574 Texture(const char *name,
576 eGPUTextureUsage usage,
577 int2 extent,
578 int layers,
579 const float *data = nullptr,
580 int mip_len = 1)
581 : name_(name)
582 {
583 tx_ = create(UNPACK2(extent), layers, mip_len, format, usage, data, true, false);
584 }
585
/* 3D variant. */
586 Texture(const char *name,
588 eGPUTextureUsage usage,
589 int3 extent,
590 const float *data = nullptr,
591 int mip_len = 1)
592 : name_(name)
593 {
594 tx_ = create(UNPACK3(extent), mip_len, format, usage, data, false, false);
595 }
596
597 Texture(Texture &&other) = default;
/* Destructor: frees the texture and all views. NOTE(doc-extraction): the
 * `~Texture()` signature line was lost by the export. */
599 {
600 free();
601 }
602
603 GPUTexture *gpu_texture()
604 {
605 return tx_;
606 }
607
608 /* To be able to use it with DRW_shgroup_uniform_texture(). */
609 operator GPUTexture *() const
610 {
611 BLI_assert(tx_ != nullptr);
612 return tx_;
613 }
614
615 /* To be able to use it with DRW_shgroup_uniform_texture_ref(). */
616 GPUTexture **operator&()
617 {
618 return &tx_;
619 }
620
/* Returns `this`. NOTE(doc-extraction): the signature line (presumably a
 * `Texture *ptr()`-style accessor) was lost by the export — verify upstream. */
623 {
624 return this;
625 }
626
/* Move assignment: steals all handles from `a` and leaves it empty.
 * NOTE(doc-extraction): the signature line (`Texture &operator=(Texture &&a)`
 * per the symbol index) was lost by the export. */
628 {
629 if (this != std::addressof(a)) {
630 this->free();
631
632 this->tx_ = a.tx_;
633 this->name_ = a.name_;
634 this->stencil_view_ = a.stencil_view_;
635 this->layer_range_view_ = a.layer_range_view_;
636 this->mip_views_ = std::move(a.mip_views_);
637 this->layer_views_ = std::move(a.layer_views_);
638
639 a.tx_ = nullptr;
640 a.name_ = nullptr;
641 a.stencil_view_ = nullptr;
642 a.layer_range_view_ = nullptr;
643 a.mip_views_.clear();
644 a.layer_views_.clear();
645 }
646 return *this;
647 }
648
/* The ensure_*() family below re-creates the texture only when the requested
 * size/format/flags differ from the current one (see ensure_impl). Each is
 * missing its `bool ensure_Xd(eGPUTextureFormat format, ... usage ...)` opening
 * line(s), dropped by the export — full signatures are in the symbol index.
 * ensure_1d: */
654 int extent,
656 const float *data = nullptr,
657 int mip_len = 1)
658 {
659 return ensure_impl(extent, 0, 0, mip_len, format, usage, data, false, false);
660 }
661
/* ensure_1d_array: */
667 int extent,
668 int layers,
670 const float *data = nullptr,
671 int mip_len = 1)
672 {
673 BLI_assert(layers > 0);
674 return ensure_impl(extent, layers, 0, mip_len, format, usage, data, true, false);
675 }
676
/* ensure_2d: */
682 int2 extent,
684 const float *data = nullptr,
685 int mip_len = 1)
686 {
687 return ensure_impl(UNPACK2(extent), 0, mip_len, format, usage, data, false, false);
688 }
689
/* ensure_2d_array: */
695 int2 extent,
696 int layers,
698 const float *data = nullptr,
699 int mip_len = 1)
700 {
701 BLI_assert(layers > 0);
702 return ensure_impl(UNPACK2(extent), layers, mip_len, format, usage, data, true, false);
703 }
704
/* ensure_3d: */
710 int3 extent,
712 const float *data = nullptr,
713 int mip_len = 1)
714 {
715 return ensure_impl(UNPACK3(extent), mip_len, format, usage, data, false, false);
716 }
717
/* ensure_cube (cube faces are square, hence extent is passed twice): */
723 int extent,
725 float *data = nullptr,
726 int mip_len = 1)
727 {
728 return ensure_impl(extent, extent, 0, mip_len, format, usage, data, false, true);
729 }
730
/* ensure_cube_array: */
736 int extent,
737 int layers,
739 const float *data = nullptr,
740 int mip_len = 1)
741 {
742 return ensure_impl(extent, extent, layers, mip_len, format, usage, data, true, true);
743 }
744
/* (Re)creates one texture view per mip level. Returns true if the view list was
 * rebuilt. NOTE(doc-extraction): the per-view free call inside the first loop
 * and the `mip_views_.append(` half of the create call were dropped by the
 * export (note the unbalanced `)` on the GPU_texture_create_view line). */
749 bool ensure_mip_views(bool cube_as_array = false)
750 {
751 int mip_len = GPU_texture_mip_count(tx_);
752 if (mip_views_.size() != mip_len) {
753 for (GPUTexture *&view : mip_views_) {
755 }
757 for (auto i : IndexRange(mip_len)) {
759 GPU_texture_create_view(name_, tx_, format, i, 1, 0, 9999, cube_as_array, false));
760 }
761 return true;
762 }
763 return false;
764 }
765
766 GPUTexture *mip_view(int miplvl)
767 {
768 BLI_assert_msg(miplvl < mip_views_.size(),
769 "Incorrect mip level requested. "
770 "Might be missing call to ensure_mip_views().");
771 return mip_views_[miplvl];
772 }
773
/* NOTE(doc-extraction): body line dropped — presumably
 * `return GPU_texture_mip_count(tx_);`; verify upstream. */
774 int mip_count() const
775 {
777 }
778
/* Same pattern as ensure_mip_views() but one view per layer; same export holes. */
784 bool ensure_layer_views(bool cube_as_array = false)
785 {
786 int layer_len = GPU_texture_layer_count(tx_);
787 if (layer_views_.size() != layer_len) {
788 for (GPUTexture *&view : layer_views_) {
790 }
792 for (auto i : IndexRange(layer_len)) {
794 GPU_texture_create_view(name_, tx_, format, 0, 9999, i, 1, cube_as_array, false));
795 }
796 return true;
797 }
798 return false;
799 }
800
801 GPUTexture *layer_view(int layer)
802 {
803 return layer_views_[layer];
804 }
805
/* Lazily-created stencil view over the whole texture. NOTE(doc-extraction): the
 * `stencil_view_ = GPU_texture_create_view(` opening of the call was dropped by
 * the export. */
806 GPUTexture *stencil_view(bool cube_as_array = false)
807 {
808 if (stencil_view_ == nullptr) {
811 name_, tx_, format, 0, 9999, 0, 9999, cube_as_array, true);
812 }
813 return stencil_view_;
814 }
815
/* Lazily-created view over a contiguous layer range; rebuilt when the requested
 * layer count changes. NOTE(doc-extraction): the free of the previous view and
 * the `layer_range_view_ = GPU_texture_create_view(` opening were dropped by
 * the export. */
825 GPUTexture *layer_range_view(int layer_start, int layer_len, bool cube_as_array = false)
826 {
827 BLI_assert(this->is_valid());
828 /* Make sure the range is valid as the GPU_texture_layer_count only returns the effective
829 * (clipped) range and not the requested range. */
830 BLI_assert_msg((layer_start + layer_len) <= GPU_texture_layer_count(tx_),
831 "Layer range needs to be valid");
832
833 int view_layer_len = (layer_range_view_) ? GPU_texture_layer_count(layer_range_view_) : -1;
834 if (layer_len != view_layer_len) {
838 name_, tx_, format, 0, 9999, layer_start, layer_len, cube_as_array, false);
839 }
840 return layer_range_view_;
841 }
842
846 bool is_valid() const
847 {
848 return tx_ != nullptr;
849 }
850
851 int width() const
852 {
853 return GPU_texture_width(tx_);
854 }
855
856 int height() const
857 {
858 return GPU_texture_height(tx_);
859 }
860
861 int depth() const
862 {
863 return GPU_texture_depth(tx_);
864 }
865
/* NOTE(doc-extraction): body line dropped — presumably the product of the
 * texture dimensions; verify upstream. */
866 int pixel_count() const
867 {
869 }
870
/* The four predicates below all lost their single `return GPU_texture_has_*`
 * body line to the export (the matching GPU_* queries are in the symbol
 * index); verify upstream. */
871 bool is_depth() const
872 {
874 }
875
876 bool is_stencil() const
877 {
879 }
880
881 bool is_integer() const
882 {
884 }
885
886 bool is_cube() const
887 {
888 return GPU_texture_is_cube(tx_);
889 }
890
891 bool is_array() const
892 {
894 }
895
/* Size of the given mip level as (w, h, d). */
896 int3 size(int miplvl = 0) const
897 {
898 int3 size(1);
899 GPU_texture_get_mipmap_size(tx_, miplvl, size);
900 return size;
901 }
902
/* Clear with a float value. NOTE(doc-extraction): the body line (presumably
 * `GPU_texture_clear(tx_, GPU_DATA_FLOAT, &values[0]);`, mirroring the int/uint
 * overloads below) was dropped by the export. */
906 void clear(float4 values)
907 {
909 }
910
914 void clear(uint4 values)
915 {
916 GPU_texture_clear(tx_, GPU_DATA_UINT, &values[0]);
917 }
918
922 void clear(int4 values)
923 {
924 GPU_texture_clear(tx_, GPU_DATA_INT, &values[0]);
925 }
926
/* Fills the texture with obviously-wrong debug values (NaN for float formats,
 * 0xF0F0F0F0 for integer formats) so uninitialized reads are visible.
 * NOTE(doc-extraction): the signature (`void debug_clear()`, see its use in
 * ensure_impl below) and the opening float-format `if` were dropped by the
 * export. */
932 {
934 this->clear(float4(NAN_FLT));
935 }
936 else if (GPU_texture_has_integer_format(this->tx_)) {
937 if (GPU_texture_has_signed_format(this->tx_)) {
938 this->clear(int4(0xF0F0F0F0));
939 }
940 else {
941 this->clear(uint4(0xF0F0F0F0));
942 }
943 }
944 }
945
/* Blocking readback of the given mip level; caller owns (and must MEM_freeN)
 * the returned buffer. */
950 template<typename T> T *read(eGPUDataFormat format, int miplvl = 0)
951 {
952 return reinterpret_cast<T *>(GPU_texture_read(tx_, format, miplvl));
953 }
954
955 void filter_mode(bool do_filter)
956 {
957 GPU_texture_filter_mode(tx_, do_filter);
958 }
959
/* Frees the texture and every view. NOTE(doc-extraction): the per-handle
 * GPU_TEXTURE_FREE_SAFE lines (main texture, each view, stencil and layer-range
 * views, plus the view-list clears) were dropped by the export — only the loop
 * shells survive. */
963 void free()
964 {
966 for (GPUTexture *&view : mip_views_) {
968 }
969 for (GPUTexture *&view : layer_views_) {
971 }
976 }
977
/* Full state swap between two textures. */
981 static void swap(Texture &a, Texture &b)
982 {
983 std::swap(a.tx_, b.tx_);
984 std::swap(a.name_, b.name_);
985 std::swap(a.stencil_view_, b.stencil_view_);
986 std::swap(a.layer_range_view_, b.layer_range_view_);
987 std::swap(a.mip_views_, b.mip_views_);
988 std::swap(a.layer_views_, b.layer_views_);
989 }
990
991 private:
/* Frees and re-creates the texture when size/format/cube/layered flags differ;
 * returns true when a new texture was created. NOTE(doc-extraction): the
 * `eGPUTextureFormat format,` / `eGPUTextureUsage usage,` parameter lines and
 * the `GPU_texture_get_mipmap_size` query that fills `size` were dropped by the
 * export. */
992 bool ensure_impl(int w,
993 int h = 0,
994 int d = 0,
995 int mip_len = 1,
998 const float *data = nullptr,
999 bool layered = false,
1000 bool cubemap = false)
1001
1002 {
1003 /* TODO(@fclem): In the future, we need to check if mip_count did not change.
1004 * For now it's ok as we always define all MIP level. */
1005 if (tx_) {
1006 int3 size(0);
1008 if (size != int3(w, h, d) || GPU_texture_format(tx_) != format ||
1009 GPU_texture_is_cube(tx_) != cubemap || GPU_texture_is_array(tx_) != layered)
1010 {
1011 free();
1012 }
1013 }
1014 if (tx_ == nullptr) {
1015 tx_ = create(w, h, d, mip_len, format, usage, data, layered, cubemap);
1016 if (is_valid() && data == nullptr && (G.debug & G_DEBUG_GPU)) {
1017 debug_clear();
1018 }
1019 return true;
1020 }
1021 return false;
1022 }
1023
/* Dispatches to the right GPU_texture_create_* call based on which dimensions
 * are zero and the cube/layered flags: h==0 -> 1D, cubemap -> cube[/array],
 * d==0 -> 1D-array or 2D, otherwise 2D-array or 3D. NOTE(doc-extraction): the
 * `eGPUTextureFormat format,` parameter line was dropped by the export. */
1024 GPUTexture *create(int w,
1025 int h,
1026 int d,
1027 int mip_len,
1029 eGPUTextureUsage usage,
1030 const float *data,
1031 bool layered,
1032 bool cubemap)
1033 {
1034 if (h == 0) {
1035 return GPU_texture_create_1d(name_, w, mip_len, format, usage, data);
1036 }
1037 else if (cubemap) {
1038 if (layered) {
1039 return GPU_texture_create_cube_array(name_, w, d, mip_len, format, usage, data);
1040 }
1041 else {
1042 return GPU_texture_create_cube(name_, w, mip_len, format, usage, data);
1043 }
1044 }
1045 else if (d == 0) {
1046 if (layered) {
1047 return GPU_texture_create_1d_array(name_, w, h, mip_len, format, usage, data);
1048 }
1049 else {
1050 return GPU_texture_create_2d(name_, w, h, mip_len, format, usage, data);
1051 }
1052 }
1053 else {
1054 if (layered) {
1055 return GPU_texture_create_2d_array(name_, w, h, d, mip_len, format, usage, data);
1056 }
1057 else {
1058 return GPU_texture_create_3d(name_, w, h, d, mip_len, format, usage, data);
1059 }
1060 }
1061 }
1062};
1063
/* Texture borrowed from the draw-manager texture pool for the duration of one
 * render: acquire() checks a texture out of the pool, release() returns it.
 * The owning ensure_*/free/view API of Texture is deleted below to prevent
 * misuse on a pooled texture. NOTE(doc-extraction): the
 * `class TextureFromPool : public Texture ... {` declaration line was dropped
 * by the export. */
1065 public:
1066 TextureFromPool(const char *name = "gpu::Texture") : Texture(name){};
1067
1068 /* Always use `release()` after rendering. */
/* NOTE(doc-extraction): the format/usage parameter lines of acquire() (see the
 * symbol index for the full signature) and the
 * `this->tx_ = DRW_texture_pool_texture_acquire(` opening of the call were
 * dropped by the export. */
1069 void acquire(int2 extent,
1072 {
1073 BLI_assert(this->tx_ == nullptr);
1074
1076 DST.vmempool->texture_pool, UNPACK2(extent), format, usage);
1077
1078 if (G.debug & G_DEBUG_GPU) {
1079 debug_clear();
1080 }
1081 }
1082
/* Returns the texture to the pool and forgets it. NOTE(doc-extraction): the
 * DRW_texture_pool_texture_release call (original line 1089) was dropped by the
 * export. */
1083 void release()
1084 {
1085 /* Allows multiple release. */
1086 if (this->tx_ == nullptr) {
1087 return;
1088 }
1090 this->tx_ = nullptr;
1091 }
1092
/* Swap with a plain Texture: per the symbol index there are both
 * (Texture, TextureFromPool) and (TextureFromPool, Texture) overloads; the
 * second overload's signature line (before the `Texture::swap(a, b)` body) was
 * dropped by the export. Presumably ownership is transferred to/from the pool
 * here in upstream — verify. */
1103 static void swap(Texture &a, TextureFromPool &b)
1104 {
1105 swap(b, a);
1106 }
1108 {
1109 Texture::swap(a, b);
1110 }
1111
/* Returns `this`. NOTE(doc-extraction): signature line lost (presumably a
 * `ptr()`-style accessor mirroring the one in Texture). */
1114 {
1115 return this;
1116 }
1117
/* Forbidden Texture API: a pooled texture must not be (re)allocated, freed or
 * given views directly. */
1119 bool ensure_1d(int, int, eGPUTextureFormat, eGPUTextureUsage, const float *) = delete;
1120 bool ensure_1d_array(int, int, int, eGPUTextureFormat, eGPUTextureUsage, const float *) = delete;
1121 bool ensure_2d(int, int, int, eGPUTextureFormat, eGPUTextureUsage, float *) = delete;
1122 bool ensure_2d_array(int, int, int, int, eGPUTextureFormat, eGPUTextureUsage, const float *) =
1123 delete;
1124 bool ensure_3d(int, int, int, int, eGPUTextureFormat, eGPUTextureUsage, const float *) = delete;
1125 bool ensure_cube(int, int, eGPUTextureFormat, eGPUTextureUsage, const float *) = delete;
1126 bool ensure_cube_array(int, int, int, eGPUTextureFormat, eGPUTextureUsage, const float *) =
1127 delete;
1128 void filter_mode(bool) = delete;
1129 void free() = delete;
1130 GPUTexture *mip_view(int) = delete;
1131 GPUTexture *layer_view(int) = delete;
1132 GPUTexture *stencil_view() = delete;
1133};
1134
/* Non-owning wrapper around an externally managed GPUTexture: wrap() just
 * stores the handle, and the destructor clears the pointer instead of freeing
 * it. The owning Texture API is deleted below. */
1135 class TextureRef : public Texture {
1136 public:
1137 TextureRef() = default;
1138
/* Destructor: drops the reference without freeing. NOTE(doc-extraction): the
 * `~TextureRef()` signature line was lost by the export. */
1140 {
1141 this->tx_ = nullptr;
1142 }
1143
/* Point this wrapper at an externally owned texture. */
1144 void wrap(GPUTexture *tex)
1145 {
1146 this->tx_ = tex;
1147 }
1148
/* Forbidden Texture API: a reference must never allocate, free or create views. */
1150 bool ensure_1d(int, int, eGPUTextureFormat, const float *) = delete;
1151 bool ensure_1d_array(int, int, int, eGPUTextureFormat, const float *) = delete;
1152 bool ensure_2d(int, int, int, eGPUTextureFormat, const float *) = delete;
1153 bool ensure_2d_array(int, int, int, int, eGPUTextureFormat, const float *) = delete;
1154 bool ensure_3d(int, int, int, int, eGPUTextureFormat, const float *) = delete;
1155 bool ensure_cube(int, int, eGPUTextureFormat, const float *) = delete;
1156 bool ensure_cube_array(int, int, int, eGPUTextureFormat, const float *) = delete;
1157 void filter_mode(bool) = delete;
1158 void free() = delete;
1159 GPUTexture *mip_view(int) = delete;
1160 GPUTexture *layer_view(int) = delete;
1161 GPUTexture *stencil_view() = delete;
1162};
1163
/* Tag type used to bind a GPUTexture as a shader *image* (read/write) rather
 * than a sampled texture. It carries no state: the as_image()/as_texture()
 * helpers below only reinterpret the pointer so the two binding styles can be
 * distinguished at the API level. */
1168 class Image {};

/* GPUTexture -> Image pointer reinterpretation (no ownership change). */
1170 static inline Image *as_image(GPUTexture *tex)
1171{
1172 return reinterpret_cast<Image *>(tex);
1173}

/* Pointer-to-pointer variant, for the DRW *_ref() APIs. */
1175 static inline Image **as_image(GPUTexture **tex)
1176{
1177 return reinterpret_cast<Image **>(tex);
1178}

/* Inverse conversion: Image -> GPUTexture. */
1180 static inline GPUTexture *as_texture(Image *img)
1181{
1182 return reinterpret_cast<GPUTexture *>(img);
1183}

1185 static inline GPUTexture **as_texture(Image **img)
1186{
1187 return reinterpret_cast<GPUTexture **>(img);
1188}
1189
1192/* -------------------------------------------------------------------- */
/* RAII wrapper around a lazily-created GPUFrameBuffer. NOTE(doc-extraction):
 * the `class Framebuffer ... {` declaration line was dropped by the export. */
1197 private:
1198 GPUFrameBuffer *fb_ = nullptr;
1199 const char *name_;
1200
1201 public:
1202 Framebuffer() : name_(""){};
1203 Framebuffer(const char *name) : name_(name){};
1204
/* Destructor. NOTE(doc-extraction): signature and body lines were dropped —
 * presumably `GPU_FRAMEBUFFER_FREE_SAFE(fb_);` (the macro appears in this
 * file's symbol index); verify upstream. */
1206 {
1208 }
1209
/* Configure attachments (depth + up to 8 color targets), creating the
 * framebuffer on first use. NOTE(doc-extraction): the multi-line signature
 * (`void ensure(GPUAttachment depth = GPU_ATTACHMENT_NONE, ...)` per the
 * symbol index) was dropped by the export. */
1219 {
1220 if (fb_ == nullptr) {
1221 fb_ = GPU_framebuffer_create(name_);
1222 }
1223 GPUAttachment config[] = {
1224 depth, color1, color2, color3, color4, color5, color6, color7, color8};
1225 GPU_framebuffer_config_array(fb_, config, sizeof(config) / sizeof(GPUAttachment));
1226 }
1227
/* Attachment-less framebuffer with an explicit default size (for shaders that
 * write via images instead of render targets). */
1231 void ensure(int2 target_size)
1232 {
1233 if (fb_ == nullptr) {
1234 fb_ = GPU_framebuffer_create(name_);
1235 }
1236 GPU_framebuffer_default_size(fb_, UNPACK2(target_size));
1237 }
1238
/* NOTE(doc-extraction): body line dropped — presumably
 * `GPU_framebuffer_bind(fb_);`; verify upstream. */
1239 void bind()
1240 {
1242 }
1243
1244 void clear_depth(float depth)
1245 {
1246 GPU_framebuffer_clear_depth(fb_, depth);
1247 }
1248
/* Move assignment. NOTE(doc-extraction): the signature line
 * (`Framebuffer &operator=(Framebuffer &&a)` per the symbol index) was dropped.
 * The `*this != a` comparison goes through the GPUFrameBuffer* conversion
 * operator below, i.e. it compares the wrapped handles. NOTE(review): unlike
 * Texture::operator=, the current `fb_` does not appear to be freed before
 * being overwritten — possibly an export hole; verify upstream. */
1250 {
1251 if (*this != a) {
1252 this->fb_ = a.fb_;
1253 this->name_ = a.name_;
1254 a.fb_ = nullptr;
1255 }
1256 return *this;
1257 }
1258
1259 operator GPUFrameBuffer *() const
1260 {
1261 return fb_;
1262 }
1263
/* To be able to use it with DRW-style *_ref() APIs. */
1264 GPUFrameBuffer **operator&()
1265 {
1266 return &fb_;
1267 }
1268
/* Full state swap. */
1272 static void swap(Framebuffer &a, Framebuffer &b)
1273 {
1274 std::swap(a.fb_, b.fb_);
1275 std::swap(a.name_, b.name_);
1276 }
1277};
1278
1281/* -------------------------------------------------------------------- */
/* Fixed-length rotating chain of resources (e.g. double/triple buffering).
 * Index 0 is always "current"; swap() rotates the chain by one slot. */
1287 template<typename T, int64_t len> class SwapChain {
1288 private:
1289 BLI_STATIC_ASSERT(len > 1, "A swap-chain needs more than 1 unit in length.");
1290 std::array<T, len> chain_;
1291
1292 public:
/* Rotate the chain: each element trades places with its successor, so the
 * previous "next" becomes "current". Trivial types use std::swap; wrapper types
 * from this file provide their own static T::swap (which also swaps GPU
 * handles). */
1293 void swap()
1294 {
1295 for (auto i : IndexRange(len - 1)) {
1296 auto i_next = (i + 1) % len;
1297 if constexpr (std::is_trivial_v<T>) {
1298 std::swap(chain_[i], chain_[i_next]);
1299 }
1300 else {
1301 T::swap(chain_[i], chain_[i_next]);
1302 }
1303 }
1304 }
1305
1306 constexpr int64_t size()
1307 {
1308 return len;
1309 }
1310
/* Mutable "current" element. NOTE(doc-extraction): the signature line
 * (presumably `T &current()`, mirroring the const overload below) was lost by
 * the export. */
1312 {
1313 return chain_[0];
1314 }
1315
/* Mutable "previous" element. NOTE(doc-extraction): signature line (presumably
 * `T &previous()`) lost by the export. */
1317 {
1318 /* Avoid modulo operation with negative numbers. */
1319 return chain_[(0 + len - 1) % len];
1320 }
1321
1322 T &next()
1323 {
1324 return chain_[(0 + 1) % len];
1325 }
1326
1327 const T &current() const
1328 {
1329 return chain_[0];
1330 }
1331
1332 const T &previous() const
1333 {
1334 /* Avoid modulo operation with negative numbers. */
1335 return chain_[(0 + len - 1) % len];
1336 }
1337
1338 const T &next() const
1339 {
1340 return chain_[(0 + 1) % len];
1341 }
1342};
1343
1346} // namespace blender::draw
@ G_DEBUG_GPU
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
MINLINE uint min_uu(uint a, uint b)
MINLINE unsigned int power_of_2_max_u(unsigned int x)
#define NAN_FLT
#define UNPACK2(a)
#define SWAP(type, a, b)
#define UNPACK3(a)
GPUFrameBuffer * GPU_framebuffer_create(const char *name)
#define GPU_FRAMEBUFFER_FREE_SAFE(fb)
void GPU_framebuffer_default_size(GPUFrameBuffer *framebuffer, int width, int height)
void GPU_framebuffer_config_array(GPUFrameBuffer *framebuffer, const GPUAttachment *config, int config_len)
#define GPU_ATTACHMENT_NONE
void GPU_framebuffer_bind(GPUFrameBuffer *framebuffer)
void GPU_framebuffer_clear_depth(GPUFrameBuffer *fb, float clear_depth)
void GPU_storagebuf_sync_to_host(GPUStorageBuf *ssbo)
GPUStorageBuf * GPU_storagebuf_create_ex(size_t size, const void *data, GPUUsageType usage, const char *name)
void GPU_storagebuf_clear_to_zero(GPUStorageBuf *ssbo)
void GPU_storagebuf_free(GPUStorageBuf *ssbo)
void GPU_storagebuf_update(GPUStorageBuf *ssbo, const void *data)
void GPU_storagebuf_read(GPUStorageBuf *ssbo, void *data)
int GPU_texture_height(const GPUTexture *texture)
GPUTexture * GPU_texture_create_2d(const char *name, int width, int height, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data)
GPUTexture * GPU_texture_create_1d(const char *name, int width, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data)
int GPU_texture_width(const GPUTexture *texture)
void GPU_texture_clear(GPUTexture *texture, eGPUDataFormat data_format, const void *data)
bool GPU_texture_has_float_format(const GPUTexture *tex)
void * GPU_texture_read(GPUTexture *texture, eGPUDataFormat data_format, int mip_level)
bool GPU_texture_is_cube(const GPUTexture *texture)
int GPU_texture_depth(const GPUTexture *texture)
int GPU_texture_mip_count(const GPUTexture *texture)
GPUTexture * GPU_texture_create_cube_array(const char *name, int width, int layer_len, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data)
eGPUDataFormat
@ GPU_DATA_INT
@ GPU_DATA_UINT
@ GPU_DATA_FLOAT
bool GPU_texture_has_integer_format(const GPUTexture *texture)
GPUTexture * GPU_texture_create_view(const char *name, GPUTexture *source_texture, eGPUTextureFormat view_format, int mip_start, int mip_len, int layer_start, int layer_len, bool cube_as_array, bool use_stencil)
bool GPU_texture_is_array(const GPUTexture *texture)
eGPUTextureUsage
@ GPU_TEXTURE_USAGE_GENERAL
bool GPU_texture_has_stencil_format(const GPUTexture *texture)
GPUTexture * GPU_texture_create_2d_array(const char *name, int width, int height, int layer_len, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data)
bool GPU_texture_has_signed_format(const GPUTexture *tex)
GPUTexture * GPU_texture_create_3d(const char *name, int width, int height, int depth, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const void *data)
bool GPU_texture_has_normalized_format(const GPUTexture *tex)
void GPU_texture_filter_mode(GPUTexture *texture, bool use_filter)
#define GPU_TEXTURE_FREE_SAFE(texture)
eGPUTextureFormat
GPUTexture * GPU_texture_create_cube(const char *name, int width, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data)
GPUTexture * GPU_texture_create_1d_array(const char *name, int width, int layer_len, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data)
int GPU_texture_layer_count(const GPUTexture *texture)
bool GPU_texture_has_depth_format(const GPUTexture *texture)
void GPU_texture_get_mipmap_size(GPUTexture *texture, int mip_level, int *r_size)
eGPUTextureFormat GPU_texture_format(const GPUTexture *texture)
GPUUniformBuf * GPU_uniformbuf_create_ex(size_t size, const void *data, const char *name)
void GPU_uniformbuf_update(GPUUniformBuf *ubo, const void *data)
void GPU_uniformbuf_free(GPUUniformBuf *ubo)
@ GPU_USAGE_DYNAMIC
@ GPU_USAGE_DEVICE_ONLY
Read Guarded memory(de)allocation.
ATTR_WARN_UNUSED_RESULT const BMVert * v
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
int64_t size() const
void append(const T &value)
static void swap(Framebuffer &a, Framebuffer &b)
void ensure(int2 target_size)
GPUFrameBuffer ** operator&()
Framebuffer & operator=(Framebuffer &&a)
void ensure(GPUAttachment depth=GPU_ATTACHMENT_NONE, GPUAttachment color1=GPU_ATTACHMENT_NONE, GPUAttachment color2=GPU_ATTACHMENT_NONE, GPUAttachment color3=GPU_ATTACHMENT_NONE, GPUAttachment color4=GPU_ATTACHMENT_NONE, GPUAttachment color5=GPU_ATTACHMENT_NONE, GPUAttachment color6=GPU_ATTACHMENT_NONE, GPUAttachment color7=GPU_ATTACHMENT_NONE, GPUAttachment color8=GPU_ATTACHMENT_NONE)
StorageArrayBuffer(const char *name=nullptr)
static void swap(StorageArrayBuffer &a, StorageArrayBuffer &b)
MutableSpan< T > as_span() const
void trim_to_next_power_of_2(int64_t required_size)
static void swap(StorageBuffer< T > &a, StorageBuffer< T > &b)
StorageBuffer< T > & operator=(const T &other)
StorageBuffer(const char *name=nullptr)
StorageVectorBuffer(const char *name=nullptr)
static void swap(StorageVectorBuffer &a, StorageVectorBuffer &b)
void append_as(ForwardT &&...value)
void extend(const Span< T > values)
static void swap(TextureFromPool &a, Texture &b)
bool ensure_1d_array(int, int, int, eGPUTextureFormat, eGPUTextureUsage, const float *)=delete
GPUTexture * stencil_view()=delete
GPUTexture * mip_view(int)=delete
bool ensure_cube(int, int, eGPUTextureFormat, eGPUTextureUsage, const float *)=delete
bool ensure_cube_array(int, int, int, eGPUTextureFormat, eGPUTextureUsage, const float *)=delete
void acquire(int2 extent, eGPUTextureFormat format, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL)
static void swap(TextureFromPool &a, TextureFromPool &b)
bool ensure_2d_array(int, int, int, int, eGPUTextureFormat, eGPUTextureUsage, const float *)=delete
static void swap(Texture &a, TextureFromPool &b)
TextureFromPool(const char *name="gpu::Texture")
GPUTexture * layer_view(int)=delete
void filter_mode(bool)=delete
bool ensure_3d(int, int, int, int, eGPUTextureFormat, eGPUTextureUsage, const float *)=delete
bool ensure_2d(int, int, int, eGPUTextureFormat, eGPUTextureUsage, float *)=delete
bool ensure_1d(int, int, eGPUTextureFormat, eGPUTextureUsage, const float *)=delete
GPUTexture * mip_view(int)=delete
GPUTexture * stencil_view()=delete
bool ensure_1d_array(int, int, int, eGPUTextureFormat, const float *)=delete
bool ensure_cube_array(int, int, int, eGPUTextureFormat, const float *)=delete
GPUTexture * layer_view(int)=delete
void filter_mode(bool)=delete
void wrap(GPUTexture *tex)
bool ensure_cube(int, int, eGPUTextureFormat, const float *)=delete
bool ensure_2d_array(int, int, int, int, eGPUTextureFormat, const float *)=delete
bool ensure_3d(int, int, int, int, eGPUTextureFormat, const float *)=delete
bool ensure_1d(int, int, eGPUTextureFormat, const float *)=delete
bool ensure_2d(int, int, int, eGPUTextureFormat, const float *)=delete
bool ensure_1d_array(eGPUTextureFormat format, int extent, int layers, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL, const float *data=nullptr, int mip_len=1)
Texture(const char *name, eGPUTextureFormat format, eGPUTextureUsage usage, int extent, const float *data=nullptr, bool cubemap=false, int mip_len=1)
Texture(const char *name, eGPUTextureFormat format, eGPUTextureUsage usage, int extent, int layers, const float *data=nullptr, bool cubemap=false, int mip_len=1)
bool ensure_mip_views(bool cube_as_array=false)
Vector< GPUTexture *, 0 > mip_views_
bool ensure_1d(eGPUTextureFormat format, int extent, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL, const float *data=nullptr, int mip_len=1)
static void swap(Texture &a, Texture &b)
Texture(const char *name="gpu::Texture")
void clear(float4 values)
Vector< GPUTexture *, 0 > layer_views_
T * read(eGPUDataFormat format, int miplvl=0)
void clear(uint4 values)
bool ensure_cube(eGPUTextureFormat format, int extent, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL, float *data=nullptr, int mip_len=1)
void clear(int4 values)
Texture(const char *name, eGPUTextureFormat format, eGPUTextureUsage usage, int2 extent, const float *data=nullptr, int mip_len=1)
void filter_mode(bool do_filter)
bool ensure_cube_array(eGPUTextureFormat format, int extent, int layers, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL, const float *data=nullptr, int mip_len=1)
bool ensure_2d_array(eGPUTextureFormat format, int2 extent, int layers, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL, const float *data=nullptr, int mip_len=1)
bool ensure_layer_views(bool cube_as_array=false)
bool ensure_2d(eGPUTextureFormat format, int2 extent, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL, const float *data=nullptr, int mip_len=1)
bool ensure_3d(eGPUTextureFormat format, int3 extent, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL, const float *data=nullptr, int mip_len=1)
GPUTexture * layer_view(int layer)
Texture(const char *name, eGPUTextureFormat format, eGPUTextureUsage usage, int3 extent, const float *data=nullptr, int mip_len=1)
GPUTexture * stencil_view(bool cube_as_array=false)
Texture & operator=(Texture &&a)
GPUTexture * mip_view(int miplvl)
int3 size(int miplvl=0) const
GPUTexture * layer_range_view(int layer_start, int layer_len, bool cube_as_array=false)
Texture(Texture &&other)=default
Texture(const char *name, eGPUTextureFormat format, eGPUTextureUsage usage, int2 extent, int layers, const float *data=nullptr, int mip_len=1)
UniformArrayBuffer(const char *name=nullptr)
UniformBuffer< T > & operator=(const T &other)
UniformBuffer(const char *name=nullptr)
BLI_STATIC_ASSERT(((sizeof(T) *len) % 16)==0, "Buffer size need to be aligned to size of float4.")
const T & operator[](int64_t index) const
StorageCommon(const char *name=nullptr)
UniformCommon(const char *name=nullptr)
local_group_size(16, 16) .push_constant(Type b
append
DRWManager DST
int len
GPUTexture * DRW_texture_pool_texture_acquire(DRWTexturePool *pool, int width, int height, eGPUTextureFormat format, eGPUTextureUsage usage)
void DRW_texture_pool_texture_release(DRWTexturePool *pool, GPUTexture *tmp_tex)
void DRW_texture_pool_take_texture_ownership(DRWTexturePool *pool, GPUTexture *tex)
void DRW_texture_pool_give_texture_ownership(DRWTexturePool *pool, GPUTexture *tex)
RAYTRACE_GROUP_SIZE additional_info("eevee_shared", "eevee_gbuffer_data", "eevee_global_ubo", "eevee_sampling_data", "eevee_utility_texture", "eevee_hiz_data", "draw_view") .specialization_constant(Type RAYTRACE_GROUP_SIZE in_sh_0_tx in_sh_2_tx screen_normal_tx GPU_RGBA8
format
void * MEM_mallocN_aligned(size_t len, size_t alignment, const char *str)
Definition mallocn.cc:110
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
#define T
#define G(x, y, z)
static GPUTexture * as_texture(Image *img)
static Image * as_image(GPUTexture *tex)
BLI_STATIC_ASSERT(MBC_BATCH_LEN< 32, "Number of batches exceeded the limit of bit fields")
VecBase< int32_t, 4 > int4
VecBase< uint32_t, 4 > uint4
VecBase< float, 4 > float4
VecBase< int32_t, 3 > int3
#define swap(a, b)
Definition sort.c:55
__int64 int64_t
Definition stdint.h:89
DRWTexturePool * texture_pool
DRWData * vmempool
PointerRNA * ptr
Definition wm_files.cc:4126