Blender V5.0
gpu_vertex_format.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2016 by Mike Erwin. All rights reserved.
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include "GPU_vertex_format.hh"
12#include "BLI_assert.h"
13#include "BLI_math_base.h"
14#include "GPU_capabilities.hh"
15
17#include "gpu_shader_private.hh"
19
20#include <cstddef>
21#include <cstring>
22
23#include "BLI_hash_mm2a.hh"
24#include "BLI_string.h"
25#include "BLI_utildefines.h"
26
27#define PACK_DEBUG 0
28
29#if PACK_DEBUG
30# include <stdio.h>
31#endif
32
33namespace blender::gpu {
34
35/* Used to combine legacy enums into new vertex attribute type. */
37 GPUVertFetchMode fetch_mode,
38 uint32_t component_len)
39{
40 switch (component_type) {
41 case GPU_COMP_I8: {
42 switch (fetch_mode) {
44 switch (component_len) {
45 case 1:
46 return VertAttrType::SNORM_8_DEPRECATED;
47 case 2:
48 return VertAttrType::SNORM_8_8_DEPRECATED;
49 case 3:
50 return VertAttrType::SNORM_8_8_8_DEPRECATED;
51 case 4:
52 return VertAttrType::SNORM_8_8_8_8;
53 }
54 break;
55 case GPU_FETCH_INT:
56 switch (component_len) {
57 case 1:
58 return VertAttrType::SINT_8_DEPRECATED;
59 case 2:
60 return VertAttrType::SINT_8_8_DEPRECATED;
61 case 3:
62 return VertAttrType::SINT_8_8_8_DEPRECATED;
63 case 4:
64 return VertAttrType::SINT_8_8_8_8;
65 }
66 break;
67 default:
68 break;
69 }
70 break;
71 }
72 case GPU_COMP_U8: {
73 switch (fetch_mode) {
75 switch (component_len) {
76 case 1:
77 return VertAttrType::UNORM_8_DEPRECATED;
78 case 2:
79 return VertAttrType::UNORM_8_8_DEPRECATED;
80 case 3:
81 return VertAttrType::UNORM_8_8_8_DEPRECATED;
82 case 4:
83 return VertAttrType::UNORM_8_8_8_8;
84 }
85 break;
86 case GPU_FETCH_INT:
87 switch (component_len) {
88 case 1:
89 return VertAttrType::UINT_8_DEPRECATED;
90 case 2:
91 return VertAttrType::UINT_8_8_DEPRECATED;
92 case 3:
93 return VertAttrType::UINT_8_8_8_DEPRECATED;
94 case 4:
95 return VertAttrType::UINT_8_8_8_8;
96 }
97 break;
98 default:
99 break;
100 }
101 break;
102 }
103 case GPU_COMP_I16: {
104 switch (fetch_mode) {
106 switch (component_len) {
107 case 1:
108 return VertAttrType::SNORM_16_DEPRECATED;
109 case 2:
110 return VertAttrType::SNORM_16_16;
111 case 3:
112 return VertAttrType::SNORM_16_16_16_DEPRECATED;
113 case 4:
114 return VertAttrType::SNORM_16_16_16_16;
115 }
116 break;
117 case GPU_FETCH_INT:
118 switch (component_len) {
119 case 1:
120 return VertAttrType::SINT_16_DEPRECATED;
121 case 2:
122 return VertAttrType::SINT_16_16;
123 case 3:
124 return VertAttrType::SINT_16_16_16_DEPRECATED;
125 case 4:
126 return VertAttrType::SINT_16_16_16_16;
127 }
128 break;
129 default:
130 break;
131 }
132 break;
133 }
134 case GPU_COMP_U16: {
135 switch (fetch_mode) {
137 switch (component_len) {
138 case 1:
139 return VertAttrType::UNORM_16_DEPRECATED;
140 case 2:
141 return VertAttrType::UNORM_16_16;
142 case 3:
143 return VertAttrType::UNORM_16_16_16_DEPRECATED;
144 case 4:
145 return VertAttrType::UNORM_16_16_16_16;
146 }
147 break;
148 case GPU_FETCH_INT:
149 switch (component_len) {
150 case 1:
151 return VertAttrType::UINT_16_DEPRECATED;
152 case 2:
153 return VertAttrType::UINT_16_16;
154 case 3:
155 return VertAttrType::UINT_16_16_16_DEPRECATED;
156 case 4:
157 return VertAttrType::UINT_16_16_16_16;
158 }
159 break;
160 default:
161 break;
162 }
163 break;
164 }
165 case GPU_COMP_I32: {
166 switch (fetch_mode) {
167 case GPU_FETCH_INT:
168 switch (component_len) {
169 case 1:
170 return VertAttrType::SINT_32;
171 case 2:
172 return VertAttrType::SINT_32_32;
173 case 3:
174 return VertAttrType::SINT_32_32_32;
175 case 4:
176 return VertAttrType::SINT_32_32_32_32;
177 }
178 break;
179 default:
180 break;
181 }
182 break;
183 }
184 case GPU_COMP_U32: {
185 switch (fetch_mode) {
186 case GPU_FETCH_INT:
187 switch (component_len) {
188 case 1:
189 return VertAttrType::UINT_32;
190 case 2:
191 return VertAttrType::UINT_32_32;
192 case 3:
193 return VertAttrType::UINT_32_32_32;
194 case 4:
195 return VertAttrType::UINT_32_32_32_32;
196 }
197 break;
198 default:
199 break;
200 }
201 break;
202 }
203 case GPU_COMP_F32: {
204 switch (fetch_mode) {
205 case GPU_FETCH_FLOAT:
206 switch (component_len) {
207 case 1:
208 return VertAttrType::SFLOAT_32;
209 case 2:
210 return VertAttrType::SFLOAT_32_32;
211 case 3:
212 return VertAttrType::SFLOAT_32_32_32;
213 case 4:
214 return VertAttrType::SFLOAT_32_32_32_32;
215 }
216 break;
217 default:
218 break;
219 }
220 break;
221 }
222 case GPU_COMP_I10: {
223 switch (fetch_mode) {
225 return VertAttrType::SNORM_10_10_10_2;
226 default:
227 break;
228 }
229 break;
230 }
231 case GPU_COMP_MAX:
232 break;
233 }
234
236};
237
239{
240 switch (attr_type) {
241 case VertAttrType::SNORM_8_8_8_8:
242 case VertAttrType::SNORM_16_16:
243 case VertAttrType::SNORM_16_16_16_16:
244 case VertAttrType::UNORM_8_8_8_8:
245 case VertAttrType::UNORM_16_16:
246 case VertAttrType::UNORM_16_16_16_16:
247 case VertAttrType::SNORM_10_10_10_2:
248 case VertAttrType::UNORM_10_10_10_2:
249 return true;
250 default:
251 return false;
252 }
253};
254
256{
257 switch (attr_type) {
258 case VertAttrType::SFLOAT_32:
259 case VertAttrType::SFLOAT_32_32:
260 case VertAttrType::SFLOAT_32_32_32:
261 case VertAttrType::SFLOAT_32_32_32_32:
262 return true;
263 default:
264 return false;
265 }
266};
267
268} // namespace blender::gpu
269
271using namespace blender::gpu;
272using namespace blender::gpu::shader;
273
275{
276 if (is_fetch_float(this->format)) {
277 return GPU_FETCH_FLOAT;
278 }
279 if (is_fetch_normalized(this->format)) {
281 }
282 return GPU_FETCH_INT;
283}
284
286{
287 switch (this->format) {
288 case VertAttrType::SNORM_8_8_8_8:
289 case VertAttrType::SINT_8_8_8_8:
290 return GPU_COMP_I8;
291 case VertAttrType::SNORM_16_16:
292 case VertAttrType::SNORM_16_16_16_16:
293 case VertAttrType::SINT_16_16:
294 case VertAttrType::SINT_16_16_16_16:
295 return GPU_COMP_I16;
296 case VertAttrType::SINT_32:
297 case VertAttrType::SINT_32_32:
298 case VertAttrType::SINT_32_32_32:
299 case VertAttrType::SINT_32_32_32_32:
300 return GPU_COMP_I32;
301 case VertAttrType::UNORM_8_8_8_8:
302 case VertAttrType::UINT_8_8_8_8:
303 return GPU_COMP_U8;
304 case VertAttrType::UNORM_16_16:
305 case VertAttrType::UNORM_16_16_16_16:
306 case VertAttrType::UINT_16_16:
307 case VertAttrType::UINT_16_16_16_16:
308 return GPU_COMP_U16;
309 case VertAttrType::UINT_32:
310 case VertAttrType::UINT_32_32:
311 case VertAttrType::UINT_32_32_32:
312 case VertAttrType::UINT_32_32_32_32:
313 return GPU_COMP_U32;
314 case VertAttrType::SFLOAT_32:
315 case VertAttrType::SFLOAT_32_32:
316 case VertAttrType::SFLOAT_32_32_32:
317 case VertAttrType::SFLOAT_32_32_32_32:
318 return GPU_COMP_F32;
319 case VertAttrType::SNORM_10_10_10_2:
320 case VertAttrType::UNORM_10_10_10_2:
321 return GPU_COMP_I10;
322 default: /* TODO(fclem): This avoids warning caused by deprecated formats. */
324 break;
325 }
327 return GPU_COMP_I8;
328}
329
331{
332#ifndef NDEBUG
333 memset(format, 0, sizeof(GPUVertFormat));
334#else
335 format->attr_len = 0;
336 format->packed = false;
337 format->name_offset = 0;
338 format->name_len = 0;
339 format->deinterleaved = false;
340
341 for (uint i = 0; i < GPU_VERT_ATTR_MAX_LEN; i++) {
342 format->attrs[i].name_len = 0;
343 }
344#endif
345}
346
348{
349 /* copy regular struct fields */
350 memcpy(dest, &src, sizeof(GPUVertFormat));
351}
352
354{
355 BLI_assert(!this->packed);
356 VertexFormat_pack(this);
357}
358
360{
361 BLI_assert(format->packed && format->stride > 0);
362 return format->stride * vertex_len;
363}
364
366{
367 const uchar name_offset = format->name_offset;
368 /* Subtract one to make sure there's enough space for the last null terminator. */
369 const int64_t available = GPU_VERT_ATTR_NAMES_BUF_LEN - name_offset - 1;
370 const int64_t chars_to_copy = std::min(name.size(), available);
371
372 name.substr(0, available).copy_unsafe(format->names + name_offset);
373 BLI_assert((format->name_offset + chars_to_copy + 1) <= GPU_VERT_ATTR_NAMES_BUF_LEN);
374 format->name_offset += chars_to_copy + 1;
375
376 return name_offset;
377}
378
380 const StringRef name,
381 GPUVertCompType comp_type,
382 uint comp_len,
383 GPUVertFetchMode fetch_mode)
384{
385 return format->attribute_add(name, vertex_format_combine(comp_type, fetch_mode, comp_len));
386}
387
389 const StringRef name,
391{
392 return format->attribute_add(name, type);
393}
394
397 size_t offset)
398{
399 BLI_assert(this->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
400 BLI_assert(this->attr_len < GPU_VERT_ATTR_MAX_LEN); /* there's room for more */
401 BLI_assert(!this->packed); /* packed means frozen/locked */
403
404 this->name_len++; /* Multi-name support. */
405
406 const uint attr_id = this->attr_len++;
407 GPUVertAttr *attr = &this->attrs[attr_id];
408 attr->names[attr->name_len++] = copy_attr_name(this, name);
409 if (offset != -1) {
410 attr->offset = offset; /* Offset computed externally. */
411 }
412 else {
413 attr->offset = 0; /* offsets & stride are calculated later (during pack) */
414 }
415 attr->type.format = type;
416 return attr_id;
417}
418
420{
421 GPUVertAttr *attr = &format->attrs[format->attr_len - 1];
422 BLI_assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
424 format->name_len++; /* Multi-name support. */
425 attr->names[attr->name_len++] = copy_attr_name(format, alias);
426}
427
435
437{
438 /* Sanity check. Maximum can be upgraded if needed. */
439 BLI_assert(load_count > 1 && load_count < 5);
440 /* We need a packed format because of format->stride. */
441 if (!format->packed) {
443 }
444
445 BLI_assert((format->name_len + 1) * load_count < GPU_VERT_FORMAT_MAX_NAMES);
446 BLI_assert(format->attr_len * load_count <= GPU_VERT_ATTR_MAX_LEN);
447 BLI_assert(format->name_offset * load_count < GPU_VERT_ATTR_NAMES_BUF_LEN);
448
449 const GPUVertAttr *attr = format->attrs;
450 int attr_len = format->attr_len;
451 for (int i = 0; i < attr_len; i++, attr++) {
452 const char *attr_name = GPU_vertformat_attr_name_get(format, attr, 0);
453 for (int j = 1; j < load_count; j++) {
454 char load_name[/*MAX_CUSTOMDATA_LAYER_NAME*/ 68];
455 SNPRINTF(load_name, "%s%d", attr_name, j);
456 GPUVertAttr *dst_attr = &format->attrs[format->attr_len++];
457 *dst_attr = *attr;
458
459 dst_attr->names[0] = copy_attr_name(format, load_name);
460 dst_attr->name_len = 1;
461 dst_attr->offset += format->stride * j;
462 }
463 }
464}
465
467{
468 for (int i = 0; i < format->attr_len; i++) {
469 const GPUVertAttr *attr = &format->attrs[i];
470 for (int j = 0; j < attr->name_len; j++) {
471 const char *attr_name = GPU_vertformat_attr_name_get(format, attr, j);
472 if (name == attr_name) {
473 return i;
474 }
475 }
476 }
477 return -1;
478}
479
481{
482 BLI_assert(attr_id > -1 && attr_id < format->attr_len);
483 GPUVertAttr *attr = &format->attrs[attr_id];
484 char *attr_name = (char *)GPU_vertformat_attr_name_get(format, attr, 0);
485 BLI_assert(strlen(attr_name) == strlen(new_name));
486 int i = 0;
487 while (attr_name[i] != '\0') {
488 attr_name[i] = new_name[i];
489 i++;
490 }
491 attr->name_len = 1;
492}
493
/* Encode 8 original bytes into 11 safe bytes.
 * The input is read as a 64-bit integer and re-expressed in base 62 using
 * only identifier-safe characters (11 base-62 digits cover all 64 bits). */
static void safe_bytes(char out[11], const char data[8])
{
  const char safe_chars[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";

  /* Use memcpy instead of `*(uint64_t *)data`: the char buffer has no
   * alignment guarantee and the reinterpreting load violates strict
   * aliasing. memcpy compiles to the same single load. */
  uint64_t in;
  memcpy(&in, data, sizeof(in));
  for (int i = 0; i < 11; i++) {
    out[i] = safe_chars[in % 62lu];
    in /= 62lu;
  }
}
505
506void GPU_vertformat_safe_attr_name(const StringRef attr_name, char *r_safe_name, uint /*max_len*/)
507{
508 char data[8] = {0};
509 uint len = attr_name.size();
510
511 if (len > 8) {
512 /* Start with the first 4 chars of the name. */
513 memcpy(data, attr_name.data(), 4);
514 /* We use a hash to identify each data layer based on its name.
515 * NOTE: This is still prone to hash collision but the risks are very low. */
516 /* Start hashing after the first 2 chars. */
517 const StringRef to_hash = attr_name.drop_prefix(4);
518 *(uint *)&data[4] = BLI_hash_mm2(
519 reinterpret_cast<const uchar *>(to_hash.data()), to_hash.size(), 0);
520 }
521 else {
522 /* Copy the whole name. Collision is barely possible
523 * (hash would have to be equal to the last 4 bytes). */
524 memcpy(data, attr_name.data(), std::min<int>(8, len));
525 }
526 /* Convert to safe bytes characters. */
527 safe_bytes(r_safe_name, data);
528 /* End the string */
529 r_safe_name[11] = '\0';
530
532#if 0 /* For debugging */
533 printf("%s > %lx > %s\n", attr_name, *(uint64_t *)data, r_safe_name);
534#endif
535}
536
538{
539 /* Ideally we should change the stride and offset here. This would allow
540 * us to use GPU_vertbuf_attr_set / GPU_vertbuf_attr_fill. But since
541 * we use only 11 bits for attr->offset this limits the size of the
542 * buffer considerably. So instead we do the conversion when creating
543 * bindings in create_bindings(). */
544 format->deinterleaved = true;
545}
546
548{
549 const uint mod = offset % alignment;
550 return (mod == 0) ? 0 : (alignment - mod);
551}
552
#if PACK_DEBUG
/* Print one packed attribute: `pad` filler dashes, then `size` copies of a
 * letter identifying the attribute ('A' for index 0, 'B' for 1, ...). */
static void show_pack(uint a_idx, uint size, uint pad)
{
  const char marker = 'A' + a_idx;
  for (uint i = 0; i < pad; i++) {
    putchar('-');
  }
  for (uint i = 0; i < size; i++) {
    putchar(marker);
  }
}
#endif
565
567{
568 uint offset = 0;
569 for (uint a_idx = 0; a_idx < format->attr_len; a_idx++) {
570 GPUVertAttr *a = &format->attrs[a_idx];
571 a->offset = offset;
572 offset += a->type.size();
573 }
574 format->stride = offset;
575 format->packed = true;
576}
577
578static uint component_size_get(const Type gpu_type)
579{
580 switch (gpu_type) {
581 case Type::float2_t:
582 case Type::int2_t:
583 case Type::uint2_t:
584 return 2;
585 case Type::float3_t:
586 case Type::int3_t:
587 case Type::uint3_t:
588 return 3;
589 case Type::float4_t:
590 case Type::int4_t:
591 case Type::uint4_t:
592 return 4;
593 case Type::float3x3_t:
594 return 12;
595 case Type::float4x4_t:
596 return 16;
597 default:
598 return 1;
599 }
600}
601
603 GPUVertCompType *r_comp_type,
604 GPUVertFetchMode *r_fetch_mode)
605{
606 switch (gpu_type) {
607 case Type::float_t:
608 case Type::float2_t:
609 case Type::float3_t:
610 case Type::float4_t:
611 case Type::float3x3_t:
612 case Type::float4x4_t:
613 *r_comp_type = GPU_COMP_F32;
614 *r_fetch_mode = GPU_FETCH_FLOAT;
615 break;
616 case Type::int_t:
617 case Type::int2_t:
618 case Type::int3_t:
619 case Type::int4_t:
620 *r_comp_type = GPU_COMP_I32;
621 *r_fetch_mode = GPU_FETCH_INT;
622 break;
623 case Type::uint_t:
624 case Type::uint2_t:
625 case Type::uint3_t:
626 case Type::uint4_t:
627 *r_comp_type = GPU_COMP_U32;
628 *r_fetch_mode = GPU_FETCH_INT;
629 break;
630 default:
631 BLI_assert(0);
632 }
633}
634
636{
638
640 int location_test = 0, attrs_added = 0;
641 while (attrs_added < attr_len) {
642 char name[256];
643 Type gpu_type;
644 if (!GPU_shader_get_attribute_info(shader, location_test++, name, (int *)&gpu_type)) {
645 continue;
646 }
647
648 GPUVertCompType comp_type;
649 GPUVertFetchMode fetch_mode;
650 recommended_fetch_mode_and_comp_type(gpu_type, &comp_type, &fetch_mode);
651
652 int comp_len = component_size_get(gpu_type);
653
654 GPU_vertformat_attr_add_legacy(format, name, comp_type, comp_len, fetch_mode);
655 attrs_added++;
656 }
657}
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
uint32_t BLI_hash_mm2(const unsigned char *data, size_t len, uint32_t seed)
Definition hash_mm2a.cc:100
#define SNPRINTF(dst, format,...)
Definition BLI_string.h:604
unsigned char uchar
unsigned int uint
bool GPU_shader_get_attribute_info(const blender::gpu::Shader *shader, int attr_location, char r_name[256], int *r_type)
uint GPU_shader_get_attribute_len(const blender::gpu::Shader *shader)
void GPU_vertformat_attr_rename(GPUVertFormat *format, int attr, const char *new_name)
static constexpr int GPU_VERT_ATTR_MAX_LEN
BLI_INLINE const char * GPU_vertformat_attr_name_get(const GPUVertFormat *format, const GPUVertAttr *attr, uint n_idx)
int GPU_vertformat_attr_id_get(const GPUVertFormat *, blender::StringRef name)
static constexpr int GPU_VERT_ATTR_MAX_NAMES
GPUVertFetchMode
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
@ GPU_FETCH_INT
uint GPU_vertformat_attr_add_legacy(GPUVertFormat *, blender::StringRef name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
static constexpr int GPU_VERT_FORMAT_MAX_NAMES
void GPU_vertformat_from_shader(GPUVertFormat *format, const blender::gpu::Shader *shader)
void GPU_vertformat_alias_add(GPUVertFormat *, blender::StringRef alias)
GPUVertFormat GPU_vertformat_from_attribute(blender::StringRef name, blender::gpu::VertAttrType type)
void GPU_vertformat_safe_attr_name(blender::StringRef attr_name, char *r_safe_name, uint max_len)
static constexpr int GPU_VERT_ATTR_NAMES_BUF_LEN
void GPU_vertformat_multiload_enable(GPUVertFormat *format, int load_count)
void GPU_vertformat_clear(GPUVertFormat *)
void GPU_vertformat_copy(GPUVertFormat *dest, const GPUVertFormat &src)
static constexpr int GPU_MAX_SAFE_ATTR_NAME
uint GPU_vertformat_attr_add(GPUVertFormat *format, blender::StringRef name, blender::gpu::VertAttrType type)
void GPU_vertformat_deinterleave(GPUVertFormat *format)
GPUVertCompType
@ GPU_COMP_U16
@ GPU_COMP_MAX
@ GPU_COMP_I10
@ GPU_COMP_F32
@ GPU_COMP_I32
@ GPU_COMP_I8
@ GPU_COMP_U32
@ GPU_COMP_I16
@ GPU_COMP_U8
int pad[32 - sizeof(int)]
BMesh const char void * data
long long int int64_t
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
constexpr int64_t size() const
constexpr const char * data() const
constexpr StringRef drop_prefix(int64_t n) const
struct @021025263243242147216143265077100330027142264337::@240232116316110053135047106323056371161236243121 attr_id
#define in
#define out
#define printf(...)
VecBase< float, D > constexpr mod(VecOp< float, D >, VecOp< float, D >) RET
static void safe_bytes(char out[11], const char data[8])
static void recommended_fetch_mode_and_comp_type(Type gpu_type, GPUVertCompType *r_comp_type, GPUVertFetchMode *r_fetch_mode)
static uchar copy_attr_name(GPUVertFormat *format, const StringRef name)
static uint component_size_get(const Type gpu_type)
uint vertex_buffer_size(const GPUVertFormat *format, uint vertex_len)
uint padding(uint offset, uint alignment)
void VertexFormat_pack(GPUVertFormat *format)
format
static VertAttrType vertex_format_combine(GPUVertCompType component_type, GPUVertFetchMode fetch_mode, uint32_t component_len)
bool is_fetch_normalized(VertAttrType attr_type)
bool is_fetch_float(VertAttrType attr_type)
const char * name
blender::gpu::VertAttrType format
GPUVertFetchMode fetch_mode() const
GPUVertCompType comp_type() const
struct GPUVertAttr::Type type
uchar names[GPU_VERT_ATTR_MAX_NAMES]
GPUVertAttr attrs[GPU_VERT_ATTR_MAX_LEN]
uint attribute_add(blender::StringRef name, blender::gpu::VertAttrType type, size_t offset=-1)
i
Definition text_draw.cc:230
uint len