Blender V4.3
draw_manager_data.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2016 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
9#include "DRW_pbvh.hh"
10
11#include "draw_attributes.hh"
12#include "draw_manager_c.hh"
13
14#include "BKE_attribute.hh"
15#include "BKE_curve.hh"
16#include "BKE_duplilist.hh"
17#include "BKE_global.hh"
18#include "BKE_image.hh"
19#include "BKE_mesh.hh"
20#include "BKE_object.hh"
21#include "BKE_paint.hh"
22#include "BKE_volume.hh"
23
24/* For debug cursor position. */
25#include "WM_api.hh"
26#include "wm_window.hh"
27
28#include "DNA_curve_types.h"
29#include "DNA_mesh_types.h"
30#include "DNA_meta_types.h"
31#include "DNA_screen_types.h"
32
33#include "BLI_array.hh"
34#include "BLI_hash.h"
35#include "BLI_link_utils.h"
36#include "BLI_listbase.h"
37#include "BLI_math_bits.h"
38#include "BLI_memblock.h"
39#include "BLI_mempool.h"
40
41#ifdef DRW_DEBUG_CULLING
42# include "BLI_math_bits.h"
43#endif
44
45#include "GPU_capabilities.hh"
46#include "GPU_material.hh"
47#include "GPU_uniform_buffer.hh"
48
49#include "intern/gpu_codegen.hh"
50
59#define DISABLE_DEBUG_SHADER_PRINT_BARRIER
60
61/* -------------------------------------------------------------------- */
/**
 * Sort a chunk of commands by (a hash of) their batch pointer using a stable
 * counting sort, so consecutive draw-calls sharing the same
 * #blender::gpu::Batch end up adjacent.
 *
 * \param array: commands to sort (sorted in place).
 * \param array_tmp: caller-provided scratch buffer of at least `array_len`
 * commands; written to, then copied back into `array`.
 * \param array_len: number of commands, must be <= 128 (asserted below).
 *
 * NOTE: only valid for chunks containing #DRWCommandDraw commands exclusively,
 * since `KEY()` reads the `draw.batch` union member of each command.
 */
static void draw_call_sort(DRWCommand *array, DRWCommand *array_tmp, int array_len)
{
  /* Count unique batches. It's not really important if
   * there is collisions. If there is a lot of different batches,
   * the sorting benefit will be negligible.
   * So at least sort fast! */
  uchar idx[128] = {0};
  /* Shift by 6 positions knowing each blender::gpu::Batch is > 64 bytes */
#define KEY(a) ((size_t((a).draw.batch) >> 6) % ARRAY_SIZE(idx))
  BLI_assert(array_len <= ARRAY_SIZE(idx));

  for (int i = 0; i < array_len; i++) {
    /* Early out if nothing to sort: every command hashed into the same
     * bucket, so all draws (most likely) already use the same batch. */
    if (++idx[KEY(array[i])] == array_len) {
      return;
    }
  }
  /* Accumulate batch indices: idx[k] becomes the end offset of bucket k. */
  for (int i = 1; i < ARRAY_SIZE(idx); i++) {
    idx[i] += idx[i - 1];
  }
  /* Traverse in reverse to not change the order of the resource ID's
   * (this makes the counting sort stable: equal keys keep relative order). */
  for (int src = array_len - 1; src >= 0; src--) {
    array_tmp[--idx[KEY(array[src])]] = array[src];
  }
#undef KEY

  memcpy(array, array_tmp, sizeof(*array) * array_len);
}
94
96{
99 int ubo_len = 1 + chunk_id - ((elem_id == 0) ? 1 : 0);
100 size_t list_size = sizeof(GPUUniformBuf *) * ubo_len;
101
102 /* TODO: find a better system. currently a lot of obinfos UBO are going to be unused
103 * if not rendering with Eevee. */
104
105 if (vmempool->matrices_ubo == nullptr) {
106 vmempool->matrices_ubo = static_cast<GPUUniformBuf **>(MEM_callocN(list_size, __func__));
107 vmempool->obinfos_ubo = static_cast<GPUUniformBuf **>(MEM_callocN(list_size, __func__));
108 vmempool->ubo_len = ubo_len;
109 }
110
111 /* Remove unnecessary buffers */
112 for (int i = ubo_len; i < vmempool->ubo_len; i++) {
113 GPU_uniformbuf_free(vmempool->matrices_ubo[i]);
114 GPU_uniformbuf_free(vmempool->obinfos_ubo[i]);
115 }
116
117 if (ubo_len != vmempool->ubo_len) {
118 vmempool->matrices_ubo = static_cast<GPUUniformBuf **>(
119 MEM_recallocN(vmempool->matrices_ubo, list_size));
120 vmempool->obinfos_ubo = static_cast<GPUUniformBuf **>(
121 MEM_recallocN(vmempool->obinfos_ubo, list_size));
122 vmempool->ubo_len = ubo_len;
123 }
124
125 /* Create/Update buffers. */
126 for (int i = 0; i < ubo_len; i++) {
127 void *data_obmat = BLI_memblock_elem_get(vmempool->obmats, i, 0);
128 void *data_infos = BLI_memblock_elem_get(vmempool->obinfos, i, 0);
129 if (vmempool->matrices_ubo[i] == nullptr) {
130 vmempool->matrices_ubo[i] = GPU_uniformbuf_create(sizeof(DRWObjectMatrix) *
132 vmempool->obinfos_ubo[i] = GPU_uniformbuf_create(sizeof(DRWObjectInfos) *
134 }
135 GPU_uniformbuf_update(vmempool->matrices_ubo[i], data_obmat);
136 GPU_uniformbuf_update(vmempool->obinfos_ubo[i], data_infos);
137 }
138
140
141 /* Aligned alloc to avoid unaligned memcpy. */
142 DRWCommandChunk *chunk_tmp = static_cast<DRWCommandChunk *>(
143 MEM_mallocN_aligned(sizeof(DRWCommandChunk), 16, __func__));
144 DRWCommandChunk *chunk;
146 BLI_memblock_iternew(vmempool->commands, &iter);
147 while ((chunk = static_cast<DRWCommandChunk *>(BLI_memblock_iterstep(&iter)))) {
148 bool sortable = true;
149 /* We can only sort chunks that contain #DRWCommandDraw only. */
150 for (int i = 0; i < ARRAY_SIZE(chunk->command_type) && sortable; i++) {
151 if (chunk->command_type[i] != 0) {
152 sortable = false;
153 }
154 }
155 if (sortable) {
156 draw_call_sort(chunk->commands, chunk_tmp->commands, chunk->command_used);
157 }
158 }
159 MEM_freeN(chunk_tmp);
160}
161
164/* -------------------------------------------------------------------- */
169 int loc,
170 DRWUniformType type,
171 const void *value,
172 GPUSamplerState sampler_state,
173 int length,
174 int arraysize)
175{
176 if (loc == -1) {
177 /* Nice to enable eventually, for now EEVEE uses uniforms that might not exist. */
178 // BLI_assert(0);
179 return;
180 }
181
182 DRWUniformChunk *unichunk = shgroup->uniforms;
183 /* Happens on first uniform or if chunk is full. */
184 if (!unichunk || unichunk->uniform_used == unichunk->uniform_len) {
185 unichunk = static_cast<DRWUniformChunk *>(BLI_memblock_alloc(DST.vmempool->uniforms));
186 unichunk->uniform_len = BOUNDED_ARRAY_TYPE_SIZE<decltype(shgroup->uniforms->uniforms)>();
187 unichunk->uniform_used = 0;
188 BLI_LINKS_PREPEND(shgroup->uniforms, unichunk);
189 }
190
191 DRWUniform *uni = unichunk->uniforms + unichunk->uniform_used++;
192
193 uni->location = loc;
194 uni->type = type;
195 uni->length = length;
196 uni->arraysize = arraysize;
197
198 switch (type) {
200 BLI_assert(length <= 4);
201 memcpy(uni->ivalue, value, sizeof(int) * length);
202 break;
204 BLI_assert(length <= 4);
205 memcpy(uni->fvalue, value, sizeof(float) * length);
206 break;
208 uni->block = (GPUUniformBuf *)value;
209 break;
211 uni->block_ref = (GPUUniformBuf **)value;
212 break;
215 uni->texture = (GPUTexture *)value;
216 uni->sampler_state = sampler_state;
217 break;
220 uni->texture_ref = (GPUTexture **)value;
221 uni->sampler_state = sampler_state;
222 break;
224 uni->uniform_attrs = (GPUUniformAttrList *)value;
225 break;
226 default:
227 uni->pvalue = (const float *)value;
228 break;
229 }
230}
231
233 const char *name,
234 DRWUniformType type,
235 const void *value,
236 int length,
237 int arraysize)
238{
239 BLI_assert(arraysize > 0 && arraysize <= 16);
240 BLI_assert(length >= 0 && length <= 16);
241 BLI_assert(!ELEM(type,
248 int location = GPU_shader_get_uniform(shgroup->shader, name);
250 shgroup, location, type, value, GPUSamplerState::default_sampler(), length, arraysize);
251}
252
254 const char *name,
255 const GPUTexture *tex,
256 GPUSamplerState sampler_state)
257{
258 BLI_assert(tex != nullptr);
259 int loc = GPU_shader_get_sampler_binding(shgroup->shader, name);
260 drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE, tex, sampler_state, 0, 1);
261}
262
263void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
264{
266}
267
269 const char *name,
270 GPUTexture **tex,
271 GPUSamplerState sampler_state)
272{
273 BLI_assert(tex != nullptr);
274 int loc = GPU_shader_get_sampler_binding(shgroup->shader, name);
275 drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE_REF, tex, sampler_state, 0, 1);
276}
277
278void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
279{
281}
282
283void DRW_shgroup_uniform_image(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
284{
285 BLI_assert(tex != nullptr);
286 int loc = GPU_shader_get_sampler_binding(shgroup->shader, name);
289}
290
291void DRW_shgroup_uniform_image_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
292{
293 BLI_assert(tex != nullptr);
294 int loc = GPU_shader_get_sampler_binding(shgroup->shader, name);
297}
298
300 const char *name,
301 const GPUUniformBuf *ubo DRW_DEBUG_FILE_LINE_ARGS)
302{
303 BLI_assert(ubo != nullptr);
304 int loc = GPU_shader_get_ubo_binding(shgroup->shader, name);
305 if (loc == -1) {
306#ifdef DRW_UNUSED_RESOURCE_TRACKING
307 printf("%s:%d: Unable to locate binding of shader uniform buffer object: %s.\n",
308 file,
309 line,
310 name);
311#else
312 /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
313 // BLI_assert_msg(0, "Unable to locate binding of shader uniform buffer objects.");
314#endif
315 return;
316 }
318 shgroup, loc, DRW_UNIFORM_BLOCK, ubo, GPUSamplerState::default_sampler(), 0, 1);
319}
320
322 const char *name,
323 GPUUniformBuf **ubo DRW_DEBUG_FILE_LINE_ARGS)
324{
325 BLI_assert(ubo != nullptr);
326 int loc = GPU_shader_get_ubo_binding(shgroup->shader, name);
327 if (loc == -1) {
328#ifdef DRW_UNUSED_RESOURCE_TRACKING
329 printf("%s:%d: Unable to locate binding of shader uniform buffer object: %s.\n",
330 file,
331 line,
332 name);
333#else
334 /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
335 // BLI_assert_msg(0, "Unable to locate binding of shader uniform buffer objects.");
336#endif
337 return;
338 }
340 shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, GPUSamplerState::default_sampler(), 0, 1);
341}
342
344 const char *name,
345 const GPUStorageBuf *ssbo DRW_DEBUG_FILE_LINE_ARGS)
346{
347 BLI_assert(ssbo != nullptr);
348 /* TODO(@fclem): Fix naming inconsistency. */
349 int loc = GPU_shader_get_ssbo_binding(shgroup->shader, name);
350 if (loc == -1) {
351#ifdef DRW_UNUSED_RESOURCE_TRACKING
352 printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
353 file,
354 line,
355 name);
356#else
357 /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
358 // BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
359#endif
360 return;
361 }
364}
365
367 const char *name,
368 GPUStorageBuf **ssbo DRW_DEBUG_FILE_LINE_ARGS)
369{
370 BLI_assert(ssbo != nullptr);
371 /* TODO(@fclem): Fix naming inconsistency. */
372 int loc = GPU_shader_get_ssbo_binding(shgroup->shader, name);
373 if (loc == -1) {
374#ifdef DRW_UNUSED_RESOURCE_TRACKING
375 printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
376 file,
377 line,
378 name);
379#else
380 /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
381 // BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
382#endif
383 return;
384 }
387}
388
390 const char *name,
391 const int *value,
392 int arraysize)
393{
394 /* Boolean are expected to be 4bytes longs for OpenGL! */
395 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
396}
397
399 const char *name,
400 const float *value,
401 int arraysize)
402{
403 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
404}
405
407 const char *name,
408 const float *value,
409 int arraysize)
410{
411 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
412}
413
415 const char *name,
416 const float *value,
417 int arraysize)
418{
419 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
420}
421
423 const char *name,
424 const float *value,
425 int arraysize)
426{
427 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
428}
429
431 const char *name,
432 const int *value,
433 int arraysize)
434{
435 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
436}
437
439 const char *name,
440 const int *value,
441 int arraysize)
442{
443 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
444}
445
447 const char *name,
448 const int *value,
449 int arraysize)
450{
451 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
452}
453
455 const char *name,
456 const int *value,
457 int arraysize)
458{
459 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 4, arraysize);
460}
461
462void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3])
463{
464 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
465}
466
467void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4])
468{
469 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
470}
471
472void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
473{
474 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
475}
476
477void DRW_shgroup_uniform_ivec2_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
478{
479 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, value, 2, 1);
480}
481
482void DRW_shgroup_uniform_ivec3_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
483{
484 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, value, 3, 1);
485}
486
487void DRW_shgroup_uniform_ivec4_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
488{
489 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, value, 4, 1);
490}
491
492void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
493{
494 int ival = value;
495 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &ival, 1, 1);
496}
497
498void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name, const float value)
499{
500 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, &value, 1, 1);
501}
502
503void DRW_shgroup_uniform_vec2_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
504{
505 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, value, 2, 1);
506}
507
508void DRW_shgroup_uniform_vec3_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
509{
510 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, value, 3, 1);
511}
512
513void DRW_shgroup_uniform_vec4_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
514{
515 drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, value, 4, 1);
516}
517
519 const char *name,
520 const float (*value)[4])
521{
522 int location = GPU_shader_get_uniform(shgroup->shader, name);
523
524 if (location == -1) {
525 /* Nice to enable eventually, for now EEVEE uses uniforms that might not exist. */
526 // BLI_assert(0);
527 return;
528 }
529
530 /* Each array element stored as an individual entry in the uniform list.
531 * All entries from the same array share the same base location,
532 * and array-size used to determine the number of elements
533 * copied in draw_update_uniforms. */
534 for (int i = 0; i < 4; i++) {
536 location,
538 &value[i],
540 4,
541 4);
542 }
543}
544
546 const char *name,
548{
549 int location = GPU_shader_get_ssbo_binding(shgroup->shader, name);
550 if (location == -1) {
551#ifdef DRW_UNUSED_RESOURCE_TRACKING
552 printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
553 file,
554 line,
555 name);
556#else
557 BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
558#endif
559 return;
560 }
562 location,
564 vertex_buffer,
566 0,
567 1);
568}
569
571 const char *name,
572 blender::gpu::VertBuf **vertex_buffer
574{
575 int location = GPU_shader_get_ssbo_binding(shgroup->shader, name);
576 if (location == -1) {
577#ifdef DRW_UNUSED_RESOURCE_TRACKING
578 printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
579 file,
580 line,
581 name);
582#else
583 BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
584#endif
585 return;
586 }
588 location,
590 vertex_buffer,
592 0,
593 1);
594}
595
597 const char *name,
598 blender::gpu::VertBuf *vertex_buffer)
599{
600 int location = GPU_shader_get_sampler_binding(shgroup->shader, name);
601 if (location == -1) {
602 return;
603 }
605 location,
607 vertex_buffer,
609 0,
610 1);
611}
612
614 const char *name,
615 blender::gpu::VertBuf **vertex_buffer)
616{
617 int location = GPU_shader_get_sampler_binding(shgroup->shader, name);
618 if (location == -1) {
619 return;
620 }
622 location,
624 vertex_buffer,
626 0,
627 1);
628}
631/* -------------------------------------------------------------------- */
635static void drw_call_calc_orco(const Object *ob, float (*r_orcofacs)[4])
636{
637 const ID *ob_data = (ob) ? static_cast<const ID *>(ob->data) : nullptr;
638 struct {
639 float texspace_location[3], texspace_size[3];
640 } static_buf;
641 float *texspace_location = nullptr;
642 float *texspace_size = nullptr;
643 if (ob_data != nullptr) {
644 switch (GS(ob_data->name)) {
645 case ID_VO: {
646 const Volume &volume = *reinterpret_cast<const Volume *>(ob_data);
647 const std::optional<blender::Bounds<blender::float3>> bounds = BKE_volume_min_max(&volume);
648 if (bounds) {
649 texspace_location = static_buf.texspace_location;
650 texspace_size = static_buf.texspace_size;
651 mid_v3_v3v3(texspace_location, bounds->max, bounds->min);
652 sub_v3_v3v3(texspace_size, bounds->max, bounds->min);
653 texspace_size[0] = std::max(texspace_size[0], 0.001f);
654 texspace_size[1] = std::max(texspace_size[1], 0.001f);
655 texspace_size[2] = std::max(texspace_size[2], 0.001f);
656 }
657 break;
658 }
659 case ID_ME:
661 (Mesh *)ob_data, nullptr, &texspace_location, &texspace_size);
662 break;
663 case ID_CU_LEGACY: {
664 Curve *cu = (Curve *)ob_data;
666 texspace_location = cu->texspace_location;
667 texspace_size = cu->texspace_size;
668 break;
669 }
670 case ID_MB: {
671 MetaBall *mb = (MetaBall *)ob_data;
672 texspace_location = mb->texspace_location;
673 texspace_size = mb->texspace_size;
674 break;
675 }
676 default:
677 break;
678 }
679 }
680
681 if ((texspace_location != nullptr) && (texspace_size != nullptr)) {
682 mul_v3_v3fl(r_orcofacs[1], texspace_size, 2.0f);
683 invert_v3(r_orcofacs[1]);
684 sub_v3_v3v3(r_orcofacs[0], texspace_location, texspace_size);
685 negate_v3(r_orcofacs[0]);
686 mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
687 }
688 else {
689 copy_v3_fl(r_orcofacs[0], 0.0f);
690 copy_v3_fl(r_orcofacs[1], 1.0f);
691 }
692}
693
695 const Object *ob,
696 const float (*obmat)[4])
697{
698 copy_m4_m4(ob_mats->model, obmat);
699 if (ob) {
700 copy_m4_m4(ob_mats->modelinverse, ob->world_to_object().ptr());
701 }
702 else {
703 /* WATCH: Can be costly. */
704 invert_m4_m4(ob_mats->modelinverse, ob_mats->model);
705 }
706}
707
708static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, const Object *ob)
709{
710 BLI_assert(ob);
711 /* Index. */
712 ob_infos->ob_index = ob->index;
713 /* Orco factors. */
714 drw_call_calc_orco(ob, ob_infos->orcotexfac);
715 /* Random float value. */
716 uint random = (DST.dupli_source) ?
718 /* TODO(fclem): this is rather costly to do at runtime. Maybe we can
719 * put it in ob->runtime and make depsgraph ensure it is up to date. */
721 ob_infos->ob_random = random * (1.0f / float(0xFFFFFFFF));
722 /* Object State. */
723 ob_infos->ob_flag = 1.0f; /* Required to have a correct sign */
724 ob_infos->ob_flag += (ob->base_flag & BASE_SELECTED) ? (1 << 1) : 0;
725 ob_infos->ob_flag += (ob->base_flag & BASE_FROM_DUPLI) ? (1 << 2) : 0;
726 ob_infos->ob_flag += (ob->base_flag & BASE_FROM_SET) ? (1 << 3) : 0;
727 if (ob->base_flag & BASE_FROM_DUPLI) {
728 ob_infos->ob_flag += (DRW_object_get_dupli_parent(ob) == DST.draw_ctx.obact) ? (1 << 4) : 0;
729 }
730 else {
731 ob_infos->ob_flag += (ob == DST.draw_ctx.obact) ? (1 << 4) : 0;
732 }
733 /* Negative scaling. */
734 ob_infos->ob_flag *= (ob->transflag & OB_NEG_SCALE) ? -1.0f : 1.0f;
735 /* Object Color. */
736 copy_v4_v4(ob_infos->ob_color, ob->color);
737}
738
739static void drw_call_culling_init(DRWCullingState *cull, const Object *ob)
740{
741 using namespace blender;
742 std::optional<Bounds<float3>> bounds;
743 if (ob != nullptr && (bounds = BKE_object_boundbox_get(ob))) {
744 float corner[3];
745 /* Get BoundSphere center and radius from the BoundBox. */
746 mid_v3_v3v3(cull->bsphere.center, bounds->max, bounds->min);
747 mul_v3_m4v3(corner, ob->object_to_world().ptr(), bounds->max);
748 mul_m4_v3(ob->object_to_world().ptr(), cull->bsphere.center);
749 cull->bsphere.radius = len_v3v3(cull->bsphere.center, corner);
750
751 /* Bypass test for very large objects (see #67319). */
752 if (UNLIKELY(cull->bsphere.radius > 1e12)) {
753 cull->bsphere.radius = -1.0f;
754 }
755 }
756 else {
757 /* Bypass test. */
758 cull->bsphere.radius = -1.0f;
759 }
760 /* Reset user data */
761 cull->user_data = nullptr;
762}
763
764static DRWResourceHandle drw_resource_handle_new(const float (*obmat)[4], const Object *ob)
765{
766 DRWCullingState *culling = static_cast<DRWCullingState *>(
768 DRWObjectMatrix *ob_mats = static_cast<DRWObjectMatrix *>(
770 /* FIXME Meh, not always needed but can be accessed after creation.
771 * Also it needs to have the same resource handle. */
772 DRWObjectInfos *ob_infos = static_cast<DRWObjectInfos *>(
774 UNUSED_VARS(ob_infos);
775
778
779 if (ob && (ob->transflag & OB_NEG_SCALE)) {
781 }
782
783 drw_call_matrix_init(ob_mats, ob, obmat);
784 drw_call_culling_init(culling, ob);
785 /* ob_infos is init only if needed. */
786
787 return handle;
788}
789
791{
793 if (handle == 0) {
794 /* Handle not yet allocated. Return next handle. */
795 handle = DST.resource_handle;
796 }
797 return handle & ~(1u << 31);
798}
799
801 const float (*obmat)[4],
802 const Object *ob)
803{
804 if (ob == nullptr) {
805 if (obmat == nullptr) {
806 DRWResourceHandle handle = 0;
807 return handle;
808 }
809
810 return drw_resource_handle_new(obmat, nullptr);
811 }
812
813 if (DST.ob_handle == 0) {
816 }
817
818 if (shgroup->objectinfo) {
821 DRWObjectInfos *ob_infos = static_cast<DRWObjectInfos *>(
823
824 drw_call_obinfos_init(ob_infos, ob);
825 }
826 }
827
828 if (shgroup->uniform_attrs) {
830 shgroup->uniform_attrs,
831 &DST.ob_handle,
832 ob,
835 }
836
837 return DST.ob_handle;
838}
839
840static void command_type_set(uint64_t *command_type_bits, int index, eDRWCommandType type)
841{
842 command_type_bits[index / 16] |= uint64_t(type) << ((index % 16) * 4);
843}
844
845eDRWCommandType command_type_get(const uint64_t *command_type_bits, int index)
846{
847 return eDRWCommandType((command_type_bits[index / 16] >> ((index % 16) * 4)) & 0xF);
848}
849
851{
852 DRWCommandChunk *chunk = shgroup->cmd.last;
853
854 if (chunk == nullptr) {
855 DRWCommandSmallChunk *smallchunk = static_cast<DRWCommandSmallChunk *>(
857 smallchunk->command_len = ARRAY_SIZE(smallchunk->commands);
858 smallchunk->command_used = 0;
859 smallchunk->command_type[0] = 0x0lu;
860 chunk = (DRWCommandChunk *)smallchunk;
861 BLI_LINKS_APPEND(&shgroup->cmd, chunk);
862 }
863 else if (chunk->command_used == chunk->command_len) {
864 chunk = static_cast<DRWCommandChunk *>(BLI_memblock_alloc(DST.vmempool->commands));
865 chunk->command_len = ARRAY_SIZE(chunk->commands);
866 chunk->command_used = 0;
867 memset(chunk->command_type, 0x0, sizeof(chunk->command_type));
868 BLI_LINKS_APPEND(&shgroup->cmd, chunk);
869 }
870
871 command_type_set(chunk->command_type, chunk->command_used, type);
872
873 return chunk->commands + chunk->command_used++;
874}
875
877 blender::gpu::Batch *batch,
878 DRWResourceHandle handle)
879{
880 DRWCommandDraw *cmd = static_cast<DRWCommandDraw *>(drw_command_create(shgroup, DRW_CMD_DRAW));
881 cmd->batch = batch;
882 cmd->handle = handle;
883}
884
886 blender::gpu::Batch *batch,
887 DRWResourceHandle handle,
888 uint start,
889 uint count)
890{
891 DRWCommandDrawRange *cmd = static_cast<DRWCommandDrawRange *>(
893 cmd->batch = batch;
894 cmd->handle = handle;
895 cmd->vert_first = start;
896 cmd->vert_count = count;
897}
898
900 blender::gpu::Batch *batch,
901 DRWResourceHandle handle,
902 uint count,
903 bool use_attr)
904{
907 cmd->batch = batch;
908 cmd->handle = handle;
909 cmd->inst_count = count;
910 cmd->use_attrs = use_attr;
911}
912
914 blender::gpu::Batch *batch,
915 DRWResourceHandle handle,
916 uint start,
917 uint count)
918{
921 cmd->batch = batch;
922 cmd->handle = handle;
923 cmd->inst_first = start;
924 cmd->inst_count = count;
925}
926
928 int groups_x_len,
929 int groups_y_len,
930 int groups_z_len)
931{
932 DRWCommandCompute *cmd = static_cast<DRWCommandCompute *>(
934 cmd->groups_x_len = groups_x_len;
935 cmd->groups_y_len = groups_y_len;
936 cmd->groups_z_len = groups_z_len;
937}
938
939static void drw_command_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3])
940{
941 DRWCommandComputeRef *cmd = static_cast<DRWCommandComputeRef *>(
943 cmd->groups_ref = groups_ref;
944}
945
946static void drw_command_compute_indirect(DRWShadingGroup *shgroup, GPUStorageBuf *indirect_buf)
947{
950 cmd->indirect_buf = indirect_buf;
951}
952
954{
955 DRWCommandBarrier *cmd = static_cast<DRWCommandBarrier *>(
957 cmd->type = type;
958}
959
961 blender::gpu::Batch *batch,
962 DRWResourceHandle handle,
963 uint vert_count)
964{
967 cmd->batch = batch;
968 cmd->handle = handle;
969 cmd->vert_count = vert_count;
970}
971
973 blender::gpu::Batch *batch,
974 DRWResourceHandle handle,
975 GPUStorageBuf *indirect_buf)
976{
979 cmd->batch = batch;
980 cmd->handle = handle;
981 cmd->indirect_buf = indirect_buf;
982}
983
986 uint select_id)
987{
988 /* Only one can be valid. */
989 BLI_assert(buf == nullptr || select_id == -1);
990 DRWCommandSetSelectID *cmd = static_cast<DRWCommandSetSelectID *>(
992 cmd->select_buf = buf;
993 cmd->select_id = select_id;
994}
995
997 uint write_mask,
998 uint reference,
999 uint compare_mask)
1000{
1001 BLI_assert(write_mask <= 0xFF);
1002 BLI_assert(reference <= 0xFF);
1003 BLI_assert(compare_mask <= 0xFF);
1004 DRWCommandSetStencil *cmd = static_cast<DRWCommandSetStencil *>(
1006 cmd->write_mask = write_mask;
1007 cmd->comp_mask = compare_mask;
1008 cmd->ref = reference;
1009}
1010
1012 eGPUFrameBufferBits channels,
1013 uchar r,
1014 uchar g,
1015 uchar b,
1016 uchar a,
1017 float depth,
1018 uchar stencil)
1019{
1020 DRWCommandClear *cmd = static_cast<DRWCommandClear *>(
1022 cmd->clear_channels = channels;
1023 cmd->r = r;
1024 cmd->g = g;
1025 cmd->b = b;
1026 cmd->a = a;
1027 cmd->depth = depth;
1028 cmd->stencil = stencil;
1029}
1030
1032 DRWState enable,
1033 DRWState disable)
1034{
1035 /* TODO: Restrict what state can be changed. */
1038 cmd->enable = enable;
1039 cmd->disable = disable;
1040}
1041
1043 const Object *ob,
1044 const float (*obmat)[4],
1045 blender::gpu::Batch *geom,
1046 bool bypass_culling,
1047 void *user_data)
1048{
1049 BLI_assert(geom != nullptr);
1050 if (G.f & G_FLAG_PICKSEL) {
1051 drw_command_set_select_id(shgroup, nullptr, DST.select_id);
1052 }
1054 shgroup, ob ? ob->object_to_world().ptr() : obmat, ob);
1055 drw_command_draw(shgroup, geom, handle);
1056
1057 /* Culling data. */
1058 if (user_data || bypass_culling) {
1059 DRWCullingState *culling = static_cast<DRWCullingState *>(
1061
1062 if (user_data) {
1063 culling->user_data = user_data;
1064 }
1065 if (bypass_culling) {
1066 /* NOTE: this will disable culling for the whole object. */
1067 culling->bsphere.radius = -1.0f;
1068 }
1069 }
1070}
1071
1073 DRWShadingGroup *shgroup, const Object *ob, blender::gpu::Batch *geom, uint v_sta, uint v_num)
1074{
1075 BLI_assert(geom != nullptr);
1076 if (G.f & G_FLAG_PICKSEL) {
1077 drw_command_set_select_id(shgroup, nullptr, DST.select_id);
1078 }
1080 shgroup, ob ? ob->object_to_world().ptr() : nullptr, ob);
1081 drw_command_draw_range(shgroup, geom, handle, v_sta, v_num);
1082}
1083
1085 DRWShadingGroup *shgroup, const Object *ob, blender::gpu::Batch *geom, uint i_sta, uint i_num)
1086{
1087 BLI_assert(geom != nullptr);
1088 if (G.f & G_FLAG_PICKSEL) {
1089 drw_command_set_select_id(shgroup, nullptr, DST.select_id);
1090 }
1092 shgroup, ob ? ob->object_to_world().ptr() : nullptr, ob);
1093 drw_command_draw_intance_range(shgroup, geom, handle, i_sta, i_num);
1094}
1095
1097 int groups_x_len,
1098 int groups_y_len,
1099 int groups_z_len)
1100{
1101 BLI_assert(groups_x_len > 0 && groups_y_len > 0 && groups_z_len > 0);
1102 drw_command_compute(shgroup, groups_x_len, groups_y_len, groups_z_len);
1103}
1104
/**
 * Record a compute dispatch whose work-group counts are read through
 * `groups_ref` at execution time. The pointer itself is stored, not the
 * values (see #drw_command_compute_ref), so the array must stay valid until
 * the commands are executed.
 */
void DRW_shgroup_call_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3])
{
  drw_command_compute_ref(shgroup, groups_ref);
}
1109
/**
 * Record a compute dispatch that fetches its work-group counts from
 * `indirect_buf` on the GPU at execution time (the buffer reference is
 * stored in the command, see #drw_command_compute_indirect).
 */
void DRW_shgroup_call_compute_indirect(DRWShadingGroup *shgroup, GPUStorageBuf *indirect_buf)
{
  drw_command_compute_indirect(shgroup, indirect_buf);
}
1114
1116{
1117 drw_command_barrier(shgroup, type);
1118}
1119
1121 blender::gpu::Batch *geom,
1122 const Object *ob,
1123 uint vert_count)
1124{
1125 BLI_assert(vert_count > 0);
1126 BLI_assert(geom != nullptr);
1127 if (G.f & G_FLAG_PICKSEL) {
1128 drw_command_set_select_id(shgroup, nullptr, DST.select_id);
1129 }
1131 shgroup, ob ? ob->object_to_world().ptr() : nullptr, ob);
1132 drw_command_draw_procedural(shgroup, geom, handle, vert_count);
1133}
1134
1136 const Object *ob,
1137 uint point_count)
1138{
1139 blender::gpu::Batch *geom = drw_cache_procedural_points_get();
1140 drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, point_count);
1141}
1142
1144{
1145 blender::gpu::Batch *geom = drw_cache_procedural_lines_get();
1146 drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, line_count * 2);
1147}
1148
1150 const Object *ob,
1151 uint tri_count)
1152{
1153 blender::gpu::Batch *geom = drw_cache_procedural_triangles_get();
1154 drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, tri_count * 3);
1155}
1156
1158 GPUPrimType primitive_type,
1159 Object *ob,
1160 GPUStorageBuf *indirect_buf)
1161{
1162 blender::gpu::Batch *geom = nullptr;
1163 switch (primitive_type) {
1164 case GPU_PRIM_POINTS:
1166 break;
1167 case GPU_PRIM_LINES:
1169 break;
1170 case GPU_PRIM_TRIS:
1172 break;
1173 case GPU_PRIM_TRI_STRIP:
1175 break;
1176 default:
1178 "Unsupported primitive type in DRW_shgroup_call_procedural_indirect. Add new "
1179 "one as needed.");
1180 break;
1181 }
1182 if (G.f & G_FLAG_PICKSEL) {
1183 drw_command_set_select_id(shgroup, nullptr, DST.select_id);
1184 }
1186 shgroup, ob ? ob->object_to_world().ptr() : nullptr, ob);
1187 drw_command_draw_indirect(shgroup, geom, handle, indirect_buf);
1188}
1189
1191 const Object *ob,
1192 blender::gpu::Batch *geom,
1193 uint count)
1194{
1195 BLI_assert(geom != nullptr);
1196 if (G.f & G_FLAG_PICKSEL) {
1197 drw_command_set_select_id(shgroup, nullptr, DST.select_id);
1198 }
1200 shgroup, ob ? ob->object_to_world().ptr() : nullptr, ob);
1201 drw_command_draw_instance(shgroup, geom, handle, count, false);
1202}
1203
1205 const Object *ob,
1206 blender::gpu::Batch *geom,
1207 blender::gpu::Batch *inst_attributes)
1208{
1209 BLI_assert(geom != nullptr);
1210 BLI_assert(inst_attributes != nullptr);
1211 if (G.f & G_FLAG_PICKSEL) {
1212 drw_command_set_select_id(shgroup, nullptr, DST.select_id);
1213 }
1215 shgroup, ob ? ob->object_to_world().ptr() : nullptr, ob);
1216 blender::gpu::Batch *batch = DRW_temp_batch_instance_request(
1217 DST.vmempool->idatalist, nullptr, inst_attributes, geom);
1218 drw_command_draw_instance(shgroup, batch, handle, 0, true);
1219}
1220
1221#define SCULPT_DEBUG_BUFFERS (G.debug_value == 889)
1235
1236#define SCULPT_DEBUG_COLOR(id) (sculpt_debug_colors[id % 9])
/* Palette of 9 distinct RGBA colors, indexed modulo 9 through the
 * #SCULPT_DEBUG_COLOR macro to tint sculpt/PBVH debug drawing. */
static float sculpt_debug_colors[9][4] = {
    {1.0f, 0.2f, 0.2f, 1.0f},
    {0.2f, 1.0f, 0.2f, 1.0f},
    {0.2f, 0.2f, 1.0f, 1.0f},
    {1.0f, 1.0f, 0.2f, 1.0f},
    {0.2f, 1.0f, 1.0f, 1.0f},
    {1.0f, 0.2f, 1.0f, 1.0f},
    {1.0f, 0.7f, 0.2f, 1.0f},
    {0.2f, 1.0f, 0.7f, 1.0f},
    {0.7f, 0.2f, 1.0f, 1.0f},
};
1248
/* Submit one draw call per visible PBVH node batch, routed to the shading
 * group of the node's material index. NOTE(review): doc-export listing —
 * the `batches` span parameter line (1250) and the `if (SCULPT_DEBUG_BUFFERS) {`
 * guard line (1264) were dropped by the extraction. Code kept verbatim. */
 1249static void draw_pbvh_nodes(const Object &object,
 1251 const blender::Span<int> material_indices,
 1252 const blender::Span<DRWShadingGroup *> shading_groups,
 1253 const blender::IndexMask &nodes_to_draw)
 1254{
 1255 nodes_to_draw.foreach_index([&](const int i) {
 /* Nodes without a batch (nothing to draw) are skipped. */
 1256 if (!batches[i]) {
 1257 return;
 1258 }
 /* Empty material index span means everything uses material slot 0. */
 1259 const int material_index = material_indices.is_empty() ? 0 : material_indices[i];
 1260 DRWShadingGroup *shgrp = shading_groups[material_index];
 1261 if (!shgrp) {
 1262 return;
 1263 }
 1265 /* Color each buffers in different colors. Only work in solid/X-ray mode. */
 1266 shgrp = DRW_shgroup_create_sub(shgrp);
 1267 DRW_shgroup_uniform_vec3(shgrp, "materialDiffuseColor", SCULPT_DEBUG_COLOR(i), 1);
 1268 }
 1269
 1270 /* DRW_shgroup_call_no_cull reuses matrices calculations for all the drawcalls of this
 1271 * object. */
 1272 DRW_shgroup_call_no_cull(shgrp, batches[i], &object);
 1273 });
 1274}
1275
/* Debug callback invoked per PBVH node (see BKE_pbvh_draw_debug_cb): draws a
 * color-coded bounding box for each leaf node. `user_data` is a running node
 * counter used to pick a debug color. NOTE(review): doc-export listing — the
 * callback signature line (1276), the `PBVHNodeFlags flag` parameter line
 * (1280) and a line inside the leaf branch (1299, presumably the second
 * `DRW_debug_bbox` call using `color`) are missing. Code kept verbatim. */
 1277 void *user_data,
 1278 const float bmin[3],
 1279 const float bmax[3],
 1281{
 1282 int *debug_node_nr = (int *)user_data;
 1283 BoundBox bb;
 1284 BKE_boundbox_init_from_minmax(&bb, bmin, bmax);
 1285
 1286#if 0 /* Nodes hierarchy. */
 1287 if (flag & PBVH_Leaf) {
 1288 DRW_debug_bbox(&bb, blender::float4{0.0f, 1.0f, 0.0f, 1.0f});
 1289 }
 1290 else {
 1291 DRW_debug_bbox(&bb, blender::float4{0.5f, 0.5f, 0.5f, 0.6f});
 1292 }
 1293#else /* Color coded leaf bounds. */
 1294 if (flag & (PBVH_Leaf | PBVH_TexLeaf)) {
 1295 DRW_debug_bbox(&bb, SCULPT_DEBUG_COLOR((*debug_node_nr)++));
 /* Offset color index by the node's draw generation for extra variation. */
 1296 int color = (*debug_node_nr)++;
 1297 color += BKE_pbvh_debug_draw_gen_get(*node);
 1298
 1300 }
 1301#endif
 1302}
1303
/* Fetch the active view's 6 frustum planes and transform them into the
 * object's local space (so PBVH culling can work in object coordinates).
 * NOTE(review): doc-export listing — line 1307, which fills `planes` from the
 * view (presumably `DRW_view_frustum_planes_get(...)`), is missing. */
 1304static void drw_sculpt_get_frustum_planes(const Object *ob, float planes[6][4])
 1305{
 1306 /* TODO: take into account partial redraw for clipping planes. */
 1308
 1309 /* Transform clipping planes to object space. Transforming a plane with a
 1310 * 4x4 matrix is done by multiplying with the transpose inverse.
 1311 * The inverse cancels out here since we transform by inverse(obmat). */
 1312 float tmat[4][4];
 1313 transpose_m4_m4(tmat, ob->object_to_world().ptr());
 1314 for (int i = 0; i < 6; i++) {
 1315 mul_m4_v4(tmat, planes[i]);
 1316 }
 1317}
1318
/* Core sculpt-mode draw-call generation: culls PBVH nodes against the view
 * frustum, updates GPU batches for the nodes that need it, then submits a
 * draw call per visible node. NOTE(review): doc-export listing — the function
 * signature (line 1319, takes a `DRWSculptCallbackData *scd`) and several
 * wrapped lines (1337, 1411, 1414, 1417, 1421) are missing. Code verbatim. */
 1320{
 1321 using namespace blender;
 1322 /* pbvh::Tree should always exist for non-empty meshes, created by depsgraph eval. */
 1323 const Object &object = *scd->ob;
 1324 bke::pbvh::Tree *pbvh = (object.sculpt) ?
 1325 const_cast<bke::pbvh::Tree *>(bke::object::pbvh_get(object)) :
 1326 nullptr;
 1327 if (!pbvh) {
 1328 return;
 1329 }
 1330
 1331 const DRWContextState *drwctx = DRW_context_state_get();
 1332 RegionView3D *rv3d = drwctx->rv3d;
 1333 const bool navigating = rv3d && (rv3d->rflag & RV3D_NAVIGATING);
 1334
 /* Paint settings are only reachable when a context is available. */
 1335 Paint *paint = nullptr;
 1336 if (drwctx->evil_C != nullptr) {
 1338 }
 1339
 1340 /* Frustum planes to show only visible pbvh::Tree nodes. */
 1341 float update_planes[6][4];
 1342 float draw_planes[6][4];
 1343 PBVHFrustumPlanes update_frustum;
 1344 PBVHFrustumPlanes draw_frustum;
 1345
 /* With delayed updates, keep the last update frustum while navigating so
  * buffers are not rebuilt mid-navigation. */
 1346 if (paint && (paint->flags & PAINT_SCULPT_DELAY_UPDATES)) {
 1347 update_frustum.planes = update_planes;
 1348 update_frustum.num_planes = 6;
 1349 bke::pbvh::get_frustum_planes(*pbvh, &update_frustum);
 1350 if (!navigating) {
 1351 drw_sculpt_get_frustum_planes(scd->ob, update_planes);
 1352 update_frustum.planes = update_planes;
 1353 update_frustum.num_planes = 6;
 1354 bke::pbvh::set_frustum_planes(*pbvh, &update_frustum);
 1355 }
 1356 }
 1357 else {
 1358 drw_sculpt_get_frustum_planes(scd->ob, update_planes);
 1359 update_frustum.planes = update_planes;
 1360 update_frustum.num_planes = 6;
 1361 }
 1362
 /* Drawing always uses the current view frustum. */
 1363 drw_sculpt_get_frustum_planes(scd->ob, draw_planes);
 1364 draw_frustum.planes = draw_planes;
 1365 draw_frustum.num_planes = 6;
 1366
 1367 /* Fast mode to show low poly multires while navigating. */
 1368 scd->fast_mode = false;
 1369 if (paint && (paint->flags & PAINT_FAST_NAVIGATE)) {
 1370 scd->fast_mode = rv3d && (rv3d->rflag & RV3D_NAVIGATING);
 1371 }
 1372
 1373 /* Update draw buffers only for visible nodes while painting.
 1374 * But do update them otherwise so navigating stays smooth. */
 1375 bool update_only_visible = rv3d && !(rv3d->rflag & RV3D_PAINTING);
 1376 if (paint && (paint->flags & PAINT_SCULPT_DELAY_UPDATES)) {
 1377 update_only_visible = true;
 1378 }
 1379
 1380 bke::pbvh::update_normals_from_eval(*const_cast<Object *>(scd->ob), *pbvh);
 1381
 1382 draw::pbvh::DrawCache &draw_data = draw::pbvh::ensure_draw_data(pbvh->draw_data);
 1383
 /* Visible nodes = not fully hidden AND intersecting the draw frustum. */
 1384 IndexMaskMemory memory;
 1385 const IndexMask visible_nodes = bke::pbvh::search_nodes(
 1386 *pbvh, memory, [&](const bke::pbvh::Node &node) {
 1387 return !BKE_pbvh_node_fully_hidden_get(node) &&
 1388 BKE_pbvh_node_frustum_contain_AABB(&node, &draw_frustum);
 1389 });
 1390
 1391 const IndexMask nodes_to_update = update_only_visible ? visible_nodes :
 1392 bke::pbvh::all_leaf_nodes(*pbvh, memory);
 1393
 /* Wireframe requests line batches, everything else triangle batches. */
 1394 const draw::pbvh::ViewportRequest request{scd->attrs, scd->fast_mode};
 1395 Span<gpu::Batch *> batches;
 1396 if (scd->use_wire) {
 1397 batches = draw_data.ensure_lines_batches(object, request, nodes_to_update);
 1398 }
 1399 else {
 1400 batches = draw_data.ensure_tris_batches(object, request, nodes_to_update);
 1401 }
 1402
 1403 Span<int> material_indices;
 1404 if (scd->use_mats) {
 1405 material_indices = draw_data.ensure_material_indices(object);
 1406 }
 1407
 1408 draw_pbvh_nodes(object,
 1409 batches,
 1410 material_indices,
 1412 visible_nodes);
 1413
 /* Optional debug overlay of node bounds (guard line 1414 missing here). */
 1415 int debug_node_nr = 0;
 1416 DRW_debug_modelmat(object.object_to_world().ptr());
 1417 *pbvh,
 1418 (void (*)(
 1419 bke::pbvh::Node *n, void *d, const float min[3], const float max[3], PBVHNodeFlags f))
 1422 &debug_node_nr);
 1423 }
 1424}
1425
/* Single-shading-group sculpt draw: builds the attribute request list
 * (position, normal, and optionally mask / face set / color / UV) and hands
 * off to the sculpt call generator. NOTE(review): doc-export listing — the
 * signature line (1426, takes `DRWShadingGroup *shgroup` first), the
 * `DRWSculptCallbackData scd{};` line (1436), the mesh lookup line (1455)
 * and the final `drw_sculpt_generate_calls(&scd);` line (1476) are missing. */
 1427 Object *ob,
 1428 bool use_wire,
 1429 bool use_mask,
 1430 bool use_fset,
 1431 bool use_color,
 1432 bool use_uv)
 1433{
 1434 using namespace blender;
 1435 using namespace blender::draw;
 1437 scd.ob = ob;
 1438 scd.shading_groups = &shgroup;
 1439 scd.num_shading_groups = 1;
 1440 scd.use_wire = use_wire;
 1441 scd.use_mats = false;
 1442 scd.use_mask = use_mask;
 1443
 /* Position and normal are always required; the rest are opt-in. */
 1444 Vector<pbvh::AttributeRequest, 16> attrs;
 1445
 1446 attrs.append(pbvh::CustomRequest::Position);
 1447 attrs.append(pbvh::CustomRequest::Normal);
 1448 if (use_mask) {
 1449 attrs.append(pbvh::CustomRequest::Mask);
 1450 }
 1451 if (use_fset) {
 1452 attrs.append(pbvh::CustomRequest::FaceSet);
 1453 }
 1454
 1456 const bke::AttributeAccessor attributes = mesh->attributes();
 1457
 /* Active color attribute is requested with its real type and domain. */
 1458 if (use_color) {
 1459 if (const char *name = mesh->active_color_attribute) {
 1460 if (const std::optional<bke::AttributeMetaData> meta_data = attributes.lookup_meta_data(
 1461 name))
 1462 {
 1463 attrs.append(pbvh::GenericRequest{name, meta_data->data_type, meta_data->domain});
 1464 }
 1465 }
 1466 }
 1467
 /* Active UV layer is a corner-domain float2 attribute. */
 1468 if (use_uv) {
 1469 if (const char *name = CustomData_get_active_layer_name(&mesh->corner_data, CD_PROP_FLOAT2)) {
 1470 attrs.append(pbvh::GenericRequest{name, CD_PROP_FLOAT2, bke::AttrDomain::Corner});
 1471 }
 1472 }
 1473
 1474 scd.attrs = attrs;
 1475
 1477}
1478
/* Multi-material sculpt draw: derives the attribute request list from the
 * GPU materials' attribute needs (plus requested UV layers), then generates
 * draw calls with per-node material routing. NOTE(review): doc-export
 * listing — the signature line (1479, takes `DRWShadingGroup **shgroups`
 * first), the generic-attribute append line (1506), the `attrs` vector
 * declaration (1499), the `DRWSculptCallbackData scd{};` line (1520) and
 * the final `drw_sculpt_generate_calls(&scd);` line (1529) are missing. */
 1480 GPUMaterial **gpumats,
 1481 int num_shgroups,
 1482 const Object *ob)
 1483{
 1484 using namespace blender;
 1485 using namespace blender::draw;
 1486 DRW_Attributes draw_attrs;
 1487 DRW_MeshCDMask cd_needed;
 1488
 1489 const Mesh *mesh = static_cast<const Mesh *>(ob->data);
 1490
 /* Without materials there is nothing to query: zero the request structs. */
 1491 if (gpumats) {
 1492 DRW_mesh_get_attributes(*ob, *mesh, gpumats, num_shgroups, &draw_attrs, &cd_needed);
 1493 }
 1494 else {
 1495 memset(&draw_attrs, 0, sizeof(draw_attrs));
 1496 memset(&cd_needed, 0, sizeof(cd_needed));
 1497 }
 1498
 1500
 1501 attrs.append(pbvh::CustomRequest::Position);
 1502 attrs.append(pbvh::CustomRequest::Normal);
 1503
 1504 for (int i = 0; i < draw_attrs.num_requests; i++) {
 1505 const DRW_AttributeRequest &req = draw_attrs.requests[i];
 1507 }
 1508
 1509 /* UV maps are not in attribute requests. */
 /* `cd_needed.uv` is a 32-bit layer mask; resolve each set bit to its layer name. */
 1510 for (uint i = 0; i < 32; i++) {
 1511 if (cd_needed.uv & (1 << i)) {
 1512 int layer_i = CustomData_get_layer_index_n(&mesh->corner_data, CD_PROP_FLOAT2, i);
 1513 CustomDataLayer *layer = layer_i != -1 ? mesh->corner_data.layers + layer_i : nullptr;
 1514 if (layer) {
 1515 attrs.append(pbvh::GenericRequest{layer->name, CD_PROP_FLOAT2, bke::AttrDomain::Corner});
 1516 }
 1517 }
 1518 }
 1519
 1521 scd.ob = ob;
 1522 scd.shading_groups = shgroups;
 1523 scd.num_shading_groups = num_shgroups;
 1524 scd.use_wire = false;
 1525 scd.use_mats = true;
 1526 scd.use_mask = false;
 1527 scd.attrs = std::move(attrs);
 1528
 1530}
1531
1533
/* Create a dynamic call buffer: a temporary vertex buffer the caller fills
 * incrementally (DRW_buffer_add_entry_*), drawn as `prim_type` primitives.
 * NOTE(review): doc-export listing — the signature lines (1534-1535), the
 * `BLI_memblock_alloc(...)` continuation (1542) and the select-buffer setup
 * lines (1550, 1552-1553) are missing. Code kept verbatim. */
 1536 GPUPrimType prim_type)
 1537{
 1539 BLI_assert(format != nullptr);
 1540
 1541 DRWCallBuffer *callbuf = static_cast<DRWCallBuffer *>(
 1543 callbuf->buf = DRW_temp_buffer_request(DST.vmempool->idatalist, format, &callbuf->count);
 1544 callbuf->buf_select = nullptr;
 1545 callbuf->count = 0;
 1546
 1547 if (G.f & G_FLAG_PICKSEL) {
 1548 /* Not actually used for rendering but allocated in one chunk. */
 1549 if (inst_select_format.attr_len == 0) {
 1551 }
 1554 drw_command_set_select_id(shgroup, callbuf->buf_select, -1);
 1555 }
 1556
 /* The batch is created now; its vertex count follows `callbuf->count`. */
 1557 DRWResourceHandle handle = drw_resource_handle(shgroup, nullptr, nullptr);
 1558 blender::gpu::Batch *batch = DRW_temp_batch_request(
 1559 DST.vmempool->idatalist, callbuf->buf, prim_type);
 1560 drw_command_draw(shgroup, batch, handle);
 1561
 1562 return callbuf;
 1563}
1564
/* Instancing variant of the dynamic call buffer: the incrementally filled
 * buffer provides per-instance attributes for drawing `geom`.
 * NOTE(review): doc-export listing — signature lines (1565-1566), the
 * allocation continuation (1573) and select-buffer setup lines (1581,
 * 1583-1584) are missing. Code kept verbatim. */
 1567 blender::gpu::Batch *geom)
 1568{
 1569 BLI_assert(geom != nullptr);
 1570 BLI_assert(format != nullptr);
 1571
 1572 DRWCallBuffer *callbuf = static_cast<DRWCallBuffer *>(
 1574 callbuf->buf = DRW_temp_buffer_request(DST.vmempool->idatalist, format, &callbuf->count);
 1575 callbuf->buf_select = nullptr;
 1576 callbuf->count = 0;
 1577
 1578 if (G.f & G_FLAG_PICKSEL) {
 1579 /* Not actually used for rendering but allocated in one chunk. */
 1580 if (inst_select_format.attr_len == 0) {
 1582 }
 1585 drw_command_set_select_id(shgroup, callbuf->buf_select, -1);
 1586 }
 1587
 1588 DRWResourceHandle handle = drw_resource_handle(shgroup, nullptr, nullptr);
 /* `callbuf->buf` supplies per-instance data, `geom` the instanced geometry. */
 1589 blender::gpu::Batch *batch = DRW_temp_batch_instance_request(
 1590 DST.vmempool->idatalist, callbuf->buf, nullptr, geom);
 1591 drw_command_draw(shgroup, batch, handle);
 1592
 1593 return callbuf;
 1594}
1595
/* Append one whole-vertex record (`data` matches the buffer's full vertex
 * format) to a dynamic call buffer, growing the buffer when it is full.
 * NOTE(review): doc-export listing — the resize lines (1602 and 1609,
 * presumably `GPU_vertbuf_data_resize(...)` calls) are missing. */
 1596void DRW_buffer_add_entry_struct(DRWCallBuffer *callbuf, const void *data)
 1597{
 1598 blender::gpu::VertBuf *buf = callbuf->buf;
 /* Buffer is full exactly when count reaches the allocated vertex count. */
 1599 const bool resize = (callbuf->count == GPU_vertbuf_get_vertex_alloc(buf));
 1600
 1601 if (UNLIKELY(resize)) {
 1603 }
 1604
 1605 GPU_vertbuf_vert_set(buf, callbuf->count, data);
 1606
 /* Keep the parallel selection-ID buffer in sync when picking is active. */
 1607 if (G.f & G_FLAG_PICKSEL) {
 1608 if (UNLIKELY(resize)) {
 1610 }
 1611 GPU_vertbuf_attr_set(callbuf->buf_select, 0, callbuf->count, &DST.select_id);
 1612 }
 1613
 1614 callbuf->count++;
 1615}
1616
/* Append one vertex to a dynamic call buffer, passing each attribute value
 * separately (`attr[i]` for attribute i of the buffer's format).
 * NOTE(review): doc-export listing — the resize lines (1626 and 1635) are
 * missing, mirroring DRW_buffer_add_entry_struct above. */
 1617void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint attr_len)
 1618{
 1619 blender::gpu::VertBuf *buf = callbuf->buf;
 1620 const bool resize = (callbuf->count == GPU_vertbuf_get_vertex_alloc(buf));
 1621
 /* The caller must supply exactly one value per attribute of the format. */
 1622 BLI_assert(attr_len == GPU_vertbuf_get_format(buf)->attr_len);
 1623 UNUSED_VARS_NDEBUG(attr_len);
 1624
 1625 if (UNLIKELY(resize)) {
 1627 }
 1628
 1629 for (int i = 0; i < attr_len; i++) {
 1630 GPU_vertbuf_attr_set(buf, i, callbuf->count, attr[i]);
 1631 }
 1632
 1633 if (G.f & G_FLAG_PICKSEL) {
 1634 if (UNLIKELY(resize)) {
 1636 }
 1637 GPU_vertbuf_attr_set(callbuf->buf_select, 0, callbuf->count, &DST.select_id);
 1638 }
 1639
 1640 callbuf->count++;
 1641}
1642
1645/* -------------------------------------------------------------------- */
/* Initialize a freshly-allocated shading group for `shader`: query all the
 * builtin uniform/UBO locations the draw manager manages itself and record
 * the corresponding uniform bindings. NOTE(review): doc-export listing —
 * every `drw_shgroup_uniform_create_ex(` call line and its uniform-type
 * argument lines were dropped by the extraction (e.g. 1674, 1676, 1678,
 * 1684, 1686, 1688, ...), as were lines 1715 and 1754-1758, 1764-1768 and
 * the trailing assert block 1774-1784. Code kept verbatim. */
 1649static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
 1650{
 1651 shgroup->uniforms = nullptr;
 1652 shgroup->uniform_attrs = nullptr;
 1653
 /* Builtin block/uniform lookups; -1 means the shader does not use it. */
 1654 int clipping_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_CLIPPING);
 1655 int view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_VIEW);
 1656 int model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_MODEL);
 1657 int info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_INFO);
 1658 int baseinst_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_BASE_INSTANCE);
 1659 int chunkid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_CHUNK);
 1660 int resourceid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_ID);
 1661
 1662 /* TODO(@fclem): Will take the place of the above after the GPUShaderCreateInfo port. */
 1663 if (view_ubo_location == -1) {
 1664 view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_VIEW);
 1665 }
 1666 if (model_ubo_location == -1) {
 1667 model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_MODEL);
 1668 }
 1669 if (info_ubo_location == -1) {
 1670 info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_INFOS);
 1671 }
 1672
 /* Each block below registers a uniform binding when the shader uses it
  * (the `drw_shgroup_uniform_create_ex(` call lines are missing here). */
 1673 if (chunkid_location != -1) {
 1675 chunkid_location,
 1677 nullptr,
 1679 0,
 1680 1);
 1681 }
 1682
 1683 if (resourceid_location != -1) {
 1685 resourceid_location,
 1687 nullptr,
 1689 0,
 1690 1);
 1691 }
 1692
 1693 if (baseinst_location != -1) {
 1695 baseinst_location,
 1697 nullptr,
 1699 0,
 1700 1);
 1701 }
 1702
 1703 if (model_ubo_location != -1) {
 1705 model_ubo_location,
 1707 nullptr,
 1709 0,
 1710 1);
 1711 }
 1712 else {
 1713 /* NOTE: This is only here to support old hardware fallback where uniform buffer is still
 1714 * too slow or buggy. */
 1716 int modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
 1717 if (model != -1) {
 1719 model,
 1721 nullptr,
 1723 0,
 1724 1);
 1725 }
 1726 if (modelinverse != -1) {
 1728 modelinverse,
 1730 nullptr,
 1732 0,
 1733 1);
 1734 }
 1735 }
 1736
 1737 if (info_ubo_location != -1) {
 1739 info_ubo_location,
 1741 nullptr,
 1743 0,
 1744 1);
 1745
 1746 /* Abusing this loc to tell shgroup we need the obinfos. */
 1747 shgroup->objectinfo = 1;
 1748 }
 1749 else {
 1750 shgroup->objectinfo = 0;
 1751 }
 1752
 1753 if (view_ubo_location != -1) {
 1755 view_ubo_location,
 1759 0,
 1760 1);
 1761 }
 1762
 1763 if (clipping_ubo_location != -1) {
 1765 clipping_ubo_location,
 1769 0,
 1770 1);
 1771 }
 1772
 1773 /* Not supported. */
 1785}
1786
/* Allocate a shading group from the view memory pool, link it into `pass`
 * and set up its shader and empty command list. NOTE(review): doc-export
 * listing — the signature line (1787,
 * `static DRWShadingGroup *drw_shgroup_create_ex(GPUShader *shader, DRWPass *pass)`
 * per the callers below) and the allocation continuation (1790) are missing. */
 1788{
 1789 DRWShadingGroup *shgroup = static_cast<DRWShadingGroup *>(
 1791
 1792 BLI_LINKS_APPEND(&pass->shgroups, shgroup);
 1793
 1794 shgroup->shader = shader;
 1795 shgroup->cmd.first = nullptr;
 1796 shgroup->cmd.last = nullptr;
 1797 shgroup->pass_handle = pass->handle;
 1798
 1799 return shgroup;
 1800}
1801
/* Create a shading group for a material's GPUPass, or return null when the
 * pass failed to compile or its shader is not ready yet. NOTE(review):
 * doc-export listing — the signature line (1802) is missing. */
 1803{
 1804 if (!gpupass) {
 1805 /* Shader compilation error */
 1806 return nullptr;
 1807 }
 1808
 1809 GPUShader *sh = GPU_pass_shader_get(gpupass);
 1810
 1811 if (!sh) {
 1812 /* Shader not yet compiled */
 1813 return nullptr;
 1814 }
 1815
 1816 DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
 1817 return grp;
 1818}
1819
/* Bind a material texture to the shading group and take an extra reference
 * kept in the view memory pool, so the texture outlives this draw cycle.
 * NOTE(review): doc-export listing — the signature line (1820) and the
 * sampler-state parameter line (1823) are missing. */
 1821 GPUTexture *gputex,
 1822 const char *name,
 1824{
 1825 DRW_shgroup_uniform_texture_ex(grp, name, gputex, state);
 1826
 /* Pool-held reference is released when the memblock is cleared. */
 1827 GPUTexture **gputex_ref = static_cast<GPUTexture **>(BLI_memblock_alloc(DST.vmempool->images));
 1828 *gputex_ref = gputex;
 1829 GPU_texture_ref(gputex);
 1830}
1831
/* Bind every resource a GPUMaterial needs on `grp`: image/colorband/sky
 * textures, the material uniform buffer, and uniform/layer attributes.
 * NOTE(review): doc-export listing — the signature line (1832), the
 * `BKE_image_get_gpu_material_texture(` call line (1841), the tile-mapping
 * bind line (1846), the UBO bind line (1862) and the uniform/layer
 * attribute bind lines (1867-1869, 1874-1876) are missing. */
 1833{
 1834 ListBase textures = GPU_material_textures(material);
 1835
 1836 /* Bind all textures needed by the material. */
 1837 LISTBASE_FOREACH (GPUMaterialTexture *, tex, &textures) {
 1838 if (tex->ima) {
 /* Image textures may come with a UDIM tile-mapping texture. */
 1839 const bool use_tile_mapping = tex->tiled_mapping_name[0];
 1840 ImageUser *iuser = tex->iuser_available ? &tex->iuser : nullptr;
 1842 tex->ima, iuser, use_tile_mapping);
 1843
 1844 drw_shgroup_material_texture(grp, gputex.texture, tex->sampler_name, tex->sampler_state);
 1845 if (gputex.tile_mapping) {
 1847 grp, gputex.tile_mapping, tex->tiled_mapping_name, tex->sampler_state);
 1848 }
 1849 }
 1850 else if (tex->colorband) {
 1851 /* Color Ramp */
 1852 DRW_shgroup_uniform_texture(grp, tex->sampler_name, *tex->colorband);
 1853 }
 1854 else if (tex->sky) {
 1855 /* Sky */
 1856 DRW_shgroup_uniform_texture_ex(grp, tex->sampler_name, *tex->sky, tex->sampler_state);
 1857 }
 1858 }
 1859
 1860 GPUUniformBuf *ubo = GPU_material_uniform_buffer_get(material);
 1861 if (ubo != nullptr) {
 1863 }
 1864
 1865 const GPUUniformAttrList *uattrs = GPU_material_uniform_attributes(material);
 1866 if (uattrs != nullptr) {
 1870 grp->uniform_attrs = uattrs;
 1871 }
 1872
 1873 if (GPU_material_layer_attributes(material) != nullptr) {
 1877 }
 1878}
1879
/* Build a GPUVertFormat from an array of DRW instance-attribute
 * descriptions (name, int/float type, component count). NOTE(review):
 * doc-export listing — the signature line (1880) and the
 * `GPU_vertformat_attr_add(format,` call line (1886) plus its fetch-mode
 * argument line (1890) are missing. Code kept verbatim. */
 1881 int arraysize)
 1882{
 /* Zero-initialized format allocated with MEM_cnew; caller owns it. */
 1883 GPUVertFormat *format = MEM_cnew<GPUVertFormat>(__func__);
 1884
 1885 for (int i = 0; i < arraysize; i++) {
 1887 attrs[i].name,
 1888 (attrs[i].type == DRW_ATTR_INT) ? GPU_COMP_I32 : GPU_COMP_F32,
 1889 attrs[i].components,
 1891 }
 1892 return format;
 1893}
1894
/* Public constructors. NOTE(review): doc-export listing — both signature
 * lines are missing (1895: `DRWShadingGroup *DRW_shgroup_material_create(
 * GPUMaterial *material, DRWPass *pass)`; 1907:
 * `DRWShadingGroup *DRW_shgroup_create(GPUShader *shader, DRWPass *pass)`). */
/* Material variant: may return null when the material's shader is not ready. */
 1896{
 1897 GPUPass *gpupass = GPU_material_get_pass(material);
 1898 DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
 1899
 1900 if (shgroup) {
 1901 drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
 1902 DRW_shgroup_add_material_resources(shgroup, material);
 1903 }
 1904 return shgroup;
 1905}
 1906
/* Plain shader variant: always succeeds. */
 1908{
 1909 DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
 1910 drw_shgroup_init(shgroup, shader);
 1911 return shgroup;
 1912}
1913
/* Create a shading group whose output is captured into `tf_target` via
 * transform feedback instead of being rasterized. NOTE(review): doc-export
 * listing — the signature line (1914) and the
 * `drw_shgroup_uniform_create_ex(` call lines (1921, 1923, 1925) with the
 * uniform-type argument are missing. Code kept verbatim. */
 1915 DRWPass *pass,
 1916 blender::gpu::VertBuf *tf_target)
 1917{
 1918 BLI_assert(tf_target != nullptr);
 1919 DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
 1920 drw_shgroup_init(shgroup, shader);
 1922 0,
 1924 tf_target,
 1926 0,
 1927 1);
 1928 return shgroup;
 1929}
1930
1935
1940
/* State-setting commands recorded on a shading group. NOTE(review):
 * doc-export listing — the signature lines are missing (1941:
 * `DRW_shgroup_stencil_set(DRWShadingGroup *shgroup,`; 1949:
 * `DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)`; 1954:
 * `DRW_shgroup_clear_framebuffer(DRWShadingGroup *shgroup,`). */
/* Full stencil state: write mask, reference value and compare mask. */
 1942 uint write_mask,
 1943 uint reference,
 1944 uint compare_mask)
 1945{
 1946 drw_command_set_stencil_mask(shgroup, write_mask, reference, compare_mask);
 1947}
 1948
/* Legacy shorthand: write/compare masks fixed at 0xFF, `mask` as reference. */
 1950{
 1951 drw_command_set_stencil_mask(shgroup, 0xFF, mask, 0xFF);
 1952}
 1953
/* Record a framebuffer clear (color/depth/stencil per `channels`). */
 1955 eGPUFrameBufferBits channels,
 1956 uchar r,
 1957 uchar g,
 1958 uchar b,
 1959 uchar a,
 1960 float depth,
 1961 uchar stencil)
 1962{
 1963 drw_command_clear(shgroup, channels, r, g, b, a, depth, stencil);
 1964}
1965
/* Return true when the shading group contains no draw commands (state-only
 * commands do not count). NOTE(review): doc-export listing — the signature
 * line (1966) and the draw-command test line (1971, presumably checking
 * `command_type_get(...)` against the draw command types) are missing. */
 1967{
 1968 DRWCommandChunk *chunk = shgroup->cmd.first;
 1969 for (; chunk; chunk = chunk->next) {
 1970 for (int i = 0; i < chunk->command_used; i++) {
 1972 return false;
 1973 }
 1974 }
 1975 }
 1976 return true;
 1977}
1978
/* Clone a shading group (same shader, uniforms re-registered, empty command
 * list) and insert the copy right after the original in its pass, so state
 * overrides can be layered per-object. NOTE(review): doc-export listing —
 * the signature line (1979) and the two allocation continuations
 * (1982, 1990) are missing. Code kept verbatim. */
 1980{
 1981 DRWShadingGroup *shgroup_new = static_cast<DRWShadingGroup *>(
 1983
 /* Shallow copy, then reset what must not be shared: uniform list and commands. */
 1984 *shgroup_new = *shgroup;
 1985 drw_shgroup_init(shgroup_new, shgroup_new->shader);
 1986 shgroup_new->cmd.first = nullptr;
 1987 shgroup_new->cmd.last = nullptr;
 1988
 1989 DRWPass *parent_pass = static_cast<DRWPass *>(
 1991
 1992 BLI_LINKS_INSERT_AFTER(&parent_pass->shgroups, shgroup, shgroup_new);
 1993
 1994 return shgroup_new;
 1995}
1996
1999/* -------------------------------------------------------------------- */
2003/* Extract the 8 corners from a Projection Matrix.
2004 * Although less accurate, this solution can be simplified as follows:
2005 * BKE_boundbox_init_from_minmax(&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const
2006 * float[3]){1.0f, 1.0f, 1.0f}); for (int i = 0; i < 8; i++) {mul_project_m4_v3(projinv,
2007 * bbox.vec[i]);}
2008 */
/* Compute the 8 world-space corners of the view frustum from the inverse
 * view matrix and the projection matrix, using projmat_dimensions() for
 * accuracy instead of unprojecting NDC corners. Corner indexing follows
 * Blender's BoundBox convention. NOTE(review): doc-export listing — one
 * line inside the disabled `#if 0` block (2017) is missing; the active
 * code is complete and kept verbatim. */
 2009static void draw_frustum_boundbox_calc(const float (*viewinv)[4],
 2010 const float (*projmat)[4],
 2011 BoundBox *r_bbox)
 2012{
 2013 float left, right, bottom, top, near, far;
 /* Perspective matrices have w-row [0,0,-1,0], so m[3][3] == 0. */
 2014 bool is_persp = projmat[3][3] == 0.0f;
 2015
 2016#if 0 /* Equivalent to this but it has accuracy problems. */
 2018 &bbox, blender::float3{-1.0f, -1.0f, -1.0f}, blender::float3{1.0f, 1.0f, 1.0f});
 2019 for (int i = 0; i < 8; i++) {
 2020 mul_project_m4_v3(projinv, bbox.vec[i]);
 2021 }
 2022#endif
 2023
 2024 projmat_dimensions(projmat, &left, &right, &bottom, &top, &near, &far);
 2025
 /* Near plane corners (indices 0, 3, 4, 7). */
 2026 r_bbox->vec[0][2] = r_bbox->vec[3][2] = r_bbox->vec[7][2] = r_bbox->vec[4][2] = -near;
 2027 r_bbox->vec[0][0] = r_bbox->vec[3][0] = left;
 2028 r_bbox->vec[4][0] = r_bbox->vec[7][0] = right;
 2029 r_bbox->vec[0][1] = r_bbox->vec[4][1] = bottom;
 2030 r_bbox->vec[7][1] = r_bbox->vec[3][1] = top;
 2031
 2032 /* Get the coordinates of the far plane. */
 /* In perspective the far rectangle is the near one scaled by far/near. */
 2033 if (is_persp) {
 2034 float sca_far = far / near;
 2035 left *= sca_far;
 2036 right *= sca_far;
 2037 bottom *= sca_far;
 2038 top *= sca_far;
 2039 }
 2040
 /* Far plane corners (indices 1, 2, 5, 6). */
 2041 r_bbox->vec[1][2] = r_bbox->vec[2][2] = r_bbox->vec[6][2] = r_bbox->vec[5][2] = -far;
 2042 r_bbox->vec[1][0] = r_bbox->vec[2][0] = left;
 2043 r_bbox->vec[6][0] = r_bbox->vec[5][0] = right;
 2044 r_bbox->vec[1][1] = r_bbox->vec[5][1] = bottom;
 2045 r_bbox->vec[2][1] = r_bbox->vec[6][1] = top;
 2046
 2047 /* Transform into world space. */
 2048 for (int i = 0; i < 8; i++) {
 2049 mul_m4_v3(viewinv, r_bbox->vec[i]);
 2050 }
 2051}
2052
2053static void draw_frustum_culling_planes_calc(const float (*persmat)[4], float (*frustum_planes)[4])
2054{
2055 planes_from_projmat(persmat,
2056 frustum_planes[0],
2057 frustum_planes[5],
2058 frustum_planes[1],
2059 frustum_planes[3],
2060 frustum_planes[4],
2061 frustum_planes[2]);
2062
2063 /* Normalize. */
2064 for (int p = 0; p < 6; p++) {
2065 frustum_planes[p][3] /= normalize_v3(frustum_planes[p]);
2066 }
2067}
2068
/* Compute a bounding sphere enclosing the view frustum, used for fast
 * sphere-based culling. Three cases: orthographic (box diagonal midpoint),
 * symmetric perspective (circumcircle of the near/far diagonal trapezoid),
 * and asymmetric perspective (sphere center constrained to the line through
 * the origin and the far-plane center). NOTE(review): doc-export listing —
 * the signature line (2069, first parameter presumably
 * `const BoundBox *bbox`) is missing. Code kept verbatim. */
 2070 const float (*viewinv)[4],
 2071 const float (*projmat)[4],
 2072 const float (*projinv)[4],
 2073 BoundSphere *bsphere)
 2074{
 2075 /* Extract Bounding Sphere */
 /* m[3][3] != 0 identifies an orthographic projection. */
 2076 if (projmat[3][3] != 0.0f) {
 2077 /* Orthographic */
 2078 /* The most extreme points on the near and far plane. (normalized device coords). */
 2079 const float *nearpoint = bbox->vec[0];
 2080 const float *farpoint = bbox->vec[6];
 2081
 2082 /* just use median point */
 2083 mid_v3_v3v3(bsphere->center, farpoint, nearpoint);
 2084 bsphere->radius = len_v3v3(bsphere->center, farpoint);
 2085 }
 /* Off-axis terms m[2][0]/m[2][1] are zero for a symmetric frustum. */
 2086 else if (projmat[2][0] == 0.0f && projmat[2][1] == 0.0f) {
 2087 /* Perspective with symmetrical frustum. */
 2088
 2089 /* We obtain the center and radius of the circumscribed circle of the
 2090 * isosceles trapezoid composed by the diagonals of the near and far clipping plane */
 2091
 2092 /* center of each clipping plane */
 2093 float mid_min[3], mid_max[3];
 2094 mid_v3_v3v3(mid_min, bbox->vec[3], bbox->vec[4]);
 2095 mid_v3_v3v3(mid_max, bbox->vec[2], bbox->vec[5]);
 2096
 2097 /* square length of the diagonals of each clipping plane */
 2098 float a_sq = len_squared_v3v3(bbox->vec[3], bbox->vec[4]);
 2099 float b_sq = len_squared_v3v3(bbox->vec[2], bbox->vec[5]);
 2100
 2101 /* distance squared between clipping planes */
 2102 float h_sq = len_squared_v3v3(mid_min, mid_max);
 2103
 /* Circumcenter position along the near→far axis, as a 0..1 factor. */
 2104 float fac = (4 * h_sq + b_sq - a_sq) / (8 * h_sq);
 2105
 2106 /* The goal is to get the smallest sphere,
 2107 * not the sphere that passes through each corner */
 2108 CLAMP(fac, 0.0f, 1.0f);
 2109
 2110 interp_v3_v3v3(bsphere->center, mid_min, mid_max, fac);
 2111
 2112 /* distance from the center to one of the points of the far plane (1, 2, 5, 6) */
 2113 bsphere->radius = len_v3v3(bsphere->center, bbox->vec[1]);
 2114 }
 2115 else {
 2116 /* Perspective with asymmetrical frustum. */
 2117
 2118 /* We put the sphere center on the line that goes from origin
 2119 * to the center of the far clipping plane. */
 2120
 2121 /* Detect which of the corner of the far clipping plane is the farthest to the origin */
 2122 float nfar[4]; /* most extreme far point in NDC space */
 2123 float farxy[2]; /* far-point projection onto the near plane */
 2124 float farpoint[3] = {0.0f}; /* most extreme far point in camera coordinate */
 2125 float nearpoint[3]; /* most extreme near point in camera coordinate */
 2126 float farcenter[3] = {0.0f}; /* center of far clipping plane in camera coordinate */
 2127 float F = -1.0f, N; /* square distance of far and near point to origin */
 2128 float f, n; /* distance of far and near point to z axis. f is always > 0 but n can be < 0 */
 2129 float e, s; /* far and near clipping distance (<0) */
 2130 float c; /* slope of center line = distance of far clipping center
 2131 * to z axis / far clipping distance. */
 2132 float z; /* projection of sphere center on z axis (<0) */
 2133
 2134 /* Find farthest corner and center of far clip plane. */
 2135 float corner[3] = {1.0f, 1.0f, 1.0f}; /* in clip space */
 2136 for (int i = 0; i < 4; i++) {
 2137 float point[3];
 2138 mul_v3_project_m4_v3(point, projinv, corner);
 2139 float len = len_squared_v3(point);
 2140 if (len > F) {
 2141 copy_v3_v3(nfar, corner);
 2142 copy_v3_v3(farpoint, point);
 2143 F = len;
 2144 }
 2145 add_v3_v3(farcenter, point);
 2146 /* rotate by 90 degree to walk through the 4 points of the far clip plane */
 2147 float tmp = corner[0];
 2148 corner[0] = -corner[1];
 2149 corner[1] = tmp;
 2150 }
 2151
 2152 /* the far center is the average of the far clipping points */
 2153 mul_v3_fl(farcenter, 0.25f);
 2154 /* the extreme near point is the opposite point on the near clipping plane */
 2155 copy_v3_fl3(nfar, -nfar[0], -nfar[1], -1.0f);
 2156 mul_v3_project_m4_v3(nearpoint, projinv, nfar);
 2157 /* this is a frustum projection */
 2158 N = len_squared_v3(nearpoint);
 2159 e = farpoint[2];
 2160 s = nearpoint[2];
 2161 /* distance to view Z axis */
 2162 f = len_v2(farpoint);
 2163 /* get corresponding point on the near plane */
 2164 mul_v2_v2fl(farxy, farpoint, s / e);
 2165 /* this formula preserve the sign of n */
 2166 sub_v2_v2(nearpoint, farxy);
 2167 n = f * s / e - len_v2(nearpoint);
 2168 c = len_v2(farcenter) / e;
 2169 /* the big formula, it simplifies to (F-N)/(2(e-s)) for the symmetric case */
 2170 z = (F - N) / (2.0f * (e - s + c * (f - n)));
 2171
 2172 bsphere->center[0] = farcenter[0] * z / e;
 2173 bsphere->center[1] = farcenter[1] * z / e;
 2174 bsphere->center[2] = z;
 2175
 2176 /* For XR, the view matrix may contain a scale factor. Then, transforming only the center
 2177 * into world space after calculating the radius will result in incorrect behavior. */
 2178 mul_m4_v3(viewinv, bsphere->center); /* Transform to world space. */
 2179 mul_m4_v3(viewinv, farpoint);
 2180 bsphere->radius = len_v3v3(bsphere->center, farpoint);
 2181 }
 2182}
2183
/* Refresh all matrix state of a view from `viewmat`/`winmat`: stores both
 * matrices with their inverses, plus the combined persmat and its inverse.
 * NOTE(review): doc-export listing — the signature start line (2184,
 * presumably `static void draw_view_matrix_state_update(DRWView *view,`)
 * is missing. Code kept verbatim. */
 2185 const float viewmat[4][4],
 2186 const float winmat[4][4])
 2187{
 2188 ViewMatrices *storage = &view->storage;
 2189
 2190 copy_m4_m4(storage->viewmat.ptr(), viewmat);
 2191 invert_m4_m4(storage->viewinv.ptr(), storage->viewmat.ptr());
 2192
 2193 copy_m4_m4(storage->winmat.ptr(), winmat);
 2194 invert_m4_m4(storage->wininv.ptr(), storage->winmat.ptr());
 2195
 /* persmat = winmat * viewmat (projection applied after the view transform). */
 2196 mul_m4_m4m4(view->persmat.ptr(), winmat, viewmat);
 2197 invert_m4_m4(view->persinv.ptr(), view->persmat.ptr());
 2198}
2199
/* Create a new primary view. Each primary view gets one bit in the culling
 * mask; optional culling matrices allow culling with a different (e.g.
 * larger) frustum than the drawing one. NOTE(review): doc-export listing —
 * the guard line (2208, presumably checking
 * `DST.primary_view_num < MAX_CULLED_VIEWS`) is missing. */
 2200DRWView *DRW_view_create(const float viewmat[4][4],
 2201 const float winmat[4][4],
 2202 const float (*culling_viewmat)[4],
 2203 const float (*culling_winmat)[4],
 2204 DRWCallVisibilityFn *visibility_fn)
 2205{
 2206 DRWView *view = static_cast<DRWView *>(BLI_memblock_alloc(DST.vmempool->views));
 2207
 /* One culling-mask bit per primary view; 0 disables culling when the
  * maximum view count is exceeded (asserts in debug builds). */
 2209 view->culling_mask = 1u << DST.primary_view_num++;
 2210 }
 2211 else {
 2212 BLI_assert(0);
 2213 view->culling_mask = 0u;
 2214 }
 2215 view->clip_planes_len = 0;
 2216 view->visibility_fn = visibility_fn;
 2217 view->parent = nullptr;
 2218
 2219 DRW_view_update(view, viewmat, winmat, culling_viewmat, culling_winmat);
 2220
 2221 return view;
 2222}
2223
/* Create a sub-view: a copy of (the root ancestor of) `parent_view` that
 * shares its culling data but has its own matrices. NOTE(review):
 * doc-export listing — the signature line (2224, presumably
 * `DRWView *DRW_view_create_sub(const DRWView *parent_view,`) is missing. */
 2225 const float viewmat[4][4],
 2226 const float winmat[4][4])
 2227{
 2228 /* Search original parent. */
 /* Walk up so chained sub-views all share the same root parent. */
 2229 const DRWView *ori_view = parent_view;
 2230 while (ori_view->parent != nullptr) {
 2231 ori_view = ori_view->parent;
 2232 }
 2233
 2234 DRWView *view = static_cast<DRWView *>(BLI_memblock_alloc(DST.vmempool->views));
 2235
 2236 /* Perform copy. */
 2237 *view = *ori_view;
 2238 view->parent = (DRWView *)ori_view;
 2239
 2240 DRW_view_update_sub(view, viewmat, winmat);
 2241
 2242 return view;
 2243}
2244
2245/* DRWView Update:
2246 * This is meant to be done on existing views when rendering in a loop and there is no
2247 * need to allocate more DRWViews. */
2248
2249void DRW_view_update_sub(DRWView *view, const float viewmat[4][4], const float winmat[4][4])
2250{
2251 BLI_assert(view->parent != nullptr);
2252
2253 view->is_dirty = true;
2254 view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat));
2255
2256 draw_view_matrix_state_update(view, viewmat, winmat);
2257}
2258
/* Full update of a primary view: refresh matrices and recompute all frustum
 * culling data (corner box, planes, bounding sphere). Optional culling
 * matrices replace the drawing matrices for the culling computations only.
 * NOTE(review): doc-export listing — the signature line (2259,
 * `void DRW_view_update(DRWView *view,`), the bound-sphere call line (2315)
 * and a debug-draw line (2320) are missing. Code kept verbatim. */
 2260 const float viewmat[4][4],
 2261 const float winmat[4][4],
 2262 const float (*culling_viewmat)[4],
 2263 const float (*culling_winmat)[4])
 2264{
 2265 /* DO NOT UPDATE THE DEFAULT VIEW.
 2266 * Create sub-views instead, or a copy. */
 2267 BLI_assert(view != DST.view_default);
 2268 BLI_assert(view->parent == nullptr);
 2269
 2270 view->is_dirty = true;
 2271 view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat));
 2272
 2273 draw_view_matrix_state_update(view, viewmat, winmat);
 2274
 2275 /* Prepare frustum culling. */
 2276
 2277#ifdef DRW_DEBUG_CULLING
 /* Debug-culling mode: freeze the culling matrices when debug_value != 0 so
  * culling can be inspected from a different viewpoint. */
 2278 static float mv[MAX_CULLED_VIEWS][4][4], mw[MAX_CULLED_VIEWS][4][4];
 2279
 2280 /* Select view here. */
 2281 if (view->culling_mask != 0) {
 2282 uint index = bitscan_forward_uint(view->culling_mask);
 2283
 2284 if (G.debug_value == 0) {
 2285 copy_m4_m4(mv[index], culling_viewmat ? culling_viewmat : viewmat);
 2286 copy_m4_m4(mw[index], culling_winmat ? culling_winmat : winmat);
 2287 }
 2288 else {
 2289 culling_winmat = mw[index];
 2290 culling_viewmat = mv[index];
 2291 }
 2292 }
 2293#endif
 2294
 /* Use the dedicated culling matrices when provided, else reuse the
  * inverses already computed by draw_view_matrix_state_update(). */
 2295 float wininv[4][4];
 2296 if (culling_winmat) {
 2297 winmat = culling_winmat;
 2298 invert_m4_m4(wininv, winmat);
 2299 }
 2300 else {
 2301 copy_m4_m4(wininv, view->storage.wininv.ptr());
 2302 }
 2303
 2304 float viewinv[4][4];
 2305 if (culling_viewmat) {
 2306 viewmat = culling_viewmat;
 2307 invert_m4_m4(viewinv, viewmat);
 2308 }
 2309 else {
 2310 copy_m4_m4(viewinv, view->storage.viewinv.ptr());
 2311 }
 2312
 2313 draw_frustum_boundbox_calc(viewinv, winmat, &view->frustum_corners);
 2314 draw_frustum_culling_planes_calc(view->persmat.ptr(), view->frustum_planes);
 2316 &view->frustum_corners, viewinv, winmat, wininv, &view->frustum_bsphere);
 2317
 2318#ifdef DRW_DEBUG_CULLING
 2319 if (G.debug_value != 0) {
 2321 view->frustum_bsphere.center, view->frustum_bsphere.radius, blender::float4{1, 1, 0, 1});
 2322 DRW_debug_bbox(&view->frustum_corners, blender::float4{1, 1, 0, 1});
 2323 }
 2324#endif
 2325}
2326
/* Default-view accessors. NOTE(review): doc-export listing — the three
 * signature lines are missing (2327: `const DRWView *DRW_view_default_get()`,
 * 2332: `void DRW_view_reset()`, 2339:
 * `void DRW_view_default_set(const DRWView *view)` — verify against the
 * real source). */
/* Return the view used when callers pass a null view. */
 2328{
 2329 return DST.view_default;
 2330}
 2331
/* Clear all cached view pointers (between redraw cycles). */
 2333{
 2334 DST.view_default = nullptr;
 2335 DST.view_active = nullptr;
 2336 DST.view_previous = nullptr;
 2337}
 2338
/* Install the default view; may only be set once per cycle (assert). */
 2340{
 2341 BLI_assert(DST.view_default == nullptr);
 2342 DST.view_default = (DRWView *)view;
 2343}
2344
2345void DRW_view_clip_planes_set(DRWView *view, float (*planes)[4], int plane_len)
2346{
2347 BLI_assert(plane_len <= MAX_CLIP_PLANES);
2348 view->clip_planes_len = plane_len;
2349 if (plane_len > 0) {
2350 memcpy(view->clip_planes, planes, sizeof(float[4]) * plane_len);
2351 }
2352}
2353
/* Copy the view's 8 frustum corners into `corners`. NOTE(review):
 * doc-export listing — the signature line (2354, presumably
 * `void DRW_view_frustum_corners_get(const DRWView *view, BoundBox *corners)`)
 * is missing. */
 2355{
 2356 memcpy(corners, &view->frustum_corners, sizeof(view->frustum_corners));
 2357}
2358
2359void DRW_view_frustum_planes_get(const DRWView *view, float planes[6][4])
2360{
2361 memcpy(planes, &view->frustum_planes, sizeof(view->frustum_planes));
2362}
2363
/* Projection queries; a null `view` falls back to the default view.
 * NOTE(review): doc-export listing — the three signature lines are missing
 * (2364: `bool DRW_view_is_persp_get(const DRWView *view)`, 2370:
 * `float DRW_view_near_distance_get(const DRWView *view)`, 2382:
 * `float DRW_view_far_distance_get(const DRWView *view)` — names inferred
 * from the clip-distance formulas; verify against the real source). */
/* Perspective matrices have winmat[3][3] == 0. */
 2365{
 2366 view = (view) ? view : DST.view_default;
 2367 return view->storage.winmat[3][3] == 0.0f;
 2368}
 2369
/* Near clip distance recovered from the projection matrix. */
 2371{
 2372 view = (view) ? view : DST.view_default;
 2373 const float4x4 &projmat = view->storage.winmat;
 2374
 2375 if (DRW_view_is_persp_get(view)) {
 2376 return -projmat[3][2] / (projmat[2][2] - 1.0f);
 2377 }
 2378
 2379 return -(projmat[3][2] + 1.0f) / projmat[2][2];
 2380}
 2381
/* Far clip distance recovered from the projection matrix. */
 2383{
 2384 view = (view) ? view : DST.view_default;
 2385 const float4x4 &projmat = view->storage.winmat;
 2386
 2387 if (DRW_view_is_persp_get(view)) {
 2388 return -projmat[3][2] / (projmat[2][2] + 1.0f);
 2389 }
 2390
 2391 return -(projmat[3][2] - 1.0f) / projmat[2][2];
 2392}
2393
2394void DRW_view_viewmat_get(const DRWView *view, float mat[4][4], bool inverse)
2395{
2396 view = (view) ? view : DST.view_default;
2397 const ViewMatrices *storage = &view->storage;
2398 copy_m4_m4(mat, (inverse) ? storage->viewinv.ptr() : storage->viewmat.ptr());
2399}
2400
2401void DRW_view_winmat_get(const DRWView *view, float mat[4][4], bool inverse)
2402{
2403 view = (view) ? view : DST.view_default;
2404 const ViewMatrices *storage = &view->storage;
2405 copy_m4_m4(mat, (inverse) ? storage->wininv.ptr() : storage->winmat.ptr());
2406}
2407
2408void DRW_view_persmat_get(const DRWView *view, float mat[4][4], bool inverse)
2409{
2410 view = (view) ? view : DST.view_default;
2411 copy_m4_m4(mat, (inverse) ? view->persinv.ptr() : view->persmat.ptr());
2412}
2413
2416/* -------------------------------------------------------------------- */
/* Pass constructors. NOTE(review): doc-export listing — the signature lines
 * are missing (2420: `DRWPass *DRW_pass_create(const char *name, DRWState
 * state)`, 2439: `DRWPass *DRW_pass_create_instance(const char *name,
 * DRWPass *original, DRWState state)` — verify), as are the state
 * assignment line (2423) and the pass-handle increment line (2431). */
/* Create an empty pass; the name is only stored for GPU debugging. */
 2421{
 2422 DRWPass *pass = static_cast<DRWPass *>(BLI_memblock_alloc(DST.vmempool->passes));
 2424 if (G.debug & G_DEBUG_GPU) {
 2425 STRNCPY(pass->name, name);
 2426 }
 2427
 2428 pass->shgroups.first = nullptr;
 2429 pass->shgroups.last = nullptr;
 2430 pass->handle = DST.pass_handle;
 2432
 2433 pass->original = nullptr;
 2434 pass->next = nullptr;
 2435
 2436 return pass;
 2437}
 2438
/* Create a pass that re-draws another pass's shgroups with different state. */
 2440{
 2441 DRWPass *pass = DRW_pass_create(name, state);
 2442 pass->original = original;
 2443
 2444 return pass;
 2445}
2446
2447void DRW_pass_link(DRWPass *first, DRWPass *second)
2448{
2449 BLI_assert(first != second);
2450 BLI_assert(first->next == nullptr);
2451 first->next = second;
2452}
2453
/* Pass queries/iteration. NOTE(review): doc-export listing — the signature
 * lines are missing (2454: `bool DRW_pass_is_empty(DRWPass *pass)`, 2468:
 * `void DRW_pass_foreach_shgroup(DRWPass *pass,`). */
/* A pass is empty when every shading group has no draw command; instance
 * passes delegate to their original pass. */
 2455{
 2456 if (pass->original) {
 2457 return DRW_pass_is_empty(pass->original);
 2458 }
 2459
 2460 LISTBASE_FOREACH (DRWShadingGroup *, shgroup, &pass->shgroups) {
 2461 if (!DRW_shgroup_is_empty(shgroup)) {
 2462 return false;
 2463 }
 2464 }
 2465 return true;
 2466}
 2467
/* Invoke `callback(user_data, shgroup)` for every shading group of the pass. */
 2469 void (*callback)(void *user_data, DRWShadingGroup *shgrp),
 2470 void *user_data)
 2471{
 2472 LISTBASE_FOREACH (DRWShadingGroup *, shgroup, &pass->shgroups) {
 2473 callback(user_data, shgroup);
 2474 }
 2475}
2476
2477static int pass_shgroup_dist_sort(const void *a, const void *b)
2478{
2479 const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
2480 const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
2481
2482 if (shgrp_a->z_sorting.distance < shgrp_b->z_sorting.distance) {
2483 return 1;
2484 }
2485 if (shgrp_a->z_sorting.distance > shgrp_b->z_sorting.distance) {
2486 return -1;
2487 }
2488
2489 /* If distances are the same, keep original order. */
2490 if (shgrp_a->z_sorting.original_index > shgrp_b->z_sorting.original_index) {
2491 return -1;
2492 }
2493
2494 return 0;
2495}
2496
 2497/* ------------------ Shading group sorting --------------------- */
 2498
/* Instantiate BLI's linked-list merge-sort template for DRWShadingGroup,
 * producing `shgroup_sort_fn_r` used by the Z-sort below.
 * NOTE(review): doc-export listing — the `#include` of the sort template
 * implementation (line 2502) is missing between the two macro groups. */
 2499#define SORT_IMPL_LINKTYPE DRWShadingGroup
 2500
 2501#define SORT_IMPL_FUNC shgroup_sort_fn_r
 2503#undef SORT_IMPL_FUNC
 2504
 2505#undef SORT_IMPL_LINKTYPE
2506
/* Sort the pass's shading groups back-to-front relative to the active view
 * (for transparency). Distance is measured along the view Z axis from the
 * first draw command's object matrix. NOTE(review): doc-export listing —
 * the signature lines are missing (2507, presumably
 * `void DRW_pass_sort_shgroup_z(DRWPass *pass)`, and 2558 for the reverse
 * variant), as is the obmats lookup continuation (2536). */
 2508{
 2509 const float4x4 &viewinv = DST.view_active->storage.viewinv;
 2510
 /* Fewer than two groups: nothing to sort. */
 2511 if (!(pass->shgroups.first && pass->shgroups.first->next)) {
 2512 /* Nothing to sort */
 2513 return;
 2514 }
 2515
 2516 uint index = 0;
 2517 DRWShadingGroup *shgroup = pass->shgroups.first;
 2518 do {
 2519 DRWResourceHandle handle = 0;
 2520 /* Find first DRWCommandDraw. */
 2521 DRWCommandChunk *cmd_chunk = shgroup->cmd.first;
 2522 for (; cmd_chunk && handle == 0; cmd_chunk = cmd_chunk->next) {
 2523 for (int i = 0; i < cmd_chunk->command_used && handle == 0; i++) {
 2524 if (DRW_CMD_DRAW == command_type_get(cmd_chunk->command_type, i)) {
 2525 handle = cmd_chunk->commands[i].draw.handle;
 2526 }
 2527 }
 2528 }
 2529 /* To be sorted a shgroup needs to have at least one draw command. */
 2530 /* FIXME(fclem): In some case, we can still have empty shading group to sort. However their
 2531 * final order is not well defined.
 2532 * (see #76730 & D7729). */
 2533 // BLI_assert(handle != 0);
 2534
 2535 DRWObjectMatrix *obmats = static_cast<DRWObjectMatrix *>(
 2537
 2538 /* Compute distance to camera. */
 /* Signed distance along the view Z axis (viewinv[2]) from the camera
  * position (viewinv[3]) to the object's translation. */
 2539 float tmp[3];
 2540 sub_v3_v3v3(tmp, viewinv[3], obmats->model[3]);
 2541 shgroup->z_sorting.distance = dot_v3v3(viewinv[2], tmp);
 2542 shgroup->z_sorting.original_index = index++;
 2543
 2544 } while ((shgroup = shgroup->next));
 2545
 2546 /* Sort using computed distances. */
 2547 pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort);
 2548
 2549 /* Find the new last */
 /* NOTE(review): as transcribed, the loop below exits with `last == nullptr`
  * (the condition is the assignment `last = last->next`), so `shgroups.last`
  * is set to null rather than the tail node — verify against the real
  * source whether this is an extraction artifact. */
 2550 DRWShadingGroup *last = pass->shgroups.first;
 2551 while ((last = last->next)) {
 2552 /* Reset the pass id for debugging. */
 2553 last->pass_handle = pass->handle;
 2554 }
 2555 pass->shgroups.last = last;
 2556}
 2557
/* Reverse the shading-group list in place (last becomes first). */
 2559{
 2560 pass->shgroups.last = pass->shgroups.first;
 2561 /* WARNING: Assume that DRWShadingGroup->next is the first member. */
 2562 BLI_linklist_reverse((LinkNode **)&pass->shgroups.first);
 2563}
2564
void BKE_curve_texspace_ensure(Curve *cu)
Definition curve.cc:500
int CustomData_get_layer_index_n(const CustomData *data, eCustomDataType type, int n)
const char * CustomData_get_active_layer_name(const CustomData *data, eCustomDataType type)
@ G_FLAG_PICKSEL
@ G_DEBUG_GPU
ImageGPUTextures BKE_image_get_gpu_material_texture(Image *image, ImageUser *iuser, const bool use_tile_mapping)
Definition image_gpu.cc:486
void BKE_mesh_texspace_get_reference(Mesh *mesh, char **r_texspace_flag, float **r_texspace_location, float **r_texspace_size)
General operations, lookup, etc. for blender objects.
void BKE_boundbox_init_from_minmax(BoundBox *bb, const float min[3], const float max[3])
std::optional< blender::Bounds< blender::float3 > > BKE_object_boundbox_get(const Object *ob)
Mesh * BKE_object_get_original_mesh(const Object *object)
Paint * BKE_paint_get_active_from_context(const bContext *C)
Definition paint.cc:477
void BKE_pbvh_draw_debug_cb(blender::bke::pbvh::Tree &pbvh, void(*draw_fn)(blender::bke::pbvh::Node *node, void *user_data, const float bmin[3], const float bmax[3], PBVHNodeFlags flag), void *user_data)
Definition pbvh.cc:2368
PBVHNodeFlags
Definition BKE_pbvh.hh:29
@ PBVH_TexLeaf
Definition BKE_pbvh.hh:40
@ PBVH_Leaf
Definition BKE_pbvh.hh:30
bool BKE_pbvh_node_fully_hidden_get(const blender::bke::pbvh::Node &node)
Definition pbvh.cc:1542
int BKE_pbvh_debug_draw_gen_get(blender::bke::pbvh::Node &node)
Definition pbvh.cc:2525
bool BKE_pbvh_node_frustum_contain_AABB(const blender::bke::pbvh::Node *node, const PBVHFrustumPlanes *frustum)
Definition pbvh.cc:2354
Volume data-block.
std::optional< blender::Bounds< blender::float3 > > BKE_volume_min_max(const Volume *volume)
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
#define BLI_INLINE
BLI_INLINE unsigned int BLI_hash_string(const char *str)
Definition BLI_hash.h:71
BLI_INLINE unsigned int BLI_hash_int_2d(unsigned int kx, unsigned int ky)
Definition BLI_hash.h:55
#define LISTBASE_FOREACH(type, var, list)
MINLINE unsigned int bitscan_forward_uint(unsigned int a)
void projmat_dimensions(const float winmat[4][4], float *r_left, float *r_right, float *r_bottom, float *r_top, float *r_near, float *r_far)
void planes_from_projmat(const float mat[4][4], float left[4], float right[4], float bottom[4], float top[4], float near[4], float far[4])
void mul_v3_project_m4_v3(float r[3], const float mat[4][4], const float vec[3])
void mul_m4_m4m4(float R[4][4], const float A[4][4], const float B[4][4])
void transpose_m4_m4(float R[4][4], const float M[4][4])
void mul_project_m4_v3(const float mat[4][4], float vec[3])
void mul_m4_v3(const float M[4][4], float r[3])
void copy_m4_m4(float m1[4][4], const float m2[4][4])
void mul_v3_m4v3(float r[3], const float mat[4][4], const float vec[3])
bool is_negative_m4(const float mat[4][4])
bool invert_m4_m4(float inverse[4][4], const float mat[4][4])
void mul_m4_v4(const float mat[4][4], float r[4])
MINLINE void copy_v4_v4(float r[4], const float a[4])
MINLINE float len_v2(const float v[2]) ATTR_WARN_UNUSED_RESULT
MINLINE float len_squared_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT
MINLINE float len_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v2_v2(float r[2], const float a[2])
MINLINE void mul_v3_v3(float r[3], const float a[3])
MINLINE float len_squared_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void mul_v3_fl(float r[3], float f)
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE void copy_v3_fl3(float v[3], float x, float y, float z)
MINLINE float dot_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
void interp_v3_v3v3(float r[3], const float a[3], const float b[3], float t)
Definition math_vector.c:36
MINLINE void negate_v3(float r[3])
MINLINE void invert_v3(float r[3])
void mid_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void copy_v3_fl(float r[3], float f)
MINLINE void mul_v3_v3fl(float r[3], const float a[3], float f)
MINLINE void mul_v2_v2fl(float r[2], const float a[2], float f)
MINLINE void add_v3_v3(float r[3], const float a[3])
MINLINE float normalize_v3(float n[3])
void * BLI_memblock_elem_get(BLI_memblock *mblk, int chunk, int elem) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
void * BLI_memblock_alloc(BLI_memblock *mblk) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
void BLI_memblock_iternew(BLI_memblock *mblk, BLI_memblock_iter *iter) ATTR_NONNULL()
void * BLI_memblock_iterstep(BLI_memblock_iter *iter) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
#define STRNCPY(dst, src)
Definition BLI_string.h:593
unsigned char uchar
unsigned int uint
#define CLAMP(a, b, c)
#define ARRAY_SIZE(arr)
#define UNUSED_VARS(...)
#define UNUSED_VARS_NDEBUG(...)
#define UNLIKELY(x)
#define ELEM(...)
@ ID_VO
@ ID_CU_LEGACY
@ ID_ME
@ ID_MB
@ CD_PROP_FLOAT2
@ BASE_FROM_DUPLI
@ BASE_FROM_SET
@ OB_NEG_SCALE
#define BASE_SELECTED(v3d, base)
@ PAINT_SCULPT_DELAY_UPDATES
@ PAINT_FAST_NAVIGATE
@ RV3D_PAINTING
@ RV3D_NAVIGATING
@ DRW_ATTR_INT
#define DRW_shgroup_call_no_cull(shgroup, geom, ob)
#define DRW_DEBUG_FILE_LINE_ARGS
Definition DRW_render.hh:61
bool DRWCallVisibilityFn(bool vis_in, void *user_data)
#define DRW_shgroup_uniform_block(shgroup, name, ubo)
static AppView * view
eGPUFrameBufferBits
GPUPass * GPU_material_get_pass(GPUMaterial *material)
const ListBase * GPU_material_layer_attributes(const GPUMaterial *material)
ListBase GPU_material_textures(GPUMaterial *material)
const GPUUniformAttrList * GPU_material_uniform_attributes(const GPUMaterial *material)
GPUUniformBuf * GPU_material_uniform_buffer_get(GPUMaterial *material)
GPUPrimType
@ GPU_PRIM_TRI_FAN
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
@ GPU_PRIM_TRI_STRIP
@ GPU_PRIM_TRIS
int GPU_shader_get_sampler_binding(GPUShader *shader, const char *name)
int GPU_shader_get_uniform(GPUShader *shader, const char *name)
int GPU_shader_get_ubo_binding(GPUShader *shader, const char *name)
int GPU_shader_get_ssbo_binding(GPUShader *shader, const char *name)
int GPU_shader_get_builtin_block(GPUShader *shader, int builtin)
@ GPU_UNIFORM_VIEWPROJECTION_INV
@ GPU_UNIFORM_PROJECTION
@ GPU_UNIFORM_RESOURCE_ID
@ GPU_UNIFORM_VIEWPROJECTION
@ GPU_UNIFORM_VIEW
@ GPU_UNIFORM_MODEL
@ GPU_UNIFORM_MODELVIEW
@ GPU_UNIFORM_BASE_INSTANCE
@ GPU_UNIFORM_VIEW_INV
@ GPU_UNIFORM_MODEL_INV
@ GPU_UNIFORM_PROJECTION_INV
@ GPU_UNIFORM_CLIPPLANES
@ GPU_UNIFORM_MODELVIEW_INV
@ GPU_UNIFORM_NORMAL
@ GPU_UNIFORM_RESOURCE_CHUNK
@ GPU_UNIFORM_MVP
int GPU_shader_get_builtin_uniform(GPUShader *shader, int builtin)
@ GPU_UNIFORM_BLOCK_DRW_VIEW
@ GPU_UNIFORM_BLOCK_DRW_MODEL
@ GPU_UNIFORM_BLOCK_MODEL
@ GPU_UNIFORM_BLOCK_VIEW
@ GPU_UNIFORM_BLOCK_DRW_CLIPPING
@ GPU_UNIFORM_BLOCK_DRW_INFOS
@ GPU_UNIFORM_BLOCK_INFO
eGPUBarrier
Definition GPU_state.hh:29
void GPU_texture_ref(GPUTexture *texture)
#define GPU_LAYER_ATTRIBUTE_UBO_BLOCK_NAME
#define GPU_UBO_BLOCK_NAME
#define GPU_uniformbuf_create(size)
void GPU_uniformbuf_update(GPUUniformBuf *ubo, const void *data)
void GPU_uniformbuf_free(GPUUniformBuf *ubo)
#define GPU_ATTRIBUTE_UBO_BLOCK_NAME
void GPU_vertbuf_vert_set(blender::gpu::VertBuf *verts, uint v_idx, const void *data)
void GPU_vertbuf_data_resize(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_attr_set(blender::gpu::VertBuf *, uint a_idx, uint v_idx, const void *data)
const GPUVertFormat * GPU_vertbuf_get_format(const blender::gpu::VertBuf *verts)
uint GPU_vertbuf_get_vertex_alloc(const blender::gpu::VertBuf *verts)
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT
uint GPU_vertformat_attr_add(GPUVertFormat *, const char *name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
@ GPU_COMP_F32
@ GPU_COMP_I32
#define MEM_recallocN(vmemh, len)
Group Output data from inside of a node group A color picker Mix two input colors RGB to Convert a color s luminance to a grayscale value Generate a normal vector and a dot product Brightness Control the brightness and contrast of the input color Vector Map input vector components with curves Camera Retrieve information about the camera and how it relates to the current shading point s position Clamp a value between a minimum and a maximum Vector Perform vector math operation Invert Invert a producing a negative Combine Generate a color from its and blue channels(Deprecated)") DefNode(ShaderNode
struct GPUShader GPUShader
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
static btDbvtVolume bounds(btDbvtNode **leaves, int count)
Definition btDbvt.cpp:299
btMatrix3x3 inverse() const
Return the inverse of the matrix.
SIMD_FORCE_INLINE const btScalar & z() const
Return the z value.
Definition btQuadWord.h:117
SIMD_FORCE_INLINE btScalar length() const
Return the length of the vector.
Definition btVector3.h:257
constexpr bool is_empty() const
Definition BLI_span.hh:261
void append(const T &value)
std::unique_ptr< DrawCache > draw_data
virtual Span< int > ensure_material_indices(const Object &object)=0
virtual Span< gpu::Batch * > ensure_lines_batches(const Object &object, const ViewportRequest &request, const IndexMask &nodes_to_update)=0
virtual Span< gpu::Batch * > ensure_tris_batches(const Object &object, const ViewportRequest &request, const IndexMask &nodes_to_update)=0
void foreach_index(Fn &&fn) const
local_group_size(16, 16) .push_constant(Type b
#define printf
DEGForeachIDComponentCallback callback
Utilities for rendering attributes.
blender::gpu::Batch * drw_cache_procedural_lines_get()
blender::gpu::Batch * drw_cache_procedural_triangles_get()
blender::gpu::Batch * drw_cache_procedural_triangle_strips_get()
blender::gpu::Batch * drw_cache_procedural_points_get()
DRW_Global G_draw
void DRW_debug_modelmat(const float modelmat[4][4])
void DRW_debug_sphere(const float center[3], float radius, const float color[4])
void DRW_debug_bbox(const BoundBox *bbox, const float color[4])
blender::gpu::Batch * DRW_temp_batch_request(DRWInstanceDataList *idatalist, blender::gpu::VertBuf *buf, GPUPrimType prim_type)
void DRW_uniform_attrs_pool_flush_all(GHash *table)
blender::gpu::VertBuf * DRW_temp_buffer_request(DRWInstanceDataList *idatalist, GPUVertFormat *format, int *vert_len)
blender::gpu::Batch * DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist, blender::gpu::VertBuf *buf, blender::gpu::Batch *instancer, blender::gpu::Batch *geom)
void drw_uniform_attrs_pool_update(GHash *table, const GPUUniformAttrList *key, DRWResourceHandle *handle, const Object *ob, const Object *dupli_parent, const DupliObject *dupli_source)
#define DRW_BUFFER_VERTS_CHUNK
Object * DRW_object_get_dupli_parent(const Object *)
DRWManager DST
const DRWContextState * DRW_context_state_get()
int len
BLI_INLINE void * DRW_memblock_elem_from_handle(BLI_memblock *memblock, const DRWResourceHandle *handle)
#define DRW_MAX_DRAW_CMD_TYPE
DRWUniformType
@ DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF
@ DRW_UNIFORM_BLOCK_OBINFOS
@ DRW_UNIFORM_TFEEDBACK_TARGET
@ DRW_UNIFORM_TEXTURE_REF
@ DRW_UNIFORM_MODEL_MATRIX
@ DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE
@ DRW_UNIFORM_FLOAT_COPY
@ DRW_UNIFORM_MODEL_MATRIX_INVERSE
@ DRW_UNIFORM_FLOAT
@ DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF
@ DRW_UNIFORM_BASE_INSTANCE
@ DRW_UNIFORM_BLOCK_OBMATS
@ DRW_UNIFORM_IMAGE_REF
@ DRW_UNIFORM_RESOURCE_ID
@ DRW_UNIFORM_BLOCK
@ DRW_UNIFORM_TEXTURE
@ DRW_UNIFORM_STORAGE_BLOCK_REF
@ DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE
@ DRW_UNIFORM_RESOURCE_CHUNK
@ DRW_UNIFORM_IMAGE
@ DRW_UNIFORM_BLOCK_OBATTRS
@ DRW_UNIFORM_STORAGE_BLOCK
@ DRW_UNIFORM_BLOCK_VLATTRS
@ DRW_UNIFORM_INT
@ DRW_UNIFORM_BLOCK_REF
@ DRW_UNIFORM_INT_COPY
BLI_INLINE uint32_t DRW_handle_chunk_get(const DRWResourceHandle *handle)
#define MAX_CLIP_PLANES
#define DRW_RESOURCE_CHUNK_LEN
eDRWCommandType
@ DRW_CMD_COMPUTE_INDIRECT
@ DRW_CMD_COMPUTE
@ DRW_CMD_COMPUTE_REF
@ DRW_CMD_DRAW
@ DRW_CMD_DRWSTATE
@ DRW_CMD_DRAW_RANGE
@ DRW_CMD_CLEAR
@ DRW_CMD_BARRIER
@ DRW_CMD_STENCIL
@ DRW_CMD_DRAW_INSTANCE_RANGE
@ DRW_CMD_DRAW_PROCEDURAL
@ DRW_CMD_DRAW_INDIRECT
@ DRW_CMD_SELECTID
@ DRW_CMD_DRAW_INSTANCE
#define MAX_CULLED_VIEWS
BLI_INLINE void DRW_handle_increment(DRWResourceHandle *handle)
BLI_INLINE uint32_t DRW_handle_id_get(const DRWResourceHandle *handle)
uint32_t DRWResourceHandle
BLI_INLINE void DRW_handle_negative_scale_enable(DRWResourceHandle *handle)
const DRWView * DRW_view_default_get()
DRWShadingGroup * DRW_shgroup_create(GPUShader *shader, DRWPass *pass)
static void drw_command_compute(DRWShadingGroup *shgroup, int groups_x_len, int groups_y_len, int groups_z_len)
void DRW_view_frustum_corners_get(const DRWView *view, BoundBox *corners)
float DRW_view_near_distance_get(const DRWView *view)
void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name, const float value)
void drw_resource_buffer_finish(DRWData *vmempool)
static void drw_command_draw(DRWShadingGroup *shgroup, blender::gpu::Batch *batch, DRWResourceHandle handle)
static void * drw_command_create(DRWShadingGroup *shgroup, eDRWCommandType type)
void DRW_shgroup_call_ex(DRWShadingGroup *shgroup, const Object *ob, const float(*obmat)[4], blender::gpu::Batch *geom, bool bypass_culling, void *user_data)
void DRW_view_persmat_get(const DRWView *view, float mat[4][4], bool inverse)
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
void DRW_shgroup_uniform_texture_ex(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex, GPUSamplerState sampler_state)
void DRW_shgroup_buffer_texture(DRWShadingGroup *shgroup, const char *name, blender::gpu::VertBuf *vertex_buffer)
static void drw_command_barrier(DRWShadingGroup *shgroup, eGPUBarrier type)
void DRW_view_frustum_planes_get(const DRWView *view, float planes[6][4])
void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
eDRWCommandType command_type_get(const uint64_t *command_type_bits, int index)
static void command_type_set(uint64_t *command_type_bits, int index, eDRWCommandType type)
void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
static void drw_command_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3])
void DRW_shgroup_uniform_ivec3_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
uint32_t DRW_object_resource_id_get(Object *)
static int pass_shgroup_dist_sort(const void *a, const void *b)
static void drw_command_draw_procedural(DRWShadingGroup *shgroup, blender::gpu::Batch *batch, DRWResourceHandle handle, uint vert_count)
static void drw_call_culling_init(DRWCullingState *cull, const Object *ob)
void DRW_pass_foreach_shgroup(DRWPass *pass, void(*callback)(void *user_data, DRWShadingGroup *shgrp), void *user_data)
void DRW_shgroup_call_instances_with_attrs(DRWShadingGroup *shgroup, const Object *ob, blender::gpu::Batch *geom, blender::gpu::Batch *inst_attributes)
void DRW_buffer_add_entry_struct(DRWCallBuffer *callbuf, const void *data)
void DRW_shgroup_stencil_set(DRWShadingGroup *shgroup, uint write_mask, uint reference, uint compare_mask)
static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup, int loc, DRWUniformType type, const void *value, GPUSamplerState sampler_state, int length, int arraysize)
DRWPass * DRW_pass_create(const char *name, DRWState state)
static DRWResourceHandle drw_resource_handle(DRWShadingGroup *shgroup, const float(*obmat)[4], const Object *ob)
void DRW_shgroup_call_instances(DRWShadingGroup *shgroup, const Object *ob, blender::gpu::Batch *geom, uint count)
void DRW_shgroup_call_compute_indirect(DRWShadingGroup *shgroup, GPUStorageBuf *indirect_buf)
void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint attr_len)
static void draw_call_sort(DRWCommand *array, DRWCommand *array_tmp, int array_len)
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
static DRWShadingGroup * drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
void DRW_shgroup_uniform_ivec2_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
DRWCallBuffer * DRW_shgroup_call_buffer_instance(DRWShadingGroup *shgroup, GPUVertFormat *format, blender::gpu::Batch *geom)
void DRW_pass_link(DRWPass *first, DRWPass *second)
void DRW_shgroup_uniform_vec3_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
bool DRW_view_is_persp_get(const DRWView *view)
void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, Object *ob, bool use_wire, bool use_mask, bool use_fset, bool use_color, bool use_uv)
void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
void DRW_sculpt_debug_cb(blender::bke::pbvh::Node *node, void *user_data, const float bmin[3], const float bmax[3], PBVHNodeFlags flag)
DRWShadingGroup * DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
void DRW_shgroup_call_compute(DRWShadingGroup *shgroup, int groups_x_len, int groups_y_len, int groups_z_len)
static void drw_command_draw_instance(DRWShadingGroup *shgroup, blender::gpu::Batch *batch, DRWResourceHandle handle, uint count, bool use_attr)
static void drw_command_draw_intance_range(DRWShadingGroup *shgroup, blender::gpu::Batch *batch, DRWResourceHandle handle, uint start, uint count)
static void drw_command_set_mutable_state(DRWShadingGroup *shgroup, DRWState enable, DRWState disable)
DRWView * DRW_view_create_sub(const DRWView *parent_view, const float viewmat[4][4], const float winmat[4][4])
static void drw_shgroup_material_texture(DRWShadingGroup *grp, GPUTexture *gputex, const char *name, GPUSamplerState state)
void DRW_shgroup_barrier(DRWShadingGroup *shgroup, eGPUBarrier type)
void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
static void drw_command_compute_indirect(DRWShadingGroup *shgroup, GPUStorageBuf *indirect_buf)
void DRW_view_winmat_get(const DRWView *view, float mat[4][4], bool inverse)
void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, const Object *ob)
void DRW_pass_sort_shgroup_z(DRWPass *pass)
bool DRW_pass_is_empty(DRWPass *pass)
#define SCULPT_DEBUG_BUFFERS
static void drw_call_calc_orco(const Object *ob, float(*r_orcofacs)[4])
void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float(*value)[3])
static void drw_command_set_select_id(DRWShadingGroup *shgroup, blender::gpu::VertBuf *buf, uint select_id)
void DRW_shgroup_call_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3])
GPUVertFormat * DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[], int arraysize)
static DRWResourceHandle drw_resource_handle_new(const float(*obmat)[4], const Object *ob)
void DRW_shgroup_storage_block_ex(DRWShadingGroup *shgroup, const char *name, const GPUStorageBuf *ssbo DRW_DEBUG_FILE_LINE_ARGS)
void DRW_view_update(DRWView *view, const float viewmat[4][4], const float winmat[4][4], const float(*culling_viewmat)[4], const float(*culling_winmat)[4])
void DRW_shgroup_call_instance_range(DRWShadingGroup *shgroup, const Object *ob, blender::gpu::Batch *geom, uint i_sta, uint i_num)
static float sculpt_debug_colors[9][4]
void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
static void drw_command_set_stencil_mask(DRWShadingGroup *shgroup, uint write_mask, uint reference, uint compare_mask)
static void drw_shgroup_uniform(DRWShadingGroup *shgroup, const char *name, DRWUniformType type, const void *value, int length, int arraysize)
void DRW_shgroup_call_range(DRWShadingGroup *shgroup, const Object *ob, blender::gpu::Batch *geom, uint v_sta, uint v_num)
void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, GPUMaterial *material)
void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *shgroup, const Object *ob, uint tri_count)
void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
void DRW_shgroup_call_procedural_points(DRWShadingGroup *shgroup, const Object *ob, uint point_count)
static void drw_command_draw_indirect(DRWShadingGroup *shgroup, blender::gpu::Batch *batch, DRWResourceHandle handle, GPUStorageBuf *indirect_buf)
void DRW_shgroup_buffer_texture_ref(DRWShadingGroup *shgroup, const char *name, blender::gpu::VertBuf **vertex_buffer)
float DRW_view_far_distance_get(const DRWView *view)
static void draw_pbvh_nodes(const Object &object, const blender::Span< blender::gpu::Batch * > batches, const blender::Span< int > material_indices, const blender::Span< DRWShadingGroup * > shading_groups, const blender::IndexMask &nodes_to_draw)
void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups, GPUMaterial **gpumats, int num_shgroups, const Object *ob)
void DRW_shgroup_storage_block_ref_ex(DRWShadingGroup *shgroup, const char *name, GPUStorageBuf **ssbo DRW_DEBUG_FILE_LINE_ARGS)
void DRW_shgroup_uniform_block_ref_ex(DRWShadingGroup *shgroup, const char *name, GPUUniformBuf **ubo DRW_DEBUG_FILE_LINE_ARGS)
void DRW_view_clip_planes_set(DRWView *view, float(*planes)[4], int plane_len)
void DRW_shgroup_call_procedural_indirect(DRWShadingGroup *shgroup, GPUPrimType primitive_type, Object *ob, GPUStorageBuf *indirect_buf)
static GPUVertFormat inst_select_format
void DRW_shgroup_call_procedural_lines(DRWShadingGroup *shgroup, const Object *ob, uint line_count)
void DRW_shgroup_uniform_vec4_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
BLI_INLINE void drw_call_matrix_init(DRWObjectMatrix *ob_mats, const Object *ob, const float(*obmat)[4])
void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
void DRW_shgroup_clear_framebuffer(DRWShadingGroup *shgroup, eGPUFrameBufferBits channels, uchar r, uchar g, uchar b, uchar a, float depth, uchar stencil)
void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
void DRW_shgroup_uniform_mat4_copy(DRWShadingGroup *shgroup, const char *name, const float(*value)[4])
void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float(*value)[4])
void DRW_shgroup_uniform_image_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
static void draw_frustum_culling_planes_calc(const float(*persmat)[4], float(*frustum_planes)[4])
void DRW_view_reset()
static void draw_frustum_boundbox_calc(const float(*viewinv)[4], const float(*projmat)[4], BoundBox *r_bbox)
void DRW_shgroup_uniform_vec2_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
void DRW_shgroup_uniform_texture_ref_ex(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex, GPUSamplerState sampler_state)
static void draw_frustum_bound_sphere_calc(const BoundBox *bbox, const float(*viewinv)[4], const float(*projmat)[4], const float(*projinv)[4], BoundSphere *bsphere)
DRWShadingGroup * DRW_shgroup_transform_feedback_create(GPUShader *shader, DRWPass *pass, blender::gpu::VertBuf *tf_target)
void DRW_view_default_set(const DRWView *view)
static void drw_command_draw_range(DRWShadingGroup *shgroup, blender::gpu::Batch *batch, DRWResourceHandle handle, uint start, uint count)
void DRW_shgroup_uniform_block_ex(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuf *ubo DRW_DEBUG_FILE_LINE_ARGS)
static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup, blender::gpu::Batch *geom, const Object *ob, uint vert_count)
DRWCallBuffer * DRW_shgroup_call_buffer(DRWShadingGroup *shgroup, GPUVertFormat *format, GPUPrimType prim_type)
void DRW_pass_sort_shgroup_reverse(DRWPass *pass)
void DRW_view_update_sub(DRWView *view, const float viewmat[4][4], const float winmat[4][4])
static void draw_view_matrix_state_update(DRWView *view, const float viewmat[4][4], const float winmat[4][4])
static void drw_sculpt_get_frustum_planes(const Object *ob, float planes[6][4])
void DRW_shgroup_vertex_buffer_ref_ex(DRWShadingGroup *shgroup, const char *name, blender::gpu::VertBuf **vertex_buffer DRW_DEBUG_FILE_LINE_ARGS)
void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
void DRW_shgroup_uniform_image(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
void DRW_shgroup_uniform_ivec4_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
static DRWShadingGroup * drw_shgroup_create_ex(GPUShader *shader, DRWPass *pass)
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
#define KEY(a)
static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
DRWShadingGroup * DRW_shgroup_material_create(GPUMaterial *material, DRWPass *pass)
void DRW_shgroup_vertex_buffer_ex(DRWShadingGroup *shgroup, const char *name, blender::gpu::VertBuf *vertex_buffer DRW_DEBUG_FILE_LINE_ARGS)
DRWView * DRW_view_create(const float viewmat[4][4], const float winmat[4][4], const float(*culling_viewmat)[4], const float(*culling_winmat)[4], DRWCallVisibilityFn *visibility_fn)
static void drw_command_clear(DRWShadingGroup *shgroup, eGPUFrameBufferBits channels, uchar r, uchar g, uchar b, uchar a, float depth, uchar stencil)
DRWPass * DRW_pass_create_instance(const char *name, DRWPass *original, DRWState state)
#define SCULPT_DEBUG_COLOR(id)
void DRW_view_viewmat_get(const DRWView *view, float mat[4][4], bool inverse)
DRWState
Definition draw_state.hh:25
@ DRW_STATE_NO_DRAW
Definition draw_state.hh:27
@ DRW_STATE_PROGRAM_POINT_SIZE
Definition draw_state.hh:72
draw_view in_light_buf[] float
struct @620::@622 batch
GPUShader * GPU_pass_shader_get(GPUPass *pass)
uint top
int count
#define GS(x)
Definition iris.cc:202
format
void * MEM_mallocN_aligned(size_t len, size_t alignment, const char *str)
Definition mallocn.cc:110
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void *(* MEM_callocN)(size_t len, const char *str)
Definition mallocn.cc:42
static ulong state[N]
static int left
#define N
#define G(x, y, z)
#define min(a, b)
Definition sort.c:32
unsigned int uint32_t
Definition stdint.h:80
unsigned __int64 uint64_t
Definition stdint.h:90
float vec[8][3]
float center[3]
Definition DRW_render.hh:93
float texspace_size[3]
float texspace_location[3]
blender::gpu::VertBuf * buf
blender::gpu::VertBuf * buf_select
DRWCommandChunk * next
DRWCommand commands[96]
uint64_t command_type[6]
eGPUFrameBufferBits clear_channels
blender::gpu::Batch * batch
DRWResourceHandle handle
GPUStorageBuf * indirect_buf
blender::gpu::Batch * batch
DRWResourceHandle handle
blender::gpu::Batch * batch
blender::gpu::Batch * batch
blender::gpu::Batch * batch
DRWResourceHandle handle
DRWResourceHandle handle
blender::gpu::Batch * batch
blender::gpu::VertBuf * select_buf
const bContext * evil_C
RegionView3D * rv3d
BoundSphere bsphere
BLI_memblock * views
BLI_memblock * uniforms
GPUUniformBuf ** obinfos_ubo
BLI_memblock * cullstates
BLI_memblock * passes
DRWInstanceDataList * idatalist
BLI_memblock * commands
GHash * obattrs_ubo_pool
BLI_memblock * images
BLI_memblock * callbuffers
BLI_memblock * obmats
BLI_memblock * commands_small
GPUUniformBuf ** matrices_ubo
BLI_memblock * obinfos
BLI_memblock * shgroups
DRWResourceHandle ob_handle
DRWResourceHandle pass_handle
DRWView * view_active
DRWContextState draw_ctx
DRWView * view_previous
DRWResourceHandle resource_handle
DupliObject * dupli_source
bool ob_state_obinfo_init
DRWData * vmempool
DRWView * view_default
Object * dupli_parent
float orcotexfac[2][4]
float modelinverse[4][4]
DRWState state
struct DRWPass::@294 shgroups
DRWPass * next
blender::Span< blender::draw::pbvh::AttributeRequest > attrs
DRWShadingGroup ** shading_groups
struct DRWCommandChunk * first
struct DRWShadingGroup::@289::@293 z_sorting
const struct GPUUniformAttrList * uniform_attrs
DRWResourceHandle pass_handle
DRWShadingGroup * next
struct DRWShadingGroup::@288 cmd
struct DRWCommandChunk * last
DRWUniformChunk * uniforms
DRWUniform uniforms[10]
uint8_t arraysize
GPUTexture ** texture_ref
GPUUniformBuf ** block_ref
GPUUniformBuf * block
GPUSamplerState sampler_state
GPUTexture * texture
float fvalue[4]
const struct GPUUniformAttrList * uniform_attrs
const void * pvalue
ViewMatrices storage
uint32_t culling_mask
DRWView * parent
GPUUniformBuf * view_ubo
GPUUniformBuf * clipping_ubo
unsigned int random_id
static constexpr GPUSamplerState internal_sampler()
static constexpr GPUSamplerState default_sampler()
Definition DNA_ID.h:413
char name[66]
Definition DNA_ID.h:425
GPUTexture * tile_mapping
Definition BKE_image.hh:584
GPUTexture * texture
Definition BKE_image.hh:583
float texspace_size[3]
float texspace_location[3]
short transflag
short base_flag
float color[4]
struct ImageUser iuser
struct Image * ima
DRW_AttributeRequest requests[GPU_MAX_ATTR]
DRWCommandDraw draw
PointerRNA * ptr
Definition wm_files.cc:4126
uint8_t flag
Definition wm_window.cc:138