Blender V5.0
draw_cache_impl_mesh.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2017 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include <array>
12#include <optional>
13
14#include "MEM_guardedalloc.h"
15
16#include "BLI_index_range.hh"
17#include "BLI_listbase.h"
18#include "BLI_span.hh"
19#include "BLI_string_ref.hh"
20
21#include "DNA_mesh_types.h"
22#include "DNA_object_types.h"
23#include "DNA_scene_types.h"
24#include "DNA_userdef_types.h"
25
26#include "BKE_attribute.hh"
27#include "BKE_customdata.hh"
28#include "BKE_editmesh.hh"
29#include "BKE_material.hh"
30#include "BKE_mesh.hh"
31#include "BKE_object.hh"
32#include "BKE_object_deform.h"
33#include "BKE_paint.hh"
34#include "BKE_paint_bvh.hh"
36
37#include "atomic_ops.h"
38
39#include "GPU_batch.hh"
40#include "GPU_material.hh"
41
42#include "DRW_render.hh"
43
44#include "draw_cache_extract.hh"
45#include "draw_cache_inline.hh"
46#include "draw_subdivision.hh"
47
48#include "draw_cache_impl.hh" /* own include */
50
52
53namespace blender::draw {
54
55/* ---------------------------------------------------------------------- */
58
59#define TRIS_PER_MAT_INDEX BUFFER_LEN
60
61static void mesh_batch_cache_clear(MeshBatchCache &cache);
62
64 const Span<VBOType> vbos,
65 const Span<IBOType> ibos)
66{
67 Set<const void *, 16> buffer_ptrs;
68 buffer_ptrs.reserve(vbos.size() + ibos.size());
69 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
70 for (const VBOType vbo : vbos) {
71 if (const auto *buffer = mbc->buff.vbos.lookup_ptr(vbo)) {
72 buffer_ptrs.add(buffer->get());
73 }
74 }
75 }
76 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
77 for (const IBOType ibo : ibos) {
78 if (const auto *buffer = mbc->buff.ibos.lookup_ptr(ibo)) {
79 buffer_ptrs.add(buffer->get());
80 }
81 }
82 }
83
84 const auto batch_contains_data = [&](gpu::Batch &batch) {
85 if (buffer_ptrs.contains(batch.elem)) {
86 return true;
87 }
88 if (std::any_of(batch.verts, batch.verts + ARRAY_SIZE(batch.verts), [&](gpu::VertBuf *vbo) {
89 return vbo && buffer_ptrs.contains(vbo);
90 }))
91 {
92 return true;
93 }
94 return false;
95 };
96
97 for (const int i : IndexRange(MBC_BATCH_LEN)) {
98 gpu::Batch *batch = ((gpu::Batch **)&cache.batch)[i];
99 if (batch && batch_contains_data(*batch)) {
100 GPU_BATCH_DISCARD_SAFE(((gpu::Batch **)&cache.batch)[i]);
101 cache.batch_ready &= ~DRWBatchFlag(uint64_t(1u) << i);
102 }
103 }
104
105 if (!cache.surface_per_mat.is_empty()) {
106 if (cache.surface_per_mat.first() && batch_contains_data(*cache.surface_per_mat.first())) {
107 /* The format for all `surface_per_mat` batches is the same, discard them all. */
108 for (const int i : cache.surface_per_mat.index_range()) {
110 }
112 }
113 }
114
115 for (const VBOType vbo : vbos) {
116 cache.final.buff.vbos.remove(vbo);
117 cache.cage.buff.vbos.remove(vbo);
118 cache.uv_cage.buff.vbos.remove(vbo);
119 }
120 for (const IBOType ibo : ibos) {
121 cache.final.buff.ibos.remove(ibo);
122 cache.cage.buff.ibos.remove(ibo);
123 cache.uv_cage.buff.ibos.remove(ibo);
124 }
125}
126
127/* Return true is all layers in _b_ are inside _a_. */
129{
130 return (*((uint32_t *)&a) & *((uint32_t *)&b)) == *((uint32_t *)&b);
131}
132
134{
135 return *((uint32_t *)&a) == *((uint32_t *)&b);
136}
137
139{
140 uint32_t *a_p = (uint32_t *)a;
141 uint32_t *b_p = (uint32_t *)&b;
143}
144
146{
147 *((uint32_t *)a) = 0;
148}
149
150static void mesh_cd_calc_edit_uv_layer(const Mesh & /*mesh*/, DRW_MeshCDMask *cd_used)
151{
152 cd_used->edit_uv = 1;
153}
154
155static void mesh_cd_calc_active_uv_layer(const Object &object,
156 const Mesh &mesh,
157 DRW_MeshCDMask &cd_used)
158{
159 const Mesh &me_final = editmesh_final_or_this(object, mesh);
160 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
161 int layer = CustomData_get_active_layer(&cd_ldata, CD_PROP_FLOAT2);
162 if (layer != -1) {
163 cd_used.uv |= (1 << layer);
164 }
165}
166
168 const Mesh &mesh,
169 DRW_MeshCDMask &cd_used)
170{
171 const Mesh &me_final = editmesh_final_or_this(object, mesh);
172 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
173 int layer = CustomData_get_stencil_layer(&cd_ldata, CD_PROP_FLOAT2);
174 if (layer != -1) {
175 cd_used.uv |= (1 << layer);
176 }
177}
178
180 const Mesh &mesh,
181 const Span<const GPUMaterial *> materials,
182 VectorSet<std::string> *attributes)
183{
184 const Mesh &me_final = editmesh_final_or_this(object, mesh);
185 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
186 const CustomData &cd_pdata = mesh_cd_pdata_get_from_mesh(me_final);
187 const CustomData &cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
188 const CustomData &cd_edata = mesh_cd_edata_get_from_mesh(me_final);
189
190 /* See: DM_vertex_attributes_from_gpu for similar logic */
191 DRW_MeshCDMask cd_used;
193
194 const StringRefNull default_color_name = me_final.default_color_attribute ?
195 me_final.default_color_attribute :
196 "";
197
198 for (const GPUMaterial *gpumat : materials) {
199 if (gpumat == nullptr) {
200 continue;
201 }
202 ListBase gpu_attrs = GPU_material_attributes(gpumat);
203 LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
204 StringRef name = gpu_attr->name;
205 eCustomDataType type = eCustomDataType(gpu_attr->type);
206 int layer = -1;
207 std::optional<bke::AttrDomain> domain;
208
209 if (gpu_attr->is_default_color) {
210 name = default_color_name.c_str();
211 }
212
213 if (type == CD_AUTO_FROM_NAME) {
214 /* We need to deduce what exact layer is used.
215 *
216 * We do it based on the specified name.
217 */
218 if (!name.is_empty()) {
220 type = CD_MTFACE;
221
222 if (layer == -1) {
223 /* Try to match a generic attribute, we use the first attribute domain with a
224 * matching name. */
225 if (drw_custom_data_match_attribute(cd_vdata, name, &layer, &type)) {
226 domain = bke::AttrDomain::Point;
227 }
228 else if (drw_custom_data_match_attribute(cd_ldata, name, &layer, &type)) {
230 }
231 else if (drw_custom_data_match_attribute(cd_pdata, name, &layer, &type)) {
232 domain = bke::AttrDomain::Face;
233 }
234 else if (drw_custom_data_match_attribute(cd_edata, name, &layer, &type)) {
235 domain = bke::AttrDomain::Edge;
236 }
237 else {
238 layer = -1;
239 }
240 }
241
242 if (layer == -1) {
243 continue;
244 }
245 }
246 else {
247 /* Fall back to the UV layer, which matches old behavior. */
248 type = CD_MTFACE;
249 }
250 }
251
252 switch (type) {
253 case CD_MTFACE: {
254 if (layer == -1) {
255 layer = !name.is_empty() ?
258 }
259 if (layer != -1 && !CustomData_layer_is_anonymous(&cd_ldata, CD_PROP_FLOAT2, layer)) {
260 cd_used.uv |= (1 << layer);
261 }
262 break;
263 }
264 case CD_TANGENT: {
265 if (layer == -1) {
266 layer = !name.is_empty() ?
269
270 /* Only fall back to orco (below) when we have no UV layers, see: #56545 */
271 if (layer == -1 && !name.is_empty()) {
273 }
274 }
275 if (layer != -1) {
276 cd_used.tan |= (1 << layer);
277 }
278 else {
279 /* no UV layers at all => requesting orco */
280 cd_used.tan_orco = 1;
281 cd_used.orco = 1;
282 }
283 break;
284 }
285
286 case CD_ORCO: {
287 cd_used.orco = 1;
288 break;
289 }
291 case CD_PROP_COLOR:
293 case CD_PROP_FLOAT3:
294 case CD_PROP_BOOL:
295 case CD_PROP_INT8:
296 case CD_PROP_INT32:
297 case CD_PROP_INT16_2D:
298 case CD_PROP_INT32_2D:
299 case CD_PROP_FLOAT:
300 case CD_PROP_FLOAT2: {
301 if (layer != -1 && domain.has_value()) {
302 drw_attributes_add_request(attributes, name);
303 }
304 break;
305 }
306 default:
307 break;
308 }
309 }
310 }
311 return cd_used;
312}
313
315
316/* ---------------------------------------------------------------------- */
319
322{
326
327 memset(wstate, 0, sizeof(*wstate));
328
329 wstate->defgroup_active = -1;
330}
331
334 const DRW_MeshWeightState *wstate_src)
335{
336 MEM_SAFE_FREE(wstate_dst->defgroup_sel);
337 MEM_SAFE_FREE(wstate_dst->defgroup_locked);
338 MEM_SAFE_FREE(wstate_dst->defgroup_unlocked);
339
340 memcpy(wstate_dst, wstate_src, sizeof(*wstate_dst));
341
342 if (wstate_src->defgroup_sel) {
343 wstate_dst->defgroup_sel = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_sel));
344 }
345 if (wstate_src->defgroup_locked) {
346 wstate_dst->defgroup_locked = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_locked));
347 }
348 if (wstate_src->defgroup_unlocked) {
349 wstate_dst->defgroup_unlocked = static_cast<bool *>(
350 MEM_dupallocN(wstate_src->defgroup_unlocked));
351 }
352}
353
/* Compare two optional boolean flag arrays of `size` elements.
 * Two null arrays compare equal; a null and a non-null array never match. */
static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
{
  if (array1 == nullptr || array2 == nullptr) {
    return array1 == array2;
  }
  return std::equal(array1, array1 + size, array2);
}
359
362 const DRW_MeshWeightState *b)
363{
364 return a->defgroup_active == b->defgroup_active && a->defgroup_len == b->defgroup_len &&
365 a->flags == b->flags && a->alert_mode == b->alert_mode &&
366 a->defgroup_sel_count == b->defgroup_sel_count &&
367 drw_mesh_flags_equal(a->defgroup_sel, b->defgroup_sel, a->defgroup_len) &&
368 drw_mesh_flags_equal(a->defgroup_locked, b->defgroup_locked, a->defgroup_len) &&
369 drw_mesh_flags_equal(a->defgroup_unlocked, b->defgroup_unlocked, a->defgroup_len);
370}
371
373 Object &ob, Mesh &mesh, const ToolSettings &ts, bool paint_mode, DRW_MeshWeightState *wstate)
374{
375 /* Extract complete vertex weight group selection state and mode flags. */
376 memset(wstate, 0, sizeof(*wstate));
377
380
381 wstate->alert_mode = ts.weightuser;
382
383 if (paint_mode && ts.multipaint) {
384 /* Multi-paint needs to know all selected bones, not just the active group.
385 * This is actually a relatively expensive operation, but caching would be difficult. */
387 &ob, wstate->defgroup_len, &wstate->defgroup_sel_count);
388
389 if (wstate->defgroup_sel_count > 1) {
392
395 wstate->defgroup_len,
396 wstate->defgroup_sel,
397 wstate->defgroup_sel,
398 &wstate->defgroup_sel_count);
399 }
400 }
401 /* With only one selected bone Multi-paint reverts to regular mode. */
402 else {
403 wstate->defgroup_sel_count = 0;
405 }
406 }
407
408 if (paint_mode && ts.wpaint_lock_relative) {
409 /* Set of locked vertex groups for the lock relative mode. */
412
413 /* Check that a deform group is active, and none of selected groups are locked. */
415 wstate->defgroup_locked, wstate->defgroup_unlocked, wstate->defgroup_active) &&
417 wstate->defgroup_locked,
418 wstate->defgroup_sel,
419 wstate->defgroup_sel_count))
420 {
422
423 /* Compute the set of locked and unlocked deform vertex groups. */
425 wstate->defgroup_locked,
426 wstate->defgroup_unlocked,
427 wstate->defgroup_locked, /* out */
428 wstate->defgroup_unlocked);
429 }
430 else {
433 }
434 }
435}
436
438
439/* ---------------------------------------------------------------------- */
442
443/* gpu::Batch cache management. */
444
445static bool mesh_batch_cache_valid(Mesh &mesh)
446{
447 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
448
449 if (cache == nullptr) {
450 return false;
451 }
452
453 /* NOTE: bke::pbvh::Tree draw data should not be checked here. */
454
455 if (cache->is_editmode != (mesh.runtime->edit_mesh != nullptr)) {
456 return false;
457 }
458
459 if (cache->is_dirty) {
460 return false;
461 }
462
464 return false;
465 }
466
467 return true;
468}
469
470static void mesh_batch_cache_init(Mesh &mesh)
471{
472 if (!mesh.runtime->batch_cache) {
473 mesh.runtime->batch_cache = MEM_new<MeshBatchCache>(__func__);
474 }
475 else {
476 *static_cast<MeshBatchCache *>(mesh.runtime->batch_cache) = {};
477 }
478 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
479
480 cache->is_editmode = mesh.runtime->edit_mesh != nullptr;
481
482 if (cache->is_editmode == false) {
483 // cache->edge_len = mesh_render_edges_len_get(mesh);
484 // cache->tri_len = mesh_render_corner_tris_len_get(mesh);
485 // cache->face_len = mesh_render_faces_len_get(mesh);
486 // cache->vert_len = mesh_render_verts_len_get(mesh);
487 }
488
490 cache->surface_per_mat = Array<gpu::Batch *>(cache->mat_len, nullptr);
491 cache->tris_per_mat.reinitialize(cache->mat_len);
492
493 cache->is_dirty = false;
494 cache->batch_ready = (DRWBatchFlag)0;
495 cache->batch_requested = (DRWBatchFlag)0;
496
498}
499
501{
502 if (!mesh_batch_cache_valid(mesh)) {
503 if (mesh.runtime->batch_cache) {
504 mesh_batch_cache_clear(*static_cast<MeshBatchCache *>(mesh.runtime->batch_cache));
505 }
507 }
508}
509
511{
512 return static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
513}
514
516 const DRW_MeshWeightState *wstate)
517{
518 if (!drw_mesh_weight_state_compare(&cache.weight_state, wstate)) {
519 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
520 mbc->buff.vbos.remove(VBOType::VertexGroupWeight);
521 }
523
525
527 }
528}
529
531{
534
535 /* If there are only a few materials at most, just request batches for everything. However, if
536 * the maximum material index is large, detect the actually used material indices first and only
537 * request those. This reduces the overhead of dealing with all these batches down the line. */
538 if (cache.mat_len < 16) {
539 for (int i = 0; i < cache.mat_len; i++) {
541 }
542 }
543 else {
544 const VectorSet<int> &used_material_indices = mesh.material_indices_used();
545 for (const int i : used_material_indices) {
547 }
548 }
549}
550
555
557{
558 discard_buffers(cache,
571
572 cache.tot_area = 0.0f;
573 cache.tot_uv_area = 0.0f;
574
575 /* We discarded the vbo.uv so we need to reset the cd_used flag. */
576 cache.cd_used.uv = 0;
577 cache.cd_used.edit_uv = 0;
578}
579
591
593{
594 if (!mesh->runtime->batch_cache) {
595 return;
596 }
597 MeshBatchCache &cache = *static_cast<MeshBatchCache *>(mesh->runtime->batch_cache);
598 switch (mode) {
601
602 /* Because visible UVs depends on edit mode selection, discard topology. */
604 break;
607 break;
609 cache.is_dirty = true;
610 break;
614 break;
617 break;
620 break;
621 default:
622 BLI_assert(0);
623 }
624}
625
627{
628 mbc->buff.ibos.clear();
629 mbc->buff.vbos.clear();
630
631 mbc->loose_geom = {};
632 mbc->face_sorted = {};
633}
634
636{
637 if (cache.subdiv_cache) {
639 MEM_delete(cache.subdiv_cache);
640 cache.subdiv_cache = nullptr;
641 }
642}
643
645{
646 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
648 }
649
650 cache.tris_per_mat = {};
651
652 for (int i = 0; i < sizeof(cache.batch) / sizeof(void *); i++) {
653 gpu::Batch **batch = (gpu::Batch **)&cache.batch;
655 }
656 for (const int i : cache.surface_per_mat.index_range()) {
658 }
659
662 cache.surface_per_mat = {};
663 cache.mat_len = 0;
664
665 cache.batch_ready = (DRWBatchFlag)0;
667
669}
670
671void DRW_mesh_batch_cache_free(void *batch_cache)
672{
673 MeshBatchCache *cache = static_cast<MeshBatchCache *>(batch_cache);
675 MEM_delete(cache);
676}
677
679
680/* ---------------------------------------------------------------------- */
683
684static void texpaint_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
685{
686 DRW_MeshCDMask cd_needed;
687 mesh_cd_layers_type_clear(&cd_needed);
688 mesh_cd_calc_active_uv_layer(object, mesh, cd_needed);
689
690 BLI_assert(cd_needed.uv != 0 &&
691 "No uv layer available in texpaint, but batches requested anyway!");
692
693 mesh_cd_calc_active_mask_uv_layer(object, mesh, cd_needed);
694 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
695}
696
698 const Mesh &mesh,
699 VectorSet<std::string> &attributes)
700{
701 const Mesh &me_final = editmesh_final_or_this(object, mesh);
702 const CustomData &cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
703 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
704
705 auto request_color_attribute = [&](const StringRef name) {
706 if (!name.is_empty()) {
707 int layer_index;
708 eCustomDataType type;
709 if (drw_custom_data_match_attribute(cd_vdata, name, &layer_index, &type)) {
710 drw_attributes_add_request(&attributes, name);
711 }
712 else if (drw_custom_data_match_attribute(cd_ldata, name, &layer_index, &type)) {
713 drw_attributes_add_request(&attributes, name);
714 }
715 }
716 };
717
718 request_color_attribute(me_final.active_color_attribute);
719 request_color_attribute(me_final.default_color_attribute);
720}
721
723{
724 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
726 return DRW_batch_request(&cache.batch.all_verts);
727}
728
735
737{
738 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
740 return DRW_batch_request(&cache.batch.all_edges);
741}
742
744{
745 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
747
748 return cache.batch.surface;
749}
750
757
759{
760 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
761 if (cache.no_loose_wire) {
762 return nullptr;
763 }
765 return DRW_batch_request(&cache.batch.loose_edges);
766}
767
774
775gpu::Batch *DRW_mesh_batch_cache_get_edge_detection(Mesh &mesh, bool *r_is_manifold)
776{
777 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
779 /* Even if is_manifold is not correct (not updated),
780 * the default (not manifold) is just the worst case. */
781 if (r_is_manifold) {
782 *r_is_manifold = cache.is_manifold;
783 }
785}
786
793
800
802 const Mesh &mesh,
803 const Span<const GPUMaterial *> materials,
804 VectorSet<std::string> *r_attrs,
805 DRW_MeshCDMask *r_cd_needed)
806{
807 VectorSet<std::string> attrs_needed;
808 DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(object, mesh, materials, &attrs_needed);
809
810 if (r_attrs) {
811 *r_attrs = attrs_needed;
812 }
813
814 if (r_cd_needed) {
815 *r_cd_needed = cd_needed;
816 }
817}
818
820 Object &object, Mesh &mesh, const Span<const GPUMaterial *> materials)
821{
822 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
823 VectorSet<std::string> attrs_needed;
824 DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(object, mesh, materials, &attrs_needed);
825
826 BLI_assert(materials.size() == cache.mat_len);
827
828 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
829 drw_attributes_merge(&cache.attr_needed, &attrs_needed);
831 return cache.surface_per_mat;
832}
833
841
843{
844 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
845 texpaint_request_active_uv(cache, object, mesh);
847 return cache.batch.surface;
848}
849
851{
852 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
853
854 VectorSet<std::string> attrs_needed{};
855 request_active_and_default_color_attributes(object, mesh, attrs_needed);
856
857 drw_attributes_merge(&cache.attr_needed, &attrs_needed);
858
860 return cache.batch.surface;
861}
862
864{
865 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
866
867 VectorSet<std::string> attrs_needed{};
868 request_active_and_default_color_attributes(object, mesh, attrs_needed);
869
870 drw_attributes_merge(&cache.attr_needed, &attrs_needed);
871
873 return cache.batch.surface;
874}
875
877{
878 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
879
880 cache.cd_needed.sculpt_overlays = 1;
883
884 return cache.batch.sculpt_overlays;
885}
886
896
898
899/* ---------------------------------------------------------------------- */
902
909
911{
912 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
914 return DRW_batch_request(&cache.batch.edit_edges);
915}
916
923
930
937
944
951
953
954/* ---------------------------------------------------------------------- */
957
964
971
978
985
987
988/* ---------------------------------------------------------------------- */
991
992static void edituv_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
993{
994 DRW_MeshCDMask cd_needed;
995 mesh_cd_layers_type_clear(&cd_needed);
996 mesh_cd_calc_active_uv_layer(object, mesh, cd_needed);
997 mesh_cd_calc_edit_uv_layer(mesh, &cd_needed);
998
999 BLI_assert(cd_needed.edit_uv != 0 &&
1000 "No uv layer available in edituv, but batches requested anyway!");
1001
1002 mesh_cd_calc_active_mask_uv_layer(object, mesh, cd_needed);
1003 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
1004}
1005
1007 Mesh &mesh,
1008 float **tot_area,
1009 float **tot_uv_area)
1010{
1011 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1012 edituv_request_active_uv(cache, object, mesh);
1014
1015 if (tot_area != nullptr) {
1016 *tot_area = &cache.tot_area;
1017 }
1018 if (tot_uv_area != nullptr) {
1019 *tot_uv_area = &cache.tot_uv_area;
1020 }
1022}
1023
1031
1033{
1034 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1035 edituv_request_active_uv(cache, object, mesh);
1037 return DRW_batch_request(&cache.batch.edituv_faces);
1038}
1039
1041{
1042 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1043 edituv_request_active_uv(cache, object, mesh);
1045 return DRW_batch_request(&cache.batch.edituv_edges);
1046}
1047
1049{
1050 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1051 edituv_request_active_uv(cache, object, mesh);
1053 return DRW_batch_request(&cache.batch.edituv_verts);
1054}
1055
1057{
1058 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1059 edituv_request_active_uv(cache, object, mesh);
1061 return DRW_batch_request(&cache.batch.edituv_fdots);
1062}
1063
1065{
1066 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1067 edituv_request_active_uv(cache, object, mesh);
1069 return DRW_batch_request(&cache.batch.uv_faces);
1070}
1071
1073{
1074 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1075 edituv_request_active_uv(cache, object, mesh);
1078}
1079
1081{
1082 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1083 edituv_request_active_uv(cache, object, mesh);
1085 return DRW_batch_request(&cache.batch.wire_loops_uvs);
1086}
1087
1089{
1090 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1091 edituv_request_active_uv(cache, object, mesh);
1094}
1095
1102
1104
1105/* ---------------------------------------------------------------------- */
1108
1110{
1111 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh->runtime->batch_cache);
1112
1113 if (cache == nullptr) {
1114 return;
1115 }
1116
1118 cache->lastmatch = ctime;
1119 }
1120
1122 cache->lastmatch = ctime;
1123 }
1124
1125 if (ctime - cache->lastmatch > U.vbotimeout) {
1127 }
1128
1130 cache->attr_used_over_time.clear();
1131}
1132
1133static void init_empty_dummy_batch(gpu::Batch &batch)
1134{
1135 /* The dummy batch is only used in cases with invalid edit mode mapping, so the overhead of
1136 * creating a vertex buffer shouldn't matter. */
1138 GPU_vertformat_attr_add(&format, "dummy", gpu::VertAttrType::SFLOAT_32);
1140 GPU_vertbuf_data_alloc(*vbo, 1);
1141 /* Avoid the batch being rendered at all. */
1142 GPU_vertbuf_data_len_set(*vbo, 0);
1143
1144 GPU_batch_vertbuf_add(&batch, vbo, true);
1145}
1146
1148 Object &ob,
1149 Mesh &mesh,
1150 const Scene &scene,
1151 const bool is_paint_mode,
1152 const bool use_hide)
1153{
1154 const ToolSettings *ts = scene.toolsettings;
1155
1156 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1157 bool cd_uv_update = false;
1158
1159 /* Early out */
1160 if (cache.batch_requested == 0) {
1161 return;
1162 }
1163
1164 /* Sanity check. */
1165 if ((mesh.runtime->edit_mesh != nullptr) && (ob.mode & OB_MODE_EDIT)) {
1167 }
1168
1169 const bool is_editmode = ob.mode == OB_MODE_EDIT;
1170
1171 DRWBatchFlag batch_requested = cache.batch_requested;
1172 cache.batch_requested = (DRWBatchFlag)0;
1173
1174 if (batch_requested & MBC_SURFACE_WEIGHTS) {
1175 /* Check vertex weights. */
1176 if ((cache.batch.surface_weights != nullptr) && (ts != nullptr)) {
1177 DRW_MeshWeightState wstate;
1178 BLI_assert(ob.type == OB_MESH);
1179 drw_mesh_weight_state_extract(ob, mesh, *ts, is_paint_mode, &wstate);
1183 }
1184 }
1185
1186 if (batch_requested &
1190 {
1191 /* Modifiers will only generate an orco layer if the mesh is deformed. */
1192 if (cache.cd_needed.orco != 0) {
1193 /* Orco is always extracted from final mesh. */
1194 const Mesh *me_final = (mesh.runtime->edit_mesh) ? BKE_object_get_editmesh_eval_final(&ob) :
1195 &mesh;
1196 if (CustomData_get_layer(&me_final->vert_data, CD_ORCO) == nullptr) {
1197 /* Skip orco calculation */
1198 cache.cd_needed.orco = 0;
1199 }
1200 }
1201
1202 /* Verify that all surface batches have needed attribute layers.
1203 */
1204 /* TODO(fclem): We could be a bit smarter here and only do it per
1205 * material. */
1206 bool cd_overlap = mesh_cd_layers_type_overlap(cache.cd_used, cache.cd_needed);
1207 bool attr_overlap = drw_attributes_overlap(&cache.attr_used, &cache.attr_needed);
1208 if (cd_overlap == false || attr_overlap == false) {
1209 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
1210 if ((cache.cd_used.uv & cache.cd_needed.uv) != cache.cd_needed.uv) {
1211 mbc->buff.vbos.remove(VBOType::UVs);
1212 cd_uv_update = true;
1213 }
1214 if ((cache.cd_used.tan & cache.cd_needed.tan) != cache.cd_needed.tan ||
1215 cache.cd_used.tan_orco != cache.cd_needed.tan_orco)
1216 {
1217 mbc->buff.vbos.remove(VBOType::Tangents);
1218 }
1219 if (cache.cd_used.orco != cache.cd_needed.orco) {
1220 mbc->buff.vbos.remove(VBOType::Orco);
1221 }
1222 if (cache.cd_used.sculpt_overlays != cache.cd_needed.sculpt_overlays) {
1223 mbc->buff.vbos.remove(VBOType::SculptData);
1224 }
1225 if (!drw_attributes_overlap(&cache.attr_used, &cache.attr_needed)) {
1226 for (int i = 0; i < GPU_MAX_ATTR; i++) {
1227 mbc->buff.vbos.remove(VBOType(int8_t(VBOType::Attr0) + i));
1228 }
1229 }
1230 }
1231 /* We can't discard batches at this point as they have been
1232 * referenced for drawing. Just clear them in place. */
1233 for (int i = 0; i < cache.mat_len; i++) {
1235 }
1239
1242 }
1245
1247 cache.attr_needed.clear();
1248 }
1249
1250 if ((batch_requested & MBC_EDITUV) || cd_uv_update) {
1251 /* Discard UV batches if sync_selection changes */
1252 const bool is_uvsyncsel = ts && (ts->uv_flag & UV_FLAG_SELECT_SYNC);
1253 if (cd_uv_update || (cache.is_uvsyncsel != is_uvsyncsel)) {
1254 cache.is_uvsyncsel = is_uvsyncsel;
1255 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
1256 mbc->buff.vbos.remove(VBOType::EditUVData);
1257 mbc->buff.vbos.remove(VBOType::FaceDotUV);
1258 mbc->buff.vbos.remove(VBOType::FaceDotEditUVData);
1259 mbc->buff.ibos.remove(IBOType::EditUVTris);
1260 mbc->buff.ibos.remove(IBOType::EditUVLines);
1261 mbc->buff.ibos.remove(IBOType::EditUVPoints);
1262 mbc->buff.ibos.remove(IBOType::EditUVFaceDots);
1263 }
1264 /* We only clear the batches as they may already have been
1265 * referenced. */
1276 cache.batch_ready &= ~MBC_EDITUV;
1277 }
1278 }
1279
1280 /* Second chance to early out */
1281 if ((batch_requested & ~cache.batch_ready) == 0) {
1282 return;
1283 }
1284
1285 /* TODO(pablodp606): This always updates the sculpt normals for regular drawing (non-pbvh::Tree).
1286 * This makes tools that sample the surface per step get wrong normals until a redraw happens.
1287 * Normal updates should be part of the brush loop and only run during the stroke when the
1288 * brush needs to sample the surface. The drawing code should only update the normals
1289 * per redraw when smooth shading is enabled. */
1292 }
1293
1294 /* This is the mesh before modifier evaluation, used to test how the mesh changed during
1295 * evaluation to decide which data is valid to extract. */
1296 const Mesh *orig_edit_mesh = is_editmode ? BKE_object_get_pre_modified_mesh(&ob) : nullptr;
1297
1298 bool do_cage = false;
1299 const Mesh *edit_data_mesh = nullptr;
1300 if (is_editmode) {
1301 const Mesh *eval_cage = DRW_object_get_editmesh_cage_for_drawing(ob);
1302 if (eval_cage && eval_cage != &mesh) {
1303 /* Extract "cage" data separately when it exists and it's not just the same mesh as the
1304 * regular evaluated mesh. Otherwise edit data will be extracted from the final evaluated
1305 * mesh. */
1306 do_cage = true;
1307 edit_data_mesh = eval_cage;
1308 }
1309 else {
1310 edit_data_mesh = &mesh;
1311 }
1312 }
1313
1314 bool do_uvcage = false;
1315 if (is_editmode) {
1316 /* Currently we don't extract UV data from the evaluated mesh unless it's the same mesh as the
1317 * original edit mesh. */
1318 do_uvcage = !(mesh.runtime->is_original_bmesh &&
1319 mesh.runtime->wrapper_type == ME_WRAPPER_TYPE_BMESH);
1320 }
1321
1322 const DRWBatchFlag batches_to_create = batch_requested & ~cache.batch_ready;
1323
1324 const bool do_subdivision = BKE_subsurf_modifier_has_gpu_subdiv(&mesh);
1325
1326 enum class BufferList : int8_t { Final, Cage, UVCage };
1327
1328 struct BatchCreateData {
1329 gpu::Batch &batch;
1330 GPUPrimType prim_type;
1331 BufferList list;
1332 std::optional<IBOType> ibo;
1333 Vector<VBOType> vbos;
1334 };
1335 Vector<BatchCreateData> batch_info;
1336
1337 {
1338 const BufferList list = BufferList::Final;
1339 if (batches_to_create & MBC_SURFACE) {
1340 BatchCreateData batch{*cache.batch.surface,
1342 list,
1345 if (cache.cd_used.uv != 0) {
1346 batch.vbos.append(VBOType::UVs);
1347 }
1348 for (const int i : cache.attr_used.index_range()) {
1349 batch.vbos.append(VBOType(int8_t(VBOType::Attr0) + i));
1350 }
1351 batch_info.append(std::move(batch));
1352 }
1353 if (batches_to_create & MBC_PAINT_OVERLAY_SURFACE) {
1354 BatchCreateData batch{*cache.batch.paint_overlay_surface,
1356 list,
1359 batch_info.append(std::move(batch));
1360 }
1361 if (batches_to_create & MBC_VIEWER_ATTRIBUTE_OVERLAY) {
1362 batch_info.append({*cache.batch.surface_viewer_attribute,
1364 list,
1367 }
1368 if (batches_to_create & MBC_ALL_VERTS) {
1369 batch_info.append(
1370 {*cache.batch.all_verts, GPU_PRIM_POINTS, list, std::nullopt, {VBOType::Position}});
1371 }
1372 if (batches_to_create & MBC_PAINT_OVERLAY_VERTS) {
1373 batch_info.append({*cache.batch.paint_overlay_verts,
1375 list,
1376 std::nullopt,
1378 }
1379 if (batches_to_create & MBC_SCULPT_OVERLAYS) {
1380 batch_info.append({*cache.batch.sculpt_overlays,
1382 list,
1385 }
1386 if (batches_to_create & MBC_ALL_EDGES) {
1387 batch_info.append(
1389 }
1390 if (batches_to_create & MBC_LOOSE_EDGES) {
1391 batch_info.append({*cache.batch.loose_edges,
1393 list,
1396 }
1397 if (batches_to_create & MBC_EDGE_DETECTION) {
1398 batch_info.append({*cache.batch.edge_detection,
1400 list,
1403 }
1404 if (batches_to_create & MBC_SURFACE_WEIGHTS) {
1405 batch_info.append({*cache.batch.surface_weights,
1407 list,
1410 }
1411 if (batches_to_create & MBC_PAINT_OVERLAY_WIRE_LOOPS) {
1412 batch_info.append({*cache.batch.paint_overlay_wire_loops,
1414 list,
1417 }
1418 if (batches_to_create & MBC_WIRE_EDGES) {
1419 batch_info.append({*cache.batch.wire_edges,
1421 list,
1424 }
1425 if (batches_to_create & MBC_WIRE_LOOPS_ALL_UVS) {
1426 BatchCreateData batch{
1428 if (cache.cd_used.uv != 0) {
1429 batch.vbos.append(VBOType::UVs);
1430 }
1431 batch_info.append(std::move(batch));
1432 }
1433 if (batches_to_create & MBC_WIRE_LOOPS_UVS) {
1434 BatchCreateData batch{
1436 if (cache.cd_used.uv != 0) {
1437 batch.vbos.append(VBOType::UVs);
1438 }
1439 batch_info.append(std::move(batch));
1440 }
1441 if (batches_to_create & MBC_WIRE_LOOPS_EDITUVS) {
1442 BatchCreateData batch{
1444 if (cache.cd_used.uv != 0) {
1445 batch.vbos.append(VBOType::UVs);
1446 }
1447 batch_info.append(std::move(batch));
1448 }
1449 if (batches_to_create & MBC_UV_FACES) {
1450 const bool use_face_selection = (mesh.editflag & ME_EDIT_PAINT_FACE_SEL);
1451 /* Sculpt mode does not support selection, therefore the generic `is_paint_mode` check cannot
1452 * be used */
1453 const bool is_face_selectable =
1455 use_face_selection;
1456
1457 const IBOType ibo = is_face_selectable || is_editmode ? IBOType::UVTris : IBOType::Tris;
1458 BatchCreateData batch{*cache.batch.uv_faces, GPU_PRIM_TRIS, list, ibo, {}};
1459 if (cache.cd_used.uv != 0) {
1460 batch.vbos.append(VBOType::UVs);
1461 }
1462 batch_info.append(std::move(batch));
1463 }
1464 if (batches_to_create & MBC_EDIT_MESH_ANALYSIS) {
1465 batch_info.append({*cache.batch.edit_mesh_analysis,
1467 list,
1470 }
1471 }
1472
1473 /* When the mesh doesn't correspond to the object's original mesh (i.e. the mesh was replaced by
1474 * another with the object info node during evaluation), don't extract edit mode data for it.
1475 * That data can be invalid because any original indices (#CD_ORIGINDEX) on the evaluated mesh
1476 * won't correspond to the correct mesh. */
1477 const bool edit_mapping_valid = is_editmode && BKE_editmesh_eval_orig_map_available(
1478 *edit_data_mesh, orig_edit_mesh);
1479
1480 {
1481 const BufferList list = do_cage ? BufferList::Cage : BufferList::Final;
1482 if (batches_to_create & MBC_EDIT_TRIANGLES) {
1483 if (edit_mapping_valid) {
1484 batch_info.append({*cache.batch.edit_triangles,
1486 list,
1489 }
1490 else {
1492 }
1493 }
1494 if (batches_to_create & MBC_EDIT_VERTICES) {
1495 if (edit_mapping_valid) {
1496 BatchCreateData batch{*cache.batch.edit_vertices,
1498 list,
1501 if (!do_subdivision || do_cage) {
1502 batch.vbos.append(VBOType::CornerNormal);
1503 }
1504 batch_info.append(std::move(batch));
1505 }
1506 else {
1508 }
1509 }
1510 if (batches_to_create & MBC_EDIT_EDGES) {
1511 if (edit_mapping_valid) {
1512 BatchCreateData batch{*cache.batch.edit_edges,
1514 list,
1517 batch_info.append(std::move(batch));
1518 }
1519 else {
1521 }
1522 }
1523 if (batches_to_create & MBC_EDIT_VNOR) {
1524 if (edit_mapping_valid) {
1525 batch_info.append({*cache.batch.edit_vnor,
1527 list,
1530 }
1531 else {
1533 }
1534 }
1535 if (batches_to_create & MBC_EDIT_LNOR) {
1536 if (edit_mapping_valid) {
1537 batch_info.append({*cache.batch.edit_lnor,
1539 list,
1542 }
1543 else {
1545 }
1546 }
1547 if (batches_to_create & MBC_EDIT_FACEDOTS) {
1548 if (edit_mapping_valid) {
1549 batch_info.append({*cache.batch.edit_fdots,
1551 list,
1554 }
1555 else {
1557 }
1558 }
1559 if (batches_to_create & MBC_SKIN_ROOTS) {
1560 if (edit_mapping_valid) {
1561 batch_info.append({*cache.batch.edit_skin_roots,
1563 list,
1564 std::nullopt,
1566 }
1567 else {
1569 }
1570 }
1571 if (batches_to_create & MBC_EDIT_SELECTION_VERTS) {
1572 if (is_editmode && !edit_mapping_valid) {
1574 }
1575 else {
1576 batch_info.append({*cache.batch.edit_selection_verts,
1578 list,
1581 }
1582 }
1583 if (batches_to_create & MBC_EDIT_SELECTION_EDGES) {
1584 if (is_editmode && !edit_mapping_valid) {
1586 }
1587 else {
1588 batch_info.append({*cache.batch.edit_selection_edges,
1590 list,
1593 }
1594 }
1595 if (batches_to_create & MBC_EDIT_SELECTION_FACES) {
1596 if (is_editmode && !edit_mapping_valid) {
1598 }
1599 else {
1600 batch_info.append({*cache.batch.edit_selection_faces,
1602 list,
1605 }
1606 }
1607 if (batches_to_create & MBC_EDIT_SELECTION_FACEDOTS) {
1608 if (is_editmode && !edit_mapping_valid) {
1610 }
1611 else {
1612 batch_info.append({*cache.batch.edit_selection_fdots,
1614 list,
1617 }
1618 }
1619 }
1620
1621 {
1627 const BufferList list = do_uvcage ? BufferList::UVCage : BufferList::Final;
1628
1629 if (batches_to_create & MBC_EDITUV_FACES) {
1630 if (edit_mapping_valid) {
1631 batch_info.append({*cache.batch.edituv_faces,
1633 list,
1636 }
1637 else {
1639 }
1640 }
1641 if (batches_to_create & MBC_EDITUV_FACES_STRETCH_AREA) {
1642 if (edit_mapping_valid) {
1643 batch_info.append({*cache.batch.edituv_faces_stretch_area,
1645 list,
1648 }
1649 else {
1651 }
1652 }
1653 if (batches_to_create & MBC_EDITUV_FACES_STRETCH_ANGLE) {
1654 if (edit_mapping_valid) {
1655 batch_info.append({*cache.batch.edituv_faces_stretch_angle,
1657 list,
1660 }
1661 else {
1663 }
1664 }
1665 if (batches_to_create & MBC_EDITUV_EDGES) {
1666 if (edit_mapping_valid) {
1667 batch_info.append({*cache.batch.edituv_edges,
1669 list,
1672 }
1673 else {
1675 }
1676 }
1677 if (batches_to_create & MBC_EDITUV_VERTS) {
1678 if (edit_mapping_valid) {
1679 batch_info.append({*cache.batch.edituv_verts,
1681 list,
1684 }
1685 else {
1687 }
1688 }
1689 if (batches_to_create & MBC_EDITUV_FACEDOTS) {
1690 if (edit_mapping_valid) {
1691 batch_info.append({*cache.batch.edituv_fdots,
1693 list,
1696 }
1697 else {
1699 }
1700 }
1701 }
1702
1703 std::array<VectorSet<IBOType>, 3> ibo_requests;
1704 std::array<VectorSet<VBOType>, 3> vbo_requests;
1705 for (const BatchCreateData &batch : batch_info) {
1706 if (batch.ibo) {
1707 ibo_requests[int(batch.list)].add(*batch.ibo);
1708 }
1709 vbo_requests[int(batch.list)].add_multiple(batch.vbos);
1710 }
1711
1712 if (batches_to_create & MBC_SURFACE_PER_MAT) {
1713 ibo_requests[int(BufferList::Final)].add(IBOType::Tris);
1714 vbo_requests[int(BufferList::Final)].add(VBOType::CornerNormal);
1715 vbo_requests[int(BufferList::Final)].add(VBOType::Position);
1716 for (const int i : cache.attr_used.index_range()) {
1717 vbo_requests[int(BufferList::Final)].add(VBOType(int8_t(VBOType::Attr0) + i));
1718 }
1719 if (cache.cd_used.uv != 0) {
1720 vbo_requests[int(BufferList::Final)].add(VBOType::UVs);
1721 }
1722 if ((cache.cd_used.tan != 0) || (cache.cd_used.tan_orco != 0)) {
1723 vbo_requests[int(BufferList::Final)].add(VBOType::Tangents);
1724 }
1725 if (cache.cd_used.orco != 0) {
1726 vbo_requests[int(BufferList::Final)].add(VBOType::Orco);
1727 }
1728 }
1729
1730 if (do_uvcage) {
1732 scene,
1733 cache,
1734 cache.uv_cage,
1735 ibo_requests[int(BufferList::UVCage)],
1736 vbo_requests[int(BufferList::UVCage)],
1737 ob,
1738 mesh,
1739 is_editmode,
1740 is_paint_mode,
1741 false,
1742 true,
1743 true);
1744 }
1745
1746 if (do_cage) {
1748 scene,
1749 cache,
1750 cache.cage,
1751 ibo_requests[int(BufferList::Cage)],
1752 vbo_requests[int(BufferList::Cage)],
1753 ob,
1754 mesh,
1755 is_editmode,
1756 is_paint_mode,
1757 false,
1758 false,
1759 true);
1760 }
1761
1762 if (do_subdivision) {
1764 mesh,
1765 cache,
1766 cache.final,
1767 ibo_requests[int(BufferList::Final)],
1768 vbo_requests[int(BufferList::Final)],
1769 is_editmode,
1770 is_paint_mode,
1771 true,
1772 false,
1773 do_cage,
1774 ts,
1775 use_hide);
1776 }
1777 else {
1778 /* The subsurf modifier may have been recently removed, or another modifier was added after it,
1779 * so free any potential subdivision cache as it is not needed anymore. */
1781 }
1782
1784 scene,
1785 cache,
1786 cache.final,
1787 ibo_requests[int(BufferList::Final)],
1788 vbo_requests[int(BufferList::Final)],
1789 ob,
1790 mesh,
1791 is_editmode,
1792 is_paint_mode,
1793 true,
1794 false,
1795 use_hide);
1796
1797 std::array<MeshBufferCache *, 3> caches{&cache.final, &cache.cage, &cache.uv_cage};
1798 for (const BatchCreateData &batch : batch_info) {
1799 MeshBufferCache &cache_for_batch = *caches[int(batch.list)];
1800 gpu::IndexBuf *ibo = batch.ibo ? caches[int(batch.list)]->buff.ibos.lookup(*batch.ibo).get() :
1801 nullptr;
1802 GPU_batch_init(&batch.batch, batch.prim_type, nullptr, ibo);
1803 for (const VBOType vbo_request : batch.vbos) {
1805 &batch.batch, cache_for_batch.buff.vbos.lookup(vbo_request).get(), false);
1806 }
1807 }
1808
1809 if (batches_to_create & MBC_SURFACE_PER_MAT) {
1811 gpu::IndexBuf &tris_ibo = *buffers.ibos.lookup(IBOType::Tris);
1813 for (const int material : IndexRange(cache.mat_len)) {
1814 gpu::Batch *batch = cache.surface_per_mat[material];
1815 if (!batch) {
1816 continue;
1817 }
1818 GPU_batch_init(batch, GPU_PRIM_TRIS, nullptr, cache.tris_per_mat[material].get());
1819 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::CornerNormal).get(), false);
1820 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Position).get(), false);
1821 if (cache.cd_used.uv != 0) {
1822 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::UVs).get(), false);
1823 }
1824 if ((cache.cd_used.tan != 0) || (cache.cd_used.tan_orco != 0)) {
1825 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Tangents).get(), false);
1826 }
1827 if (cache.cd_used.orco != 0) {
1828 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Orco).get(), false);
1829 }
1830 for (const int i : cache.attr_used.index_range()) {
1832 batch, buffers.vbos.lookup(VBOType(int8_t(VBOType::Attr0) + i)).get(), false);
1833 }
1834 }
1835 }
1836
1837 cache.batch_ready |= batch_requested;
1838}
1839
1841
1842} // namespace blender::draw
CustomData interface, see also DNA_customdata_types.h.
int CustomData_get_named_layer(const CustomData *data, eCustomDataType type, blender::StringRef name)
const void * CustomData_get_layer(const CustomData *data, eCustomDataType type)
bool CustomData_layer_is_anonymous(const CustomData *data, eCustomDataType type, int n)
int CustomData_get_stencil_layer(const CustomData *data, eCustomDataType type)
int CustomData_get_active_layer(const CustomData *data, eCustomDataType type)
int CustomData_get_render_layer(const CustomData *data, eCustomDataType type)
bool BKE_editmesh_eval_orig_map_available(const Mesh &mesh_eval, const Mesh *mesh_orig)
Definition editmesh.cc:67
General operations, lookup, etc. for materials.
int BKE_id_material_used_with_fallback_eval(const ID &id)
eMeshBatchDirtyMode
Definition BKE_mesh.h:37
@ BKE_MESH_BATCH_DIRTY_UVEDIT_ALL
Definition BKE_mesh.h:42
@ BKE_MESH_BATCH_DIRTY_SELECT_PAINT
Definition BKE_mesh.h:40
@ BKE_MESH_BATCH_DIRTY_SHADING
Definition BKE_mesh.h:41
@ BKE_MESH_BATCH_DIRTY_UVEDIT_SELECT
Definition BKE_mesh.h:43
@ BKE_MESH_BATCH_DIRTY_ALL
Definition BKE_mesh.h:38
@ BKE_MESH_BATCH_DIRTY_SELECT
Definition BKE_mesh.h:39
@ ME_WRAPPER_TYPE_BMESH
General operations, lookup, etc. for blender objects.
const Mesh * BKE_object_get_pre_modified_mesh(const Object *object)
const Mesh * BKE_object_get_editmesh_eval_final(const Object *object)
Functions for dealing with objects and deform verts, used by painting and tools.
bool BKE_object_defgroup_check_lock_relative(const bool *lock_flags, const bool *validmap, int index)
void BKE_object_defgroup_split_locked_validmap(int defbase_tot, const bool *locked, const bool *deform, bool *r_locked, bool *r_unlocked)
bool * BKE_object_defgroup_validmap_get(struct Object *ob, int defbase_tot)
bool * BKE_object_defgroup_lock_flags_get(struct Object *ob, int defbase_tot)
void BKE_object_defgroup_mirror_selection(struct Object *ob, int defbase_tot, const bool *selection, bool *dg_flags_sel, int *r_dg_flags_sel_tot)
bool BKE_object_defgroup_check_lock_relative_multi(int defbase_tot, const bool *lock_flags, const bool *selected, int sel_tot)
bool * BKE_object_defgroup_selected_get(struct Object *ob, int defbase_tot, int *r_dg_flags_sel_tot)
A BVH for high poly meshes.
bool BKE_subsurf_modifier_has_gpu_subdiv(const Mesh *mesh)
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_INLINE
#define LISTBASE_FOREACH(type, var, list)
int BLI_listbase_count(const ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:524
#define ARRAY_SIZE(arr)
#define ELEM(...)
@ CD_PROP_BYTE_COLOR
@ CD_PROP_FLOAT
@ CD_PROP_FLOAT3
@ CD_PROP_INT32_2D
@ CD_PROP_COLOR
@ CD_PROP_QUATERNION
@ CD_PROP_INT32
@ CD_PROP_FLOAT2
@ CD_PROP_INT16_2D
@ CD_AUTO_FROM_NAME
@ ME_EDIT_PAINT_FACE_SEL
#define ME_USING_MIRROR_X_VERTEX_GROUPS(_me)
@ OB_MODE_EDIT
@ OB_MODE_WEIGHT_PAINT
@ OB_MODE_TEXTURE_PAINT
@ OB_MODE_VERTEX_PAINT
Object is a sort of wrapper for general info.
@ OB_MESH
@ UV_FLAG_SELECT_SYNC
#define GPU_BATCH_CLEAR_SAFE(batch)
Definition GPU_batch.hh:183
int GPU_batch_vertbuf_add(blender::gpu::Batch *batch, blender::gpu::VertBuf *vertex_buf, bool own_vbo)
#define GPU_batch_init(batch, primitive_type, vertex_buf, index_buf)
Definition GPU_batch.hh:159
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition GPU_batch.hh:197
ListBase GPU_material_attributes(const GPUMaterial *material)
GPUPrimType
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
@ GPU_PRIM_LINES_ADJ
@ GPU_PRIM_TRIS
static constexpr int GPU_MAX_ATTR
Definition GPU_shader.hh:33
static blender::gpu::VertBuf * GPU_vertbuf_create_with_format(const GPUVertFormat &format)
void GPU_vertbuf_data_len_set(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
uint GPU_vertformat_attr_add(GPUVertFormat *format, blender::StringRef name, blender::gpu::VertAttrType type)
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
#define U
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
IndexRange index_range() const
Definition BLI_array.hh:360
const T & first() const
Definition BLI_array.hh:281
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:419
bool is_empty() const
Definition BLI_array.hh:264
IndexRange index_range() const
void reserve(const int64_t n)
Definition BLI_set.hh:637
bool contains(const Key &key) const
Definition BLI_set.hh:310
bool add(const Key &key)
Definition BLI_set.hh:248
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr const char * c_str() const
void append(const T &value)
#define FOREACH_MESH_BUFFER_CACHE(batch_cache, mbc)
#define MBC_BATCH_LEN
#define MBC_EDITUV
blender::gpu::Batch * DRW_batch_request(blender::gpu::Batch **batch)
const Mesh * DRW_object_get_editmesh_cage_for_drawing(const Object &object)
Extraction of Mesh data into VBO to feed to GPU.
struct @021025263243242147216143265077100330027142264337::@225245033123204053237120173316075113304004012000 batch
format
void * MEM_dupallocN(const void *vmemh)
Definition mallocn.cc:143
pbvh::Tree * pbvh_get(Object &object)
Definition paint.cc:3052
void update_normals_from_eval(Object &object_eval, Tree &pbvh)
Definition pbvh.cc:1264
void DRW_mesh_batch_cache_validate(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edituv_faces_stretch_angle(Object &object, Mesh &mesh)
void drw_attributes_add_request(VectorSet< std::string > *attrs, const StringRef name)
const CustomData & mesh_cd_ldata_get_from_mesh(const Mesh &mesh)
static void drw_mesh_weight_state_extract(Object &ob, Mesh &mesh, const ToolSettings &ts, bool paint_mode, DRW_MeshWeightState *wstate)
static void init_empty_dummy_batch(gpu::Batch &batch)
BLI_INLINE bool mesh_cd_layers_type_equal(DRW_MeshCDMask a, DRW_MeshCDMask b)
blender::gpu::Batch * DRW_mesh_batch_cache_get_loose_edges(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edituv_edges(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_texpaint_single(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_edges(Mesh &mesh)
static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache &cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edituv_facedots(Object &object, Mesh &mesh)
static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
blender::gpu::Batch * DRW_mesh_batch_cache_get_facedots_with_select_id(Mesh &mesh)
static void mesh_cd_calc_active_uv_layer(const Object &object, const Mesh &mesh, DRW_MeshCDMask &cd_used)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edituv_wireframe(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_facedots(Mesh &mesh)
void draw_subdiv_cache_free(DRWSubdivCache &cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edituv_verts(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface(Mesh &mesh)
BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
static void texpaint_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
static void mesh_cd_calc_edit_uv_layer(const Mesh &, DRW_MeshCDMask *cd_used)
static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache &cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_weights(Mesh &mesh)
Span< gpu::Batch * > DRW_mesh_batch_cache_get_surface_shaded(Object &object, Mesh &mesh, Span< const GPUMaterial * > materials)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edituv_faces(Object &object, Mesh &mesh)
const CustomData & mesh_cd_edata_get_from_mesh(const Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_vertpaint(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_wireframes_face(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_sculpt(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_all_edges(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_paint_overlay_verts(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_loop_normals(Mesh &mesh)
static bool drw_mesh_weight_state_compare(const DRW_MeshWeightState *a, const DRW_MeshWeightState *b)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edge_detection(Mesh &mesh, bool *r_is_manifold)
void DRW_mesh_batch_cache_free_old(Mesh *mesh, int ctime)
void DRW_mesh_batch_cache_free(void *batch_cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_triangles(Mesh &mesh)
Span< gpu::Batch * > DRW_mesh_batch_cache_get_surface_texpaint(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_vertices(Mesh &mesh)
static void mesh_batch_cache_init(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_all_uv_wireframe(Object &object, Mesh &mesh)
static void mesh_batch_cache_clear(MeshBatchCache &cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_triangles_with_select_id(Mesh &mesh)
static void mesh_buffer_cache_clear(MeshBufferCache *mbc)
static void mesh_batch_cache_discard_uvedit(MeshBatchCache &cache)
BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
static void mesh_batch_cache_check_vertex_group(MeshBatchCache &cache, const DRW_MeshWeightState *wstate)
static void drw_mesh_weight_state_copy(DRW_MeshWeightState *wstate_dst, const DRW_MeshWeightState *wstate_src)
BLI_INLINE void mesh_cd_layers_type_merge(DRW_MeshCDMask *a, DRW_MeshCDMask b)
const Mesh & editmesh_final_or_this(const Object &object, const Mesh &mesh)
static void mesh_cd_calc_active_mask_uv_layer(const Object &object, const Mesh &mesh, DRW_MeshCDMask &cd_used)
bool drw_attributes_overlap(const VectorSet< std::string > *a, const VectorSet< std::string > *b)
static void mesh_batch_cache_request_surface_batches(Mesh &mesh, MeshBatchCache &cache)
void DRW_create_subdivision(Object &ob, Mesh &mesh, MeshBatchCache &batch_cache, MeshBufferCache &mbc, const Span< IBOType > ibo_requests, const Span< VBOType > vbo_requests, const bool is_editmode, const bool is_paint_mode, const bool do_final, const bool do_uvedit, const bool do_cage, const ToolSettings *ts, const bool use_hide)
static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object &object, const Mesh &mesh, const Span< const GPUMaterial * > materials, VectorSet< std::string > *attributes)
static void request_active_and_default_color_attributes(const Object &object, const Mesh &mesh, VectorSet< std::string > &attributes)
blender::gpu::Batch * DRW_mesh_batch_cache_get_verts_with_select_id(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_sculpt_overlays(Mesh &mesh)
static void discard_buffers(MeshBatchCache &cache, const Span< VBOType > vbos, const Span< IBOType > ibos)
blender::gpu::Batch * DRW_mesh_batch_cache_get_paint_overlay_surface(Mesh &mesh)
void DRW_mesh_batch_cache_dirty_tag(Mesh *mesh, eMeshBatchDirtyMode mode)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_viewer_attribute(Mesh &mesh)
const CustomData & mesh_cd_vdata_get_from_mesh(const Mesh &mesh)
void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph, Object &ob, Mesh &mesh, const Scene &scene, bool is_paint_mode, bool use_hide)
void DRW_mesh_get_attributes(const Object &object, const Mesh &mesh, const Span< const GPUMaterial * > materials, VectorSet< std::string > *r_attrs, DRW_MeshCDMask *r_cd_needed)
static void mesh_batch_cache_free_subdiv_cache(MeshBatchCache &cache)
void mesh_buffer_cache_create_requested(TaskGraph &task_graph, const Scene &scene, MeshBatchCache &cache, MeshBufferCache &mbc, Span< IBOType > ibo_requests, Span< VBOType > vbo_requests, Object &object, Mesh &mesh, bool is_editmode, bool is_paint_mode, bool do_final, bool do_uvedit, bool use_hide)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_vert_normals(Mesh &mesh)
static MeshBatchCache * mesh_batch_cache_get(Mesh &mesh)
static void drw_mesh_weight_state_clear(DRW_MeshWeightState *wstate)
void drw_attributes_merge(VectorSet< std::string > *dst, const VectorSet< std::string > *src)
blender::gpu::Batch * DRW_mesh_batch_cache_get_uv_faces(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edges_with_select_id(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Object &object, Mesh &mesh, float **tot_area, float **tot_uv_area)
blender::gpu::Batch * DRW_mesh_batch_cache_get_paint_overlay_edges(Mesh &mesh)
static void edituv_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
static bool mesh_batch_cache_valid(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_all_verts(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_uv_wireframe(Object &object, Mesh &mesh)
bool drw_custom_data_match_attribute(const CustomData &custom_data, const StringRef name, int *r_layer_index, eCustomDataType *r_type)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_skin_roots(Mesh &mesh)
const CustomData & mesh_cd_pdata_get_from_mesh(const Mesh &mesh)
void create_material_subranges(const SortedFaceData &face_sorted, gpu::IndexBuf &tris_ibo, MutableSpan< gpu::IndexBufPtr > ibos)
const char * name
MeshRuntimeHandle * runtime
char * default_color_attribute
ListBase vertex_group_names
char editflag
CustomData vert_data
int vertex_group_active_index
char * active_color_attribute
struct ToolSettings * toolsettings
VectorSet< std::string > attr_used_over_time
Array< gpu::IndexBufPtr > tris_per_mat
VectorSet< std::string > attr_used
Array< gpu::Batch * > surface_per_mat
VectorSet< std::string > attr_needed
Map< IBOType, std::unique_ptr< gpu::IndexBuf, gpu::IndexBufDeleter > > ibos
Map< VBOType, std::unique_ptr< gpu::VertBuf, gpu::VertBufDeleter > > vbos
i
Definition text_draw.cc:230
char * buffers[2]