Blender V4.5
draw_cache_impl_mesh.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2017 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include <array>
12#include <optional>
13
14#include "MEM_guardedalloc.h"
15
16#include "BLI_index_range.hh"
17#include "BLI_listbase.h"
18#include "BLI_span.hh"
19#include "BLI_string_ref.hh"
20
21#include "DNA_mesh_types.h"
22#include "DNA_object_types.h"
23#include "DNA_scene_types.h"
24#include "DNA_userdef_types.h"
25
26#include "BKE_attribute.hh"
27#include "BKE_customdata.hh"
28#include "BKE_editmesh.hh"
29#include "BKE_material.hh"
30#include "BKE_mesh.hh"
31#include "BKE_object.hh"
32#include "BKE_object_deform.h"
33#include "BKE_paint.hh"
34#include "BKE_paint_bvh.hh"
36
37#include "atomic_ops.h"
38
39#include "GPU_batch.hh"
40#include "GPU_material.hh"
41
42#include "DRW_render.hh"
43
44#include "draw_cache_extract.hh"
45#include "draw_cache_inline.hh"
46#include "draw_subdivision.hh"
47
48#include "draw_cache_impl.hh" /* own include */
50
52
53namespace blender::draw {
54
55/* ---------------------------------------------------------------------- */
58
59#define TRIS_PER_MAT_INDEX BUFFER_LEN
60
61static void mesh_batch_cache_clear(MeshBatchCache &cache);
62
64 const Span<VBOType> vbos,
65 const Span<IBOType> ibos)
66{
67 Set<const void *, 16> buffer_ptrs;
68 buffer_ptrs.reserve(vbos.size() + ibos.size());
69 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
70 for (const VBOType vbo : vbos) {
71 if (const auto *buffer = mbc->buff.vbos.lookup_ptr(vbo)) {
72 buffer_ptrs.add(buffer->get());
73 }
74 }
75 }
76 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
77 for (const IBOType ibo : ibos) {
78 if (const auto *buffer = mbc->buff.ibos.lookup_ptr(ibo)) {
79 buffer_ptrs.add(buffer->get());
80 }
81 }
82 }
83
84 const auto batch_contains_data = [&](gpu::Batch &batch) {
85 if (buffer_ptrs.contains(batch.elem)) {
86 return true;
87 }
88 if (std::any_of(batch.verts, batch.verts + ARRAY_SIZE(batch.verts), [&](gpu::VertBuf *vbo) {
89 return vbo && buffer_ptrs.contains(vbo);
90 }))
91 {
92 return true;
93 }
94 return false;
95 };
96
97 for (const int i : IndexRange(MBC_BATCH_LEN)) {
98 gpu::Batch *batch = ((gpu::Batch **)&cache.batch)[i];
99 if (batch && batch_contains_data(*batch)) {
100 GPU_BATCH_DISCARD_SAFE(((gpu::Batch **)&cache.batch)[i]);
101 cache.batch_ready &= ~DRWBatchFlag(uint64_t(1u) << i);
102 }
103 }
104
105 if (!cache.surface_per_mat.is_empty()) {
106 if (cache.surface_per_mat.first() && batch_contains_data(*cache.surface_per_mat.first())) {
107 /* The format for all `surface_per_mat` batches is the same, discard them all. */
108 for (const int i : cache.surface_per_mat.index_range()) {
110 }
112 }
113 }
114
115 for (const VBOType vbo : vbos) {
116 cache.final.buff.vbos.remove(vbo);
117 cache.cage.buff.vbos.remove(vbo);
118 cache.uv_cage.buff.vbos.remove(vbo);
119 }
120 for (const IBOType ibo : ibos) {
121 cache.final.buff.ibos.remove(ibo);
122 cache.cage.buff.ibos.remove(ibo);
123 cache.uv_cage.buff.ibos.remove(ibo);
124 }
125}
126
127/* Return true if all layers in _b_ are inside _a_. */
129{
130 return (*((uint32_t *)&a) & *((uint32_t *)&b)) == *((uint32_t *)&b);
131}
132
134{
135 return *((uint32_t *)&a) == *((uint32_t *)&b);
136}
137
139{
140 uint32_t *a_p = (uint32_t *)a;
141 uint32_t *b_p = (uint32_t *)&b;
143}
144
146{
147 *((uint32_t *)a) = 0;
148}
149
150static void mesh_cd_calc_edit_uv_layer(const Mesh & /*mesh*/, DRW_MeshCDMask *cd_used)
151{
152 cd_used->edit_uv = 1;
153}
154
155static void mesh_cd_calc_active_uv_layer(const Object &object,
156 const Mesh &mesh,
157 DRW_MeshCDMask &cd_used)
158{
159 const Mesh &me_final = editmesh_final_or_this(object, mesh);
160 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
161 int layer = CustomData_get_active_layer(&cd_ldata, CD_PROP_FLOAT2);
162 if (layer != -1) {
163 cd_used.uv |= (1 << layer);
164 }
165}
166
168 const Mesh &mesh,
169 DRW_MeshCDMask &cd_used)
170{
171 const Mesh &me_final = editmesh_final_or_this(object, mesh);
172 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
173 int layer = CustomData_get_stencil_layer(&cd_ldata, CD_PROP_FLOAT2);
174 if (layer != -1) {
175 cd_used.uv |= (1 << layer);
176 }
177}
178
180 const Mesh &mesh,
181 const Span<const GPUMaterial *> materials,
182 VectorSet<std::string> *attributes)
183{
184 const Mesh &me_final = editmesh_final_or_this(object, mesh);
185 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
186 const CustomData &cd_pdata = mesh_cd_pdata_get_from_mesh(me_final);
187 const CustomData &cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
188 const CustomData &cd_edata = mesh_cd_edata_get_from_mesh(me_final);
189
190 /* See: DM_vertex_attributes_from_gpu for similar logic */
191 DRW_MeshCDMask cd_used;
193
194 const StringRefNull default_color_name = me_final.default_color_attribute ?
195 me_final.default_color_attribute :
196 "";
197
198 for (const GPUMaterial *gpumat : materials) {
199 if (gpumat == nullptr) {
200 continue;
201 }
202 ListBase gpu_attrs = GPU_material_attributes(gpumat);
203 LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
204 StringRef name = gpu_attr->name;
205 eCustomDataType type = eCustomDataType(gpu_attr->type);
206 int layer = -1;
207 std::optional<bke::AttrDomain> domain;
208
209 if (gpu_attr->is_default_color) {
210 name = default_color_name.c_str();
211 }
212
213 if (type == CD_AUTO_FROM_NAME) {
214 /* We need to deduce what exact layer is used.
215 *
216 * We do it based on the specified name.
217 */
218 if (!name.is_empty()) {
219 layer = CustomData_get_named_layer(&cd_ldata, CD_PROP_FLOAT2, name);
220 type = CD_MTFACE;
221
222 if (layer == -1) {
223 /* Try to match a generic attribute, we use the first attribute domain with a
224 * matching name. */
225 if (drw_custom_data_match_attribute(cd_vdata, name, &layer, &type)) {
226 domain = bke::AttrDomain::Point;
227 }
228 else if (drw_custom_data_match_attribute(cd_ldata, name, &layer, &type)) {
230 }
231 else if (drw_custom_data_match_attribute(cd_pdata, name, &layer, &type)) {
232 domain = bke::AttrDomain::Face;
233 }
234 else if (drw_custom_data_match_attribute(cd_edata, name, &layer, &type)) {
235 domain = bke::AttrDomain::Edge;
236 }
237 else {
238 layer = -1;
239 }
240 }
241
242 if (layer == -1) {
243 continue;
244 }
245 }
246 else {
247 /* Fall back to the UV layer, which matches old behavior. */
248 type = CD_MTFACE;
249 }
250 }
251
252 switch (type) {
253 case CD_MTFACE: {
254 if (layer == -1) {
255 layer = !name.is_empty() ?
258 }
259 if (layer != -1 && !CustomData_layer_is_anonymous(&cd_ldata, CD_PROP_FLOAT2, layer)) {
260 cd_used.uv |= (1 << layer);
261 }
262 break;
263 }
264 case CD_TANGENT: {
265 if (layer == -1) {
266 layer = !name.is_empty() ?
269
270 /* Only fall back to orco (below) when we have no UV layers, see: #56545 */
271 if (layer == -1 && !name.is_empty()) {
273 }
274 }
275 if (layer != -1) {
276 cd_used.tan |= (1 << layer);
277 }
278 else {
279 /* no UV layers at all => requesting orco */
280 cd_used.tan_orco = 1;
281 cd_used.orco = 1;
282 }
283 break;
284 }
285
286 case CD_ORCO: {
287 cd_used.orco = 1;
288 break;
289 }
291 case CD_PROP_COLOR:
293 case CD_PROP_FLOAT3:
294 case CD_PROP_BOOL:
295 case CD_PROP_INT8:
296 case CD_PROP_INT32:
297 case CD_PROP_INT16_2D:
298 case CD_PROP_INT32_2D:
299 case CD_PROP_FLOAT:
300 case CD_PROP_FLOAT2: {
301 if (layer != -1 && domain.has_value()) {
302 drw_attributes_add_request(attributes, name);
303 }
304 break;
305 }
306 default:
307 break;
308 }
309 }
310 }
311 return cd_used;
312}
313
315
316/* ---------------------------------------------------------------------- */
319
322{
326
327 memset(wstate, 0, sizeof(*wstate));
328
329 wstate->defgroup_active = -1;
330}
331
334 const DRW_MeshWeightState *wstate_src)
335{
336 MEM_SAFE_FREE(wstate_dst->defgroup_sel);
337 MEM_SAFE_FREE(wstate_dst->defgroup_locked);
338 MEM_SAFE_FREE(wstate_dst->defgroup_unlocked);
339
340 memcpy(wstate_dst, wstate_src, sizeof(*wstate_dst));
341
342 if (wstate_src->defgroup_sel) {
343 wstate_dst->defgroup_sel = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_sel));
344 }
345 if (wstate_src->defgroup_locked) {
346 wstate_dst->defgroup_locked = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_locked));
347 }
348 if (wstate_src->defgroup_unlocked) {
349 wstate_dst->defgroup_unlocked = static_cast<bool *>(
350 MEM_dupallocN(wstate_src->defgroup_unlocked));
351 }
352}
353
/* Compare two boolean flag arrays of `size` elements.
 * Two null pointers compare equal; a null and a non-null array do not.
 * Otherwise the arrays are compared element-wise. */
static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
{
  if (array1 == nullptr || array2 == nullptr) {
    /* Equal only when both are absent. */
    return array1 == array2;
  }
  return memcmp(array1, array2, size * sizeof(bool)) == 0;
}
359
362 const DRW_MeshWeightState *b)
363{
364 return a->defgroup_active == b->defgroup_active && a->defgroup_len == b->defgroup_len &&
365 a->flags == b->flags && a->alert_mode == b->alert_mode &&
366 a->defgroup_sel_count == b->defgroup_sel_count &&
367 drw_mesh_flags_equal(a->defgroup_sel, b->defgroup_sel, a->defgroup_len) &&
368 drw_mesh_flags_equal(a->defgroup_locked, b->defgroup_locked, a->defgroup_len) &&
369 drw_mesh_flags_equal(a->defgroup_unlocked, b->defgroup_unlocked, a->defgroup_len);
370}
371
373 Object &ob, Mesh &mesh, const ToolSettings &ts, bool paint_mode, DRW_MeshWeightState *wstate)
374{
375 /* Extract complete vertex weight group selection state and mode flags. */
376 memset(wstate, 0, sizeof(*wstate));
377
380
381 wstate->alert_mode = ts.weightuser;
382
383 if (paint_mode && ts.multipaint) {
384 /* Multi-paint needs to know all selected bones, not just the active group.
385 * This is actually a relatively expensive operation, but caching would be difficult. */
387 &ob, wstate->defgroup_len, &wstate->defgroup_sel_count);
388
389 if (wstate->defgroup_sel_count > 1) {
392
395 wstate->defgroup_len,
396 wstate->defgroup_sel,
397 wstate->defgroup_sel,
398 &wstate->defgroup_sel_count);
399 }
400 }
401 /* With only one selected bone Multi-paint reverts to regular mode. */
402 else {
403 wstate->defgroup_sel_count = 0;
405 }
406 }
407
408 if (paint_mode && ts.wpaint_lock_relative) {
409 /* Set of locked vertex groups for the lock relative mode. */
412
413 /* Check that a deform group is active, and none of selected groups are locked. */
415 wstate->defgroup_locked, wstate->defgroup_unlocked, wstate->defgroup_active) &&
417 wstate->defgroup_locked,
418 wstate->defgroup_sel,
419 wstate->defgroup_sel_count))
420 {
422
423 /* Compute the set of locked and unlocked deform vertex groups. */
425 wstate->defgroup_locked,
426 wstate->defgroup_unlocked,
427 wstate->defgroup_locked, /* out */
428 wstate->defgroup_unlocked);
429 }
430 else {
433 }
434 }
435}
436
438
439/* ---------------------------------------------------------------------- */
442
443/* gpu::Batch cache management. */
444
445static bool mesh_batch_cache_valid(Mesh &mesh)
446{
447 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
448
449 if (cache == nullptr) {
450 return false;
451 }
452
453 /* NOTE: bke::pbvh::Tree draw data should not be checked here. */
454
455 if (cache->is_editmode != (mesh.runtime->edit_mesh != nullptr)) {
456 return false;
457 }
458
459 if (cache->is_dirty) {
460 return false;
461 }
462
464 return false;
465 }
466
467 return true;
468}
469
470static void mesh_batch_cache_init(Mesh &mesh)
471{
472 if (!mesh.runtime->batch_cache) {
473 mesh.runtime->batch_cache = MEM_new<MeshBatchCache>(__func__);
474 }
475 else {
476 *static_cast<MeshBatchCache *>(mesh.runtime->batch_cache) = {};
477 }
478 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
479
480 cache->is_editmode = mesh.runtime->edit_mesh != nullptr;
481
482 if (cache->is_editmode == false) {
483 // cache->edge_len = mesh_render_edges_len_get(mesh);
484 // cache->tri_len = mesh_render_corner_tris_len_get(mesh);
485 // cache->face_len = mesh_render_faces_len_get(mesh);
486 // cache->vert_len = mesh_render_verts_len_get(mesh);
487 }
488
490 cache->surface_per_mat = Array<gpu::Batch *>(cache->mat_len, nullptr);
491 cache->tris_per_mat.reinitialize(cache->mat_len);
492
493 cache->is_dirty = false;
494 cache->batch_ready = (DRWBatchFlag)0;
495 cache->batch_requested = (DRWBatchFlag)0;
496
498}
499
501{
502 if (!mesh_batch_cache_valid(mesh)) {
503 if (mesh.runtime->batch_cache) {
504 mesh_batch_cache_clear(*static_cast<MeshBatchCache *>(mesh.runtime->batch_cache));
505 }
507 }
508}
509
511{
512 return static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
513}
514
516 const DRW_MeshWeightState *wstate)
517{
518 if (!drw_mesh_weight_state_compare(&cache.weight_state, wstate)) {
519 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
520 mbc->buff.vbos.remove(VBOType::VertexGroupWeight);
521 }
523
525
527 }
528}
529
531{
534
535 /* If there are only a few materials at most, just request batches for everything. However, if
536 * the maximum material index is large, detect the actually used material indices first and only
537 * request those. This reduces the overhead of dealing with all these batches down the line. */
538 if (cache.mat_len < 16) {
539 for (int i = 0; i < cache.mat_len; i++) {
541 }
542 }
543 else {
544 const VectorSet<int> &used_material_indices = mesh.material_indices_used();
545 for (const int i : used_material_indices) {
547 }
548 }
549}
550
555
557{
558 discard_buffers(cache,
571
572 cache.tot_area = 0.0f;
573 cache.tot_uv_area = 0.0f;
574
575 /* We discarded the vbo.uv so we need to reset the cd_used flag. */
576 cache.cd_used.uv = 0;
577 cache.cd_used.edit_uv = 0;
578}
579
591
593{
594 if (!mesh->runtime->batch_cache) {
595 return;
596 }
597 MeshBatchCache &cache = *static_cast<MeshBatchCache *>(mesh->runtime->batch_cache);
598 switch (mode) {
601
602 /* Because visible UVs depends on edit mode selection, discard topology. */
604 break;
606 /* Paint mode selection flag is packed inside the nor attribute.
607 * Note that it can be slow if auto smooth is enabled. (see #63946) */
609 break;
611 cache.is_dirty = true;
612 break;
616 break;
619 break;
622 break;
623 default:
624 BLI_assert(0);
625 }
626}
627
629{
630 mbc->buff.ibos.clear();
631 mbc->buff.vbos.clear();
632
633 mbc->loose_geom = {};
634 mbc->face_sorted = {};
635}
636
638{
639 if (cache.subdiv_cache) {
641 MEM_delete(cache.subdiv_cache);
642 cache.subdiv_cache = nullptr;
643 }
644}
645
647{
648 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
650 }
651
652 cache.tris_per_mat = {};
653
654 for (int i = 0; i < sizeof(cache.batch) / sizeof(void *); i++) {
655 gpu::Batch **batch = (gpu::Batch **)&cache.batch;
657 }
658 for (const int i : cache.surface_per_mat.index_range()) {
660 }
661
664 cache.surface_per_mat = {};
665 cache.mat_len = 0;
666
667 cache.batch_ready = (DRWBatchFlag)0;
669
671}
672
673void DRW_mesh_batch_cache_free(void *batch_cache)
674{
675 MeshBatchCache *cache = static_cast<MeshBatchCache *>(batch_cache);
677 MEM_delete(cache);
678}
679
681
682/* ---------------------------------------------------------------------- */
685
686static void texpaint_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
687{
688 DRW_MeshCDMask cd_needed;
689 mesh_cd_layers_type_clear(&cd_needed);
690 mesh_cd_calc_active_uv_layer(object, mesh, cd_needed);
691
692 BLI_assert(cd_needed.uv != 0 &&
693 "No uv layer available in texpaint, but batches requested anyway!");
694
695 mesh_cd_calc_active_mask_uv_layer(object, mesh, cd_needed);
696 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
697}
698
700 const Mesh &mesh,
701 VectorSet<std::string> &attributes)
702{
703 const Mesh &me_final = editmesh_final_or_this(object, mesh);
704 const CustomData &cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
705 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
706
707 auto request_color_attribute = [&](const StringRef name) {
708 if (!name.is_empty()) {
709 int layer_index;
710 eCustomDataType type;
711 if (drw_custom_data_match_attribute(cd_vdata, name, &layer_index, &type)) {
712 drw_attributes_add_request(&attributes, name);
713 }
714 else if (drw_custom_data_match_attribute(cd_ldata, name, &layer_index, &type)) {
715 drw_attributes_add_request(&attributes, name);
716 }
717 }
718 };
719
720 request_color_attribute(me_final.active_color_attribute);
721 request_color_attribute(me_final.default_color_attribute);
722}
723
725{
726 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
728 return DRW_batch_request(&cache.batch.all_verts);
729}
730
732{
733 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
735 return DRW_batch_request(&cache.batch.all_edges);
736}
737
739{
740 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
742
743 return cache.batch.surface;
744}
745
747{
748 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
749 if (cache.no_loose_wire) {
750 return nullptr;
751 }
753 return DRW_batch_request(&cache.batch.loose_edges);
754}
755
762
763gpu::Batch *DRW_mesh_batch_cache_get_edge_detection(Mesh &mesh, bool *r_is_manifold)
764{
765 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
767 /* Even if is_manifold is not correct (not updated),
768 * the default (not manifold) is just the worst case. */
769 if (r_is_manifold) {
770 *r_is_manifold = cache.is_manifold;
771 }
773}
774
781
788
790 const Mesh &mesh,
791 const Span<const GPUMaterial *> materials,
792 VectorSet<std::string> *r_attrs,
793 DRW_MeshCDMask *r_cd_needed)
794{
795 VectorSet<std::string> attrs_needed;
796 DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(object, mesh, materials, &attrs_needed);
797
798 if (r_attrs) {
799 *r_attrs = attrs_needed;
800 }
801
802 if (r_cd_needed) {
803 *r_cd_needed = cd_needed;
804 }
805}
806
808 Object &object, Mesh &mesh, const Span<const GPUMaterial *> materials)
809{
810 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
811 VectorSet<std::string> attrs_needed;
812 DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(object, mesh, materials, &attrs_needed);
813
814 BLI_assert(materials.size() == cache.mat_len);
815
816 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
817 drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
819 return cache.surface_per_mat;
820}
821
829
831{
832 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
833 texpaint_request_active_uv(cache, object, mesh);
835 return cache.batch.surface;
836}
837
839{
840 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
841
842 VectorSet<std::string> attrs_needed{};
843 request_active_and_default_color_attributes(object, mesh, attrs_needed);
844
845 drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
846
848 return cache.batch.surface;
849}
850
852{
853 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
854
855 VectorSet<std::string> attrs_needed{};
856 request_active_and_default_color_attributes(object, mesh, attrs_needed);
857
858 drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
859
861 return cache.batch.surface;
862}
863
865{
866 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
867
868 cache.cd_needed.sculpt_overlays = 1;
871
872 return cache.batch.sculpt_overlays;
873}
874
884
886
887/* ---------------------------------------------------------------------- */
890
897
899{
900 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
902 return DRW_batch_request(&cache.batch.edit_edges);
903}
904
911
918
925
932
939
941
942/* ---------------------------------------------------------------------- */
945
952
959
966
973
975
976/* ---------------------------------------------------------------------- */
979
980static void edituv_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
981{
982 DRW_MeshCDMask cd_needed;
983 mesh_cd_layers_type_clear(&cd_needed);
984 mesh_cd_calc_active_uv_layer(object, mesh, cd_needed);
985 mesh_cd_calc_edit_uv_layer(mesh, &cd_needed);
986
987 BLI_assert(cd_needed.edit_uv != 0 &&
988 "No uv layer available in edituv, but batches requested anyway!");
989
990 mesh_cd_calc_active_mask_uv_layer(object, mesh, cd_needed);
991 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
992}
993
995 Mesh &mesh,
996 float **tot_area,
997 float **tot_uv_area)
998{
999 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1000 edituv_request_active_uv(cache, object, mesh);
1002
1003 if (tot_area != nullptr) {
1004 *tot_area = &cache.tot_area;
1005 }
1006 if (tot_uv_area != nullptr) {
1007 *tot_uv_area = &cache.tot_uv_area;
1008 }
1010}
1011
1019
1021{
1022 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1023 edituv_request_active_uv(cache, object, mesh);
1025 return DRW_batch_request(&cache.batch.edituv_faces);
1026}
1027
1029{
1030 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1031 edituv_request_active_uv(cache, object, mesh);
1033 return DRW_batch_request(&cache.batch.edituv_edges);
1034}
1035
1037{
1038 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1039 edituv_request_active_uv(cache, object, mesh);
1041 return DRW_batch_request(&cache.batch.edituv_verts);
1042}
1043
1045{
1046 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1047 edituv_request_active_uv(cache, object, mesh);
1049 return DRW_batch_request(&cache.batch.edituv_fdots);
1050}
1051
1053{
1054 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1055 edituv_request_active_uv(cache, object, mesh);
1057 return DRW_batch_request(&cache.batch.uv_faces);
1058}
1059
1061{
1062 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1063 edituv_request_active_uv(cache, object, mesh);
1066}
1067
1069{
1070 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1071 edituv_request_active_uv(cache, object, mesh);
1073 return DRW_batch_request(&cache.batch.wire_loops_uvs);
1074}
1075
1077{
1078 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1079 edituv_request_active_uv(cache, object, mesh);
1082}
1083
1085{
1086 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1088 return DRW_batch_request(&cache.batch.wire_loops);
1089}
1090
1092
1093/* ---------------------------------------------------------------------- */
1096
1098{
1099 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh->runtime->batch_cache);
1100
1101 if (cache == nullptr) {
1102 return;
1103 }
1104
1106 cache->lastmatch = ctime;
1107 }
1108
1110 cache->lastmatch = ctime;
1111 }
1112
1113 if (ctime - cache->lastmatch > U.vbotimeout) {
1115 }
1116
1118 cache->attr_used_over_time.clear();
1119}
1120
1121static void init_empty_dummy_batch(gpu::Batch &batch)
1122{
1123 /* The dummy batch is only used in cases with invalid edit mode mapping, so the overhead of
1124 * creating a vertex buffer shouldn't matter. */
1128 GPU_vertbuf_data_alloc(*vbo, 1);
1129 /* Avoid the batch being rendered at all. */
1130 GPU_vertbuf_data_len_set(*vbo, 0);
1131
1132 GPU_batch_vertbuf_add(&batch, vbo, true);
1133}
1134
1136 Object &ob,
1137 Mesh &mesh,
1138 const Scene &scene,
1139 const bool is_paint_mode,
1140 const bool use_hide)
1141{
1142 const ToolSettings *ts = scene.toolsettings;
1143
1144 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1145 bool cd_uv_update = false;
1146
1147 /* Early out */
1148 if (cache.batch_requested == 0) {
1149 return;
1150 }
1151
1152 /* Sanity check. */
1153 if ((mesh.runtime->edit_mesh != nullptr) && (ob.mode & OB_MODE_EDIT)) {
1155 }
1156
1157 const bool is_editmode = ob.mode == OB_MODE_EDIT;
1158
1159 DRWBatchFlag batch_requested = cache.batch_requested;
1160 cache.batch_requested = (DRWBatchFlag)0;
1161
1162 if (batch_requested & MBC_SURFACE_WEIGHTS) {
1163 /* Check vertex weights. */
1164 if ((cache.batch.surface_weights != nullptr) && (ts != nullptr)) {
1165 DRW_MeshWeightState wstate;
1166 BLI_assert(ob.type == OB_MESH);
1167 drw_mesh_weight_state_extract(ob, mesh, *ts, is_paint_mode, &wstate);
1171 }
1172 }
1173
1174 if (batch_requested &
1178 {
1179 /* Modifiers will only generate an orco layer if the mesh is deformed. */
1180 if (cache.cd_needed.orco != 0) {
1181 /* Orco is always extracted from final mesh. */
1182 const Mesh *me_final = (mesh.runtime->edit_mesh) ? BKE_object_get_editmesh_eval_final(&ob) :
1183 &mesh;
1184 if (CustomData_get_layer(&me_final->vert_data, CD_ORCO) == nullptr) {
1185 /* Skip orco calculation */
1186 cache.cd_needed.orco = 0;
1187 }
1188 }
1189
1190 /* Verify that all surface batches have needed attribute layers.
1191 */
1192 /* TODO(fclem): We could be a bit smarter here and only do it per
1193 * material. */
1194 bool cd_overlap = mesh_cd_layers_type_overlap(cache.cd_used, cache.cd_needed);
1195 bool attr_overlap = drw_attributes_overlap(&cache.attr_used, &cache.attr_needed);
1196 if (cd_overlap == false || attr_overlap == false) {
1197 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
1198 if ((cache.cd_used.uv & cache.cd_needed.uv) != cache.cd_needed.uv) {
1199 mbc->buff.vbos.remove(VBOType::UVs);
1200 cd_uv_update = true;
1201 }
1202 if ((cache.cd_used.tan & cache.cd_needed.tan) != cache.cd_needed.tan ||
1203 cache.cd_used.tan_orco != cache.cd_needed.tan_orco)
1204 {
1205 mbc->buff.vbos.remove(VBOType::Tangents);
1206 }
1207 if (cache.cd_used.orco != cache.cd_needed.orco) {
1208 mbc->buff.vbos.remove(VBOType::Orco);
1209 }
1210 if (cache.cd_used.sculpt_overlays != cache.cd_needed.sculpt_overlays) {
1211 mbc->buff.vbos.remove(VBOType::SculptData);
1212 }
1213 if (!drw_attributes_overlap(&cache.attr_used, &cache.attr_needed)) {
1214 for (int i = 0; i < GPU_MAX_ATTR; i++) {
1215 mbc->buff.vbos.remove(VBOType(int8_t(VBOType::Attr0) + i));
1216 }
1217 }
1218 }
1219 /* We can't discard batches at this point as they have been
1220 * referenced for drawing. Just clear them in place. */
1221 for (int i = 0; i < cache.mat_len; i++) {
1223 }
1227
1229 drw_attributes_merge(&cache.attr_used, &cache.attr_needed, mesh.runtime->render_mutex);
1230 }
1233
1235 &cache.attr_used_over_time, &cache.attr_needed, mesh.runtime->render_mutex);
1236 cache.attr_needed.clear();
1237 }
1238
1239 if ((batch_requested & MBC_EDITUV) || cd_uv_update) {
1240 /* Discard UV batches if sync_selection changes */
1241 const bool is_uvsyncsel = ts && (ts->uv_flag & UV_SYNC_SELECTION);
1242 if (cd_uv_update || (cache.is_uvsyncsel != is_uvsyncsel)) {
1243 cache.is_uvsyncsel = is_uvsyncsel;
1244 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
1245 mbc->buff.vbos.remove(VBOType::EditUVData);
1246 mbc->buff.vbos.remove(VBOType::FaceDotUV);
1247 mbc->buff.vbos.remove(VBOType::FaceDotEditUVData);
1248 mbc->buff.ibos.remove(IBOType::EditUVTris);
1249 mbc->buff.ibos.remove(IBOType::EditUVLines);
1250 mbc->buff.ibos.remove(IBOType::EditUVPoints);
1251 mbc->buff.ibos.remove(IBOType::EditUVFaceDots);
1252 }
1253 /* We only clear the batches as they may already have been
1254 * referenced. */
1265 cache.batch_ready &= ~MBC_EDITUV;
1266 }
1267 }
1268
1269 /* Second chance to early out */
1270 if ((batch_requested & ~cache.batch_ready) == 0) {
1271 return;
1272 }
1273
1274 /* TODO(pablodp606): This always updates the sculpt normals for regular drawing (non-pbvh::Tree).
1275 * This makes tools that sample the surface per step get wrong normals until a redraw happens.
1276 * Normal updates should be part of the brush loop and only run during the stroke when the
1277 * brush needs to sample the surface. The drawing code should only update the normals
1278 * per redraw when smooth shading is enabled. */
1281 }
1282
1283 /* This is the mesh before modifier evaluation, used to test how the mesh changed during
1284 * evaluation to decide which data is valid to extract. */
1285 const Mesh *orig_edit_mesh = is_editmode ? BKE_object_get_pre_modified_mesh(&ob) : nullptr;
1286
1287 bool do_cage = false;
1288 const Mesh *edit_data_mesh = nullptr;
1289 if (is_editmode) {
1290 const Mesh *eval_cage = DRW_object_get_editmesh_cage_for_drawing(ob);
1291 if (eval_cage && eval_cage != &mesh) {
1292 /* Extract "cage" data separately when it exists and it's not just the same mesh as the
1293 * regular evaluated mesh. Otherwise edit data will be extracted from the final evaluated
1294 * mesh. */
1295 do_cage = true;
1296 edit_data_mesh = eval_cage;
1297 }
1298 else {
1299 edit_data_mesh = &mesh;
1300 }
1301 }
1302
1303 bool do_uvcage = false;
1304 if (is_editmode) {
1305 /* Currently we don't extract UV data from the evaluated mesh unless it's the same mesh as the
1306 * original edit mesh. */
1307 do_uvcage = !(mesh.runtime->is_original_bmesh &&
1308 mesh.runtime->wrapper_type == ME_WRAPPER_TYPE_BMESH);
1309 }
1310
1311 const DRWBatchFlag batches_to_create = batch_requested & ~cache.batch_ready;
1312
1313 const bool do_subdivision = BKE_subsurf_modifier_has_gpu_subdiv(&mesh);
1314
1315 enum class BufferList { Final, Cage, UVCage };
1316
1317 struct BatchCreateData {
1318 gpu::Batch &batch;
1319 GPUPrimType prim_type;
1320 BufferList list;
1321 std::optional<IBOType> ibo;
1322 Vector<VBOType> vbos;
1323 };
1324 Vector<BatchCreateData> batch_info;
1325
1326 {
1327 const BufferList list = BufferList::Final;
1328 if (batches_to_create & MBC_SURFACE) {
1329 BatchCreateData batch{*cache.batch.surface,
1331 list,
1334 if (cache.cd_used.uv != 0) {
1335 batch.vbos.append(VBOType::UVs);
1336 }
1337 for (const int i : cache.attr_used.index_range()) {
1338 batch.vbos.append(VBOType(int8_t(VBOType::Attr0) + i));
1339 }
1340 batch_info.append(std::move(batch));
1341 }
1342 if (batches_to_create & MBC_VIEWER_ATTRIBUTE_OVERLAY) {
1343 batch_info.append({*cache.batch.surface_viewer_attribute,
1345 list,
1348 }
1349 if (batches_to_create & MBC_ALL_VERTS) {
1350 batch_info.append({*cache.batch.all_verts,
1352 list,
1353 std::nullopt,
1355 }
1356 if (batches_to_create & MBC_SCULPT_OVERLAYS) {
1357 batch_info.append({*cache.batch.sculpt_overlays,
1359 list,
1362 }
1363 if (batches_to_create & MBC_ALL_EDGES) {
1364 batch_info.append(
1366 }
1367 if (batches_to_create & MBC_LOOSE_EDGES) {
1368 batch_info.append({*cache.batch.loose_edges,
1370 list,
1373 }
1374 if (batches_to_create & MBC_EDGE_DETECTION) {
1375 batch_info.append({*cache.batch.edge_detection,
1377 list,
1380 }
1381 if (batches_to_create & MBC_SURFACE_WEIGHTS) {
1382 batch_info.append({*cache.batch.surface_weights,
1384 list,
1387 }
1388 if (batches_to_create & MBC_WIRE_LOOPS) {
1389 batch_info.append({*cache.batch.wire_loops,
1391 list,
1394 }
1395 if (batches_to_create & MBC_WIRE_EDGES) {
1396 batch_info.append({*cache.batch.wire_edges,
1398 list,
1401 }
1402 if (batches_to_create & MBC_WIRE_LOOPS_ALL_UVS) {
1403 BatchCreateData batch{
1405 if (cache.cd_used.uv != 0) {
1406 batch.vbos.append(VBOType::UVs);
1407 }
1408 batch_info.append(std::move(batch));
1409 }
1410 if (batches_to_create & MBC_WIRE_LOOPS_UVS) {
1411 BatchCreateData batch{
1413 if (cache.cd_used.uv != 0) {
1414 batch.vbos.append(VBOType::UVs);
1415 }
1416 batch_info.append(std::move(batch));
1417 }
1418 if (batches_to_create & MBC_WIRE_LOOPS_EDITUVS) {
1419 BatchCreateData batch{
1421 if (cache.cd_used.uv != 0) {
1422 batch.vbos.append(VBOType::UVs);
1423 }
1424 batch_info.append(std::move(batch));
1425 }
1426 if (batches_to_create & MBC_UV_FACES) {
1427 const bool use_face_selection = (mesh.editflag & ME_EDIT_PAINT_FACE_SEL);
1428 /* Sculpt mode does not support selection, therefore the generic `is_paint_mode` check cannot
1429 * be used */
1430 const bool is_face_selectable =
1432 use_face_selection;
1433
1434 const IBOType ibo = is_face_selectable || is_editmode ? IBOType::UVTris : IBOType::Tris;
1435 BatchCreateData batch{*cache.batch.uv_faces, GPU_PRIM_TRIS, list, ibo, {}};
1436 if (cache.cd_used.uv != 0) {
1437 batch.vbos.append(VBOType::UVs);
1438 }
1439 batch_info.append(std::move(batch));
1440 }
1441 if (batches_to_create & MBC_EDIT_MESH_ANALYSIS) {
1442 batch_info.append({*cache.batch.edit_mesh_analysis,
1444 list,
1447 }
1448 }
1449
1450 /* When the mesh doesn't correspond to the object's original mesh (i.e. the mesh was replaced by
1451 * another with the object info node during evaluation), don't extract edit mode data for it.
1452 * That data can be invalid because any original indices (#CD_ORIGINDEX) on the evaluated mesh
1453 * won't correspond to the correct mesh. */
1454 const bool edit_mapping_valid = is_editmode && BKE_editmesh_eval_orig_map_available(
1455 *edit_data_mesh, orig_edit_mesh);
1456
1457 {
1458 const BufferList list = do_cage ? BufferList::Cage : BufferList::Final;
1459 if (batches_to_create & MBC_EDIT_TRIANGLES) {
1460 if (edit_mapping_valid) {
1461 batch_info.append({*cache.batch.edit_triangles,
1463 list,
1466 }
1467 else {
1469 }
1470 }
1471 if (batches_to_create & MBC_EDIT_VERTICES) {
1472 if (edit_mapping_valid) {
1473 BatchCreateData batch{*cache.batch.edit_vertices,
1475 list,
1478 if (!do_subdivision || do_cage) {
1479 batch.vbos.append(VBOType::CornerNormal);
1480 }
1481 batch_info.append(std::move(batch));
1482 }
1483 else {
1485 }
1486 }
1487 if (batches_to_create & MBC_EDIT_EDGES) {
1488 if (edit_mapping_valid) {
1489 BatchCreateData batch{*cache.batch.edit_edges,
1491 list,
1494 if (!do_subdivision || do_cage) {
1495 batch.vbos.append(VBOType::VertexNormal);
1496 }
1497 batch_info.append(std::move(batch));
1498 }
1499 else {
1501 }
1502 }
1503 if (batches_to_create & MBC_EDIT_VNOR) {
1504 if (edit_mapping_valid) {
1505 batch_info.append({*cache.batch.edit_vnor,
1507 list,
1510 }
1511 else {
1513 }
1514 }
1515 if (batches_to_create & MBC_EDIT_LNOR) {
1516 if (edit_mapping_valid) {
1517 batch_info.append({*cache.batch.edit_lnor,
1519 list,
1522 }
1523 else {
1525 }
1526 }
1527 if (batches_to_create & MBC_EDIT_FACEDOTS) {
1528 if (edit_mapping_valid) {
1529 batch_info.append({*cache.batch.edit_fdots,
1531 list,
1534 }
1535 else {
1537 }
1538 }
1539 if (batches_to_create & MBC_SKIN_ROOTS) {
1540 if (edit_mapping_valid) {
1541 batch_info.append({*cache.batch.edit_skin_roots,
1543 list,
1544 std::nullopt,
1546 }
1547 else {
1549 }
1550 }
1551 if (batches_to_create & MBC_EDIT_SELECTION_VERTS) {
1552 if (is_editmode && !edit_mapping_valid) {
1554 }
1555 else {
1556 batch_info.append({*cache.batch.edit_selection_verts,
1558 list,
1561 }
1562 }
1563 if (batches_to_create & MBC_EDIT_SELECTION_EDGES) {
1564 if (is_editmode && !edit_mapping_valid) {
1566 }
1567 else {
1568 batch_info.append({*cache.batch.edit_selection_edges,
1570 list,
1573 }
1574 }
1575 if (batches_to_create & MBC_EDIT_SELECTION_FACES) {
1576 if (is_editmode && !edit_mapping_valid) {
1578 }
1579 else {
1580 batch_info.append({*cache.batch.edit_selection_faces,
1582 list,
1585 }
1586 }
1587 if (batches_to_create & MBC_EDIT_SELECTION_FACEDOTS) {
1588 if (is_editmode && !edit_mapping_valid) {
1590 }
1591 else {
1592 batch_info.append({*cache.batch.edit_selection_fdots,
1594 list,
1597 }
1598 }
1599 }
1600
1601 {
1607 const BufferList list = do_uvcage ? BufferList::UVCage : BufferList::Final;
1608
1609 if (batches_to_create & MBC_EDITUV_FACES) {
1610 if (edit_mapping_valid) {
1611 batch_info.append({*cache.batch.edituv_faces,
1613 list,
1616 }
1617 else {
1619 }
1620 }
1621 if (batches_to_create & MBC_EDITUV_FACES_STRETCH_AREA) {
1622 if (edit_mapping_valid) {
1623 batch_info.append({*cache.batch.edituv_faces_stretch_area,
1625 list,
1628 }
1629 else {
1631 }
1632 }
1633 if (batches_to_create & MBC_EDITUV_FACES_STRETCH_ANGLE) {
1634 if (edit_mapping_valid) {
1635 batch_info.append({*cache.batch.edituv_faces_stretch_angle,
1637 list,
1640 }
1641 else {
1643 }
1644 }
1645 if (batches_to_create & MBC_EDITUV_EDGES) {
1646 if (edit_mapping_valid) {
1647 batch_info.append({*cache.batch.edituv_edges,
1649 list,
1652 }
1653 else {
1655 }
1656 }
1657 if (batches_to_create & MBC_EDITUV_VERTS) {
1658 if (edit_mapping_valid) {
1659 batch_info.append({*cache.batch.edituv_verts,
1661 list,
1664 }
1665 else {
1667 }
1668 }
1669 if (batches_to_create & MBC_EDITUV_FACEDOTS) {
1670 if (edit_mapping_valid) {
1671 batch_info.append({*cache.batch.edituv_fdots,
1673 list,
1676 }
1677 else {
1679 }
1680 }
1681 }
1682
1683 std::array<VectorSet<IBOType>, 3> ibo_requests;
1684 std::array<VectorSet<VBOType>, 3> vbo_requests;
1685 for (const BatchCreateData &batch : batch_info) {
1686 if (batch.ibo) {
1687 ibo_requests[int(batch.list)].add(*batch.ibo);
1688 }
1689 vbo_requests[int(batch.list)].add_multiple(batch.vbos);
1690 }
1691
1692 if (batches_to_create & MBC_SURFACE_PER_MAT) {
1693 ibo_requests[int(BufferList::Final)].add(IBOType::Tris);
1694 vbo_requests[int(BufferList::Final)].add(VBOType::CornerNormal);
1695 vbo_requests[int(BufferList::Final)].add(VBOType::Position);
1696 for (const int i : cache.attr_used.index_range()) {
1697 vbo_requests[int(BufferList::Final)].add(VBOType(int8_t(VBOType::Attr0) + i));
1698 }
1699 if (cache.cd_used.uv != 0) {
1700 vbo_requests[int(BufferList::Final)].add(VBOType::UVs);
1701 }
1702 if ((cache.cd_used.tan != 0) || (cache.cd_used.tan_orco != 0)) {
1703 vbo_requests[int(BufferList::Final)].add(VBOType::Tangents);
1704 }
1705 if (cache.cd_used.orco != 0) {
1706 vbo_requests[int(BufferList::Final)].add(VBOType::Orco);
1707 }
1708 }
1709
1710 if (do_uvcage) {
1712 scene,
1713 cache,
1714 cache.uv_cage,
1715 ibo_requests[int(BufferList::UVCage)],
1716 vbo_requests[int(BufferList::UVCage)],
1717 ob,
1718 mesh,
1719 is_editmode,
1720 is_paint_mode,
1721 false,
1722 true,
1723 true);
1724 }
1725
1726 if (do_cage) {
1728 scene,
1729 cache,
1730 cache.cage,
1731 ibo_requests[int(BufferList::Cage)],
1732 vbo_requests[int(BufferList::Cage)],
1733 ob,
1734 mesh,
1735 is_editmode,
1736 is_paint_mode,
1737 false,
1738 false,
1739 true);
1740 }
1741
1742 if (do_subdivision) {
1744 mesh,
1745 cache,
1746 cache.final,
1747 ibo_requests[int(BufferList::Final)],
1748 vbo_requests[int(BufferList::Final)],
1749 is_editmode,
1750 is_paint_mode,
1751 true,
1752 false,
1753 do_cage,
1754 ts,
1755 use_hide);
1756 }
1757 else {
1758 /* The subsurf modifier may have been recently removed, or another modifier was added after it,
1759 * so free any potential subdivision cache as it is not needed anymore. */
1761 }
1762
1764 scene,
1765 cache,
1766 cache.final,
1767 ibo_requests[int(BufferList::Final)],
1768 vbo_requests[int(BufferList::Final)],
1769 ob,
1770 mesh,
1771 is_editmode,
1772 is_paint_mode,
1773 true,
1774 false,
1775 use_hide);
1776
1777 std::array<MeshBufferCache *, 3> caches{&cache.final, &cache.cage, &cache.uv_cage};
1778 for (const BatchCreateData &batch : batch_info) {
1779 MeshBufferCache &cache_for_batch = *caches[int(batch.list)];
1780 gpu::IndexBuf *ibo = batch.ibo ? caches[int(batch.list)]->buff.ibos.lookup(*batch.ibo).get() :
1781 nullptr;
1782 GPU_batch_init(&batch.batch, batch.prim_type, nullptr, ibo);
1783 for (const VBOType vbo_request : batch.vbos) {
1785 &batch.batch, cache_for_batch.buff.vbos.lookup(vbo_request).get(), false);
1786 }
1787 }
1788
1789 if (batches_to_create & MBC_SURFACE_PER_MAT) {
1791 gpu::IndexBuf &tris_ibo = *buffers.ibos.lookup(IBOType::Tris);
1793 for (const int material : IndexRange(cache.mat_len)) {
1794 gpu::Batch *batch = cache.surface_per_mat[material];
1795 if (!batch) {
1796 continue;
1797 }
1798 GPU_batch_init(batch, GPU_PRIM_TRIS, nullptr, cache.tris_per_mat[material].get());
1799 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::CornerNormal).get(), false);
1800 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Position).get(), false);
1801 if (cache.cd_used.uv != 0) {
1802 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::UVs).get(), false);
1803 }
1804 if ((cache.cd_used.tan != 0) || (cache.cd_used.tan_orco != 0)) {
1805 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Tangents).get(), false);
1806 }
1807 if (cache.cd_used.orco != 0) {
1808 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Orco).get(), false);
1809 }
1810 for (const int i : cache.attr_used.index_range()) {
1812 batch, buffers.vbos.lookup(VBOType(int8_t(VBOType::Attr0) + i)).get(), false);
1813 }
1814 }
1815 }
1816
1817 cache.batch_ready |= batch_requested;
1818}
1819
1821
1822} // namespace blender::draw
CustomData interface, see also DNA_customdata_types.h.
int CustomData_get_named_layer(const CustomData *data, eCustomDataType type, blender::StringRef name)
const void * CustomData_get_layer(const CustomData *data, eCustomDataType type)
bool CustomData_layer_is_anonymous(const CustomData *data, eCustomDataType type, int n)
int CustomData_get_stencil_layer(const CustomData *data, eCustomDataType type)
int CustomData_get_active_layer(const CustomData *data, eCustomDataType type)
int CustomData_get_render_layer(const CustomData *data, eCustomDataType type)
bool BKE_editmesh_eval_orig_map_available(const Mesh &mesh_eval, const Mesh *mesh_orig)
Definition editmesh.cc:67
General operations, lookup, etc. for materials.
int BKE_id_material_used_with_fallback_eval(const ID &id)
eMeshBatchDirtyMode
Definition BKE_mesh.h:37
@ BKE_MESH_BATCH_DIRTY_UVEDIT_ALL
Definition BKE_mesh.h:42
@ BKE_MESH_BATCH_DIRTY_SELECT_PAINT
Definition BKE_mesh.h:40
@ BKE_MESH_BATCH_DIRTY_SHADING
Definition BKE_mesh.h:41
@ BKE_MESH_BATCH_DIRTY_UVEDIT_SELECT
Definition BKE_mesh.h:43
@ BKE_MESH_BATCH_DIRTY_ALL
Definition BKE_mesh.h:38
@ BKE_MESH_BATCH_DIRTY_SELECT
Definition BKE_mesh.h:39
@ ME_WRAPPER_TYPE_BMESH
General operations, lookup, etc. for blender objects.
const Mesh * BKE_object_get_pre_modified_mesh(const Object *object)
const Mesh * BKE_object_get_editmesh_eval_final(const Object *object)
Functions for dealing with objects and deform verts, used by painting and tools.
bool BKE_object_defgroup_check_lock_relative(const bool *lock_flags, const bool *validmap, int index)
void BKE_object_defgroup_split_locked_validmap(int defbase_tot, const bool *locked, const bool *deform, bool *r_locked, bool *r_unlocked)
bool * BKE_object_defgroup_validmap_get(struct Object *ob, int defbase_tot)
bool * BKE_object_defgroup_lock_flags_get(struct Object *ob, int defbase_tot)
void BKE_object_defgroup_mirror_selection(struct Object *ob, int defbase_tot, const bool *selection, bool *dg_flags_sel, int *r_dg_flags_sel_tot)
bool BKE_object_defgroup_check_lock_relative_multi(int defbase_tot, const bool *lock_flags, const bool *selected, int sel_tot)
bool * BKE_object_defgroup_selected_get(struct Object *ob, int defbase_tot, int *r_dg_flags_sel_tot)
A BVH for high poly meshes.
bool BKE_subsurf_modifier_has_gpu_subdiv(const Mesh *mesh)
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_INLINE
#define LISTBASE_FOREACH(type, var, list)
int BLI_listbase_count(const ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:524
#define ARRAY_SIZE(arr)
#define ELEM(...)
@ CD_PROP_BYTE_COLOR
@ CD_PROP_FLOAT
@ CD_PROP_FLOAT3
@ CD_PROP_INT32_2D
@ CD_PROP_COLOR
@ CD_PROP_QUATERNION
@ CD_PROP_INT32
@ CD_PROP_FLOAT2
@ CD_PROP_INT16_2D
@ CD_AUTO_FROM_NAME
#define ME_USING_MIRROR_X_VERTEX_GROUPS(_me)
@ ME_EDIT_PAINT_FACE_SEL
@ OB_MODE_EDIT
@ OB_MODE_WEIGHT_PAINT
@ OB_MODE_TEXTURE_PAINT
@ OB_MODE_VERTEX_PAINT
Object is a sort of wrapper for general info.
@ OB_MESH
@ UV_SYNC_SELECTION
#define GPU_BATCH_CLEAR_SAFE(batch)
Definition GPU_batch.hh:190
int GPU_batch_vertbuf_add(blender::gpu::Batch *batch, blender::gpu::VertBuf *vertex_buf, bool own_vbo)
#define GPU_batch_init(batch, primitive_type, vertex_buf, index_buf)
Definition GPU_batch.hh:166
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition GPU_batch.hh:204
ListBase GPU_material_attributes(const GPUMaterial *material)
GPUPrimType
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
@ GPU_PRIM_LINES_ADJ
@ GPU_PRIM_TRIS
static constexpr int GPU_MAX_ATTR
Definition GPU_shader.hh:34
#define GPU_vertbuf_create_with_format(format)
void GPU_vertbuf_data_len_set(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
@ GPU_FETCH_FLOAT
uint GPU_vertformat_attr_add(GPUVertFormat *, blender::StringRef name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
@ GPU_COMP_F32
Read Guarded memory(de)allocation.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
#define U
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
IndexRange index_range() const
Definition BLI_array.hh:349
const T & first() const
Definition BLI_array.hh:270
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:398
bool is_empty() const
Definition BLI_array.hh:253
IndexRange index_range() const
void reserve(const int64_t n)
Definition BLI_set.hh:637
bool contains(const Key &key) const
Definition BLI_set.hh:310
bool add(const Key &key)
Definition BLI_set.hh:248
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr bool is_empty() const
constexpr const char * c_str() const
void append(const T &value)
#define FOREACH_MESH_BUFFER_CACHE(batch_cache, mbc)
#define MBC_BATCH_LEN
#define MBC_EDITUV
blender::gpu::Batch * DRW_batch_request(blender::gpu::Batch **batch)
const Mesh * DRW_object_get_editmesh_cage_for_drawing(const Object &object)
Extraction of Mesh data into VBO to feed to GPU.
struct @242053044010324116347033273112253060004051364061::@051143074301336237271216303350234260141112266062 batch
#define MEM_SAFE_FREE(v)
format
void * MEM_dupallocN(const void *vmemh)
Definition mallocn.cc:143
pbvh::Tree * pbvh_get(Object &object)
Definition paint.cc:2912
void update_normals_from_eval(Object &object_eval, Tree &pbvh)
Definition pbvh.cc:1080
void DRW_mesh_batch_cache_validate(Mesh &mesh)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_faces_stretch_angle(Object &object, Mesh &mesh)
void drw_attributes_add_request(VectorSet< std::string > *attrs, const StringRef name)
const CustomData & mesh_cd_ldata_get_from_mesh(const Mesh &mesh)
static void drw_mesh_weight_state_extract(Object &ob, Mesh &mesh, const ToolSettings &ts, bool paint_mode, DRW_MeshWeightState *wstate)
static void init_empty_dummy_batch(gpu::Batch &batch)
BLI_INLINE bool mesh_cd_layers_type_equal(DRW_MeshCDMask a, DRW_MeshCDMask b)
blender::gpu::Batch * DRW_mesh_batch_cache_get_loose_edges(Mesh &mesh)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_edges(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_texpaint_single(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_edges(Mesh &mesh)
static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache &cache)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_facedots(Object &object, Mesh &mesh)
static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
blender::gpu::Batch * DRW_mesh_batch_cache_get_facedots_with_select_id(Mesh &mesh)
static void mesh_cd_calc_active_uv_layer(const Object &object, const Mesh &mesh, DRW_MeshCDMask &cd_used)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_edges(Mesh &mesh)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_wireframe(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_facedots(Mesh &mesh)
void draw_subdiv_cache_free(DRWSubdivCache &cache)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_verts(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface(Mesh &mesh)
BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
static void texpaint_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
static void mesh_cd_calc_edit_uv_layer(const Mesh &, DRW_MeshCDMask *cd_used)
static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache &cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_weights(Mesh &mesh)
Span< gpu::Batch * > DRW_mesh_batch_cache_get_surface_shaded(Object &object, Mesh &mesh, Span< const GPUMaterial * > materials)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_faces(Object &object, Mesh &mesh)
const CustomData & mesh_cd_edata_get_from_mesh(const Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_vertpaint(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_wireframes_face(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_sculpt(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_all_edges(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_loop_normals(Mesh &mesh)
static bool drw_mesh_weight_state_compare(const DRW_MeshWeightState *a, const DRW_MeshWeightState *b)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edge_detection(Mesh &mesh, bool *r_is_manifold)
void drw_attributes_merge(VectorSet< std::string > *dst, const VectorSet< std::string > *src, Mutex &render_mutex)
void DRW_mesh_batch_cache_free_old(Mesh *mesh, int ctime)
void DRW_mesh_batch_cache_free(void *batch_cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_triangles(Mesh &mesh)
Span< gpu::Batch * > DRW_mesh_batch_cache_get_surface_texpaint(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_vertices(Mesh &mesh)
static void mesh_batch_cache_init(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_all_uv_wireframe(Object &object, Mesh &mesh)
static void mesh_batch_cache_clear(MeshBatchCache &cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_triangles_with_select_id(Mesh &mesh)
static void mesh_buffer_cache_clear(MeshBufferCache *mbc)
static void mesh_batch_cache_discard_uvedit(MeshBatchCache &cache)
BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
static void mesh_batch_cache_check_vertex_group(MeshBatchCache &cache, const DRW_MeshWeightState *wstate)
static void drw_mesh_weight_state_copy(DRW_MeshWeightState *wstate_dst, const DRW_MeshWeightState *wstate_src)
BLI_INLINE void mesh_cd_layers_type_merge(DRW_MeshCDMask *a, DRW_MeshCDMask b)
const Mesh & editmesh_final_or_this(const Object &object, const Mesh &mesh)
static void mesh_cd_calc_active_mask_uv_layer(const Object &object, const Mesh &mesh, DRW_MeshCDMask &cd_used)
bool drw_attributes_overlap(const VectorSet< std::string > *a, const VectorSet< std::string > *b)
static void mesh_batch_cache_request_surface_batches(Mesh &mesh, MeshBatchCache &cache)
void DRW_create_subdivision(Object &ob, Mesh &mesh, MeshBatchCache &batch_cache, MeshBufferCache &mbc, const Span< IBOType > ibo_requests, const Span< VBOType > vbo_requests, const bool is_editmode, const bool is_paint_mode, const bool do_final, const bool do_uvedit, const bool do_cage, const ToolSettings *ts, const bool use_hide)
static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object &object, const Mesh &mesh, const Span< const GPUMaterial * > materials, VectorSet< std::string > *attributes)
static void request_active_and_default_color_attributes(const Object &object, const Mesh &mesh, VectorSet< std::string > &attributes)
blender::gpu::Batch * DRW_mesh_batch_cache_get_verts_with_select_id(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_sculpt_overlays(Mesh &mesh)
static void discard_buffers(MeshBatchCache &cache, const Span< VBOType > vbos, const Span< IBOType > ibos)
void DRW_mesh_batch_cache_dirty_tag(Mesh *mesh, eMeshBatchDirtyMode mode)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_viewer_attribute(Mesh &mesh)
const CustomData & mesh_cd_vdata_get_from_mesh(const Mesh &mesh)
void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph, Object &ob, Mesh &mesh, const Scene &scene, bool is_paint_mode, bool use_hide)
void DRW_mesh_get_attributes(const Object &object, const Mesh &mesh, const Span< const GPUMaterial * > materials, VectorSet< std::string > *r_attrs, DRW_MeshCDMask *r_cd_needed)
static void mesh_batch_cache_free_subdiv_cache(MeshBatchCache &cache)
void mesh_buffer_cache_create_requested(TaskGraph &task_graph, const Scene &scene, MeshBatchCache &cache, MeshBufferCache &mbc, Span< IBOType > ibo_requests, Span< VBOType > vbo_requests, Object &object, Mesh &mesh, bool is_editmode, bool is_paint_mode, bool do_final, bool do_uvedit, bool use_hide)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_vert_normals(Mesh &mesh)
static MeshBatchCache * mesh_batch_cache_get(Mesh &mesh)
static void drw_mesh_weight_state_clear(DRW_MeshWeightState *wstate)
blender::gpu::Batch * DRW_mesh_batch_cache_get_uv_faces(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edges_with_select_id(Mesh &mesh)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Object &object, Mesh &mesh, float **tot_area, float **tot_uv_area)
static void edituv_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
static bool mesh_batch_cache_valid(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_all_verts(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_uv_wireframe(Object &object, Mesh &mesh)
bool drw_custom_data_match_attribute(const CustomData &custom_data, const StringRef name, int *r_layer_index, eCustomDataType *r_type)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_skin_roots(Mesh &mesh)
const CustomData & mesh_cd_pdata_get_from_mesh(const Mesh &mesh)
void create_material_subranges(const SortedFaceData &face_sorted, gpu::IndexBuf &tris_ibo, MutableSpan< gpu::IndexBufPtr > ibos)
MeshRuntimeHandle * runtime
char * default_color_attribute
ListBase vertex_group_names
char editflag
CustomData vert_data
int vertex_group_active_index
char * active_color_attribute
struct ToolSettings * toolsettings
VectorSet< std::string > attr_used_over_time
Array< gpu::IndexBufPtr > tris_per_mat
VectorSet< std::string > attr_used
Array< gpu::Batch * > surface_per_mat
VectorSet< std::string > attr_needed
Map< IBOType, std::unique_ptr< gpu::IndexBuf, gpu::IndexBufDeleter > > ibos
Map< VBOType, std::unique_ptr< gpu::VertBuf, gpu::VertBufDeleter > > vbos
i
Definition text_draw.cc:230
char * buffers[2]