Blender V4.3
draw_cache_impl_mesh.cc
1/* SPDX-FileCopyrightText: 2017 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
11#include <optional>
12
13#include "MEM_guardedalloc.h"
14
15#include "BLI_bitmap.h"
16#include "BLI_buffer.h"
17#include "BLI_index_range.hh"
18#include "BLI_listbase.h"
19#include "BLI_map.hh"
20#include "BLI_math_bits.h"
21#include "BLI_math_vector.h"
22#include "BLI_span.hh"
23#include "BLI_string.h"
24#include "BLI_string_ref.hh"
25#include "BLI_task.h"
26#include "BLI_utildefines.h"
27
28#include "DNA_mesh_types.h"
29#include "DNA_object_types.h"
30#include "DNA_scene_types.h"
31
32#include "BKE_attribute.hh"
33#include "BKE_customdata.hh"
34#include "BKE_deform.hh"
35#include "BKE_editmesh.hh"
36#include "BKE_editmesh_cache.hh"
38#include "BKE_mesh.hh"
39#include "BKE_mesh_runtime.hh"
40#include "BKE_mesh_tangent.hh"
41#include "BKE_modifier.hh"
42#include "BKE_object.hh"
43#include "BKE_object_deform.h"
44#include "BKE_paint.hh"
45#include "BKE_pbvh_api.hh"
47
48#include "atomic_ops.h"
49
50#include "bmesh.hh"
51
52#include "GPU_batch.hh"
53#include "GPU_material.hh"
54
55#include "DRW_render.hh"
56
57#include "ED_mesh.hh"
58#include "ED_uvedit.hh"
59
60#include "draw_cache_extract.hh"
61#include "draw_cache_inline.hh"
62#include "draw_subdivision.hh"
63
64#include "draw_cache_impl.hh" /* own include */
65#include "draw_manager_c.hh"
66
68
69namespace blender::draw {
70
71/* ---------------------------------------------------------------------- */
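/* Dependencies between buffers and batches.
 *
 * BUFFER_INDEX() maps a member of MeshBufferList (a flat struct of VBO/IBO pointers) to its
 * index in that pointer array, and batches_that_use_buffer() returns the flags of every batch
 * built from a given buffer, so discarding a buffer can also discard its dependent batches. */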
75/* clang-format off */
76
77#define BUFFER_INDEX(buff_name) ((offsetof(MeshBufferList, buff_name) - offsetof(MeshBufferList, vbo)) / sizeof(void *))
78#define BUFFER_LEN (sizeof(MeshBufferList) / sizeof(void *))
79
80#define _BATCH_MAP1(a) batches_that_use_buffer(BUFFER_INDEX(a))
81#define _BATCH_MAP2(a, b) _BATCH_MAP1(a) | _BATCH_MAP1(b)
82#define _BATCH_MAP3(a, b, c) _BATCH_MAP2(a, b) | _BATCH_MAP1(c)
83#define _BATCH_MAP4(a, b, c, d) _BATCH_MAP3(a, b, c) | _BATCH_MAP1(d)
84#define _BATCH_MAP5(a, b, c, d, e) _BATCH_MAP4(a, b, c, d) | _BATCH_MAP1(e)
85#define _BATCH_MAP6(a, b, c, d, e, f) _BATCH_MAP5(a, b, c, d, e) | _BATCH_MAP1(f)
86#define _BATCH_MAP7(a, b, c, d, e, f, g) _BATCH_MAP6(a, b, c, d, e, f) | _BATCH_MAP1(g)
87#define _BATCH_MAP8(a, b, c, d, e, f, g, h) _BATCH_MAP7(a, b, c, d, e, f, g) | _BATCH_MAP1(h)
88#define _BATCH_MAP9(a, b, c, d, e, f, g, h, i) _BATCH_MAP8(a, b, c, d, e, f, g, h) | _BATCH_MAP1(i)
89#define _BATCH_MAP10(a, b, c, d, e, f, g, h, i, j) _BATCH_MAP9(a, b, c, d, e, f, g, h, i) | _BATCH_MAP1(j)
90
91#define BATCH_MAP(...) VA_NARGS_CALL_OVERLOAD(_BATCH_MAP, __VA_ARGS__)
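/* For example, BATCH_MAP(vbo.pos, ibo.tris) expands to
 * batches_that_use_buffer(BUFFER_INDEX(vbo.pos)) | batches_that_use_buffer(BUFFER_INDEX(ibo.tris)),
 * i.e. the union of the batch flags that depend on either buffer. */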
92
93/* clang-format on */
94
95#define TRIS_PER_MAT_INDEX BUFFER_LEN
96
97static constexpr DRWBatchFlag batches_that_use_buffer(const int buffer_index)
98{
99 switch (buffer_index) {
100 case BUFFER_INDEX(vbo.pos):
107 case BUFFER_INDEX(vbo.nor):
110 case BUFFER_INDEX(vbo.edge_fac):
111 return MBC_WIRE_EDGES;
112 case BUFFER_INDEX(vbo.weights):
113 return MBC_SURFACE_WEIGHTS;
114 case BUFFER_INDEX(vbo.uv):
118 case BUFFER_INDEX(vbo.tan):
119 return MBC_SURFACE_PER_MAT;
120 case BUFFER_INDEX(vbo.sculpt_data):
121 return MBC_SCULPT_OVERLAYS;
122 case BUFFER_INDEX(vbo.orco):
123 return MBC_SURFACE_PER_MAT;
124 case BUFFER_INDEX(vbo.edit_data):
126 case BUFFER_INDEX(vbo.edituv_data):
129 case BUFFER_INDEX(vbo.edituv_stretch_area):
131 case BUFFER_INDEX(vbo.edituv_stretch_angle):
133 case BUFFER_INDEX(vbo.mesh_analysis):
135 case BUFFER_INDEX(vbo.fdots_pos):
137 case BUFFER_INDEX(vbo.fdots_nor):
138 return MBC_EDIT_FACEDOTS;
139 case BUFFER_INDEX(vbo.fdots_uv):
140 return MBC_EDITUV_FACEDOTS;
141 case BUFFER_INDEX(vbo.fdots_edituv_data):
142 return MBC_EDITUV_FACEDOTS;
143 case BUFFER_INDEX(vbo.skin_roots):
144 return MBC_SKIN_ROOTS;
145 case BUFFER_INDEX(vbo.vert_idx):
147 case BUFFER_INDEX(vbo.edge_idx):
149 case BUFFER_INDEX(vbo.face_idx):
151 case BUFFER_INDEX(vbo.fdot_idx):
153 case BUFFER_INDEX(vbo.attr[0]):
154 case BUFFER_INDEX(vbo.attr[1]):
155 case BUFFER_INDEX(vbo.attr[2]):
156 case BUFFER_INDEX(vbo.attr[3]):
157 case BUFFER_INDEX(vbo.attr[4]):
158 case BUFFER_INDEX(vbo.attr[5]):
159 case BUFFER_INDEX(vbo.attr[6]):
160 case BUFFER_INDEX(vbo.attr[7]):
161 case BUFFER_INDEX(vbo.attr[8]):
162 case BUFFER_INDEX(vbo.attr[9]):
163 case BUFFER_INDEX(vbo.attr[10]):
164 case BUFFER_INDEX(vbo.attr[11]):
165 case BUFFER_INDEX(vbo.attr[12]):
166 case BUFFER_INDEX(vbo.attr[13]):
167 case BUFFER_INDEX(vbo.attr[14]):
169 case BUFFER_INDEX(vbo.attr_viewer):
171 case BUFFER_INDEX(vbo.vnor):
172 return MBC_EDIT_VNOR;
173 case BUFFER_INDEX(ibo.tris):
177 case BUFFER_INDEX(ibo.lines):
179 case BUFFER_INDEX(ibo.lines_loose):
180 return MBC_LOOSE_EDGES;
181 case BUFFER_INDEX(ibo.points):
183 case BUFFER_INDEX(ibo.fdots):
185 case BUFFER_INDEX(ibo.lines_paint_mask):
186 return MBC_WIRE_LOOPS;
187 case BUFFER_INDEX(ibo.lines_adjacency):
188 return MBC_EDGE_DETECTION;
189 case BUFFER_INDEX(ibo.edituv_tris):
191 case BUFFER_INDEX(ibo.edituv_lines):
193 case BUFFER_INDEX(ibo.edituv_points):
194 return MBC_EDITUV_VERTS;
195 case BUFFER_INDEX(ibo.edituv_fdots):
196 return MBC_EDITUV_FACEDOTS;
198 return MBC_SURFACE_PER_MAT;
199 }
200 return (DRWBatchFlag)0;
201}
202
203static void mesh_batch_cache_discard_surface_batches(MeshBatchCache &cache);
204static void mesh_batch_cache_clear(MeshBatchCache &cache);
205
206static void mesh_batch_cache_discard_batch(MeshBatchCache &cache, const DRWBatchFlag batch_map)
207{
208 for (int i = 0; i < MBC_BATCH_LEN; i++) {
209 DRWBatchFlag batch_requested = (DRWBatchFlag)(1u << i);
210 if (batch_map & batch_requested) {
211 GPU_BATCH_DISCARD_SAFE(((gpu::Batch **)&cache.batch)[i]);
212 cache.batch_ready &= ~batch_requested;
213 }
214 }
215
216 if (batch_map & MBC_SURFACE_PER_MAT) {
218 }
219}
220
221/* Return true if all layers in _b_ are inside _a_. */
222BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
223{
224 return (*((uint32_t *)&a) & *((uint32_t *)&b)) == *((uint32_t *)&b);
225}
226
227BLI_INLINE bool mesh_cd_layers_type_equal(DRW_MeshCDMask a, DRW_MeshCDMask b)
228{
229 return *((uint32_t *)&a) == *((uint32_t *)&b);
230}
231
238
243
244static void mesh_cd_calc_edit_uv_layer(const Mesh & /*mesh*/, DRW_MeshCDMask *cd_used)
245{
246 cd_used->edit_uv = 1;
247}
248
249static void mesh_cd_calc_active_uv_layer(const Object &object,
250 const Mesh &mesh,
251 DRW_MeshCDMask &cd_used)
252{
253 const Mesh &me_final = editmesh_final_or_this(object, mesh);
254 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
255 int layer = CustomData_get_active_layer(&cd_ldata, CD_PROP_FLOAT2);
256 if (layer != -1) {
257 cd_used.uv |= (1 << layer);
258 }
259}
260
261static void mesh_cd_calc_active_mask_uv_layer(const Object &object,
262 const Mesh &mesh,
263 DRW_MeshCDMask &cd_used)
264{
265 const Mesh &me_final = editmesh_final_or_this(object, mesh);
266 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
267 int layer = CustomData_get_stencil_layer(&cd_ldata, CD_PROP_FLOAT2);
268 if (layer != -1) {
269 cd_used.uv |= (1 << layer);
270 }
271}
272
273static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object &object,
274 const Mesh &mesh,
275 const GPUMaterial *const *gpumat_array,
276 int gpumat_array_len,
277 DRW_Attributes *attributes)
278{
279 const Mesh &me_final = editmesh_final_or_this(object, mesh);
280 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
281 const CustomData &cd_pdata = mesh_cd_pdata_get_from_mesh(me_final);
282 const CustomData &cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
283 const CustomData &cd_edata = mesh_cd_edata_get_from_mesh(me_final);
284
285 /* See: DM_vertex_attributes_from_gpu for similar logic */
286 DRW_MeshCDMask cd_used;
288
289 const StringRefNull default_color_name = me_final.default_color_attribute ?
290 me_final.default_color_attribute :
291 "";
292
293 for (int i = 0; i < gpumat_array_len; i++) {
294 const GPUMaterial *gpumat = gpumat_array[i];
295 if (gpumat == nullptr) {
296 continue;
297 }
298 ListBase gpu_attrs = GPU_material_attributes(gpumat);
299 LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
300 const char *name = gpu_attr->name;
301 eCustomDataType type = static_cast<eCustomDataType>(gpu_attr->type);
302 int layer = -1;
303 std::optional<bke::AttrDomain> domain;
304
305 if (gpu_attr->is_default_color) {
306 name = default_color_name.c_str();
307 }
308
309 if (type == CD_AUTO_FROM_NAME) {
310 /* We need to deduce which exact layer is used.
311 *
312 * We do it based on the specified name.
313 */
314 if (name[0] != '\0') {
315 layer = CustomData_get_named_layer(&cd_ldata, CD_PROP_FLOAT2, name);
316 type = CD_MTFACE;
317
318#if 0 /* Tangents are always from UVs - this will never happen. */
319 if (layer == -1) {
320 layer = CustomData_get_named_layer(cd_ldata, CD_TANGENT, name);
321 type = CD_TANGENT;
322 }
323#endif
324 if (layer == -1) {
325 /* Try to match a generic attribute, we use the first attribute domain with a
326 * matching name. */
327 if (drw_custom_data_match_attribute(cd_vdata, name, &layer, &type)) {
328 domain = bke::AttrDomain::Point;
329 }
330 else if (drw_custom_data_match_attribute(cd_ldata, name, &layer, &type)) {
332 }
333 else if (drw_custom_data_match_attribute(cd_pdata, name, &layer, &type)) {
334 domain = bke::AttrDomain::Face;
335 }
336 else if (drw_custom_data_match_attribute(cd_edata, name, &layer, &type)) {
337 domain = bke::AttrDomain::Edge;
338 }
339 else {
340 layer = -1;
341 }
342 }
343
344 if (layer == -1) {
345 continue;
346 }
347 }
348 else {
349 /* Fall back to the UV layer, which matches old behavior. */
350 type = CD_MTFACE;
351 }
352 }
353
354 switch (type) {
355 case CD_MTFACE: {
356 if (layer == -1) {
357 layer = (name[0] != '\0') ?
360 }
361 if (layer != -1 && !CustomData_layer_is_anonymous(&cd_ldata, CD_PROP_FLOAT2, layer)) {
362 cd_used.uv |= (1 << layer);
363 }
364 break;
365 }
366 case CD_TANGENT: {
367 if (layer == -1) {
368 layer = (name[0] != '\0') ?
371
372 /* Only fallback to orco (below) when we have no UV layers, see: #56545 */
373 if (layer == -1 && name[0] != '\0') {
375 }
376 }
377 if (layer != -1) {
378 cd_used.tan |= (1 << layer);
379 }
380 else {
381 /* no UV layers at all => requesting orco */
382 cd_used.tan_orco = 1;
383 cd_used.orco = 1;
384 }
385 break;
386 }
387
388 case CD_ORCO: {
389 cd_used.orco = 1;
390 break;
391 }
393 case CD_PROP_COLOR:
395 case CD_PROP_FLOAT3:
396 case CD_PROP_BOOL:
397 case CD_PROP_INT8:
398 case CD_PROP_INT32:
399 case CD_PROP_INT32_2D:
400 case CD_PROP_FLOAT:
401 case CD_PROP_FLOAT2: {
402 if (layer != -1 && domain.has_value()) {
403 drw_attributes_add_request(attributes, name, type, layer, *domain);
404 }
405 break;
406 }
407 default:
408 break;
409 }
410 }
411 }
412 return cd_used;
413}
414
417/* ---------------------------------------------------------------------- */
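/* Vertex group (weight paint) display state.
 *
 * The helpers below extract, copy and compare this state so the weights VBO and the
 * surface_weights batch are only rebuilt when the active group, selection or lock state changes
 * (see mesh_batch_cache_check_vertex_group). */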
422static void drw_mesh_weight_state_clear(DRW_MeshWeightState *wstate)
423{
427
428 memset(wstate, 0, sizeof(*wstate));
429
430 wstate->defgroup_active = -1;
431}
432
434static void drw_mesh_weight_state_copy(DRW_MeshWeightState *wstate_dst,
435 const DRW_MeshWeightState *wstate_src)
436{
437 MEM_SAFE_FREE(wstate_dst->defgroup_sel);
438 MEM_SAFE_FREE(wstate_dst->defgroup_locked);
439 MEM_SAFE_FREE(wstate_dst->defgroup_unlocked);
440
441 memcpy(wstate_dst, wstate_src, sizeof(*wstate_dst));
442
443 if (wstate_src->defgroup_sel) {
444 wstate_dst->defgroup_sel = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_sel));
445 }
446 if (wstate_src->defgroup_locked) {
447 wstate_dst->defgroup_locked = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_locked));
448 }
449 if (wstate_src->defgroup_unlocked) {
450 wstate_dst->defgroup_unlocked = static_cast<bool *>(
451 MEM_dupallocN(wstate_src->defgroup_unlocked));
452 }
453}
454
455static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
456{
457 return ((!array1 && !array2) ||
458 (array1 && array2 && memcmp(array1, array2, size * sizeof(bool)) == 0));
459}
460
462static bool drw_mesh_weight_state_compare(const DRW_MeshWeightState *a,
463 const DRW_MeshWeightState *b)
464{
465 return a->defgroup_active == b->defgroup_active && a->defgroup_len == b->defgroup_len &&
466 a->flags == b->flags && a->alert_mode == b->alert_mode &&
467 a->defgroup_sel_count == b->defgroup_sel_count &&
468 drw_mesh_flags_equal(a->defgroup_sel, b->defgroup_sel, a->defgroup_len) &&
469 drw_mesh_flags_equal(a->defgroup_locked, b->defgroup_locked, a->defgroup_len) &&
470 drw_mesh_flags_equal(a->defgroup_unlocked, b->defgroup_unlocked, a->defgroup_len);
471}
472
473static void drw_mesh_weight_state_extract(
474 Object &ob, Mesh &mesh, const ToolSettings &ts, bool paint_mode, DRW_MeshWeightState *wstate)
475{
476 /* Extract complete vertex weight group selection state and mode flags. */
477 memset(wstate, 0, sizeof(*wstate));
478
479 wstate->defgroup_active = mesh.vertex_group_active_index - 1;
480 wstate->defgroup_len = BLI_listbase_count(&mesh.vertex_group_names);
481
482 wstate->alert_mode = ts.weightuser;
483
484 if (paint_mode && ts.multipaint) {
485 /* Multi-paint needs to know all selected bones, not just the active group.
486 * This is actually a relatively expensive operation, but caching would be difficult. */
488 &ob, wstate->defgroup_len, &wstate->defgroup_sel_count);
489
490 if (wstate->defgroup_sel_count > 1) {
493
496 wstate->defgroup_len,
497 wstate->defgroup_sel,
498 wstate->defgroup_sel,
499 &wstate->defgroup_sel_count);
500 }
501 }
502 /* With only one selected bone Multi-paint reverts to regular mode. */
503 else {
504 wstate->defgroup_sel_count = 0;
506 }
507 }
508
509 if (paint_mode && ts.wpaint_lock_relative) {
510 /* Set of locked vertex groups for the lock relative mode. */
513
514 /* Check that a deform group is active, and none of selected groups are locked. */
516 wstate->defgroup_locked, wstate->defgroup_unlocked, wstate->defgroup_active) &&
518 wstate->defgroup_locked,
519 wstate->defgroup_sel,
520 wstate->defgroup_sel_count))
521 {
523
524 /* Compute the set of locked and unlocked deform vertex groups. */
526 wstate->defgroup_locked,
527 wstate->defgroup_unlocked,
528 wstate->defgroup_locked, /* out */
529 wstate->defgroup_unlocked);
530 }
531 else {
534 }
535 }
536}
537
540/* ---------------------------------------------------------------------- */
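/* Mesh batch cache lifetime: validation against the current mesh state, (re)initialization, and
 * targeted discarding of buffers and batches. */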
548
549/* gpu::Batch cache management. */
550
551static bool mesh_batch_cache_valid(Object &object, Mesh &mesh)
552{
553 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
554
555 if (cache == nullptr) {
556 return false;
557 }
558
559 /* NOTE: bke::pbvh::Tree draw data should not be checked here. */
560
561 if (cache->is_editmode != (mesh.runtime->edit_mesh != nullptr)) {
562 return false;
563 }
564
565 if (cache->is_dirty) {
566 return false;
567 }
568
569 if (cache->mat_len != mesh_render_mat_len_get(object, mesh)) {
570 return false;
571 }
572
573 return true;
574}
575
576static void mesh_batch_cache_init(Object &object, Mesh &mesh)
577{
578 if (!mesh.runtime->batch_cache) {
579 mesh.runtime->batch_cache = MEM_new<MeshBatchCache>(__func__);
580 }
581 else {
582 *static_cast<MeshBatchCache *>(mesh.runtime->batch_cache) = {};
583 }
584 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
585
586 cache->is_editmode = mesh.runtime->edit_mesh != nullptr;
587
588 if (cache->is_editmode == false) {
589 // cache->edge_len = mesh_render_edges_len_get(mesh);
590 // cache->tri_len = mesh_render_corner_tris_len_get(mesh);
591 // cache->face_len = mesh_render_faces_len_get(mesh);
592 // cache->vert_len = mesh_render_verts_len_get(mesh);
593 }
594
595 cache->mat_len = mesh_render_mat_len_get(object, mesh);
596 cache->surface_per_mat = Array<gpu::Batch *>(cache->mat_len, nullptr);
597 cache->tris_per_mat = Array<gpu::IndexBuf *>(cache->mat_len, nullptr);
598
599 cache->is_dirty = false;
600 cache->batch_ready = (DRWBatchFlag)0;
601 cache->batch_requested = (DRWBatchFlag)0;
602
604}
605
606void DRW_mesh_batch_cache_validate(Object &object, Mesh &mesh)
607{
608 if (!mesh_batch_cache_valid(object, mesh)) {
609 if (mesh.runtime->batch_cache) {
610 mesh_batch_cache_clear(*static_cast<MeshBatchCache *>(mesh.runtime->batch_cache));
611 }
612 mesh_batch_cache_init(object, mesh);
613 }
614}
615
616static MeshBatchCache *mesh_batch_cache_get(Mesh &mesh)
617{
618 return static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
619}
620
621static void mesh_batch_cache_check_vertex_group(MeshBatchCache &cache,
622 const DRW_MeshWeightState *wstate)
623{
624 if (!drw_mesh_weight_state_compare(&cache.weight_state, wstate)) {
625 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
626 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.weights);
627 }
629
630 cache.batch_ready &= ~MBC_SURFACE_WEIGHTS;
631
633 }
634}
635
636static void mesh_batch_cache_request_surface_batches(MeshBatchCache &cache)
637{
640 for (int i = 0; i < cache.mat_len; i++) {
642 }
643}
644
645/* Free batches with material-mapped corner_tris.
646 * NOTE: Updating the index buffers (#tris_per_mat) is handled in the extractors.
647 * No need to discard them here. */
648static void mesh_batch_cache_discard_surface_batches(MeshBatchCache &cache)
649{
651 for (int i = 0; i < cache.mat_len; i++) {
653 }
654 cache.batch_ready &= ~MBC_SURFACE;
655}
656
657static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache &cache)
658{
659 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
660 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
661 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.tan);
662 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.orco);
663 }
664 DRWBatchFlag batch_map = BATCH_MAP(vbo.uv, vbo.tan, vbo.orco);
665 mesh_batch_cache_discard_batch(cache, batch_map);
667}
668
669static void mesh_batch_cache_discard_uvedit(MeshBatchCache &cache)
670{
671 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
672 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_stretch_angle);
673 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_stretch_area);
674 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
675 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
676 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_uv);
677 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
678 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
679 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
680 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
681 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
682 }
683 DRWBatchFlag batch_map = BATCH_MAP(vbo.edituv_stretch_angle,
684 vbo.edituv_stretch_area,
685 vbo.uv,
686 vbo.edituv_data,
687 vbo.fdots_uv,
688 vbo.fdots_edituv_data,
689 ibo.edituv_tris,
690 ibo.edituv_lines,
691 ibo.edituv_points,
692 ibo.edituv_fdots);
693 mesh_batch_cache_discard_batch(cache, batch_map);
694
695 cache.tot_area = 0.0f;
696 cache.tot_uv_area = 0.0f;
697
698 cache.batch_ready &= ~MBC_EDITUV;
699
700 /* We discarded the vbo.uv so we need to reset the cd_used flag. */
701 cache.cd_used.uv = 0;
702 cache.cd_used.edit_uv = 0;
703}
704
705static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache &cache)
706{
707 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
708 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
709 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
710 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
711 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
712 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
713 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
714 }
715 DRWBatchFlag batch_map = BATCH_MAP(vbo.edituv_data,
716 vbo.fdots_edituv_data,
717 ibo.edituv_tris,
718 ibo.edituv_lines,
719 ibo.edituv_points,
720 ibo.edituv_fdots);
721 mesh_batch_cache_discard_batch(cache, batch_map);
722}
723
724void DRW_mesh_batch_cache_dirty_tag(Mesh *mesh, eMeshBatchDirtyMode mode)
725{
726 if (!mesh->runtime->batch_cache) {
727 return;
728 }
729 MeshBatchCache &cache = *static_cast<MeshBatchCache *>(mesh->runtime->batch_cache);
730 DRWBatchFlag batch_map;
731 switch (mode) {
733 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
734 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edit_data);
735 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_nor);
736 }
737 batch_map = BATCH_MAP(vbo.edit_data, vbo.fdots_nor);
738 mesh_batch_cache_discard_batch(cache, batch_map);
739
740 /* Because visible UVs depend on edit mode selection, discard topology. */
742 break;
744 /* Paint mode selection flag is packed inside the nor attribute.
745 * Note that it can be slow if auto smooth is enabled. (see #63946) */
746 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
747 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.lines_paint_mask);
748 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.pos);
749 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.nor);
750 }
751 batch_map = BATCH_MAP(ibo.lines_paint_mask, vbo.pos, vbo.nor);
752 mesh_batch_cache_discard_batch(cache, batch_map);
753 break;
755 cache.is_dirty = true;
756 break;
760 break;
763 break;
765 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
766 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
767 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
768 }
769 batch_map = BATCH_MAP(vbo.edituv_data, vbo.fdots_edituv_data);
770 mesh_batch_cache_discard_batch(cache, batch_map);
771 break;
772 default:
773 BLI_assert(0);
774 }
775}
776
777static void mesh_buffer_list_clear(MeshBufferList *mbuflist)
778{
779 gpu::VertBuf **vbos = (gpu::VertBuf **)&mbuflist->vbo;
780 gpu::IndexBuf **ibos = (gpu::IndexBuf **)&mbuflist->ibo;
781 for (int i = 0; i < sizeof(mbuflist->vbo) / sizeof(void *); i++) {
783 }
784 for (int i = 0; i < sizeof(mbuflist->ibo) / sizeof(void *); i++) {
786 }
787}
788
789static void mesh_buffer_cache_clear(MeshBufferCache *mbc)
790{
792
793 mbc->loose_geom = {};
794 mbc->face_sorted = {};
795}
796
797static void mesh_batch_cache_free_subdiv_cache(MeshBatchCache &cache)
798{
799 if (cache.subdiv_cache) {
801 MEM_delete(cache.subdiv_cache);
802 cache.subdiv_cache = nullptr;
803 }
804}
805
806static void mesh_batch_cache_clear(MeshBatchCache &cache)
807{
808 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
810 }
811
812 for (int i = 0; i < cache.mat_len; i++) {
814 }
815 cache.tris_per_mat = {};
816
817 for (int i = 0; i < sizeof(cache.batch) / sizeof(void *); i++) {
818 gpu::Batch **batch = (gpu::Batch **)&cache.batch;
820 }
821
824 cache.surface_per_mat = {};
825 cache.mat_len = 0;
826
827 cache.batch_ready = (DRWBatchFlag)0;
829
831}
832
833void DRW_mesh_batch_cache_free(void *batch_cache)
834{
835 MeshBatchCache *cache = static_cast<MeshBatchCache *>(batch_cache);
837 MEM_delete(cache);
838}
839
842/* ---------------------------------------------------------------------- */
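/* Batch request API.
 *
 * The getters below only record what is needed (cd_needed, attr_needed, batch_requested) and
 * return batches that are actually filled later in DRW_mesh_batch_cache_create_requested(). */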
846static void texpaint_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
847{
848 DRW_MeshCDMask cd_needed;
849 mesh_cd_layers_type_clear(&cd_needed);
850 mesh_cd_calc_active_uv_layer(object, mesh, cd_needed);
851
852 BLI_assert(cd_needed.uv != 0 &&
853 "No uv layer available in texpaint, but batches requested anyway!");
854
855 mesh_cd_calc_active_mask_uv_layer(object, mesh, cd_needed);
856 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
857}
858
859static void request_active_and_default_color_attributes(const Object &object,
860 const Mesh &mesh,
861 DRW_Attributes &attributes)
862{
863 const Mesh &me_final = editmesh_final_or_this(object, mesh);
864 const CustomData &cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
865 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
866
867 auto request_color_attribute = [&](const char *name) {
868 if (name) {
869 int layer_index;
870 eCustomDataType type;
871 if (drw_custom_data_match_attribute(cd_vdata, name, &layer_index, &type)) {
872 drw_attributes_add_request(&attributes, name, type, layer_index, bke::AttrDomain::Point);
873 }
874 else if (drw_custom_data_match_attribute(cd_ldata, name, &layer_index, &type)) {
875 drw_attributes_add_request(&attributes, name, type, layer_index, bke::AttrDomain::Corner);
876 }
877 }
878 };
879
880 request_color_attribute(me_final.active_color_attribute);
881 request_color_attribute(me_final.default_color_attribute);
882}
883
890
897
898gpu::Batch *DRW_mesh_batch_cache_get_surface(Mesh &mesh)
899{
900 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
902
903 return cache.batch.surface;
904}
905
906gpu::Batch *DRW_mesh_batch_cache_get_loose_edges(Mesh &mesh)
907{
908 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
910 if (cache.no_loose_wire) {
911 return nullptr;
912 }
913
914 return DRW_batch_request(&cache.batch.loose_edges);
915}
916
923
924gpu::Batch *DRW_mesh_batch_cache_get_edge_detection(Mesh &mesh, bool *r_is_manifold)
925{
926 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
928 /* Even if is_manifold is not correct (not updated),
929 * the default (not manifold) is just the worst case. */
930 if (r_is_manifold) {
931 *r_is_manifold = cache.is_manifold;
932 }
934}
935
942
949
950void DRW_mesh_get_attributes(const Object &object,
951 const Mesh &mesh,
952 const GPUMaterial *const *gpumat_array,
953 int gpumat_array_len,
954 DRW_Attributes *r_attrs,
955 DRW_MeshCDMask *r_cd_needed)
956{
957 DRW_Attributes attrs_needed;
958 drw_attributes_clear(&attrs_needed);
960 object, mesh, gpumat_array, gpumat_array_len, &attrs_needed);
961
962 if (r_attrs) {
963 *r_attrs = attrs_needed;
964 }
965
966 if (r_cd_needed) {
967 *r_cd_needed = cd_needed;
968 }
969}
970
971gpu::Batch **DRW_mesh_batch_cache_get_surface_shaded(Object &object,
972 Mesh &mesh,
973 GPUMaterial **gpumat_array,
974 uint gpumat_array_len)
975{
976 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
977 DRW_Attributes attrs_needed;
978 drw_attributes_clear(&attrs_needed);
980 object, mesh, gpumat_array, gpumat_array_len, &attrs_needed);
981
982 BLI_assert(gpumat_array_len == cache.mat_len);
983
984 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
985 drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
987 return cache.surface_per_mat.data();
988}
989
990gpu::Batch **DRW_mesh_batch_cache_get_surface_texpaint(Object &object, Mesh &mesh)
991{
992 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
993 texpaint_request_active_uv(cache, object, mesh);
995 return cache.surface_per_mat.data();
996}
997
998gpu::Batch *DRW_mesh_batch_cache_get_surface_texpaint_single(Object &object, Mesh &mesh)
999{
1000 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1001 texpaint_request_active_uv(cache, object, mesh);
1003 return cache.batch.surface;
1004}
1005
1006gpu::Batch *DRW_mesh_batch_cache_get_surface_vertpaint(Object &object, Mesh &mesh)
1007{
1008 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1009
1010 DRW_Attributes attrs_needed{};
1011 request_active_and_default_color_attributes(object, mesh, attrs_needed);
1012
1013 drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
1014
1016 return cache.batch.surface;
1017}
1018
1019gpu::Batch *DRW_mesh_batch_cache_get_surface_sculpt(Object &object, Mesh &mesh)
1020{
1021 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1022
1023 DRW_Attributes attrs_needed{};
1024 request_active_and_default_color_attributes(object, mesh, attrs_needed);
1025
1026 drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
1027
1029 return cache.batch.surface;
1030}
1031
1032int DRW_mesh_material_count_get(const Object &object, const Mesh &mesh)
1033{
1034 return mesh_render_mat_len_get(object, mesh);
1035}
1036
1047
1057
1060/* ---------------------------------------------------------------------- */
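/* Direct access to the position VBO of the final evaluated mesh; the surface batch is requested
 * first so that the buffer is guaranteed to be filled. */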
1064gpu::VertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(Mesh &mesh)
1065{
1066 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1067 /* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
1069
1070 DRW_vbo_request(nullptr, &cache.final.buff.vbo.pos);
1071 return cache.final.buff.vbo.pos;
1072}
1073
1076/* ---------------------------------------------------------------------- */
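/* Edit-mode batches (triangles, vertices, edges, vertex/loop normals, face dots, skin roots). */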
1086
1093
1100
1107
1114
1121
1128
1131/* ---------------------------------------------------------------------- */
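/* Batches with per-element select-id buffers (triangles, face dots, vertices, edges). */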
1141
1148
1155
1162
1165/* ---------------------------------------------------------------------- */
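/* UV editor batches: UV faces, edges, verts, face dots and the area/angle stretch overlays. */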
1169static void edituv_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
1170{
1171 DRW_MeshCDMask cd_needed;
1172 mesh_cd_layers_type_clear(&cd_needed);
1173 mesh_cd_calc_active_uv_layer(object, mesh, cd_needed);
1174 mesh_cd_calc_edit_uv_layer(mesh, &cd_needed);
1175
1176 BLI_assert(cd_needed.edit_uv != 0 &&
1177 "No uv layer available in edituv, but batches requested anyway!");
1178
1179 mesh_cd_calc_active_mask_uv_layer(object, mesh, cd_needed);
1180 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
1181}
1182
1183gpu::Batch *DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Object &object,
1184 Mesh &mesh,
1185 float **tot_area,
1186 float **tot_uv_area)
1187{
1188 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1189 edituv_request_active_uv(cache, object, mesh);
1191
1192 if (tot_area != nullptr) {
1193 *tot_area = &cache.tot_area;
1194 }
1195 if (tot_uv_area != nullptr) {
1196 *tot_uv_area = &cache.tot_uv_area;
1197 }
1199}
1200
1208
1209gpu::Batch *DRW_mesh_batch_cache_get_edituv_faces(Object &object, Mesh &mesh)
1210{
1211 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1212 edituv_request_active_uv(cache, object, mesh);
1214 return DRW_batch_request(&cache.batch.edituv_faces);
1215}
1216
1217gpu::Batch *DRW_mesh_batch_cache_get_edituv_edges(Object &object, Mesh &mesh)
1218{
1219 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1220 edituv_request_active_uv(cache, object, mesh);
1222 return DRW_batch_request(&cache.batch.edituv_edges);
1223}
1224
1225gpu::Batch *DRW_mesh_batch_cache_get_edituv_verts(Object &object, Mesh &mesh)
1226{
1227 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1228 edituv_request_active_uv(cache, object, mesh);
1230 return DRW_batch_request(&cache.batch.edituv_verts);
1231}
1232
1233gpu::Batch *DRW_mesh_batch_cache_get_edituv_facedots(Object &object, Mesh &mesh)
1234{
1235 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1236 edituv_request_active_uv(cache, object, mesh);
1238 return DRW_batch_request(&cache.batch.edituv_fdots);
1239}
1240
1241gpu::Batch *DRW_mesh_batch_cache_get_uv_edges(Object &object, Mesh &mesh)
1242{
1243 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1244 edituv_request_active_uv(cache, object, mesh);
1246 return DRW_batch_request(&cache.batch.wire_loops_uvs);
1247}
1248
1249gpu::Batch *DRW_mesh_batch_cache_get_surface_edges(Object &object, Mesh &mesh)
1250{
1251 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1252 texpaint_request_active_uv(cache, object, mesh);
1254 return DRW_batch_request(&cache.batch.wire_loops);
1255}
1256
1259/* ---------------------------------------------------------------------- */
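/* Aging of unused caches (DRW_mesh_batch_cache_free_old) and generation of all requested
 * batches (DRW_mesh_batch_cache_create_requested). */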
1263void DRW_mesh_batch_cache_free_old(Mesh *mesh, int ctime)
1264{
1265 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh->runtime->batch_cache);
1266
1267 if (cache == nullptr) {
1268 return;
1269 }
1270
1272 cache->lastmatch = ctime;
1273 }
1274
1276 cache->lastmatch = ctime;
1277 }
1278
1279 if (ctime - cache->lastmatch > U.vbotimeout) {
1281 }
1282
1285}
1286
1287static void drw_add_attributes_vbo(gpu::Batch *batch,
1288 MeshBufferList *mbuflist,
1289 DRW_Attributes *attr_used)
1290{
1291 for (int i = 0; i < attr_used->num_requests; i++) {
1292 DRW_vbo_request(batch, &mbuflist->vbo.attr[i]);
1293 }
1294}
1295
1296#ifndef NDEBUG
1297/* Sanity check function to test if all requested batches are available. */
1298static void drw_mesh_batch_cache_check_available(TaskGraph &task_graph, Mesh &mesh)
1299{
1300 MeshBatchCache *cache = mesh_batch_cache_get(mesh);
1301 /* Make sure all requested batches have been setup. */
1302 /* NOTE: The next line creates a different scheduling than in release builds, which can lead
1303 * to issues (see #77867, where we needed to disable this function in order to debug what was
1304 * happening in release builds). */
1305 BLI_task_graph_work_and_wait(&task_graph);
1306 for (int i = 0; i < MBC_BATCH_LEN; i++) {
1307 BLI_assert(!DRW_batch_requested(((gpu::Batch **)&cache->batch)[i], (GPUPrimType)0));
1308 }
1309 for (int i = 0; i < MBC_VBO_LEN; i++) {
1311 }
1312 for (int i = 0; i < MBC_IBO_LEN; i++) {
1314 }
1315 for (int i = 0; i < MBC_VBO_LEN; i++) {
1317 }
1318 for (int i = 0; i < MBC_IBO_LEN; i++) {
1320 }
1321 for (int i = 0; i < MBC_VBO_LEN; i++) {
1323 }
1324 for (int i = 0; i < MBC_IBO_LEN; i++) {
1326 }
1327}
1328#endif
1329
1330void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
1331 Object &ob,
1332 Mesh &mesh,
1333 const Scene &scene,
1334 const bool is_paint_mode,
1335 const bool use_hide)
1336{
1337 const ToolSettings *ts = scene.toolsettings;
1338
1339 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1340 bool cd_uv_update = false;
1341
1342 /* Early out */
1343 if (cache.batch_requested == 0) {
1344#ifndef NDEBUG
1345 drw_mesh_batch_cache_check_available(task_graph, mesh);
1346#endif
1347 return;
1348 }
1349
1350#ifndef NDEBUG
1351 /* Map the index of a buffer to a flag containing all batches that use it. */
1352 Map<int, DRWBatchFlag> batches_that_use_buffer_local;
1353
1354 auto assert_deps_valid = [&](DRWBatchFlag batch_flag, Span<int> used_buffer_indices) {
1355 for (const int buffer_index : used_buffer_indices) {
1356 batches_that_use_buffer_local.add_or_modify(
1357 buffer_index,
1358 [&](DRWBatchFlag *value) { *value = batch_flag; },
1359 [&](DRWBatchFlag *value) { *value |= batch_flag; });
1360 BLI_assert(batches_that_use_buffer(buffer_index) & batch_flag);
1361 }
1362 };
1363#else
1364 auto assert_deps_valid = [&](DRWBatchFlag /*batch_flag*/, Span<int> /*used_buffer_indices*/) {};
1365
1366#endif
1367
1368 /* Sanity check. */
1369 if ((mesh.runtime->edit_mesh != nullptr) && (ob.mode & OB_MODE_EDIT)) {
1371 }
1372
1373 const bool is_editmode = (mesh.runtime->edit_mesh != nullptr) &&
1374 (BKE_object_get_editmesh_eval_final(&ob) != nullptr) &&
1376
1377 /* This could be set for paint mode too; currently it's only used for edit-mode. */
1378 const bool edit_mode_active = is_editmode && DRW_object_is_in_edit_mode(&ob);
1379
1380 DRWBatchFlag batch_requested = cache.batch_requested;
1381 cache.batch_requested = (DRWBatchFlag)0;
1382
1383 if (batch_requested & MBC_SURFACE_WEIGHTS) {
1384 /* Check vertex weights. */
1385 if ((cache.batch.surface_weights != nullptr) && (ts != nullptr)) {
1386 DRW_MeshWeightState wstate;
1387 BLI_assert(ob.type == OB_MESH);
1388 drw_mesh_weight_state_extract(ob, mesh, *ts, is_paint_mode, &wstate);
1392 }
1393 }
1394
1395 if (batch_requested &
1398 {
1399 /* Modifiers will only generate an orco layer if the mesh is deformed. */
1400 if (cache.cd_needed.orco != 0) {
1401 /* Orco is always extracted from final mesh. */
1402 const Mesh *me_final = (mesh.runtime->edit_mesh) ? BKE_object_get_editmesh_eval_final(&ob) :
1403 &mesh;
1404 if (CustomData_get_layer(&me_final->vert_data, CD_ORCO) == nullptr) {
1405 /* Skip orco calculation */
1406 cache.cd_needed.orco = 0;
1407 }
1408 }
1409
1410 /* Verify that all surface batches have needed attribute layers.
1411 */
1412 /* TODO(fclem): We could be a bit smarter here and only do it per
1413 * material. */
1414 bool cd_overlap = mesh_cd_layers_type_overlap(cache.cd_used, cache.cd_needed);
1415 bool attr_overlap = drw_attributes_overlap(&cache.attr_used, &cache.attr_needed);
1416 if (cd_overlap == false || attr_overlap == false) {
1417 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
1418 if ((cache.cd_used.uv & cache.cd_needed.uv) != cache.cd_needed.uv) {
1419 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
1420 cd_uv_update = true;
1421 }
1422 if ((cache.cd_used.tan & cache.cd_needed.tan) != cache.cd_needed.tan ||
1423 cache.cd_used.tan_orco != cache.cd_needed.tan_orco)
1424 {
1425 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.tan);
1426 }
1427 if (cache.cd_used.orco != cache.cd_needed.orco) {
1428 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.orco);
1429 }
1430 if (cache.cd_used.sculpt_overlays != cache.cd_needed.sculpt_overlays) {
1431 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.sculpt_data);
1432 }
1433 if (!drw_attributes_overlap(&cache.attr_used, &cache.attr_needed)) {
1434 for (int i = 0; i < GPU_MAX_ATTR; i++) {
1435 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.attr[i]);
1436 }
1437 }
1438 }
1439 /* We can't discard batches at this point as they have been
1440 * referenced for drawing. Just clear them in place. */
1441 for (int i = 0; i < cache.mat_len; i++) {
1443 }
1445 cache.batch_ready &= ~(MBC_SURFACE);
1446
1448 drw_attributes_merge(&cache.attr_used, &cache.attr_needed, mesh.runtime->render_mutex);
1449 }
1452
1454 &cache.attr_used_over_time, &cache.attr_needed, mesh.runtime->render_mutex);
1456 }
1457
1458 if (batch_requested & MBC_EDITUV) {
1459 /* Discard UV batches if sync_selection changes */
1460 const bool is_uvsyncsel = ts && (ts->uv_flag & UV_SYNC_SELECTION);
1461 if (cd_uv_update || (cache.is_uvsyncsel != is_uvsyncsel)) {
1462 cache.is_uvsyncsel = is_uvsyncsel;
1463 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
1464 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
1465 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_uv);
1466 GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
1467 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
1468 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
1469 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
1470 GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
1471 }
1472 /* We only clear the batches as they may already have been
1473 * referenced. */
1481 cache.batch_ready &= ~MBC_EDITUV;
1482 }
1483 }
1484
1485 /* Second chance to early out */
1486 if ((batch_requested & ~cache.batch_ready) == 0) {
1487#ifndef NDEBUG
1488 drw_mesh_batch_cache_check_available(task_graph, mesh);
1489#endif
1490 return;
1491 }
1492
1493 /* TODO(pablodp606): This always updates the sculpt normals for regular drawing (non-pbvh::Tree).
1494 * This makes tools that sample the surface per step get wrong normals until a redraw happens.
1495 * Normal updates should be part of the brush loop and only run during the stroke when the
1496 * brush needs to sample the surface. The drawing code should only update the normals
1497 * per redraw when smooth shading is enabled. */
1498 const bool do_update_sculpt_normals = ob.sculpt && bke::object::pbvh_get(ob);
1499 if (do_update_sculpt_normals) {
1501 }
1502
1503 cache.batch_ready |= batch_requested;
1504
1505 bool do_cage = false, do_uvcage = false;
1506 if (is_editmode && edit_mode_active) {
1507 const Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(&ob);
1508 const Mesh *editmesh_eval_cage = BKE_object_get_editmesh_eval_cage(&ob);
1509
1510 do_cage = editmesh_eval_final != editmesh_eval_cage;
1511 do_uvcage = !(editmesh_eval_final->runtime->is_original_bmesh &&
1512 editmesh_eval_final->runtime->wrapper_type == ME_WRAPPER_TYPE_BMESH);
1513 }
1514
1515 const bool do_subdivision = BKE_subsurf_modifier_has_gpu_subdiv(&mesh);
1516
1517 MeshBufferList *mbuflist = &cache.final.buff;
1518
1519 /* Initialize batches and request VBO's & IBO's. */
1520 assert_deps_valid(MBC_SURFACE,
1521 {BUFFER_INDEX(ibo.tris),
1522 BUFFER_INDEX(vbo.nor),
1523 BUFFER_INDEX(vbo.pos),
1524 BUFFER_INDEX(vbo.uv),
1525 BUFFER_INDEX(vbo.attr[0]),
1526 BUFFER_INDEX(vbo.attr[1]),
1527 BUFFER_INDEX(vbo.attr[2]),
1528 BUFFER_INDEX(vbo.attr[3]),
1529 BUFFER_INDEX(vbo.attr[4]),
1530 BUFFER_INDEX(vbo.attr[5]),
1531 BUFFER_INDEX(vbo.attr[6]),
1532 BUFFER_INDEX(vbo.attr[7]),
1533 BUFFER_INDEX(vbo.attr[8]),
1534 BUFFER_INDEX(vbo.attr[9]),
1535 BUFFER_INDEX(vbo.attr[10]),
1536 BUFFER_INDEX(vbo.attr[11]),
1537 BUFFER_INDEX(vbo.attr[12]),
1538 BUFFER_INDEX(vbo.attr[13]),
1539 BUFFER_INDEX(vbo.attr[14])});
1541 DRW_ibo_request(cache.batch.surface, &mbuflist->ibo.tris);
1542 /* Order matters. First ones override latest VBO's attributes. */
1543 DRW_vbo_request(cache.batch.surface, &mbuflist->vbo.nor);
1544 DRW_vbo_request(cache.batch.surface, &mbuflist->vbo.pos);
1545 if (cache.cd_used.uv != 0) {
1546 DRW_vbo_request(cache.batch.surface, &mbuflist->vbo.uv);
1547 }
1548 drw_add_attributes_vbo(cache.batch.surface, mbuflist, &cache.attr_used);
1549 }
1550 assert_deps_valid(MBC_ALL_VERTS, {BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.nor)});
1552 DRW_vbo_request(cache.batch.all_verts, &mbuflist->vbo.pos);
1553 DRW_vbo_request(cache.batch.all_verts, &mbuflist->vbo.nor);
1554 }
1555 assert_deps_valid(
1557 {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.sculpt_data)});
1559 DRW_ibo_request(cache.batch.sculpt_overlays, &mbuflist->ibo.tris);
1560 DRW_vbo_request(cache.batch.sculpt_overlays, &mbuflist->vbo.pos);
1562 }
1563 assert_deps_valid(MBC_ALL_EDGES, {BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos)});
1565 DRW_ibo_request(cache.batch.all_edges, &mbuflist->ibo.lines);
1566 DRW_vbo_request(cache.batch.all_edges, &mbuflist->vbo.pos);
1567 }
1568 assert_deps_valid(MBC_LOOSE_EDGES, {BUFFER_INDEX(ibo.lines_loose), BUFFER_INDEX(vbo.pos)});
1570 DRW_ibo_request(cache.batch.loose_edges, &mbuflist->ibo.lines_loose);
1571 DRW_vbo_request(cache.batch.loose_edges, &mbuflist->vbo.pos);
1572 }
1573 assert_deps_valid(MBC_EDGE_DETECTION,
1574 {BUFFER_INDEX(ibo.lines_adjacency), BUFFER_INDEX(vbo.pos)});
1577 DRW_vbo_request(cache.batch.edge_detection, &mbuflist->vbo.pos);
1578 }
1579 assert_deps_valid(MBC_SURFACE_WEIGHTS,
1580 {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.weights)});
1582 DRW_ibo_request(cache.batch.surface_weights, &mbuflist->ibo.tris);
1583 DRW_vbo_request(cache.batch.surface_weights, &mbuflist->vbo.pos);
1584 DRW_vbo_request(cache.batch.surface_weights, &mbuflist->vbo.nor);
1585 DRW_vbo_request(cache.batch.surface_weights, &mbuflist->vbo.weights);
1586 }
1587 assert_deps_valid(
1589 {BUFFER_INDEX(ibo.lines_paint_mask), BUFFER_INDEX(vbo.nor), BUFFER_INDEX(vbo.pos)});
1592 /* Order matters. First ones override latest VBO's attributes. */
1593 DRW_vbo_request(cache.batch.wire_loops, &mbuflist->vbo.nor);
1594 DRW_vbo_request(cache.batch.wire_loops, &mbuflist->vbo.pos);
1595 }
1596 assert_deps_valid(MBC_WIRE_EDGES,
1597 {BUFFER_INDEX(ibo.lines),
1598 BUFFER_INDEX(vbo.nor),
1599 BUFFER_INDEX(vbo.pos),
1600 BUFFER_INDEX(vbo.edge_fac)});
1602 DRW_ibo_request(cache.batch.wire_edges, &mbuflist->ibo.lines);
1603 DRW_vbo_request(cache.batch.wire_edges, &mbuflist->vbo.nor);
1604 DRW_vbo_request(cache.batch.wire_edges, &mbuflist->vbo.pos);
1605 DRW_vbo_request(cache.batch.wire_edges, &mbuflist->vbo.edge_fac);
1606 }
1607 assert_deps_valid(MBC_WIRE_LOOPS_UVS, {BUFFER_INDEX(ibo.edituv_lines), BUFFER_INDEX(vbo.uv)});
1610 /* For paint overlay. Active layer should have been queried. */
1611 if (cache.cd_used.uv != 0) {
1612 DRW_vbo_request(cache.batch.wire_loops_uvs, &mbuflist->vbo.uv);
1613 }
1614 }
1615 assert_deps_valid(
1617 {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.mesh_analysis)});
1619 DRW_ibo_request(cache.batch.edit_mesh_analysis, &mbuflist->ibo.tris);
1620 DRW_vbo_request(cache.batch.edit_mesh_analysis, &mbuflist->vbo.pos);
1622 }
1623
1624 /* Per Material */
1625 assert_deps_valid(
1627 {BUFFER_INDEX(vbo.nor), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.uv),
1628 BUFFER_INDEX(vbo.tan), BUFFER_INDEX(vbo.orco), BUFFER_INDEX(vbo.attr[0]),
1629 BUFFER_INDEX(vbo.attr[1]), BUFFER_INDEX(vbo.attr[2]), BUFFER_INDEX(vbo.attr[3]),
1630 BUFFER_INDEX(vbo.attr[4]), BUFFER_INDEX(vbo.attr[5]), BUFFER_INDEX(vbo.attr[6]),
1631 BUFFER_INDEX(vbo.attr[7]), BUFFER_INDEX(vbo.attr[8]), BUFFER_INDEX(vbo.attr[9]),
1632 BUFFER_INDEX(vbo.attr[10]), BUFFER_INDEX(vbo.attr[11]), BUFFER_INDEX(vbo.attr[12]),
1633 BUFFER_INDEX(vbo.attr[13]), BUFFER_INDEX(vbo.attr[14])});
1634 assert_deps_valid(MBC_SURFACE_PER_MAT, {TRIS_PER_MAT_INDEX});
1635 for (int i = 0; i < cache.mat_len; i++) {
1637 DRW_ibo_request(cache.surface_per_mat[i], &cache.tris_per_mat[i]);
1638 /* Order matters. First ones override latest VBO's attributes. */
1639 DRW_vbo_request(cache.surface_per_mat[i], &mbuflist->vbo.nor);
1640 DRW_vbo_request(cache.surface_per_mat[i], &mbuflist->vbo.pos);
1641 if (cache.cd_used.uv != 0) {
1642 DRW_vbo_request(cache.surface_per_mat[i], &mbuflist->vbo.uv);
1643 }
1644 if ((cache.cd_used.tan != 0) || (cache.cd_used.tan_orco != 0)) {
1645 DRW_vbo_request(cache.surface_per_mat[i], &mbuflist->vbo.tan);
1646 }
1647 if (cache.cd_used.orco != 0) {
1648 DRW_vbo_request(cache.surface_per_mat[i], &mbuflist->vbo.orco);
1649 }
1650 drw_add_attributes_vbo(cache.surface_per_mat[i], mbuflist, &cache.attr_used);
1651 }
1652 }
1653
1654 mbuflist = (do_cage) ? &cache.cage.buff : &cache.final.buff;
1655
1656 /* Edit Mesh */
1657 assert_deps_valid(MBC_EDIT_TRIANGLES,
1658 {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.edit_data)});
1660 DRW_ibo_request(cache.batch.edit_triangles, &mbuflist->ibo.tris);
1661 DRW_vbo_request(cache.batch.edit_triangles, &mbuflist->vbo.pos);
1663 }
1664 assert_deps_valid(
1666 {BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.edit_data)});
1668 DRW_ibo_request(cache.batch.edit_vertices, &mbuflist->ibo.points);
1669 DRW_vbo_request(cache.batch.edit_vertices, &mbuflist->vbo.pos);
1670 DRW_vbo_request(cache.batch.edit_vertices, &mbuflist->vbo.edit_data);
1671 if (!do_subdivision || do_cage) {
1672 /* For GPU subdivision, vertex normals are included in the `pos` VBO. */
1673 DRW_vbo_request(cache.batch.edit_vertices, &mbuflist->vbo.vnor);
1674 }
1675 }
1676 assert_deps_valid(MBC_EDIT_EDGES,
1677 {BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.edit_data)});
1679 DRW_ibo_request(cache.batch.edit_edges, &mbuflist->ibo.lines);
1680 DRW_vbo_request(cache.batch.edit_edges, &mbuflist->vbo.pos);
1681 DRW_vbo_request(cache.batch.edit_edges, &mbuflist->vbo.edit_data);
1682 if (!do_subdivision || do_cage) {
1683 /* For GPU subdivision, vertex normals are included in the `pos` VBO. */
1684 DRW_vbo_request(cache.batch.edit_edges, &mbuflist->vbo.vnor);
1685 }
1686 }
1687 assert_deps_valid(MBC_EDIT_VNOR,
1688 {BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.vnor)});
1690 DRW_ibo_request(cache.batch.edit_vnor, &mbuflist->ibo.points);
1691 DRW_vbo_request(cache.batch.edit_vnor, &mbuflist->vbo.pos);
1692 if (!do_subdivision) {
1693 /* For GPU subdivision, vertex normals are included in the `pos` VBO. */
1694 DRW_vbo_request(cache.batch.edit_vnor, &mbuflist->vbo.vnor);
1695 }
1696 }
1697 assert_deps_valid(MBC_EDIT_LNOR,
1698 {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.nor)});
1700 DRW_ibo_request(cache.batch.edit_lnor, &mbuflist->ibo.tris);
1701 DRW_vbo_request(cache.batch.edit_lnor, &mbuflist->vbo.pos);
1702 DRW_vbo_request(cache.batch.edit_lnor, &mbuflist->vbo.nor);
1703 }
1704 assert_deps_valid(
1706 {BUFFER_INDEX(ibo.fdots), BUFFER_INDEX(vbo.fdots_pos), BUFFER_INDEX(vbo.fdots_nor)});
1708 DRW_ibo_request(cache.batch.edit_fdots, &mbuflist->ibo.fdots);
1709 DRW_vbo_request(cache.batch.edit_fdots, &mbuflist->vbo.fdots_pos);
1710 DRW_vbo_request(cache.batch.edit_fdots, &mbuflist->vbo.fdots_nor);
1711 }
1712 assert_deps_valid(MBC_SKIN_ROOTS, {BUFFER_INDEX(vbo.skin_roots)});
1715 }
1716
1717 /* Selection */
1718 assert_deps_valid(MBC_EDIT_SELECTION_VERTS,
1719 {BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.vert_idx)});
1724 }
1725 assert_deps_valid(MBC_EDIT_SELECTION_EDGES,
1726 {BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.edge_idx)});
1731 }
1732 assert_deps_valid(MBC_EDIT_SELECTION_FACES,
1733 {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.face_idx)});
1738 }
1739 assert_deps_valid(
1741 {BUFFER_INDEX(ibo.fdots), BUFFER_INDEX(vbo.fdots_pos), BUFFER_INDEX(vbo.fdot_idx)});
1746 }
1747
1753 mbuflist = (do_uvcage) ? &cache.uv_cage.buff : &cache.final.buff;
1754
1755 /* Edit UV */
1756 assert_deps_valid(
1758 {BUFFER_INDEX(ibo.edituv_tris), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
1761 DRW_vbo_request(cache.batch.edituv_faces, &mbuflist->vbo.uv);
1763 }
1764 assert_deps_valid(MBC_EDITUV_FACES_STRETCH_AREA,
1765 {BUFFER_INDEX(ibo.edituv_tris),
1766 BUFFER_INDEX(vbo.uv),
1767 BUFFER_INDEX(vbo.edituv_data),
1768 BUFFER_INDEX(vbo.edituv_stretch_area)});
1774 }
1775 assert_deps_valid(MBC_EDITUV_FACES_STRETCH_ANGLE,
1776 {BUFFER_INDEX(ibo.edituv_tris),
1777 BUFFER_INDEX(vbo.uv),
1778 BUFFER_INDEX(vbo.edituv_data),
1779 BUFFER_INDEX(vbo.edituv_stretch_angle)});
1785 }
1786 assert_deps_valid(
1788 {BUFFER_INDEX(ibo.edituv_lines), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
1791 DRW_vbo_request(cache.batch.edituv_edges, &mbuflist->vbo.uv);
1793 }
1794 assert_deps_valid(
1796 {BUFFER_INDEX(ibo.edituv_points), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
1799 DRW_vbo_request(cache.batch.edituv_verts, &mbuflist->vbo.uv);
1801 }
1802 assert_deps_valid(MBC_EDITUV_FACEDOTS,
1803 {BUFFER_INDEX(ibo.edituv_fdots),
1804 BUFFER_INDEX(vbo.fdots_uv),
1805 BUFFER_INDEX(vbo.fdots_edituv_data)});
1808 DRW_vbo_request(cache.batch.edituv_fdots, &mbuflist->vbo.fdots_uv);
1810 }
1811 assert_deps_valid(
1813 {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.attr_viewer)});
1818 }
1819
1820#ifndef NDEBUG
1821 auto assert_final_deps_valid = [&](const int buffer_index) {
1822 BLI_assert(batches_that_use_buffer(buffer_index) ==
1823 batches_that_use_buffer_local.lookup(buffer_index));
1824 };
1825 assert_final_deps_valid(BUFFER_INDEX(vbo.nor));
1826 assert_final_deps_valid(BUFFER_INDEX(vbo.pos));
1827 assert_final_deps_valid(BUFFER_INDEX(vbo.uv));
1828 assert_final_deps_valid(BUFFER_INDEX(vbo.sculpt_data));
1829 assert_final_deps_valid(BUFFER_INDEX(vbo.weights));
1830 assert_final_deps_valid(BUFFER_INDEX(vbo.edge_fac));
1831 assert_final_deps_valid(BUFFER_INDEX(vbo.mesh_analysis));
1832 assert_final_deps_valid(BUFFER_INDEX(vbo.tan));
1833 assert_final_deps_valid(BUFFER_INDEX(vbo.orco));
1834 assert_final_deps_valid(BUFFER_INDEX(vbo.edit_data));
1835 assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_pos));
1836 assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_nor));
1837 assert_final_deps_valid(BUFFER_INDEX(vbo.skin_roots));
1838 assert_final_deps_valid(BUFFER_INDEX(vbo.vert_idx));
1839 assert_final_deps_valid(BUFFER_INDEX(vbo.edge_idx));
1840 assert_final_deps_valid(BUFFER_INDEX(vbo.face_idx));
1841 assert_final_deps_valid(BUFFER_INDEX(vbo.fdot_idx));
1842 assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_data));
1843 assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_stretch_area));
1844 assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_stretch_angle));
1845 assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_uv));
1846 assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_edituv_data));
1847 for (const int i : IndexRange(GPU_MAX_ATTR)) {
1848 assert_final_deps_valid(BUFFER_INDEX(vbo.attr[i]));
1849 }
1850 assert_final_deps_valid(BUFFER_INDEX(vbo.attr_viewer));
1851 assert_final_deps_valid(BUFFER_INDEX(vbo.vnor));
1852
1853 assert_final_deps_valid(BUFFER_INDEX(ibo.tris));
1854 assert_final_deps_valid(BUFFER_INDEX(ibo.lines));
1855 assert_final_deps_valid(BUFFER_INDEX(ibo.lines_loose));
1856 assert_final_deps_valid(BUFFER_INDEX(ibo.lines_adjacency));
1857 assert_final_deps_valid(BUFFER_INDEX(ibo.lines_paint_mask));
1858 assert_final_deps_valid(BUFFER_INDEX(ibo.points));
1859 assert_final_deps_valid(BUFFER_INDEX(ibo.fdots));
1860 assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_tris));
1861 assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_lines));
1862 assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_points));
1863 assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_fdots));
1864
1865 assert_final_deps_valid(TRIS_PER_MAT_INDEX);
1866#endif
1867
1868 if (do_uvcage) {
1870 cache,
1871 cache.uv_cage,
1872 ob,
1873 mesh,
1874 is_editmode,
1875 is_paint_mode,
1876 edit_mode_active,
1877 ob.object_to_world(),
1878 false,
1879 true,
1880 scene,
1881 ts,
1882 true);
1883 }
1884
1885 if (do_cage) {
1887 cache,
1888 cache.cage,
1889 ob,
1890 mesh,
1891 is_editmode,
1892 is_paint_mode,
1893 edit_mode_active,
1894 ob.object_to_world(),
1895 false,
1896 false,
1897 scene,
1898 ts,
1899 true);
1900 }
1901
1902 if (do_subdivision) {
1904 mesh,
1905 cache,
1906 cache.final,
1907 is_editmode,
1908 is_paint_mode,
1909 edit_mode_active,
1910 ob.object_to_world(),
1911 true,
1912 false,
1913 do_cage,
1914 ts,
1915 use_hide);
1916 }
1917 else {
1918 /* The subsurf modifier may have been recently removed, or another modifier was added after it,
1919 * so free any potential subdivision cache as it is not needed anymore. */
1921 }
1922
1924 cache,
1925 cache.final,
1926 ob,
1927 mesh,
1928 is_editmode,
1929 is_paint_mode,
1930 edit_mode_active,
1931 ob.object_to_world(),
1932 true,
1933 false,
1934 scene,
1935 ts,
1936 use_hide);
1937
1938 /* Ensure that all requested batches have finished.
1939 * Ideally we want to remove this sync, but there are cases where this doesn't work.
1940 * See #79038 for example.
1941 *
1942 * An idea to improve this is to separate the Object mode from the edit mode draw caches. And
1943 * based on the mode the correct one will be updated. Other option is to look into using
1944 * drw_batch_cache_generate_requested_delayed. */
1945 BLI_task_graph_work_and_wait(&task_graph);
1946#ifndef NDEBUG
1947 drw_mesh_batch_cache_check_available(task_graph, mesh);
1948#endif
1949}
1950
1953} // namespace blender::draw