Blender V5.0
draw_pbvh.cc
Go to the documentation of this file.
/* SPDX-FileCopyrightText: 2024 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */
4
11
12#include "BLI_map.hh"
13#include "BLI_math_geom.h"
15#include "BLI_utildefines.h"
16#include "BLI_vector.hh"
17
18#include "DNA_object_types.h"
19
20#include "BKE_attribute.hh"
22#include "BKE_attribute_math.hh"
23#include "BKE_customdata.hh"
24#include "BKE_mesh.hh"
25#include "BKE_paint.hh"
26#include "BKE_paint_bvh.hh"
27#include "BKE_subdiv_ccg.hh"
28
30
31#include "GPU_batch.hh"
32
33#include "DRW_engine.hh"
34#include "DRW_pbvh.hh"
35#include "DRW_render.hh"
36
37#include "attribute_convert.hh"
38#include "bmesh.hh"
39
40namespace blender {
41
42template<> struct DefaultHash<draw::pbvh::AttributeRequest> {
44 {
45 using namespace draw::pbvh;
46 if (const CustomRequest *request_type = std::get_if<CustomRequest>(&value)) {
47 return get_default_hash(*request_type);
48 }
49 const GenericRequest &attr = std::get<GenericRequest>(value);
50 return get_default_hash(attr);
51 }
52};
53
54} // namespace blender
55
56namespace blender::draw::pbvh {
57
62
76 OrigMeshData(const Mesh &mesh)
77 : active_color(mesh.active_color_attribute),
78 default_color(mesh.default_color_attribute),
81 face_set_default(mesh.face_sets_color_default),
82 face_set_seed(mesh.face_sets_color_seed),
84 {
85 }
86};
87
92class DrawCacheImpl : public DrawCache {
93 struct AttributeData {
103 BitVector<> dirty_nodes;
108 void tag_dirty(const IndexMask &node_mask);
109 };
110
112 BitVector<> use_flat_layout_;
114 Array<int> material_indices_;
115
117 Vector<gpu::IndexBufPtr> lines_ibos_;
119 Vector<gpu::IndexBufPtr> lines_ibos_coarse_;
121 Vector<gpu::IndexBufPtr> tris_ibos_;
123 Vector<gpu::IndexBufPtr> tris_ibos_coarse_;
129
131 Vector<gpu::Batch *> lines_batches_;
133 Vector<gpu::Batch *> lines_batches_coarse_;
140
149 BitVector<> dirty_topology_;
150
151 public:
152 ~DrawCacheImpl() override;
153
154 void tag_positions_changed(const IndexMask &node_mask) override;
155 void tag_visibility_changed(const IndexMask &node_mask) override;
156 void tag_topology_changed(const IndexMask &node_mask) override;
157 void tag_face_sets_changed(const IndexMask &node_mask) override;
158 void tag_masks_changed(const IndexMask &node_mask) override;
159 void tag_attribute_changed(const IndexMask &node_mask, StringRef attribute_name) override;
160
162 const ViewportRequest &request,
163 const IndexMask &nodes_to_update) override;
164
166 const ViewportRequest &request,
167 const IndexMask &nodes_to_update) override;
168
169 Span<int> ensure_material_indices(const Object &object) override;
170
171 private:
176 void free_nodes_with_changed_topology(const bke::pbvh::Tree &pbvh);
177
178 BitSpan ensure_use_flat_layout(const Object &object, const OrigMeshData &orig_mesh_data);
179
180 Span<gpu::VertBufPtr> ensure_attribute_data(const Object &object,
181 const OrigMeshData &orig_mesh_data,
182 const AttributeRequest &attr,
183 const IndexMask &node_mask);
184
185 Span<gpu::IndexBufPtr> ensure_tri_indices(const Object &object,
186 const OrigMeshData &orig_mesh_data,
187 const IndexMask &node_mask,
188 bool coarse);
189
190 Span<gpu::IndexBufPtr> ensure_lines_indices(const Object &object,
191 const OrigMeshData &orig_mesh_data,
192 const IndexMask &node_mask,
193 bool coarse);
194};
195
196void DrawCacheImpl::AttributeData::tag_dirty(const IndexMask &node_mask)
197{
198 this->dirty_nodes.resize(std::max(this->dirty_nodes.size(), node_mask.min_array_size()), false);
199 node_mask.set_bits(this->dirty_nodes);
200}
201
203{
204 if (DrawCacheImpl::AttributeData *data = attribute_vbos_.lookup_ptr(CustomRequest::Position)) {
205 data->tag_dirty(node_mask);
206 }
207 if (DrawCacheImpl::AttributeData *data = attribute_vbos_.lookup_ptr(CustomRequest::Normal)) {
208 data->tag_dirty(node_mask);
209 }
210}
211
213{
214 dirty_topology_.resize(std::max(dirty_topology_.size(), node_mask.min_array_size()), false);
215 node_mask.set_bits(dirty_topology_);
216}
217
219{
222 this->tag_visibility_changed(node_mask);
223}
224
226{
227 if (DrawCacheImpl::AttributeData *data = attribute_vbos_.lookup_ptr(CustomRequest::FaceSet)) {
228 data->tag_dirty(node_mask);
229 }
230}
231
233{
234 if (DrawCacheImpl::AttributeData *data = attribute_vbos_.lookup_ptr(CustomRequest::Mask)) {
235 data->tag_dirty(node_mask);
236 }
237}
238
239void DrawCacheImpl::tag_attribute_changed(const IndexMask &node_mask, StringRef attribute_name)
240{
241 for (const auto &[data_request, data] : attribute_vbos_.items()) {
242 if (const GenericRequest *request = std::get_if<GenericRequest>(&data_request)) {
243 if (*request == attribute_name) {
244 data.tag_dirty(node_mask);
245 }
246 }
247 }
248}
249
250DrawCache &ensure_draw_data(std::unique_ptr<bke::pbvh::DrawCache> &ptr)
251{
252 if (!ptr) {
253 ptr = std::make_unique<DrawCacheImpl>();
254 }
255 return dynamic_cast<DrawCache &>(*ptr);
256}
257
259 const IndexMask &node_mask)
260{
261 IndexMaskMemory memory;
262 const IndexMask mask = IndexMask::from_intersection(node_mask, ibos.index_range(), memory);
263 mask.foreach_index([&](const int i) { ibos[i].reset(); });
264}
265
267 const IndexMask &node_mask)
268{
269 IndexMaskMemory memory;
270 const IndexMask mask = IndexMask::from_intersection(node_mask, vbos.index_range(), memory);
271 mask.foreach_index([&](const int i) { vbos[i].reset(); });
272}
273
275 const IndexMask &node_mask)
276{
277 IndexMaskMemory memory;
278 const IndexMask mask = IndexMask::from_intersection(node_mask, batches.index_range(), memory);
279 mask.foreach_index([&](const int i) { GPU_BATCH_DISCARD_SAFE(batches[i]); });
280}
281
283{
285 "pos", gpu::VertAttrType::SFLOAT_32_32_32);
286 return format;
287}
288
290{
292 "nor", gpu::VertAttrType::SNORM_16_16_16_16);
293 return format;
294}
295
297{
299 gpu::VertAttrType::SFLOAT_32);
300 return format;
301}
302
304{
306 "fset", gpu::VertAttrType::UNORM_8_8_8_8);
307 return format;
308}
309
310static GPUVertFormat attribute_format(const OrigMeshData &orig_mesh_data,
311 const StringRef name,
312 const bke::AttrType data_type)
313{
315
316 bool is_render, is_active;
317 const char *prefix = "a";
318
320 prefix = "c";
321 is_active = orig_mesh_data.active_color == name;
322 is_render = orig_mesh_data.default_color == name;
323 }
324 if (data_type == bke::AttrType::Float2) {
325 prefix = "u";
326 is_active = orig_mesh_data.active_uv_map == name;
327 is_render = orig_mesh_data.default_uv_map == name;
328 }
329
330 DRW_cdlayer_attr_aliases_add(&format, prefix, data_type, name, is_render, is_active);
331 return format;
332}
333
335{
338 return short4(result.x, result.y, result.z, 0);
339}
340
341template<typename T>
343 const Span<int> corner_verts,
344 const Span<T> attribute,
345 const Span<int> face_indices,
346 gpu::VertBuf &vbo)
347{
348 using Converter = AttributeConverter<T>;
349 using VBOType = typename Converter::VBOType;
350 VBOType *data = vbo.data<VBOType>().data();
351 for (const int face : face_indices) {
352 for (const int vert : corner_verts.slice(faces[face])) {
353 *data = Converter::convert(attribute[vert]);
354 data++;
355 }
356 }
357}
358
359template<typename T>
361 const Span<T> attribute,
362 const Span<int> face_indices,
363 gpu::VertBuf &vbo)
364{
365 using Converter = AttributeConverter<T>;
366 using VBOType = typename Converter::VBOType;
367
368 VBOType *data = vbo.data<VBOType>().data();
369 for (const int face : face_indices) {
370 const int face_size = faces[face].size();
371 std::fill_n(data, face_size, Converter::convert(attribute[face]));
372 data += face_size;
373 }
374}
375
376template<typename T>
378 const Span<T> attribute,
379 const Span<int> face_indices,
380 gpu::VertBuf &vbo)
381{
382 using Converter = AttributeConverter<T>;
383 using VBOType = typename Converter::VBOType;
384
385 VBOType *data = vbo.data<VBOType>().data();
386 for (const int face : face_indices) {
387 for (const int corner : faces[face]) {
388 *data = Converter::convert(attribute[corner]);
389 data++;
390 }
391 }
392}
393
394template<typename T> const T &bmesh_cd_vert_get(const BMVert &vert, const int offset)
395{
396 return *static_cast<const T *>(POINTER_OFFSET(vert.head.data, offset));
397}
398
399template<typename T> const T &bmesh_cd_loop_get(const BMLoop &loop, const int offset)
400{
401 return *static_cast<const T *>(POINTER_OFFSET(loop.head.data, offset));
402}
403
404template<typename T> const T &bmesh_cd_face_get(const BMFace &face, const int offset)
405{
406 return *static_cast<const T *>(POINTER_OFFSET(face.head.data, offset));
407}
408
409template<typename T>
410void extract_data_vert_bmesh(const Set<BMFace *, 0> &faces, const int cd_offset, gpu::VertBuf &vbo)
411{
412 using Converter = AttributeConverter<T>;
413 using VBOType = typename Converter::VBOType;
414 VBOType *data = vbo.data<VBOType>().data();
415
416 for (const BMFace *face : faces) {
418 continue;
419 }
420 const BMLoop *l = face->l_first;
421 *data = Converter::convert(bmesh_cd_vert_get<T>(*l->prev->v, cd_offset));
422 data++;
423 *data = Converter::convert(bmesh_cd_vert_get<T>(*l->v, cd_offset));
424 data++;
425 *data = Converter::convert(bmesh_cd_vert_get<T>(*l->next->v, cd_offset));
426 data++;
427 }
428}
429
430template<typename T>
431void extract_data_face_bmesh(const Set<BMFace *, 0> &faces, const int cd_offset, gpu::VertBuf &vbo)
432{
433 using Converter = AttributeConverter<T>;
434 using VBOType = typename Converter::VBOType;
435 VBOType *data = vbo.data<VBOType>().data();
436
437 for (const BMFace *face : faces) {
439 continue;
440 }
441 std::fill_n(data, 3, Converter::convert(bmesh_cd_face_get<T>(*face, cd_offset)));
442 data += 3;
443 }
444}
445
446template<typename T>
448 const int cd_offset,
449 gpu::VertBuf &vbo)
450{
451 using Converter = AttributeConverter<T>;
452 using VBOType = typename Converter::VBOType;
453 VBOType *data = vbo.data<VBOType>().data();
454
455 for (const BMFace *face : faces) {
457 continue;
458 }
459 const BMLoop *l = face->l_first;
460 *data = Converter::convert(bmesh_cd_loop_get<T>(*l->prev, cd_offset));
461 data++;
462 *data = Converter::convert(bmesh_cd_loop_get<T>(*l, cd_offset));
463 data++;
464 *data = Converter::convert(bmesh_cd_loop_get<T>(*l->next, cd_offset));
465 data++;
466 }
467}
468
470{
471 return std::count_if(faces.begin(), faces.end(), [&](const BMFace *face) {
472 return !BM_elem_flag_test_bool(face, BM_ELEM_HIDDEN);
473 });
474}
475
477{
478 free_batches(lines_batches_, lines_batches_.index_range());
479 free_batches(lines_batches_coarse_, lines_batches_coarse_.index_range());
480 for (MutableSpan<gpu::Batch *> batches : tris_batches_.values()) {
481 free_batches(batches, batches.index_range());
482 }
483}
484
485void DrawCacheImpl::free_nodes_with_changed_topology(const bke::pbvh::Tree &pbvh)
486{
487 /* NOTE: Theoretically we shouldn't need to free batches with a changed triangle count, but
488 * currently it's the simplest way to reallocate all the GPU data while keeping everything in a
489 * consistent state. */
490 IndexMaskMemory memory;
491 const IndexMask nodes_to_free = IndexMask::from_bits(dirty_topology_, memory);
492 if (nodes_to_free.is_empty()) {
493 return;
494 }
495
496 dirty_topology_.clear_and_shrink();
497
498 free_ibos(lines_ibos_, nodes_to_free);
499 free_ibos(lines_ibos_coarse_, nodes_to_free);
500 free_ibos(tris_ibos_, nodes_to_free);
501 free_ibos(tris_ibos_coarse_, nodes_to_free);
502 if (pbvh.type() == bke::pbvh::Type::BMesh) {
503 /* For BMesh, VBOs are only filled with data for visible triangles, and topology can also
504 * completely change due to dynamic topology, so VBOs must be rebuilt from scratch. For other
505 * types, actual topology doesn't change, and visibility changes are accounted for by the index
506 * buffers. */
507 for (AttributeData &data : attribute_vbos_.values()) {
508 free_vbos(data.vbos, nodes_to_free);
509 }
510 }
511
512 free_batches(lines_batches_, nodes_to_free);
513 free_batches(lines_batches_coarse_, nodes_to_free);
514 for (MutableSpan<gpu::Batch *> batches : tris_batches_.values()) {
515 free_batches(batches, nodes_to_free);
516 }
517}
518
520 const GPUVertFormat &format,
521 const IndexMask &node_mask,
523{
526 node_mask.foreach_index(GrainSize(64), [&](const int i) {
527 if (!vbos[i]) {
529 }
530 GPU_vertbuf_data_alloc(*vbos[i], nodes[i].corners_num());
531 });
532}
533
535 const GPUVertFormat &format,
536 const BitSpan use_flat_layout,
537 const IndexMask &node_mask,
539{
542 const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
543 node_mask.foreach_index(GrainSize(64), [&](const int i) {
544 if (!vbos[i]) {
546 }
547 const int verts_per_grid = use_flat_layout[i] ? square_i(subdiv_ccg.grid_size - 1) * 4 :
548 square_i(subdiv_ccg.grid_size);
549 const int verts_num = nodes[i].grids().size() * verts_per_grid;
550 GPU_vertbuf_data_alloc(*vbos[i], verts_num);
551 });
552}
553
555 const GPUVertFormat &format,
556 const IndexMask &node_mask,
558{
561 node_mask.foreach_index(GrainSize(64), [&](const int i) {
562 if (!vbos[i]) {
564 }
566 &const_cast<bke::pbvh::BMeshNode &>(nodes[i]));
567 const int verts_num = count_visible_tris_bmesh(faces) * 3;
568 GPU_vertbuf_data_alloc(*vbos[i], verts_num);
569 });
570}
571
572static void update_positions_mesh(const Object &object,
573 const IndexMask &node_mask,
575{
578 const Mesh &mesh = DRW_object_get_data_for_drawing<Mesh>(object);
579 const OffsetIndices<int> faces = mesh.faces();
580 const Span<int> corner_verts = mesh.corner_verts();
581 const Span<float3> vert_positions = bke::pbvh::vert_positions_eval_from_eval(object);
582 ensure_vbos_allocated_mesh(object, position_format(), node_mask, vbos);
583 node_mask.foreach_index(GrainSize(1), [&](const int i) {
585 faces, corner_verts, vert_positions, nodes[i].faces(), *vbos[i]);
586 });
587}
588
589static void update_normals_mesh(const Object &object,
590 const IndexMask &node_mask,
592{
595 const Mesh &mesh = DRW_object_get_data_for_drawing<Mesh>(object);
596 const OffsetIndices<int> faces = mesh.faces();
597 const Span<int> corner_verts = mesh.corner_verts();
598 const Span<float3> vert_normals = bke::pbvh::vert_normals_eval_from_eval(object);
599 const Span<float3> face_normals = bke::pbvh::face_normals_eval_from_eval(object);
600 const bke::AttributeAccessor attributes = mesh.attributes();
601 const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face", bke::AttrDomain::Face);
602 ensure_vbos_allocated_mesh(object, normal_format(), node_mask, vbos);
603 node_mask.foreach_index(GrainSize(1), [&](const int i) {
604 short4 *data = vbos[i]->data<short4>().data();
605
606 for (const int face : nodes[i].faces()) {
607 if (!sharp_faces.is_empty() && sharp_faces[face]) {
608 const int face_size = faces[face].size();
609 std::fill_n(data, face_size, normal_float_to_short(face_normals[face]));
610 data += face_size;
611 }
612 else {
613 for (const int vert : corner_verts.slice(faces[face])) {
614 *data = normal_float_to_short(vert_normals[vert]);
615 data++;
616 }
617 }
618 }
619 });
620}
621
622BLI_NOINLINE static void update_masks_mesh(const Object &object,
623 const OrigMeshData &orig_mesh_data,
624 const IndexMask &node_mask,
626{
629 const Mesh &mesh = DRW_object_get_data_for_drawing<Mesh>(object);
630 const OffsetIndices<int> faces = mesh.faces();
631 const Span<int> corner_verts = mesh.corner_verts();
632 const VArraySpan mask = *orig_mesh_data.attributes.lookup<float>(".sculpt_mask",
634 ensure_vbos_allocated_mesh(object, mask_format(), node_mask, vbos);
635 if (!mask.is_empty()) {
636 node_mask.foreach_index(GrainSize(1), [&](const int i) {
637 float *data = vbos[i]->data<float>().data();
638 for (const int face : nodes[i].faces()) {
639 for (const int vert : corner_verts.slice(faces[face])) {
640 *data = mask[vert];
641 data++;
642 }
643 }
644 });
645 }
646 else {
647 node_mask.foreach_index(GrainSize(64),
648 [&](const int i) { vbos[i]->data<float>().fill(0.0f); });
649 }
650}
651
652BLI_NOINLINE static void update_face_sets_mesh(const Object &object,
653 const OrigMeshData &orig_mesh_data,
654 const IndexMask &node_mask,
656{
659 const Mesh &mesh = DRW_object_get_data_for_drawing<Mesh>(object);
660 const OffsetIndices<int> faces = mesh.faces();
661 const int color_default = orig_mesh_data.face_set_default;
662 const int color_seed = orig_mesh_data.face_set_seed;
663 const VArraySpan face_sets = *orig_mesh_data.attributes.lookup<int>(".sculpt_face_set",
665 ensure_vbos_allocated_mesh(object, face_set_format(), node_mask, vbos);
666 if (!face_sets.is_empty()) {
667 node_mask.foreach_index(GrainSize(1), [&](const int i) {
668 uchar4 *data = vbos[i]->data<uchar4>().data();
669 for (const int face : nodes[i].faces()) {
670 const int id = face_sets[face];
671
672 uchar4 fset_color(UCHAR_MAX);
673 if (id != color_default) {
674 BKE_paint_face_set_overlay_color_get(id, color_seed, fset_color);
675 }
676 else {
677 /* Skip for the default color face set to render it white. */
678 fset_color[0] = fset_color[1] = fset_color[2] = UCHAR_MAX;
679 }
680
681 const int face_size = faces[face].size();
682 std::fill_n(data, face_size, fset_color);
683 data += face_size;
684 }
685 });
686 }
687 else {
688 node_mask.foreach_index(GrainSize(64),
689 [&](const int i) { vbos[i]->data<uchar4>().fill(uchar4(255)); });
690 }
691}
692
694 const OrigMeshData &orig_mesh_data,
695 const IndexMask &node_mask,
696 const StringRef name,
698{
701 const Mesh &mesh = DRW_object_get_data_for_drawing<Mesh>(object);
702 const OffsetIndices<int> faces = mesh.faces();
703 const Span<int> corner_verts = mesh.corner_verts();
704 const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
705 const bke::GAttributeReader attr = attributes.lookup(name);
706 if (!attr || attr.domain == bke::AttrDomain::Edge) {
707 return;
708 }
711 object, attribute_format(orig_mesh_data, name, data_type), node_mask, vbos);
712 node_mask.foreach_index(GrainSize(1), [&](const int i) {
714 using T = decltype(dummy);
715 if constexpr (!std::is_void_v<typename AttributeConverter<T>::VBOType>) {
716 const VArraySpan<T> src = attr.varray.typed<T>();
717 switch (attr.domain) {
718 case bke::AttrDomain::Point:
719 extract_data_vert_mesh<T>(faces, corner_verts, src, nodes[i].faces(), *vbos[i]);
720 break;
721 case bke::AttrDomain::Face:
722 extract_data_face_mesh<T>(faces, src, nodes[i].faces(), *vbos[i]);
723 break;
724 case bke::AttrDomain::Corner:
725 extract_data_corner_mesh<T>(faces, src, nodes[i].faces(), *vbos[i]);
726 break;
727 default:
728 BLI_assert_unreachable();
729 }
730 }
731 });
732 });
733}
734
735BLI_NOINLINE static void fill_positions_grids(const Object &object,
736 const BitSpan use_flat_layout,
737 const IndexMask &node_mask,
739{
742 const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
743 const Span<float3> positions = subdiv_ccg.positions;
744 const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
745 ensure_vbos_allocated_grids(object, position_format(), use_flat_layout, node_mask, vbos);
746 node_mask.foreach_index(GrainSize(1), [&](const int i) {
747 float3 *data = vbos[i]->data<float3>().data();
748 if (use_flat_layout[i]) {
749 const int grid_size_1 = key.grid_size - 1;
750 for (const int grid : nodes[i].grids()) {
751 const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
752 for (int y = 0; y < grid_size_1; y++) {
753 for (int x = 0; x < grid_size_1; x++) {
754 *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y)];
755 data++;
756 *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y)];
757 data++;
758 *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)];
759 data++;
760 *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y + 1)];
761 data++;
762 }
763 }
764 }
765 }
766 else {
767 for (const int grid : nodes[i].grids()) {
768 const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
769 std::copy_n(grid_positions.data(), grid_positions.size(), data);
770 data += grid_positions.size();
771 }
772 }
773 });
774}
775
776BLI_NOINLINE static void fill_normals_grids(const Object &object,
777 const OrigMeshData &orig_mesh_data,
778 const BitSpan use_flat_layout,
779 const IndexMask &node_mask,
781{
784 const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
785 const Span<float3> positions = subdiv_ccg.positions;
786 const Span<float3> normals = subdiv_ccg.normals;
787 const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
788 const Span<int> grid_to_face_map = subdiv_ccg.grid_to_face_map;
789 const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
790 const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face", bke::AttrDomain::Face);
791 ensure_vbos_allocated_grids(object, normal_format(), use_flat_layout, node_mask, vbos);
792 node_mask.foreach_index(GrainSize(1), [&](const int i) {
793 short4 *data = vbos[i]->data<short4>().data();
794
795 if (use_flat_layout[i]) {
796 const int grid_size_1 = key.grid_size - 1;
797 for (const int grid : nodes[i].grids()) {
798 const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
799 const Span<float3> grid_normals = normals.slice(bke::ccg::grid_range(key, grid));
800 if (!sharp_faces.is_empty() && sharp_faces[grid_to_face_map[grid]]) {
801 for (int y = 0; y < grid_size_1; y++) {
802 for (int x = 0; x < grid_size_1; x++) {
803 float3 no;
805 grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y + 1)],
806 grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)],
807 grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y)],
808 grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y)]);
809 std::fill_n(data, 4, normal_float_to_short(no));
810 data += 4;
811 }
812 }
813 }
814 else {
815 for (int y = 0; y < grid_size_1; y++) {
816 for (int x = 0; x < grid_size_1; x++) {
817 std::fill_n(
818 data,
819 4,
821 data += 4;
822 }
823 }
824 }
825 }
826 }
827 else {
828 /* The non-flat VBO layout does not support sharp faces. */
829 for (const int grid : nodes[i].grids()) {
830 for (const float3 &normal : normals.slice(bke::ccg::grid_range(key, grid))) {
831 *data = normal_float_to_short(normal);
832 data++;
833 }
834 }
835 }
836 });
837}
838
839BLI_NOINLINE static void fill_masks_grids(const Object &object,
840 const BitSpan use_flat_layout,
841 const IndexMask &node_mask,
843{
846 const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
847 const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
848 const Span<float> masks = subdiv_ccg.masks;
849 ensure_vbos_allocated_grids(object, mask_format(), use_flat_layout, node_mask, vbos);
850 if (!masks.is_empty()) {
851 node_mask.foreach_index(GrainSize(1), [&](const int i) {
852 float *data = vbos[i]->data<float>().data();
853 if (use_flat_layout[i]) {
854 const int grid_size_1 = key.grid_size - 1;
855 for (const int grid : nodes[i].grids()) {
856 const Span<float> grid_masks = masks.slice(bke::ccg::grid_range(key, grid));
857 for (int y = 0; y < grid_size_1; y++) {
858 for (int x = 0; x < grid_size_1; x++) {
859 *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x, y)];
860 data++;
861 *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x + 1, y)];
862 data++;
863 *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)];
864 data++;
865 *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x, y + 1)];
866 data++;
867 }
868 }
869 }
870 }
871 else {
872 for (const int grid : nodes[i].grids()) {
873 const Span<float> grid_masks = masks.slice(bke::ccg::grid_range(key, grid));
874 std::copy_n(grid_masks.data(), grid_masks.size(), data);
875 data += grid_masks.size();
876 }
877 }
878 });
879 }
880 else {
881 node_mask.foreach_index(GrainSize(64),
882 [&](const int i) { vbos[i]->data<float>().fill(0.0f); });
883 }
884}
885
886BLI_NOINLINE static void fill_face_sets_grids(const Object &object,
887 const OrigMeshData &orig_mesh_data,
888 const BitSpan use_flat_layout,
889 const IndexMask &node_mask,
891{
894 const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
895 const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
896 const int color_default = orig_mesh_data.face_set_default;
897 const int color_seed = orig_mesh_data.face_set_seed;
898 const Span<int> grid_to_face_map = subdiv_ccg.grid_to_face_map;
899 const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
900 ensure_vbos_allocated_grids(object, face_set_format(), use_flat_layout, node_mask, vbos);
901 if (const VArray<int> face_sets = *attributes.lookup<int>(".sculpt_face_set",
903 {
904 const VArraySpan<int> face_sets_span(face_sets);
905 node_mask.foreach_index(GrainSize(1), [&](const int i) {
906 const Span<int> grids = nodes[i].grids();
907 const int verts_per_grid = use_flat_layout[i] ? square_i(key.grid_size - 1) * 4 :
908 square_i(key.grid_size);
909 uchar4 *data = vbos[i]->data<uchar4>().data();
910 for (const int i : grids.index_range()) {
911 uchar4 color{UCHAR_MAX};
912 const int fset = face_sets[grid_to_face_map[grids[i]]];
913 if (fset != color_default) {
915 }
916
917 std::fill_n(data, verts_per_grid, color);
918 data += verts_per_grid;
919 }
920 });
921 }
922 else {
923 node_mask.foreach_index(GrainSize(1),
924 [&](const int i) { vbos[i]->data<uchar4>().fill(uchar4{UCHAR_MAX}); });
925 }
926}
927
929 const IndexMask &node_mask,
931{
934 ensure_vbos_allocated_bmesh(object, position_format(), node_mask, vbos);
935 node_mask.foreach_index(GrainSize(1), [&](const int i) {
936 float3 *data = vbos[i]->data<float3>().data();
937 for (const BMFace *face :
939 {
941 continue;
942 }
943 const BMLoop *l = face->l_first;
944 *data = l->prev->v->co;
945 data++;
946 *data = l->v->co;
947 data++;
948 *data = l->next->v->co;
949 data++;
950 }
951 });
952}
953
954BLI_NOINLINE static void update_normals_bmesh(const Object &object,
955 const IndexMask &node_mask,
957{
960 ensure_vbos_allocated_bmesh(object, normal_format(), node_mask, vbos);
961 node_mask.foreach_index(GrainSize(1), [&](const int i) {
962 short4 *data = vbos[i]->data<short4>().data();
963 for (const BMFace *face :
965 {
967 continue;
968 }
970 const BMLoop *l = face->l_first;
971 *data = normal_float_to_short(l->prev->v->no);
972 data++;
973 *data = normal_float_to_short(l->v->no);
974 data++;
975 *data = normal_float_to_short(l->next->v->no);
976 data++;
977 }
978 else {
979 std::fill_n(data, 3, normal_float_to_short(face->no));
980 data += 3;
981 }
982 }
983 });
984}
985
986BLI_NOINLINE static void update_masks_bmesh(const Object &object,
987 const IndexMask &node_mask,
989{
992 const BMesh &bm = *object.sculpt->bm;
993 const int cd_offset = CustomData_get_offset_named(&bm.vdata, CD_PROP_FLOAT, ".sculpt_mask");
994 ensure_vbos_allocated_bmesh(object, mask_format(), node_mask, vbos);
995 if (cd_offset != -1) {
996 node_mask.foreach_index(GrainSize(1), [&](const int i) {
997 float *data = vbos[i]->data<float>().data();
998 for (const BMFace *face :
1000 {
1001 if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
1002 continue;
1003 }
1004 const BMLoop *l = face->l_first;
1005 *data = bmesh_cd_vert_get<float>(*l->prev->v, cd_offset);
1006 data++;
1007 *data = bmesh_cd_vert_get<float>(*l->v, cd_offset);
1008 data++;
1009 *data = bmesh_cd_vert_get<float>(*l->next->v, cd_offset);
1010 data++;
1011 }
1012 });
1013 }
1014 else {
1015 node_mask.foreach_index(GrainSize(64),
1016 [&](const int i) { vbos[i]->data<float>().fill(0.0f); });
1017 }
1018}
1019
1021 const OrigMeshData &orig_mesh_data,
1022 const IndexMask &node_mask,
1024{
1025 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1027 const BMesh &bm = *object.sculpt->bm;
1028 const int color_default = orig_mesh_data.face_set_default;
1029 const int color_seed = orig_mesh_data.face_set_seed;
1030 const int offset = CustomData_get_offset_named(&bm.pdata, CD_PROP_INT32, ".sculpt_face_set");
1031 ensure_vbos_allocated_bmesh(object, face_set_format(), node_mask, vbos);
1032 if (offset != -1) {
1033 node_mask.foreach_index(GrainSize(1), [&](const int i) {
1034 uchar4 *data = vbos[i]->data<uchar4>().data();
1035 for (const BMFace *face :
1037 {
1038 if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
1039 continue;
1040 }
1041 uchar4 color{UCHAR_MAX};
1042 const int fset = bmesh_cd_face_get<int>(*face, offset);
1043 if (fset != color_default) {
1045 }
1046 std::fill_n(data, 3, color);
1047 data += 3;
1048 }
1049 });
1050 }
1051 else {
1052 node_mask.foreach_index(GrainSize(64),
1053 [&](const int i) { vbos[i]->data<uchar4>().fill(uchar4(255)); });
1054 }
1055}
1056
1058 const OrigMeshData &orig_mesh_data,
1059 const IndexMask &node_mask,
1060 const StringRef name,
1062{
1063 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1065 const BMesh &bm = *object.sculpt->bm;
1067 if (!attr || attr.domain == bke::AttrDomain::Edge) {
1068 return;
1069 }
1071 object, attribute_format(orig_mesh_data, name, attr.type), node_mask, vbos);
1072 node_mask.foreach_index(GrainSize(1), [&](const int i) {
1074 using T = decltype(dummy);
1075 const auto &faces = BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i]));
1076 if constexpr (!std::is_void_v<typename AttributeConverter<T>::VBOType>) {
1077 switch (attr.domain) {
1078 case bke::AttrDomain::Point:
1079 extract_data_vert_bmesh<T>(faces, attr.offset, *vbos[i]);
1080 break;
1081 case bke::AttrDomain::Face:
1082 extract_data_face_bmesh<T>(faces, attr.offset, *vbos[i]);
1083 break;
1084 case bke::AttrDomain::Corner:
1085 extract_data_corner_bmesh<T>(faces, attr.offset, *vbos[i]);
1086 break;
1087 default:
1088 BLI_assert_unreachable();
1089 }
1090 }
1091 });
1092 });
1093}
1094
1096 const Span<bool> hide_poly,
1097 const Span<int> face_indices)
1098{
1099 int corners_count = 0;
1100 for (const int face : face_indices) {
1101 if (!hide_poly.is_empty() && hide_poly[face]) {
1102 continue;
1103 }
1104 corners_count += faces[face].size();
1105 }
1106
1107 GPUIndexBufBuilder builder;
1108 GPU_indexbuf_init(&builder, GPU_PRIM_LINES, corners_count, INT_MAX);
1110
1111 int node_corner_offset = 0;
1112 int line_index = 0;
1113 for (const int face_index : face_indices) {
1114 const int face_size = faces[face_index].size();
1115 if (!hide_poly.is_empty() && hide_poly[face_index]) {
1116 node_corner_offset += face_size;
1117 continue;
1118 }
1119 for (const int i : IndexRange(face_size)) {
1120 const int next = (i == face_size - 1) ? 0 : i + 1;
1121 data[line_index] = uint2(i, next) + node_corner_offset;
1122 line_index++;
1123 }
1124
1125 node_corner_offset += face_size;
1126 }
1127
1128 return gpu::IndexBufPtr(GPU_indexbuf_build_ex(&builder, 0, node_corner_offset, false));
1129}
1130
1132 const int visible_faces_num)
1133{
1134 GPUIndexBufBuilder builder;
1135 GPU_indexbuf_init(&builder, GPU_PRIM_LINES, visible_faces_num * 3, INT_MAX);
1136
1138
1139 int line_index = 0;
1140 int vert_index = 0;
1141
1142 for (const BMFace *face : faces) {
1143 if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
1144 continue;
1145 }
1146
1147 data[line_index] = uint2(vert_index, vert_index + 1);
1148 line_index++;
1149 data[line_index] = uint2(vert_index + 1, vert_index + 2);
1150 line_index++;
1151 data[line_index] = uint2(vert_index + 2, vert_index);
1152 line_index++;
1153
1154 vert_index += 3;
1155 }
1156
1157 return gpu::IndexBufPtr(GPU_indexbuf_build_ex(&builder, 0, visible_faces_num * 3, false));
1158}
1159
1160static int create_tri_index_grids(const Span<int> grid_indices,
1161 const BitGroupVector<> &grid_hidden,
1162 const int gridsize,
1163 const int skip,
1164 const int totgrid,
1166{
1167 int tri_index = 0;
1168 int offset = 0;
1169 const int grid_vert_len = gridsize * gridsize;
1170 for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
1171 uint v0, v1, v2, v3;
1172
1173 const BoundedBitSpan gh = grid_hidden.is_empty() ? BoundedBitSpan() :
1174 grid_hidden[grid_indices[i]];
1175
1176 for (int y = 0; y < gridsize - skip; y += skip) {
1177 for (int x = 0; x < gridsize - skip; x += skip) {
1178 /* Skip hidden grid face */
1179 if (!gh.is_empty() && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
1180 continue;
1181 }
1182 /* Indices in a Clockwise QUAD disposition. */
1183 v0 = offset + CCG_grid_xy_to_index(gridsize, x, y);
1184 v1 = offset + CCG_grid_xy_to_index(gridsize, x + skip, y);
1185 v2 = offset + CCG_grid_xy_to_index(gridsize, x + skip, y + skip);
1186 v3 = offset + CCG_grid_xy_to_index(gridsize, x, y + skip);
1187
1188 data[tri_index] = uint3(v0, v2, v1);
1189 tri_index++;
1190 data[tri_index] = uint3(v0, v3, v2);
1191 tri_index++;
1192 }
1193 }
1194 }
1195
1196 return tri_index;
1197}
1198
1199static int create_tri_index_grids_flat_layout(const Span<int> grid_indices,
1200 const BitGroupVector<> &grid_hidden,
1201 const int gridsize,
1202 const int skip,
1203 const int totgrid,
1205{
1206 int tri_index = 0;
1207 int offset = 0;
1208 const int grid_vert_len = square_uint(gridsize - 1) * 4;
1209 for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
1210 const BoundedBitSpan gh = grid_hidden.is_empty() ? BoundedBitSpan() :
1211 grid_hidden[grid_indices[i]];
1212
1213 uint v0, v1, v2, v3;
1214 for (int y = 0; y < gridsize - skip; y += skip) {
1215 for (int x = 0; x < gridsize - skip; x += skip) {
1216 /* Skip hidden grid face */
1217 if (!gh.is_empty() && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
1218 continue;
1219 }
1220
1221 v0 = (y * (gridsize - 1) + x) * 4;
1222
1223 if (skip > 1) {
1224 v1 = (y * (gridsize - 1) + x + skip - 1) * 4;
1225 v2 = ((y + skip - 1) * (gridsize - 1) + x + skip - 1) * 4;
1226 v3 = ((y + skip - 1) * (gridsize - 1) + x) * 4;
1227 }
1228 else {
1229 v1 = v2 = v3 = v0;
1230 }
1231
1232 /* VBO data are in a Clockwise QUAD disposition. Note
1233 * that vertices might be in different quads if we're
1234 * building a coarse index buffer.
1235 */
1236 v0 += offset;
1237 v1 += offset + 1;
1238 v2 += offset + 2;
1239 v3 += offset + 3;
1240
1241 data[tri_index] = uint3(v0, v2, v1);
1242 tri_index++;
1243 data[tri_index] = uint3(v0, v3, v2);
1244 tri_index++;
1245 }
1246 }
1247 }
1248 return tri_index;
1249}
1250
1251static void create_lines_index_grids(const Span<int> grid_indices,
1252 int display_gridsize,
1253 const BitGroupVector<> &grid_hidden,
1254 const int gridsize,
1255 const int skip,
1256 const int totgrid,
1258{
1259 int line_index = 0;
1260 int offset = 0;
1261 const int grid_vert_len = gridsize * gridsize;
1262 for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
1263 uint v0, v1, v2, v3;
1264 bool grid_visible = false;
1265
1266 const BoundedBitSpan gh = grid_hidden.is_empty() ? BoundedBitSpan() :
1267 grid_hidden[grid_indices[i]];
1268
1269 for (int y = 0; y < gridsize - skip; y += skip) {
1270 for (int x = 0; x < gridsize - skip; x += skip) {
1271 /* Skip hidden grid face */
1272 if (!gh.is_empty() && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
1273 continue;
1274 }
1275 /* Indices in a Clockwise QUAD disposition. */
1276 v0 = offset + CCG_grid_xy_to_index(gridsize, x, y);
1277 v1 = offset + CCG_grid_xy_to_index(gridsize, x + skip, y);
1278 v2 = offset + CCG_grid_xy_to_index(gridsize, x + skip, y + skip);
1279 v3 = offset + CCG_grid_xy_to_index(gridsize, x, y + skip);
1280
1281 data[line_index] = uint2(v0, v1);
1282 line_index++;
1283 data[line_index] = uint2(v0, v3);
1284 line_index++;
1285
1286 if (y / skip + 2 == display_gridsize) {
1287 data[line_index] = uint2(v2, v3);
1288 line_index++;
1289 }
1290 grid_visible = true;
1291 }
1292
1293 if (grid_visible) {
1294 data[line_index] = uint2(v1, v2);
1295 line_index++;
1296 }
1297 }
1298 }
1299}
1300
1302 int display_gridsize,
1303 const BitGroupVector<> &grid_hidden,
1304 const int gridsize,
1305 const int skip,
1306 const int totgrid,
1308{
1309 int line_index = 0;
1310 int offset = 0;
1311 const int grid_vert_len = square_uint(gridsize - 1) * 4;
1312 for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
1313 bool grid_visible = false;
1314 const BoundedBitSpan gh = grid_hidden.is_empty() ? BoundedBitSpan() :
1315 grid_hidden[grid_indices[i]];
1316
1317 uint v0, v1, v2, v3;
1318 for (int y = 0; y < gridsize - skip; y += skip) {
1319 for (int x = 0; x < gridsize - skip; x += skip) {
1320 /* Skip hidden grid face */
1321 if (!gh.is_empty() && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
1322 continue;
1323 }
1324
1325 v0 = (y * (gridsize - 1) + x) * 4;
1326
1327 if (skip > 1) {
1328 v1 = (y * (gridsize - 1) + x + skip - 1) * 4;
1329 v2 = ((y + skip - 1) * (gridsize - 1) + x + skip - 1) * 4;
1330 v3 = ((y + skip - 1) * (gridsize - 1) + x) * 4;
1331 }
1332 else {
1333 v1 = v2 = v3 = v0;
1334 }
1335
1336 /* VBO data are in a Clockwise QUAD disposition. Note
1337 * that vertices might be in different quads if we're
1338 * building a coarse index buffer.
1339 */
1340 v0 += offset;
1341 v1 += offset + 1;
1342 v2 += offset + 2;
1343 v3 += offset + 3;
1344
1345 data[line_index] = uint2(v0, v1);
1346 line_index++;
1347 data[line_index] = uint2(v0, v3);
1348 line_index++;
1349
1350 if (y / skip + 2 == display_gridsize) {
1351 data[line_index] = uint2(v2, v3);
1352 line_index++;
1353 }
1354 grid_visible = true;
1355 }
1356
1357 if (grid_visible) {
1358 data[line_index] = uint2(v1, v2);
1359 line_index++;
1360 }
1361 }
1362 }
1363}
1364
1365static Array<int> calc_material_indices(const Object &object, const OrigMeshData &orig_mesh_data)
1366{
1367 const SculptSession &ss = *object.sculpt;
1368 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1369 switch (pbvh.type()) {
1370 case bke::pbvh::Type::Mesh: {
1372 const Mesh &mesh = DRW_object_get_data_for_drawing<Mesh>(object);
1373 const bke::AttributeAccessor attributes = mesh.attributes();
1374 const VArray material_indices = *attributes.lookup<int>("material_index",
1376 if (!material_indices) {
1377 return {};
1378 }
1379 Array<int> node_materials(nodes.size());
1380 threading::parallel_for(nodes.index_range(), 64, [&](const IndexRange range) {
1381 for (const int i : range) {
1382 const Span<int> face_indices = nodes[i].faces();
1383 if (face_indices.is_empty()) {
1384 continue;
1385 }
1386 node_materials[i] = material_indices[face_indices.first()];
1387 }
1388 });
1389 return node_materials;
1390 }
1393 /* Use original mesh data because evaluated mesh is empty. */
1394 const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
1395 const VArray material_indices = *attributes.lookup<int>("material_index",
1397 if (!material_indices) {
1398 return {};
1399 }
1400 Array<int> node_materials(nodes.size());
1401 const SubdivCCG &subdiv_ccg = *ss.subdiv_ccg;
1402 const Span<int> grid_faces = subdiv_ccg.grid_to_face_map;
1403 threading::parallel_for(nodes.index_range(), 64, [&](const IndexRange range) {
1404 for (const int i : range) {
1405 const Span<int> grids = nodes[i].grids();
1406 if (grids.is_empty()) {
1407 continue;
1408 }
1409 node_materials[i] = material_indices[grid_faces[grids.first()]];
1410 }
1411 });
1412 return node_materials;
1413 }
1415 return {};
1416 }
1418 return {};
1419}
1420
1421static BitVector<> calc_use_flat_layout(const Object &object, const OrigMeshData &orig_mesh_data)
1422{
1423 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1424 switch (pbvh.type()) {
1426 /* NOTE: Theoretically it would be possible to used vertex indexed buffers if there are no
1427 * face corner attributes, sharp faces, or face sets. */
1428 return {};
1431 const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
1432 const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face", bke::AttrDomain::Face);
1433 if (sharp_faces.is_empty()) {
1434 return BitVector<>(nodes.size(), false);
1435 }
1436
1437 const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
1438 const Span<int> grid_to_face_map = subdiv_ccg.grid_to_face_map;
1439
1440 /* Use boolean array instead of #BitVector for parallelized writing. */
1441 Array<bool> use_flat_layout(nodes.size());
1442 threading::parallel_for(nodes.index_range(), 4, [&](const IndexRange range) {
1443 for (const int i : range) {
1444 const Span<int> grids = nodes[i].grids();
1445 if (grids.is_empty()) {
1446 continue;
1447 }
1448 use_flat_layout[i] = std::any_of(grids.begin(), grids.end(), [&](const int grid) {
1449 return sharp_faces[grid_to_face_map[grid]];
1450 });
1451 }
1452 });
1453 return BitVector<>(use_flat_layout);
1454 }
1456 return {};
1457 }
1459 return {};
1460}
1461
1463 const Span<int3> corner_tris,
1464 const Span<bool> hide_poly,
1465 const bke::pbvh::MeshNode &node)
1466{
1467 const Span<int> face_indices = node.faces();
1468 int tris_num = 0;
1469 if (hide_poly.is_empty()) {
1470 tris_num = poly_to_tri_count(face_indices.size(), node.corners_num());
1471 }
1472 else {
1473 for (const int face : face_indices) {
1474 if (hide_poly[face]) {
1475 continue;
1476 }
1477 tris_num += bke::mesh::face_triangles_num(faces[face].size());
1478 }
1479 }
1480
1481 GPUIndexBufBuilder builder;
1482 GPU_indexbuf_init(&builder, GPU_PRIM_TRIS, tris_num, INT_MAX);
1484
1485 int tri_index = 0;
1486 int node_corner_offset = 0;
1487 for (const int face_index : face_indices) {
1488 const IndexRange face = faces[face_index];
1489 if (!hide_poly.is_empty() && hide_poly[face_index]) {
1490 node_corner_offset += face.size();
1491 continue;
1492 }
1493 for (const int3 &tri : corner_tris.slice(bke::mesh::face_triangles_range(faces, face_index))) {
1494 for (int i : IndexRange(3)) {
1495 const int corner = tri[i];
1496 const int index_in_face = corner - face.first();
1497 data[tri_index][i] = node_corner_offset + index_in_face;
1498 }
1499 tri_index++;
1500 }
1501 node_corner_offset += face.size();
1502 }
1503
1504 return gpu::IndexBufPtr(GPU_indexbuf_build_ex(&builder, 0, node_corner_offset, false));
1505}
1506
1508 const BitGroupVector<> &grid_hidden,
1509 const bool do_coarse,
1510 const Span<int> grid_indices,
1511 const bool use_flat_layout)
1512{
1513 int gridsize = key.grid_size;
1514 int display_gridsize = gridsize;
1515 int totgrid = grid_indices.size();
1516 int skip = 1;
1517
1518 const int display_level = do_coarse ? 0 : key.level;
1519
1520 if (display_level < key.level) {
1521 display_gridsize = (1 << display_level) + 1;
1522 skip = 1 << (key.level - display_level - 1);
1523 }
1524
1525 uint visible_quad_len = bke::pbvh::count_grid_quads(
1526 grid_hidden, grid_indices, key.grid_size, display_gridsize);
1527
1528 GPUIndexBufBuilder builder;
1529 GPU_indexbuf_init(&builder, GPU_PRIM_TRIS, 2 * visible_quad_len, INT_MAX);
1530
1532
1533 int tri_count;
1534 if (use_flat_layout) {
1536 grid_indices, grid_hidden, gridsize, skip, totgrid, data);
1537 }
1538 else {
1539 tri_count = create_tri_index_grids(grid_indices, grid_hidden, gridsize, skip, totgrid, data);
1540 }
1541
1542 builder.index_len = tri_count * 3;
1543 builder.index_min = 0;
1544 builder.index_max = 6 * visible_quad_len;
1545 builder.uses_restart_indices = false;
1547 GPU_indexbuf_build_in_place(&builder, result.get());
1548 return result;
1549}
1550
1552 const BitGroupVector<> &grid_hidden,
1553 const bool do_coarse,
1554 const Span<int> grid_indices,
1555 const bool use_flat_layout)
1556{
1557 int gridsize = key.grid_size;
1558 int display_gridsize = gridsize;
1559 int totgrid = grid_indices.size();
1560 int skip = 1;
1561
1562 const int display_level = do_coarse ? 0 : key.level;
1563
1564 if (display_level < key.level) {
1565 display_gridsize = (1 << display_level) + 1;
1566 skip = 1 << (key.level - display_level - 1);
1567 }
1568
1569 GPUIndexBufBuilder builder;
1571 &builder, GPU_PRIM_LINES, 2 * totgrid * display_gridsize * (display_gridsize - 1), INT_MAX);
1572
1574
1575 if (use_flat_layout) {
1577 grid_indices, display_gridsize, grid_hidden, gridsize, skip, totgrid, data);
1578 }
1579 else {
1581 grid_indices, display_gridsize, grid_hidden, gridsize, skip, totgrid, data);
1582 }
1583
1585 &builder, 0, 2 * totgrid * display_gridsize * (display_gridsize - 1), false));
1586}
1587
1588Span<gpu::IndexBufPtr> DrawCacheImpl::ensure_lines_indices(const Object &object,
1589 const OrigMeshData &orig_mesh_data,
1590 const IndexMask &node_mask,
1591 const bool coarse)
1592{
1593 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1594 Vector<gpu::IndexBufPtr> &ibos = coarse ? lines_ibos_coarse_ : lines_ibos_;
1595 ibos.resize(pbvh.nodes_num());
1596
1597 IndexMaskMemory memory;
1598 const IndexMask nodes_to_calculate = IndexMask::from_predicate(
1599 node_mask, GrainSize(8196), memory, [&](const int i) { return !ibos[i]; });
1600
1601 switch (pbvh.type()) {
1602 case bke::pbvh::Type::Mesh: {
1604 const Mesh &mesh = DRW_object_get_data_for_drawing<Mesh>(object);
1605 const OffsetIndices<int> faces = mesh.faces();
1606 const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
1607 const VArraySpan hide_poly = *attributes.lookup<bool>(".hide_poly", bke::AttrDomain::Face);
1608 nodes_to_calculate.foreach_index(GrainSize(1), [&](const int i) {
1609 ibos[i] = create_lines_index_faces(faces, hide_poly, nodes[i].faces());
1610 });
1611 break;
1612 }
1614 const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
1615 nodes_to_calculate.foreach_index(GrainSize(1), [&](const int i) {
1616 const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
1617 const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
1619 key, subdiv_ccg.grid_hidden, coarse, nodes[i].grids(), use_flat_layout_[i]);
1620 });
1621 break;
1622 }
1624 const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();
1625 nodes_to_calculate.foreach_index(GrainSize(1), [&](const int i) {
1626 const Set<BMFace *, 0> &faces = BKE_pbvh_bmesh_node_faces(
1627 &const_cast<bke::pbvh::BMeshNode &>(nodes[i]));
1628 const int visible_faces_num = count_visible_tris_bmesh(faces);
1629 ibos[i] = create_lines_index_bmesh(faces, visible_faces_num);
1630 });
1631 break;
1632 }
1633 }
1634
1635 return ibos;
1636}
1637
1638BitSpan DrawCacheImpl::ensure_use_flat_layout(const Object &object,
1639 const OrigMeshData &orig_mesh_data)
1640{
1641 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1642 if (use_flat_layout_.size() != pbvh.nodes_num()) {
1643 use_flat_layout_ = calc_use_flat_layout(object, orig_mesh_data);
1644 }
1645 return use_flat_layout_;
1646}
1647
1649 const IndexMask &node_mask)
1650{
1651 node_mask.foreach_index([&](const int i) { GPU_vertbuf_use(vbos[i].get()); });
1652}
1653
1654Span<gpu::VertBufPtr> DrawCacheImpl::ensure_attribute_data(const Object &object,
1655 const OrigMeshData &orig_mesh_data,
1656 const AttributeRequest &attr,
1657 const IndexMask &node_mask)
1658{
1659 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1660 AttributeData &data = attribute_vbos_.lookup_or_add_default(attr);
1661 Vector<gpu::VertBufPtr> &vbos = data.vbos;
1662 vbos.resize(pbvh.nodes_num());
1663
1664 /* The nodes we recompute here are a combination of:
1665 * 1. null VBOs, which correspond to nodes that either haven't been drawn before, or have been
1666 * cleared completely by #free_nodes_with_changed_topology.
1667 * 2. Nodes that have been tagged dirty as their values are changed.
1668 * We also only process a subset of the nodes referenced by the caller, for example to only
1669 * recompute visible nodes. */
1670 IndexMaskMemory memory;
1671 const IndexMask empty_mask = IndexMask::from_predicate(
1672 node_mask, GrainSize(8196), memory, [&](const int i) { return !vbos[i]; });
1673 const IndexMask dirty_mask = IndexMask::from_bits(
1674 node_mask.slice_content(data.dirty_nodes.index_range()), data.dirty_nodes, memory);
1675 const IndexMask mask = IndexMask::from_union(empty_mask, dirty_mask, memory);
1676
1677 switch (pbvh.type()) {
1678 case bke::pbvh::Type::Mesh: {
1679 if (const CustomRequest *request_type = std::get_if<CustomRequest>(&attr)) {
1680 switch (*request_type) {
1682 update_positions_mesh(object, mask, vbos);
1683 break;
1685 update_normals_mesh(object, mask, vbos);
1686 break;
1688 update_masks_mesh(object, orig_mesh_data, mask, vbos);
1689 break;
1691 update_face_sets_mesh(object, orig_mesh_data, mask, vbos);
1692 break;
1693 }
1694 }
1695 else {
1697 object, orig_mesh_data, mask, std::get<GenericRequest>(attr), vbos);
1698 }
1699 break;
1700 }
1702 if (const CustomRequest *request_type = std::get_if<CustomRequest>(&attr)) {
1703 switch (*request_type) {
1705 fill_positions_grids(object, use_flat_layout_, mask, vbos);
1706 break;
1708 fill_normals_grids(object, orig_mesh_data, use_flat_layout_, mask, vbos);
1709 break;
1711 fill_masks_grids(object, use_flat_layout_, mask, vbos);
1712 break;
1714 fill_face_sets_grids(object, orig_mesh_data, use_flat_layout_, mask, vbos);
1715 break;
1716 }
1717 }
1718 else {
1720 object,
1721 attribute_format(orig_mesh_data, "Dummy", bke::AttrType::Float3),
1722 use_flat_layout_,
1723 mask,
1724 vbos);
1725 mask.foreach_index(GrainSize(1),
1726 [&](const int i) { vbos[i]->data<float3>().fill(float3(0.0f)); });
1727 }
1728 break;
1729 }
1731 if (const CustomRequest *request_type = std::get_if<CustomRequest>(&attr)) {
1732 switch (*request_type) {
1734 update_positions_bmesh(object, mask, vbos);
1735 break;
1737 update_normals_bmesh(object, mask, vbos);
1738 break;
1740 update_masks_bmesh(object, mask, vbos);
1741 break;
1743 update_face_sets_bmesh(object, orig_mesh_data, mask, vbos);
1744 break;
1745 }
1746 }
1747 else {
1749 object, orig_mesh_data, mask, std::get<GenericRequest>(attr), vbos);
1750 }
1751 break;
1752 }
1753 }
1754
1755 /* TODO: It would be good to deallocate the bit vector if all of the bits have been reset to
1756 * avoid unnecessary processing in subsequent redraws. */
1757 dirty_mask.foreach_index_optimized<int>([&](const int i) { data.dirty_nodes[i].reset(); });
1758
1759 flush_vbo_data(vbos, mask);
1760
1761 return vbos;
1762}
1763
1764Span<gpu::IndexBufPtr> DrawCacheImpl::ensure_tri_indices(const Object &object,
1765 const OrigMeshData &orig_mesh_data,
1766 const IndexMask &node_mask,
1767 const bool coarse)
1768{
1769 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1770 switch (pbvh.type()) {
1771 case bke::pbvh::Type::Mesh: {
1772 const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
1773
1774 Vector<gpu::IndexBufPtr> &ibos = tris_ibos_;
1775 ibos.resize(nodes.size());
1776
1777 /* Whenever a node's visible triangle count has changed the index buffers are freed, so we
1778 * only recalculate null IBOs here. A new mask is recalculated for more even task
1779 * distribution between threads. */
1780 IndexMaskMemory memory;
1781 const IndexMask nodes_to_calculate = IndexMask::from_predicate(
1782 node_mask, GrainSize(8196), memory, [&](const int i) { return !ibos[i]; });
1783
1784 const Mesh &mesh = DRW_object_get_data_for_drawing<Mesh>(object);
1785 const OffsetIndices<int> faces = mesh.faces();
1786 const Span<int3> corner_tris = mesh.corner_tris();
1787 const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
1788 const VArraySpan hide_poly = *attributes.lookup<bool>(".hide_poly", bke::AttrDomain::Face);
1789 nodes_to_calculate.foreach_index(GrainSize(1), [&](const int i) {
1790 ibos[i] = create_tri_index_mesh(faces, corner_tris, hide_poly, nodes[i]);
1791 });
1792 return ibos;
1793 }
1795 /* Unlike the other geometry types, multires grids use indexed vertex buffers because when
1796 * there are no flat faces, vertices can be shared between neighboring quads. This results in
1797 * a 4x decrease in the amount of data uploaded. Theoretically it also means freeing VBOs
1798 * because of visibility changes is unnecessary.
1799 *
1800 * TODO: With the "flat layout" and no hidden faces, the index buffers are unnecessary, we
1801 * should avoid creating them in that case. */
1802 const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
1803
1804 Vector<gpu::IndexBufPtr> &ibos = coarse ? tris_ibos_coarse_ : tris_ibos_;
1805 ibos.resize(nodes.size());
1806
1807 /* Whenever a node's visible triangle count has changed the index buffers are freed, so we
1808 * only recalculate null IBOs here. A new mask is recalculated for more even task
1809 * distribution between threads. */
1810 IndexMaskMemory memory;
1811 const IndexMask nodes_to_calculate = IndexMask::from_predicate(
1812 node_mask, GrainSize(8196), memory, [&](const int i) { return !ibos[i]; });
1813
1814 const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
1815 const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
1816
1817 nodes_to_calculate.foreach_index(GrainSize(1), [&](const int i) {
1818 ibos[i] = create_tri_index_grids(
1819 key, subdiv_ccg.grid_hidden, coarse, nodes[i].grids(), use_flat_layout_[i]);
1820 });
1821 return ibos;
1822 }
1824 return {};
1825 }
1827 return {};
1828}
1829
1831 const ViewportRequest &request,
1832 const IndexMask &nodes_to_update)
1833{
1834 const Object &object_orig = *DEG_get_original(&object);
1835 const OrigMeshData orig_mesh_data{*static_cast<const Mesh *>(object_orig.data)};
1836 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1837
1838 this->ensure_use_flat_layout(object, orig_mesh_data);
1839 this->free_nodes_with_changed_topology(pbvh);
1840
1841 const Span<gpu::IndexBufPtr> ibos = this->ensure_tri_indices(
1842 object, orig_mesh_data, nodes_to_update, request.use_coarse_grids);
1843
1844 for (const AttributeRequest &attr : request.attributes) {
1845 this->ensure_attribute_data(object, orig_mesh_data, attr, nodes_to_update);
1846 }
1847
1848 /* Collect VBO spans in a different loop because #ensure_attribute_data invalidates the allocated
1849 * arrays when its map is changed. */
1851 for (const AttributeRequest &attr : request.attributes) {
1852 if (const AttributeData *attr_data = attribute_vbos_.lookup_ptr(attr)) {
1853 attr_vbos.append(attr_data->vbos);
1854 }
1855 }
1856
1857 /* Except for the first iteration of the draw loop, we only need to rebuild batches for nodes
1858 * with changed topology (visible triangle count). */
1859 Vector<gpu::Batch *> &batches = tris_batches_.lookup_or_add_default(request);
1860 batches.resize(pbvh.nodes_num(), nullptr);
1861 nodes_to_update.foreach_index(GrainSize(64), [&](const int i) {
1862 if (!batches[i]) {
1863 batches[i] = GPU_batch_create(
1864 GPU_PRIM_TRIS, nullptr, ibos.is_empty() ? nullptr : ibos[i].get());
1865 for (const Span<gpu::VertBufPtr> vbos : attr_vbos) {
1866 GPU_batch_vertbuf_add(batches[i], vbos[i].get(), false);
1867 }
1868 }
1869 });
1870
1871 return batches;
1872}
1873
1875 const ViewportRequest &request,
1876 const IndexMask &nodes_to_update)
1877{
1878 const Object &object_orig = *DEG_get_original(&object);
1879 const OrigMeshData orig_mesh_data(*static_cast<const Mesh *>(object_orig.data));
1880 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1881
1882 this->ensure_use_flat_layout(object, orig_mesh_data);
1883 this->free_nodes_with_changed_topology(pbvh);
1884
1885 const Span<gpu::VertBufPtr> position = this->ensure_attribute_data(
1886 object, orig_mesh_data, CustomRequest::Position, nodes_to_update);
1887 const Span<gpu::IndexBufPtr> lines = this->ensure_lines_indices(
1888 object, orig_mesh_data, nodes_to_update, request.use_coarse_grids);
1889
1890 /* Except for the first iteration of the draw loop, we only need to rebuild batches for nodes
1891 * with changed topology (visible triangle count). */
1892 Vector<gpu::Batch *> &batches = request.use_coarse_grids ? lines_batches_coarse_ :
1893 lines_batches_;
1894 batches.resize(pbvh.nodes_num(), nullptr);
1895 nodes_to_update.foreach_index(GrainSize(64), [&](const int i) {
1896 if (!batches[i]) {
1897 batches[i] = GPU_batch_create(GPU_PRIM_LINES, nullptr, lines[i].get());
1898 GPU_batch_vertbuf_add(batches[i], position[i].get(), false);
1899 }
1900 });
1901
1902 return batches;
1903}
1904
1906{
1907 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1908 if (material_indices_.size() != pbvh.nodes_num()) {
1909 const Object &object_orig = *DEG_get_original(&object);
1910 const OrigMeshData orig_mesh_data(*static_cast<const Mesh *>(object_orig.data));
1911 material_indices_ = calc_material_indices(object, orig_mesh_data);
1912 }
1913 return material_indices_;
1914}
1915
1916} // namespace blender::draw::pbvh
int CCG_grid_xy_to_index(const int grid_size, const int x, const int y)
Definition BKE_ccg.hh:73
CustomData interface, see also DNA_customdata_types.h.
eCustomDataMask CD_TYPE_AS_MASK(eCustomDataType type)
int CustomData_get_offset_named(const CustomData *data, eCustomDataType type, blender::StringRef name)
const char * CustomData_get_render_layer_name(const CustomData *data, eCustomDataType type)
const char * CustomData_get_active_layer_name(const CustomData *data, eCustomDataType type)
void BKE_paint_face_set_overlay_color_get(int face_set, int seed, uchar r_color[4])
Definition paint.cc:3092
bool paint_is_grid_face_hidden(blender::BoundedBitSpan grid_hidden, int gridsize, int x, int y)
Definition paint.cc:2129
A BVH for high poly meshes.
const blender::Set< BMFace *, 0 > & BKE_pbvh_bmesh_node_faces(blender::bke::pbvh::BMeshNode *node)
CCGKey BKE_subdiv_ccg_key_top_level(const SubdivCCG &subdiv_ccg)
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_NOINLINE
MINLINE int square_i(int a)
MINLINE unsigned int square_uint(unsigned int a)
float normal_quad_v3(float n[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
Definition math_geom.cc:58
MINLINE int poly_to_tri_count(int poly_count, int corner_count)
MINLINE void normal_float_to_short_v3(short out[3], const float in[3])
unsigned int uint
#define POINTER_OFFSET(v, ofs)
T * DEG_get_original(T *id)
@ CD_PROP_FLOAT
@ CD_PROP_INT32
@ CD_PROP_FLOAT2
struct Mesh Mesh
Object is a sort of wrapper for general info.
struct Object Object
T & DRW_object_get_data_for_drawing(const Object &object)
#define GPU_batch_create(primitive_type, vertex_buf, index_buf)
Definition GPU_batch.hh:141
int GPU_batch_vertbuf_add(blender::gpu::Batch *batch, blender::gpu::VertBuf *vertex_buf, bool own_vbo)
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition GPU_batch.hh:197
blender::MutableSpan< uint32_t > GPU_indexbuf_get_data(GPUIndexBufBuilder *)
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len)
blender::gpu::IndexBuf * GPU_indexbuf_build_ex(GPUIndexBufBuilder *builder, uint index_min, uint index_max, bool uses_restart_indices)
blender::gpu::IndexBuf * GPU_indexbuf_calloc()
void GPU_indexbuf_build_in_place(GPUIndexBufBuilder *, blender::gpu::IndexBuf *)
@ GPU_PRIM_LINES
@ GPU_PRIM_TRIS
void GPU_vertbuf_use(blender::gpu::VertBuf *)
static blender::gpu::VertBuf * GPU_vertbuf_create_with_format(const GPUVertFormat &format)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
GPUVertFormat GPU_vertformat_from_attribute(blender::StringRef name, blender::gpu::VertAttrType type)
@ BM_ELEM_HIDDEN
@ BM_ELEM_SMOOTH
#define BM_elem_flag_test(ele, hflag)
BMDataLayerLookup BM_data_layer_lookup(const BMesh &bm, const blender::StringRef name)
BMesh const char void * data
BMesh * bm
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMLoop * l
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
void foreach_index(Fn &&fn) const
Value & lookup_or_add_default(const Key &key)
Definition BLI_map.hh:639
ValueIterator values() const &
Definition BLI_map.hh:884
constexpr int64_t size() const
Definition BLI_span.hh:252
static IndexMask from_predicate(const IndexMask &universe, GrainSize grain_size, IndexMaskMemory &memory, Fn &&predicate)
static IndexMask from_bits(BitSpan bits, IndexMaskMemory &memory)
static IndexMask from_union(const IndexMask &mask_a, const IndexMask &mask_b, IndexMaskMemory &memory)
static IndexMask from_intersection(const IndexMask &mask_a, const IndexMask &mask_b, IndexMaskMemory &memory)
constexpr int64_t first() const
constexpr int64_t size() const
constexpr T * data() const
Definition BLI_span.hh:539
constexpr IndexRange index_range() const
Definition BLI_span.hh:670
constexpr Span slice(int64_t start, int64_t size) const
Definition BLI_span.hh:137
constexpr const T * data() const
Definition BLI_span.hh:215
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
constexpr bool is_empty() const
Definition BLI_span.hh:260
void append(const T &value)
void resize(const int64_t new_size)
void resize(const int64_t new_size_in_bits, const bool value=false)
GAttributeReader lookup(const StringRef attribute_id) const
Span< NodeT > nodes() const
void tag_visibility_changed(const IndexMask &node_mask) override
Definition draw_pbvh.cc:212
void tag_positions_changed(const IndexMask &node_mask) override
Definition draw_pbvh.cc:202
Span< int > ensure_material_indices(const Object &object) override
void tag_attribute_changed(const IndexMask &node_mask, StringRef attribute_name) override
Definition draw_pbvh.cc:239
Span< gpu::Batch * > ensure_tris_batches(const Object &object, const ViewportRequest &request, const IndexMask &nodes_to_update) override
void tag_masks_changed(const IndexMask &node_mask) override
Definition draw_pbvh.cc:232
void tag_topology_changed(const IndexMask &node_mask) override
Definition draw_pbvh.cc:218
void tag_face_sets_changed(const IndexMask &node_mask) override
Definition draw_pbvh.cc:225
Span< gpu::Batch * > ensure_lines_batches(const Object &object, const ViewportRequest &request, const IndexMask &nodes_to_update) override
MutableSpan< T > data()
IndexMask slice_content(IndexRange range) const
void set_bits(MutableBitSpan r_bits, int64_t offset=0) const
void foreach_index(Fn &&fn) const
static float normals[][3]
format
ccl_device_inline float2 mask(const MaskType mask, const float2 a)
static ulong * next
#define T
static char faces[256]
void convert_to_static_type(const CPPType &cpp_type, const Func &func)
IndexRange grid_range(const int grid_area, const int grid)
int face_triangles_num(const int face_size)
Definition BKE_mesh.hh:350
IndexRange face_triangles_range(OffsetIndices< int > faces, int face_i)
Definition BKE_mesh.hh:359
pbvh::Tree * pbvh_get(Object &object)
Definition paint.cc:3052
Span< float3 > vert_normals_eval_from_eval(const Object &object_eval)
Definition pbvh.cc:1065
int count_grid_quads(const BitGroupVector<> &grid_hidden, Span< int > grid_indices, int gridsize, int display_gridsize)
Definition pbvh.cc:1605
Span< float3 > vert_positions_eval_from_eval(const Object &object_eval)
Definition pbvh.cc:1046
Span< float3 > face_normals_eval_from_eval(const Object &object_eval)
Definition pbvh.cc:1072
std::optional< eCustomDataType > attr_type_to_custom_data_type(AttrType attr_type)
AttrType cpp_type_to_attribute_type(const CPPType &type)
void extract_data_vert_mesh(const OffsetIndices< int > faces, const Span< int > corner_verts, const Span< T > attribute, const Span< int > face_indices, gpu::VertBuf &vbo)
Definition draw_pbvh.cc:342
static BLI_NOINLINE void flush_vbo_data(const Span< gpu::VertBufPtr > vbos, const IndexMask &node_mask)
static BLI_NOINLINE void update_face_sets_bmesh(const Object &object, const OrigMeshData &orig_mesh_data, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
static BLI_NOINLINE void fill_masks_grids(const Object &object, const BitSpan use_flat_layout, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:839
static BLI_NOINLINE void ensure_vbos_allocated_bmesh(const Object &object, const GPUVertFormat &format, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:554
static BLI_NOINLINE void update_positions_bmesh(const Object &object, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:928
void extract_data_corner_mesh(const OffsetIndices< int > faces, const Span< T > attribute, const Span< int > face_indices, gpu::VertBuf &vbo)
Definition draw_pbvh.cc:377
static void update_normals_mesh(const Object &object, const IndexMask &node_mask, MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:589
static int count_visible_tris_bmesh(const Set< BMFace *, 0 > &faces)
Definition draw_pbvh.cc:469
const T & bmesh_cd_loop_get(const BMLoop &loop, const int offset)
Definition draw_pbvh.cc:399
static gpu::IndexBufPtr create_lines_index_faces(const OffsetIndices< int > faces, const Span< bool > hide_poly, const Span< int > face_indices)
static BitVector calc_use_flat_layout(const Object &object, const OrigMeshData &orig_mesh_data)
static const GPUVertFormat & normal_format()
Definition draw_pbvh.cc:289
static const GPUVertFormat & face_set_format()
Definition draw_pbvh.cc:303
DrawCache & ensure_draw_data(std::unique_ptr< bke::pbvh::DrawCache > &ptr)
Definition draw_pbvh.cc:250
static const GPUVertFormat & position_format()
Definition draw_pbvh.cc:282
static BLI_NOINLINE void update_generic_attribute_mesh(const Object &object, const OrigMeshData &orig_mesh_data, const IndexMask &node_mask, const StringRef name, MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:693
static BLI_NOINLINE void update_masks_bmesh(const Object &object, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:986
static void create_lines_index_grids_flat_layout(const Span< int > grid_indices, int display_gridsize, const BitGroupVector<> &grid_hidden, const int gridsize, const int skip, const int totgrid, MutableSpan< uint2 > data)
const T & bmesh_cd_face_get(const BMFace &face, const int offset)
Definition draw_pbvh.cc:404
static void create_lines_index_grids(const Span< int > grid_indices, int display_gridsize, const BitGroupVector<> &grid_hidden, const int gridsize, const int skip, const int totgrid, MutableSpan< uint2 > data)
static BLI_NOINLINE void update_generic_attribute_bmesh(const Object &object, const OrigMeshData &orig_mesh_data, const IndexMask &node_mask, const StringRef name, const MutableSpan< gpu::VertBufPtr > vbos)
static int create_tri_index_grids_flat_layout(const Span< int > grid_indices, const BitGroupVector<> &grid_hidden, const int gridsize, const int skip, const int totgrid, MutableSpan< uint3 > data)
void extract_data_face_bmesh(const Set< BMFace *, 0 > &faces, const int cd_offset, gpu::VertBuf &vbo)
Definition draw_pbvh.cc:431
static Array< int > calc_material_indices(const Object &object, const OrigMeshData &orig_mesh_data)
short4 normal_float_to_short(const float3 &value)
Definition draw_pbvh.cc:334
static BLI_NOINLINE void free_vbos(const MutableSpan< gpu::VertBufPtr > vbos, const IndexMask &node_mask)
Definition draw_pbvh.cc:266
static BLI_NOINLINE void fill_face_sets_grids(const Object &object, const OrigMeshData &orig_mesh_data, const BitSpan use_flat_layout, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:886
static gpu::IndexBufPtr create_tri_index_mesh(const OffsetIndices< int > faces, const Span< int3 > corner_tris, const Span< bool > hide_poly, const bke::pbvh::MeshNode &node)
static BLI_NOINLINE void free_batches(const MutableSpan< gpu::Batch * > batches, const IndexMask &node_mask)
Definition draw_pbvh.cc:274
void extract_data_vert_bmesh(const Set< BMFace *, 0 > &faces, const int cd_offset, gpu::VertBuf &vbo)
Definition draw_pbvh.cc:410
void extract_data_face_mesh(const OffsetIndices< int > faces, const Span< T > attribute, const Span< int > face_indices, gpu::VertBuf &vbo)
Definition draw_pbvh.cc:360
static BLI_NOINLINE void update_masks_mesh(const Object &object, const OrigMeshData &orig_mesh_data, const IndexMask &node_mask, MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:622
static GPUVertFormat attribute_format(const OrigMeshData &orig_mesh_data, const StringRef name, const bke::AttrType data_type)
Definition draw_pbvh.cc:310
static void update_positions_mesh(const Object &object, const IndexMask &node_mask, MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:572
static BLI_NOINLINE void update_normals_bmesh(const Object &object, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:954
void extract_data_corner_bmesh(const Set< BMFace *, 0 > &faces, const int cd_offset, gpu::VertBuf &vbo)
Definition draw_pbvh.cc:447
static BLI_NOINLINE void ensure_vbos_allocated_mesh(const Object &object, const GPUVertFormat &format, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:519
static BLI_NOINLINE void fill_normals_grids(const Object &object, const OrigMeshData &orig_mesh_data, const BitSpan use_flat_layout, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:776
static BLI_NOINLINE void ensure_vbos_allocated_grids(const Object &object, const GPUVertFormat &format, const BitSpan use_flat_layout, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:534
static BLI_NOINLINE void fill_positions_grids(const Object &object, const BitSpan use_flat_layout, const IndexMask &node_mask, const MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:735
static gpu::IndexBufPtr create_lines_index_bmesh(const Set< BMFace *, 0 > &faces, const int visible_faces_num)
static BLI_NOINLINE void free_ibos(const MutableSpan< gpu::IndexBufPtr > ibos, const IndexMask &node_mask)
Definition draw_pbvh.cc:258
static int create_tri_index_grids(const Span< int > grid_indices, const BitGroupVector<> &grid_hidden, const int gridsize, const int skip, const int totgrid, MutableSpan< uint3 > data)
const T & bmesh_cd_vert_get(const BMVert &vert, const int offset)
Definition draw_pbvh.cc:394
static const GPUVertFormat & mask_format()
Definition draw_pbvh.cc:296
std::string GenericRequest
Definition DRW_pbvh.hh:39
static BLI_NOINLINE void update_face_sets_mesh(const Object &object, const OrigMeshData &orig_mesh_data, const IndexMask &node_mask, MutableSpan< gpu::VertBufPtr > vbos)
Definition draw_pbvh.cc:652
std::variant< CustomRequest, GenericRequest > AttributeRequest
Definition DRW_pbvh.hh:48
void DRW_cdlayer_attr_aliases_add(GPUVertFormat *format, const char *base_name, bke::AttrType data_type, blender::StringRef layer_name, bool is_active_render, bool is_active_layer)
GPUVertFormat init_format_for_attribute(const bke::AttrType data_type, const StringRef vbo_name)
std::unique_ptr< IndexBuf, IndexBufDeleter > IndexBufPtr
std::unique_ptr< gpu::VertBuf, gpu::VertBufDeleter > VertBufPtr
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
VecBase< uint32_t, 2 > uint2
blender::VecBase< int16_t, 3 > short3
VecBase< uint32_t, 3 > uint3
blender::VecBase< int16_t, 4 > short4
blender::VecBase< uint8_t, 4 > uchar4
uint64_t get_default_hash(const T &v, const Args &...args)
Definition BLI_hash.hh:233
VecBase< int32_t, 3 > int3
VecBase< float, 3 > float3
const char * name
blender::bke::AttrType type
blender::bke::AttrDomain domain
BMHeader head
void * data
BMHeader head
BMHeader head
int grid_size
Definition BKE_ccg.hh:33
int level
Definition BKE_ccg.hh:26
SubdivCCG * subdiv_ccg
Definition BKE_paint.hh:395
blender::BitGroupVector grid_hidden
blender::Span< int > grid_to_face_map
uint64_t operator()(const draw::pbvh::AttributeRequest &value) const
Definition draw_pbvh.cc:43
bke::AttributeAccessor attributes
Definition draw_pbvh.cc:75
Vector< AttributeRequest > attributes
Definition DRW_pbvh.hh:51
i
Definition text_draw.cc:230
PointerRNA * ptr
Definition wm_files.cc:4238