/* SPDX-FileCopyrightText: 2024 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include "BLI_map.hh"
#include "BLI_utildefines.h"
#include "BLI_vector.hh"

#include "DNA_object_types.h"

#include "BKE_attribute.hh"
#include "BKE_attribute_math.hh"
#include "BKE_customdata.hh"
#include "BKE_mesh.hh"
#include "BKE_paint.hh"
#include "BKE_pbvh_api.hh"
#include "BKE_subdiv_ccg.hh"

#include "DEG_depsgraph_query.hh"

#include "GPU_batch.hh"

#include "DRW_engine.hh"
#include "DRW_pbvh.hh"

#include "attribute_convert.hh"
#include "bmesh.hh"
namespace blender {

template<> struct DefaultHash<draw::pbvh::AttributeRequest> {
  uint64_t operator()(const draw::pbvh::AttributeRequest &value) const
  {
    using namespace draw::pbvh;
    if (const CustomRequest *request_type = std::get_if<CustomRequest>(&value)) {
      return get_default_hash(*request_type);
    }
    const GenericRequest &attr = std::get<GenericRequest>(value);
    return get_default_hash(attr.name);
  }
};

}  // namespace blender

namespace blender::draw::pbvh {

uint64_t ViewportRequest::hash() const
{
  return get_default_hash(attributes, use_coarse_grids);
}

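/**
 * Attribute metadata retrieved from the original mesh rather than the evaluated copy. The
 * evaluated mesh can be empty or out of date in sculpt mode (see the note in
 * #calc_material_indices), so attribute names, face set colors, and attribute accessors are
 * read from the original mesh instead.
 */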
struct OrigMeshData {
  StringRef active_color;
  StringRef default_color;
  StringRef active_uv_map;
  StringRef default_uv_map;
  int face_set_default;
  int face_set_seed;
  bke::AttributeAccessor attributes;
  OrigMeshData(const Mesh &mesh)
      : active_color(mesh.active_color_attribute),
        default_color(mesh.default_color_attribute),
        active_uv_map(CustomData_get_active_layer_name(&mesh.corner_data, CD_PROP_FLOAT2)),
        default_uv_map(CustomData_get_render_layer_name(&mesh.corner_data, CD_PROP_FLOAT2)),
        face_set_default(mesh.face_sets_color_default),
        face_set_seed(mesh.face_sets_color_seed),
        attributes(mesh.attributes())
  {
  }
};

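/**
 * A cache of all GPU data used to draw a sculpt mesh, stored per BVH node. Index buffers,
 * vertex buffers, and batches are created lazily and only rebuilt for nodes tagged dirty by the
 * `tag_*` methods below.
 */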
class DrawCacheImpl : public DrawCache {
  struct AttributeData {
    /** A vertex buffer for each BVH node. Null entries have not been created yet. */
    Vector<gpu::VertBuf *> vbos;
    /** A dirty bit for each node, set when the attribute's values change. */
    BitVector<> dirty_nodes;
    /** Tag the attribute dirty for the masked nodes. */
    void tag_dirty(const IndexMask &node_mask);
  };

  /** Whether the "flat" (unshared) vertex layout is used, per node. Only meaningful for grids. */
  BitVector<> use_flat_layout_;
  /** The material index used to draw each node. */
  Array<int> material_indices_;

  /** Wireframe index buffers for each node. */
  Vector<gpu::IndexBuf *> lines_ibos_;
  /** Coarse wireframe index buffers for each node. */
  Vector<gpu::IndexBuf *> lines_ibos_coarse_;
  /** Triangle index buffers for each node. */
  Vector<gpu::IndexBuf *> tris_ibos_;
  /** Coarse triangle index buffers for each node. */
  Vector<gpu::IndexBuf *> tris_ibos_coarse_;
  /** Vertex buffers and dirty state for every attribute that has been requested so far. */
  Map<AttributeRequest, AttributeData> attribute_vbos_;

  /** Batches for drawing the wireframe geometry of each node. */
  Vector<gpu::Batch *> lines_batches_;
  /** Batches for drawing the coarse wireframe geometry of each node. */
  Vector<gpu::Batch *> lines_batches_coarse_;
  /** Triangle batches per node, stored for each combination of requested attributes. */
  Map<ViewportRequest, Vector<gpu::Batch *>> tris_batches_;

  /** Nodes with a changed visible triangle count; their buffers and batches must be rebuilt. */
  BitVector<> dirty_topology_;

 public:
  ~DrawCacheImpl() override;

  void tag_positions_changed(const IndexMask &node_mask) override;
  void tag_visibility_changed(const IndexMask &node_mask) override;
  void tag_topology_changed(const IndexMask &node_mask) override;
  void tag_face_sets_changed(const IndexMask &node_mask) override;
  void tag_masks_changed(const IndexMask &node_mask) override;
  void tag_attribute_changed(const IndexMask &node_mask, StringRef attribute_name) override;

  Span<gpu::Batch *> ensure_tris_batches(const Object &object,
                                         const ViewportRequest &request,
                                         const IndexMask &nodes_to_update) override;

  Span<gpu::Batch *> ensure_lines_batches(const Object &object,
                                          const ViewportRequest &request,
                                          const IndexMask &nodes_to_update) override;

  Span<int> ensure_material_indices(const Object &object) override;

 private:
  /** Free the buffers and batches of all nodes tagged in #dirty_topology_. */
  void free_nodes_with_changed_topology(const bke::pbvh::Tree &pbvh);

  BitSpan ensure_use_flat_layout(const Object &object, const OrigMeshData &orig_mesh_data);

  Span<gpu::VertBuf *> ensure_attribute_data(const Object &object,
                                             const OrigMeshData &orig_mesh_data,
                                             const AttributeRequest &attr,
                                             const IndexMask &node_mask);

  Span<gpu::IndexBuf *> ensure_tri_indices(const Object &object,
                                           const OrigMeshData &orig_mesh_data,
                                           const IndexMask &node_mask,
                                           bool coarse);

  Span<gpu::IndexBuf *> ensure_lines_indices(const Object &object,
                                             const OrigMeshData &orig_mesh_data,
                                             const IndexMask &node_mask,
                                             bool coarse);
};

void DrawCacheImpl::AttributeData::tag_dirty(const IndexMask &node_mask)
{
  this->dirty_nodes.resize(std::max(this->dirty_nodes.size(), node_mask.min_array_size()), false);
  node_mask.set_bits(this->dirty_nodes);
}

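/* Vertex normals are derived from positions, so a position change also tags the normal VBOs
 * dirty. */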
void DrawCacheImpl::tag_positions_changed(const IndexMask &node_mask)
{
  if (DrawCacheImpl::AttributeData *data = attribute_vbos_.lookup_ptr(CustomRequest::Position)) {
    data->tag_dirty(node_mask);
  }
  if (DrawCacheImpl::AttributeData *data = attribute_vbos_.lookup_ptr(CustomRequest::Normal)) {
    data->tag_dirty(node_mask);
  }
}

void DrawCacheImpl::tag_visibility_changed(const IndexMask &node_mask)
{
  dirty_topology_.resize(std::max(dirty_topology_.size(), node_mask.min_array_size()), false);
  node_mask.set_bits(dirty_topology_);
}

void DrawCacheImpl::tag_topology_changed(const IndexMask &node_mask)
{
  /* A topology change also changes the visible triangle count, so the same buffers and batches
   * have to be rebuilt; reuse the visibility tagging. */
  this->tag_visibility_changed(node_mask);
}

void DrawCacheImpl::tag_face_sets_changed(const IndexMask &node_mask)
{
  if (DrawCacheImpl::AttributeData *data = attribute_vbos_.lookup_ptr(CustomRequest::FaceSet)) {
    data->tag_dirty(node_mask);
  }
}

void DrawCacheImpl::tag_masks_changed(const IndexMask &node_mask)
{
  if (DrawCacheImpl::AttributeData *data = attribute_vbos_.lookup_ptr(CustomRequest::Mask)) {
    data->tag_dirty(node_mask);
  }
}

void DrawCacheImpl::tag_attribute_changed(const IndexMask &node_mask, StringRef attribute_name)
{
  for (const auto &[data_request, data] : attribute_vbos_.items()) {
    if (const GenericRequest *request = std::get_if<GenericRequest>(&data_request)) {
      if (request->name == attribute_name) {
        data.tag_dirty(node_mask);
      }
    }
  }
}

DrawCache &ensure_draw_data(std::unique_ptr<bke::pbvh::DrawCache> &ptr)
{
  if (!ptr) {
    ptr = std::make_unique<DrawCacheImpl>();
  }
  return dynamic_cast<DrawCache &>(*ptr);
}

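/* Free the GPU buffers or batches of the masked nodes. The mask is intersected with the
 * vector's index range first, so callers may pass masks referencing nodes that have no buffers
 * yet. */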
static void free_ibos(const MutableSpan<gpu::IndexBuf *> ibos, const IndexMask &node_mask)
{
  IndexMaskMemory memory;
  const IndexMask mask = IndexMask::from_intersection(node_mask, ibos.index_range(), memory);
  mask.foreach_index([&](const int i) { GPU_INDEXBUF_DISCARD_SAFE(ibos[i]); });
}

static void free_vbos(const MutableSpan<gpu::VertBuf *> vbos, const IndexMask &node_mask)
{
  IndexMaskMemory memory;
  const IndexMask mask = IndexMask::from_intersection(node_mask, vbos.index_range(), memory);
  mask.foreach_index([&](const int i) { GPU_VERTBUF_DISCARD_SAFE(vbos[i]); });
}

static void free_batches(const MutableSpan<gpu::Batch *> batches, const IndexMask &node_mask)
{
  IndexMaskMemory memory;
  const IndexMask mask = IndexMask::from_intersection(node_mask, batches.index_range(), memory);
  mask.foreach_index([&](const int i) { GPU_BATCH_DISCARD_SAFE(batches[i]); });
}

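/* Vertex formats for the fixed attribute requests. The attribute names used in these formats
 * must match the vertex inputs of the sculpt draw shaders. */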
static GPUVertFormat position_format()
{
  static GPUVertFormat format{};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }
  return format;
}

static GPUVertFormat normal_format()
{
  static GPUVertFormat format{};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
  }
  return format;
}

static GPUVertFormat mask_format()
{
  static GPUVertFormat format{};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "msk", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
  }
  return format;
}

static GPUVertFormat face_set_format()
{
  static GPUVertFormat format{};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
  }
  return format;
}

static GPUVertFormat attribute_format(const OrigMeshData &orig_mesh_data,
                                      const StringRefNull name,
                                      const eCustomDataType data_type)
{
  GPUVertFormat format = init_format_for_attribute(data_type, "data");

  bool is_render = false, is_active = false;
  const char *prefix = "a";

  if (CD_TYPE_AS_MASK(data_type) & CD_MASK_COLOR_ALL) {
    prefix = "c";
    is_active = orig_mesh_data.active_color == name;
    is_render = orig_mesh_data.default_color == name;
  }
  if (data_type == CD_PROP_FLOAT2) {
    prefix = "u";
    is_active = orig_mesh_data.active_uv_map == name;
    is_render = orig_mesh_data.default_uv_map == name;
  }

  DRW_cdlayer_attr_aliases_add(&format, prefix, data_type, name.c_str(), is_render, is_active);
  return format;
}

static GPUVertFormat format_for_request(const OrigMeshData &orig_mesh_data,
                                        const AttributeRequest &request)
{
  if (const CustomRequest *request_type = std::get_if<CustomRequest>(&request)) {
    switch (*request_type) {
      case CustomRequest::Position:
        return position_format();
      case CustomRequest::Normal:
        return normal_format();
      case CustomRequest::Mask:
        return mask_format();
      case CustomRequest::FaceSet:
        return face_set_format();
    }
  }
  else {
    const GenericRequest &attr = std::get<GenericRequest>(request);
    return attribute_format(orig_mesh_data, attr.name, attr.type);
  }
  BLI_assert_unreachable();
  return {};
}

static bool pbvh_attr_supported(const AttributeRequest &request)
{
  if (std::holds_alternative<CustomRequest>(request)) {
    return true;
  }
  const GenericRequest &attr = std::get<GenericRequest>(request);
  if (attr.domain == bke::AttrDomain::Edge) {
    /* blender::bke::pbvh::Tree drawing does not support edge domain attributes. */
    return false;
  }
  bool type_supported = false;
  bke::attribute_math::convert_to_static_type(attr.type, [&](auto dummy) {
    using T = decltype(dummy);
    using Converter = AttributeConverter<T>;
    using VBOType = typename Converter::VBOType;
    if constexpr (!std::is_void_v<VBOType>) {
      type_supported = true;
    }
  });
  return type_supported;
}

static short4 normal_float_to_short(const float3 &value)
{
  short3 result;
  normal_float_to_short_v3(result, value);
  return short4(result.x, result.y, result.z, 0);
}

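/* The following helpers extract values from a mesh attribute on a given domain into a node's
 * vertex buffer, writing one element per face corner in the order of the node's faces. */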
template<typename T>
void extract_data_vert_mesh(const OffsetIndices<int> faces,
                            const Span<int> corner_verts,
                            const Span<T> attribute,
                            const Span<int> face_indices,
                            gpu::VertBuf &vbo)
{
  using Converter = AttributeConverter<T>;
  using VBOType = typename Converter::VBOType;
  VBOType *data = vbo.data<VBOType>().data();
  for (const int face : face_indices) {
    for (const int vert : corner_verts.slice(faces[face])) {
      *data = Converter::convert(attribute[vert]);
      data++;
    }
  }
}

template<typename T>
void extract_data_face_mesh(const OffsetIndices<int> faces,
                            const Span<T> attribute,
                            const Span<int> face_indices,
                            gpu::VertBuf &vbo)
{
  using Converter = AttributeConverter<T>;
  using VBOType = typename Converter::VBOType;

  VBOType *data = vbo.data<VBOType>().data();
  for (const int face : face_indices) {
    const int face_size = faces[face].size();
    std::fill_n(data, face_size, Converter::convert(attribute[face]));
    data += face_size;
  }
}

template<typename T>
void extract_data_corner_mesh(const OffsetIndices<int> faces,
                              const Span<T> attribute,
                              const Span<int> face_indices,
                              gpu::VertBuf &vbo)
{
  using Converter = AttributeConverter<T>;
  using VBOType = typename Converter::VBOType;

  VBOType *data = vbo.data<VBOType>().data();
  for (const int face : face_indices) {
    for (const int corner : faces[face]) {
      *data = Converter::convert(attribute[corner]);
      data++;
    }
  }
}

template<typename T> const T &bmesh_cd_vert_get(const BMVert &vert, const int offset)
{
  return *static_cast<const T *>(POINTER_OFFSET(vert.head.data, offset));
}

template<typename T> const T &bmesh_cd_loop_get(const BMLoop &loop, const int offset)
{
  return *static_cast<const T *>(POINTER_OFFSET(loop.head.data, offset));
}

template<typename T> const T &bmesh_cd_face_get(const BMFace &face, const int offset)
{
  return *static_cast<const T *>(POINTER_OFFSET(face.head.data, offset));
}

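/* BMesh attribute extraction: every visible face is a triangle, and values are written for its
 * three corners directly, since triangle batches for BMesh nodes use no index buffer (see
 * #ensure_tri_indices). Hidden faces are skipped entirely. */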
template<typename T>
void extract_data_vert_bmesh(const Set<BMFace *, 0> &faces, const int cd_offset, gpu::VertBuf &vbo)
{
  using Converter = AttributeConverter<T>;
  using VBOType = typename Converter::VBOType;
  VBOType *data = vbo.data<VBOType>().data();

  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    const BMLoop *l = face->l_first;
    *data = Converter::convert(bmesh_cd_vert_get<T>(*l->prev->v, cd_offset));
    data++;
    *data = Converter::convert(bmesh_cd_vert_get<T>(*l->v, cd_offset));
    data++;
    *data = Converter::convert(bmesh_cd_vert_get<T>(*l->next->v, cd_offset));
    data++;
  }
}

template<typename T>
void extract_data_face_bmesh(const Set<BMFace *, 0> &faces, const int cd_offset, gpu::VertBuf &vbo)
{
  using Converter = AttributeConverter<T>;
  using VBOType = typename Converter::VBOType;
  VBOType *data = vbo.data<VBOType>().data();

  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    std::fill_n(data, 3, Converter::convert(bmesh_cd_face_get<T>(*face, cd_offset)));
    data += 3;
  }
}

template<typename T>
void extract_data_corner_bmesh(const Set<BMFace *, 0> &faces,
                               const int cd_offset,
                               gpu::VertBuf &vbo)
{
  using Converter = AttributeConverter<T>;
  using VBOType = typename Converter::VBOType;
  VBOType *data = vbo.data<VBOType>().data();

  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    const BMLoop *l = face->l_first;
    *data = Converter::convert(bmesh_cd_loop_get<T>(*l->prev, cd_offset));
    data++;
    *data = Converter::convert(bmesh_cd_loop_get<T>(*l, cd_offset));
    data++;
    *data = Converter::convert(bmesh_cd_loop_get<T>(*l->next, cd_offset));
    data++;
  }
}

static const CustomData *get_cdata(const BMesh &bm, const bke::AttrDomain domain)
{
  switch (domain) {
    case bke::AttrDomain::Point:
      return &bm.vdata;
    case bke::AttrDomain::Corner:
      return &bm.ldata;
    case bke::AttrDomain::Face:
      return &bm.pdata;
    default:
      return nullptr;
  }
}

template<typename T> T fallback_value_for_fill()
{
  return T();
}

template<> ColorGeometry4f fallback_value_for_fill()
{
  return ColorGeometry4f(1.0f, 1.0f, 1.0f, 1.0f);
}

static int count_visible_tris_bmesh(const Set<BMFace *, 0> &faces)
{
  return std::count_if(faces.begin(), faces.end(), [&](const BMFace *face) {
    return !BM_elem_flag_test_bool(face, BM_ELEM_HIDDEN);
  });
}

DrawCacheImpl::~DrawCacheImpl()
{
  /* This destructor should support inconsistent vector lengths between attributes and index
   * buffers. That's why the implementation isn't shared with #free_nodes_with_changed_topology.
   * Also the GPU buffers and batches should just use RAII anyway. */
  free_ibos(lines_ibos_, lines_ibos_.index_range());
  free_ibos(lines_ibos_coarse_, lines_ibos_coarse_.index_range());
  free_ibos(tris_ibos_, tris_ibos_.index_range());
  free_ibos(tris_ibos_coarse_, tris_ibos_coarse_.index_range());
  for (DrawCacheImpl::AttributeData &data : attribute_vbos_.values()) {
    free_vbos(data.vbos, data.vbos.index_range());
  }

  free_batches(lines_batches_, lines_batches_.index_range());
  free_batches(lines_batches_coarse_, lines_batches_coarse_.index_range());
  for (MutableSpan<gpu::Batch *> batches : tris_batches_.values()) {
    free_batches(batches, batches.index_range());
  }
}

void DrawCacheImpl::free_nodes_with_changed_topology(const bke::pbvh::Tree &pbvh)
{
  /* NOTE: Theoretically we shouldn't need to free batches with a changed triangle count, but
   * currently it's the simplest way to reallocate all the GPU data while keeping everything in a
   * consistent state. */
  IndexMaskMemory memory;
  const IndexMask nodes_to_free = IndexMask::from_bits(dirty_topology_, memory);
  if (nodes_to_free.is_empty()) {
    return;
  }

  dirty_topology_.clear_and_shrink();

  free_ibos(lines_ibos_, nodes_to_free);
  free_ibos(lines_ibos_coarse_, nodes_to_free);
  free_ibos(tris_ibos_, nodes_to_free);
  free_ibos(tris_ibos_coarse_, nodes_to_free);
  if (pbvh.type() == bke::pbvh::Type::BMesh) {
    /* For BMesh, VBOs are only filled with data for visible triangles, and topology can also
     * completely change due to dynamic topology, so VBOs must be rebuilt from scratch. For other
     * types, actual topology doesn't change, and visibility changes are accounted for by the
     * index buffers. */
    for (AttributeData &data : attribute_vbos_.values()) {
      free_vbos(data.vbos, nodes_to_free);
    }
  }

  free_batches(lines_batches_, nodes_to_free);
  free_batches(lines_batches_coarse_, nodes_to_free);
  for (MutableSpan<gpu::Batch *> batches : tris_batches_.values()) {
    free_batches(batches, nodes_to_free);
  }
}

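/* Fill the normal VBO for a mesh node. Sharp faces get the face normal replicated across all of
 * their corners, while smooth faces use the per-vertex normals. */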
static void fill_vbo_normal_mesh(const OffsetIndices<int> faces,
                                 const Span<int> corner_verts,
                                 const Span<bool> sharp_faces,
                                 const Span<float3> vert_normals,
                                 const Span<float3> face_normals,
                                 const Span<int> face_indices,
                                 gpu::VertBuf &vert_buf)
{
  short4 *data = vert_buf.data<short4>().data();

  for (const int face : face_indices) {
    if (!sharp_faces.is_empty() && sharp_faces[face]) {
      const int face_size = faces[face].size();
      std::fill_n(data, face_size, normal_float_to_short(face_normals[face]));
      data += face_size;
    }
    else {
      for (const int vert : corner_verts.slice(faces[face])) {
        *data = normal_float_to_short(vert_normals[vert]);
        data++;
      }
    }
  }
}

static void fill_vbo_mask_mesh(const OffsetIndices<int> faces,
                               const Span<int> corner_verts,
                               const Span<float> mask,
                               const Span<int> face_indices,
                               gpu::VertBuf &vbo)
{
  float *data = vbo.data<float>().data();
  for (const int face : face_indices) {
    for (const int vert : corner_verts.slice(faces[face])) {
      *data = mask[vert];
      data++;
    }
  }
}

static void fill_vbo_face_set_mesh(const OffsetIndices<int> faces,
                                   const Span<int> face_sets,
                                   const int color_default,
                                   const int color_seed,
                                   const Span<int> face_indices,
                                   gpu::VertBuf &vert_buf)
{
  uchar4 *data = vert_buf.data<uchar4>().data();
  for (const int face : face_indices) {
    const int id = face_sets[face];

    uchar4 fset_color(UCHAR_MAX);
    if (id != color_default) {
      BKE_paint_face_set_overlay_color_get(id, color_seed, fset_color);
    }
    else {
      /* Skip the overlay color for the default face set so that it renders white. */
      fset_color[0] = fset_color[1] = fset_color[2] = UCHAR_MAX;
    }

    const int face_size = faces[face].size();
    std::fill_n(data, face_size, fset_color);
    data += face_size;
  }
}

static void fill_vbo_attribute_mesh(const OffsetIndices<int> faces,
                                    const Span<int> corner_verts,
                                    const GSpan attribute,
                                    const bke::AttrDomain domain,
                                    const Span<int> face_indices,
                                    gpu::VertBuf &vert_buf)
{
  bke::attribute_math::convert_to_static_type(attribute.type(), [&](auto dummy) {
    using T = decltype(dummy);
    if constexpr (!std::is_void_v<typename AttributeConverter<T>::VBOType>) {
      const Span<T> src = attribute.typed<T>();
      switch (domain) {
        case bke::AttrDomain::Point:
          extract_data_vert_mesh<T>(faces, corner_verts, src, face_indices, vert_buf);
          break;
        case bke::AttrDomain::Face:
          extract_data_face_mesh<T>(faces, src, face_indices, vert_buf);
          break;
        case bke::AttrDomain::Corner:
          extract_data_corner_mesh<T>(faces, src, face_indices, vert_buf);
          break;
        default:
          BLI_assert_unreachable();
      }
    }
  });
}

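/* Multires grids are filled in one of two layouts: the default layout with one vertex per grid
 * point, shared between neighboring quads via the index buffers, or a "flat" layout with four
 * vertices per quad, required to display per-face flat normals for sharp faces (see
 * #calc_use_flat_layout). */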
static void fill_vbo_position_grids(const CCGKey &key,
                                    const Span<float3> positions,
                                    const bool use_flat_layout,
                                    const Span<int> grids,
                                    gpu::VertBuf &vert_buf)
{
  float3 *data = vert_buf.data<float3>().data();
  if (use_flat_layout) {
    const int grid_size_1 = key.grid_size - 1;
    for (const int grid : grids) {
      const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
      for (int y = 0; y < grid_size_1; y++) {
        for (int x = 0; x < grid_size_1; x++) {
          *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y)];
          data++;
          *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y)];
          data++;
          *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)];
          data++;
          *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y + 1)];
          data++;
        }
      }
    }
  }
  else {
    for (const int grid : grids) {
      const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
      std::copy_n(grid_positions.data(), grid_positions.size(), data);
      data += grid_positions.size();
    }
  }
}

static void fill_vbo_normal_grids(const CCGKey &key,
                                  const Span<float3> positions,
                                  const Span<float3> normals,
                                  const Span<int> grid_to_face_map,
                                  const Span<bool> sharp_faces,
                                  const bool use_flat_layout,
                                  const Span<int> grids,
                                  gpu::VertBuf &vert_buf)
{
  short4 *data = vert_buf.data<short4>().data();

  if (use_flat_layout) {
    const int grid_size_1 = key.grid_size - 1;
    for (const int grid : grids) {
      const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
      const Span<float3> grid_normals = normals.slice(bke::ccg::grid_range(key, grid));
      if (!sharp_faces.is_empty() && sharp_faces[grid_to_face_map[grid]]) {
        for (int y = 0; y < grid_size_1; y++) {
          for (int x = 0; x < grid_size_1; x++) {
            float3 no;
            normal_quad_v3(no,
                           grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y + 1)],
                           grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)],
                           grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y)],
                           grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y)]);
            std::fill_n(data, 4, normal_float_to_short(no));
            data += 4;
          }
        }
      }
      else {
        for (int y = 0; y < grid_size_1; y++) {
          for (int x = 0; x < grid_size_1; x++) {
            std::fill_n(
                data,
                4,
                normal_float_to_short(grid_normals[CCG_grid_xy_to_index(key.grid_size, x, y)]));
            data += 4;
          }
        }
      }
    }
  }
  else {
    /* The non-flat VBO layout does not support sharp faces. */
    for (const int grid : grids) {
      for (const float3 &normal : normals.slice(bke::ccg::grid_range(key, grid))) {
        *data = normal_float_to_short(normal);
        data++;
      }
    }
  }
}

static void fill_vbo_mask_grids(const CCGKey &key,
                                const Span<float> masks,
                                const bool use_flat_layout,
                                const Span<int> grids,
                                gpu::VertBuf &vert_buf)
{
  float *data = vert_buf.data<float>().data();
  if (use_flat_layout) {
    const int grid_size_1 = key.grid_size - 1;
    for (const int grid : grids) {
      const Span<float> grid_masks = masks.slice(bke::ccg::grid_range(key, grid));
      for (int y = 0; y < grid_size_1; y++) {
        for (int x = 0; x < grid_size_1; x++) {
          *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x, y)];
          data++;
          *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x + 1, y)];
          data++;
          *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)];
          data++;
          *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x, y + 1)];
          data++;
        }
      }
    }
  }
  else {
    for (const int grid : grids) {
      const Span<float> grid_masks = masks.slice(bke::ccg::grid_range(key, grid));
      std::copy_n(grid_masks.data(), grid_masks.size(), data);
      data += grid_masks.size();
    }
  }
}

static void fill_vbo_face_set_grids(const CCGKey &key,
                                    const Span<int> grid_to_face_map,
                                    const Span<int> face_sets,
                                    const int color_default,
                                    const int color_seed,
                                    const bool use_flat_layout,
                                    const Span<int> grid_indices,
                                    gpu::VertBuf &vert_buf)
{
  const int verts_per_grid = use_flat_layout ? square_i(key.grid_size - 1) * 4 :
                                               square_i(key.grid_size);
  uchar4 *data = vert_buf.data<uchar4>().data();
  for (const int i : grid_indices.index_range()) {
    uchar4 color{UCHAR_MAX};
    const int fset = face_sets[grid_to_face_map[grid_indices[i]]];
    if (fset != color_default) {
      BKE_paint_face_set_overlay_color_get(fset, color_seed, color);
    }

    std::fill_n(data, verts_per_grid, color);
    data += verts_per_grid;
  }
}

static void fill_vbos_grids(const Object &object,
                            const OrigMeshData &orig_mesh_data,
                            const BitSpan use_flat_layout,
                            const IndexMask &node_mask,
                            const AttributeRequest &request,
                            const MutableSpan<gpu::VertBuf *> vbos)
{
  const SculptSession &ss = *object.sculpt;
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
  const SubdivCCG &subdiv_ccg = *ss.subdiv_ccg;
  const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);

  if (const CustomRequest *request_type = std::get_if<CustomRequest>(&request)) {
    switch (*request_type) {
      case CustomRequest::Position: {
        node_mask.foreach_index(GrainSize(1), [&](const int i) {
          fill_vbo_position_grids(
              key, subdiv_ccg.positions, use_flat_layout[i], nodes[i].grids(), *vbos[i]);
        });
        break;
      }
      case CustomRequest::Normal: {
        const Span<int> grid_to_face_map = subdiv_ccg.grid_to_face_map;
        const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
        const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face",
                                                                bke::AttrDomain::Face);
        node_mask.foreach_index(GrainSize(1), [&](const int i) {
          fill_vbo_normal_grids(key,
                                subdiv_ccg.positions,
                                subdiv_ccg.normals,
                                grid_to_face_map,
                                sharp_faces,
                                use_flat_layout[i],
                                nodes[i].grids(),
                                *vbos[i]);
        });

        break;
      }
      case CustomRequest::Mask: {
        const Span<float> masks = subdiv_ccg.masks;
        if (!masks.is_empty()) {
          node_mask.foreach_index(GrainSize(1), [&](const int i) {
            fill_vbo_mask_grids(key, masks, use_flat_layout[i], nodes[i].grids(), *vbos[i]);
          });
        }
        else {
          node_mask.foreach_index(GrainSize(64),
                                  [&](const int i) { vbos[i]->data<float>().fill(0.0f); });
        }
        break;
      }
      case CustomRequest::FaceSet: {
        const int face_set_default = orig_mesh_data.face_set_default;
        const int face_set_seed = orig_mesh_data.face_set_seed;
        const Span<int> grid_to_face_map = subdiv_ccg.grid_to_face_map;
        const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
        if (const VArray<int> face_sets = *attributes.lookup<int>(".sculpt_face_set",
                                                                  bke::AttrDomain::Face))
        {
          const VArraySpan<int> face_sets_span(face_sets);
          node_mask.foreach_index(GrainSize(1), [&](const int i) {
            fill_vbo_face_set_grids(key,
                                    grid_to_face_map,
                                    face_sets_span,
                                    face_set_default,
                                    face_set_seed,
                                    use_flat_layout[i],
                                    nodes[i].grids(),
                                    *vbos[i]);
          });
        }
        else {
          node_mask.foreach_index(
              GrainSize(1), [&](const int i) { vbos[i]->data<uchar4>().fill(uchar4{UCHAR_MAX}); });
        }
        break;
      }
    }
  }
  else {
    const eCustomDataType type = std::get<GenericRequest>(request).type;
    node_mask.foreach_index(GrainSize(1), [&](const int i) {
      bke::attribute_math::convert_to_static_type(type, [&](auto dummy) {
        using T = decltype(dummy);
        using Converter = AttributeConverter<T>;
        using VBOType = typename Converter::VBOType;
        if constexpr (!std::is_void_v<VBOType>) {
          vbos[i]->data<VBOType>().fill(Converter::convert(fallback_value_for_fill<T>()));
        }
      });
    });
  }
}

static void fill_vbos_mesh(const Object &object,
                           const OrigMeshData &orig_mesh_data,
                           const IndexMask &node_mask,
                           const AttributeRequest &request,
                           const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
  const Mesh &mesh = *static_cast<const Mesh *>(object.data);
  const OffsetIndices<int> faces = mesh.faces();
  const Span<int> corner_verts = mesh.corner_verts();

  if (const CustomRequest *request_type = std::get_if<CustomRequest>(&request)) {
    switch (*request_type) {
      case CustomRequest::Position: {
        const Span<float3> vert_positions = bke::pbvh::vert_positions_eval_from_eval(object);
        node_mask.foreach_index(GrainSize(1), [&](const int i) {
          extract_data_vert_mesh<float3>(
              faces, corner_verts, vert_positions, nodes[i].faces(), *vbos[i]);
        });
        break;
      }
      case CustomRequest::Normal: {
        const Span<float3> vert_normals = bke::pbvh::vert_normals_eval_from_eval(object);
        const Span<float3> face_normals = bke::pbvh::face_normals_eval_from_eval(object);
        const bke::AttributeAccessor attributes = mesh.attributes();
        const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face",
                                                                bke::AttrDomain::Face);
        node_mask.foreach_index(GrainSize(1), [&](const int i) {
          fill_vbo_normal_mesh(faces,
                               corner_verts,
                               sharp_faces,
                               vert_normals,
                               face_normals,
                               nodes[i].faces(),
                               *vbos[i]);
        });
        break;
      }
      case CustomRequest::Mask: {
        const VArraySpan mask = *orig_mesh_data.attributes.lookup<float>(".sculpt_mask",
                                                                         bke::AttrDomain::Point);
        if (!mask.is_empty()) {
          node_mask.foreach_index(GrainSize(1), [&](const int i) {
            fill_vbo_mask_mesh(faces, corner_verts, mask, nodes[i].faces(), *vbos[i]);
          });
        }
        else {
          node_mask.foreach_index(GrainSize(64),
                                  [&](const int i) { vbos[i]->data<float>().fill(0.0f); });
        }
        break;
      }
      case CustomRequest::FaceSet: {
        const int face_set_default = orig_mesh_data.face_set_default;
        const int face_set_seed = orig_mesh_data.face_set_seed;
        const VArraySpan face_sets = *orig_mesh_data.attributes.lookup<int>(".sculpt_face_set",
                                                                            bke::AttrDomain::Face);
        if (!face_sets.is_empty()) {
          node_mask.foreach_index(GrainSize(1), [&](const int i) {
            fill_vbo_face_set_mesh(
                faces, face_sets, face_set_default, face_set_seed, nodes[i].faces(), *vbos[i]);
          });
        }
        else {
          node_mask.foreach_index(GrainSize(64),
                                  [&](const int i) { vbos[i]->data<uchar4>().fill(uchar4(255)); });
        }
        break;
      }
    }
  }
  else {
    const GenericRequest &attr = std::get<GenericRequest>(request);
    const StringRef name = attr.name;
    const bke::AttrDomain domain = attr.domain;
    const eCustomDataType data_type = attr.type;
    const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
    const GVArraySpan attribute = *attributes.lookup_or_default(name, domain, data_type);
    node_mask.foreach_index(GrainSize(1), [&](const int i) {
      fill_vbo_attribute_mesh(faces, corner_verts, attribute, domain, nodes[i].faces(), *vbos[i]);
    });
  }
}

static void fill_vbo_position_bmesh(const Set<BMFace *, 0> &faces, gpu::VertBuf &vbo)
{
  float3 *data = vbo.data<float3>().data();
  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    const BMLoop *l = face->l_first;
    *data = l->prev->v->co;
    data++;
    *data = l->v->co;
    data++;
    *data = l->next->v->co;
    data++;
  }
}

static void fill_vbo_normal_bmesh(const Set<BMFace *, 0> &faces, gpu::VertBuf &vbo)
{
  short4 *data = vbo.data<short4>().data();
  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    if (BM_elem_flag_test(face, BM_ELEM_SMOOTH)) {
      const BMLoop *l = face->l_first;
      *data = normal_float_to_short(l->prev->v->no);
      data++;
      *data = normal_float_to_short(l->v->no);
      data++;
      *data = normal_float_to_short(l->next->v->no);
      data++;
    }
    else {
      std::fill_n(data, 3, normal_float_to_short(face->no));
      data += 3;
    }
  }
}

static void fill_vbo_mask_bmesh(const Set<BMFace *, 0> &faces,
                                const int cd_offset,
                                gpu::VertBuf &vbo)
{
  float *data = vbo.data<float>().data();
  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    const BMLoop *l = face->l_first;
    *data = bmesh_cd_vert_get<float>(*l->prev->v, cd_offset);
    data++;
    *data = bmesh_cd_vert_get<float>(*l->v, cd_offset);
    data++;
    *data = bmesh_cd_vert_get<float>(*l->next->v, cd_offset);
    data++;
  }
}

static void fill_vbo_face_set_bmesh(const Set<BMFace *, 0> &faces,
                                    const int color_default,
                                    const int color_seed,
                                    const int offset,
                                    gpu::VertBuf &vbo)
{
  uchar4 *data = vbo.data<uchar4>().data();
  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    uchar4 color{UCHAR_MAX};
    const int fset = bmesh_cd_face_get<int>(*face, offset);
    if (fset != color_default) {
      BKE_paint_face_set_overlay_color_get(fset, color_seed, color);
    }
    std::fill_n(data, 3, color);
    data += 3;
  }
}

static void fill_vbo_attribute_bmesh(const Set<BMFace *, 0> &faces,
                                     const eCustomDataType data_type,
                                     const bke::AttrDomain domain,
                                     const int offset,
                                     gpu::VertBuf &vbo)
{
  bke::attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
    using T = decltype(dummy);
    if constexpr (!std::is_void_v<typename AttributeConverter<T>::VBOType>) {
      switch (domain) {
        case bke::AttrDomain::Point:
          extract_data_vert_bmesh<T>(faces, offset, vbo);
          break;
        case bke::AttrDomain::Face:
          extract_data_face_bmesh<T>(faces, offset, vbo);
          break;
        case bke::AttrDomain::Corner:
          extract_data_corner_bmesh<T>(faces, offset, vbo);
          break;
        default:
          BLI_assert_unreachable();
      }
    }
  });
}

static void fill_vbos_bmesh(const Object &object,
                            const OrigMeshData &orig_mesh_data,
                            const IndexMask &node_mask,
                            const AttributeRequest &request,
                            const MutableSpan<gpu::VertBuf *> vbos)
{
  const SculptSession &ss = *object.sculpt;
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();
  const BMesh &bm = *ss.bm;
  if (const CustomRequest *request_type = std::get_if<CustomRequest>(&request)) {
    switch (*request_type) {
      case CustomRequest::Position: {
        node_mask.foreach_index(GrainSize(1), [&](const int i) {
          fill_vbo_position_bmesh(
              BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])), *vbos[i]);
        });
        break;
      }
      case CustomRequest::Normal: {
        node_mask.foreach_index(GrainSize(1), [&](const int i) {
          fill_vbo_normal_bmesh(
              BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])), *vbos[i]);
        });
        break;
      }
      case CustomRequest::Mask: {
        const int cd_offset = CustomData_get_offset_named(
            &bm.vdata, CD_PROP_FLOAT, ".sculpt_mask");
        if (cd_offset != -1) {
          node_mask.foreach_index(GrainSize(1), [&](const int i) {
            fill_vbo_mask_bmesh(
                BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])),
                cd_offset,
                *vbos[i]);
          });
        }
        else {
          node_mask.foreach_index(GrainSize(64),
                                  [&](const int i) { vbos[i]->data<float>().fill(0.0f); });
        }
        break;
      }
      case CustomRequest::FaceSet: {
        const int face_set_default = orig_mesh_data.face_set_default;
        const int face_set_seed = orig_mesh_data.face_set_seed;
        const int cd_offset = CustomData_get_offset_named(
            &bm.pdata, CD_PROP_INT32, ".sculpt_face_set");
        if (cd_offset != -1) {
          node_mask.foreach_index(GrainSize(1), [&](const int i) {
            fill_vbo_face_set_bmesh(
                BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])),
                face_set_default,
                face_set_seed,
                cd_offset,
                *vbos[i]);
          });
        }
        else {
          node_mask.foreach_index(GrainSize(64),
                                  [&](const int i) { vbos[i]->data<uchar4>().fill(uchar4(255)); });
        }
        break;
      }
    }
  }
  else {
    const GenericRequest &attr = std::get<GenericRequest>(request);
    const bke::AttrDomain domain = attr.domain;
    const eCustomDataType data_type = attr.type;
    const CustomData &custom_data = *get_cdata(bm, domain);
    const int offset = CustomData_get_offset_named(&custom_data, data_type, attr.name);
    node_mask.foreach_index(GrainSize(1), [&](const int i) {
      fill_vbo_attribute_bmesh(
          BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])),
          data_type,
          domain,
          offset,
          *vbos[i]);
    });
  }
}

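/* Build the wireframe (lines) index buffer for a mesh node, skipping hidden faces. Each visible
 * face contributes one line per corner, connecting consecutive corners in the node's VBO
 * order. */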
static gpu::IndexBuf *create_index_faces(const OffsetIndices<int> faces,
                                         const Span<bool> hide_poly,
                                         const Span<int> face_indices)
{
  int corners_count = 0;
  for (const int face : face_indices) {
    if (!hide_poly.is_empty() && hide_poly[face]) {
      continue;
    }
    corners_count += faces[face].size();
  }

  GPUIndexBufBuilder builder;
  GPU_indexbuf_init(&builder, GPU_PRIM_LINES, corners_count, INT_MAX);
  MutableSpan<uint2> data = GPU_indexbuf_get_data(&builder).cast<uint2>();

  int node_corner_offset = 0;
  int line_index = 0;
  for (const int face_index : face_indices) {
    const int face_size = faces[face_index].size();
    if (!hide_poly.is_empty() && hide_poly[face_index]) {
      node_corner_offset += face_size;
      continue;
    }
    for (const int i : IndexRange(face_size)) {
      const int next = (i == face_size - 1) ? 0 : i + 1;
      data[line_index] = uint2(i, next) + node_corner_offset;
      line_index++;
    }

    node_corner_offset += face_size;
  }

  gpu::IndexBuf *ibo = GPU_indexbuf_calloc();
  GPU_indexbuf_build_in_place_ex(&builder, 0, node_corner_offset, false, ibo);
  return ibo;
}

static gpu::IndexBuf *create_index_bmesh(const Set<BMFace *, 0> &faces,
                                         const int visible_faces_num)
{
  GPUIndexBufBuilder elb_lines;
  GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, visible_faces_num * 3, INT_MAX);

  int v_index = 0;

  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }

    GPU_indexbuf_add_line_verts(&elb_lines, v_index, v_index + 1);
    GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
    GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index);

    v_index += 3;
  }

  return GPU_indexbuf_build(&elb_lines);
}

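/* The grid index buffers below also support a coarse mode used when drawing simplified grids
 * during navigation: a `skip` stride larger than one samples only every n-th grid point, which
 * lowers the displayed subdivision level while reusing the same vertex buffers. */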
static void create_tri_index_grids(const Span<int> grid_indices,
                                   const BitGroupVector<> &grid_hidden,
                                   const int gridsize,
                                   const int skip,
                                   const int totgrid,
                                   GPUIndexBufBuilder &elb)
{
  uint offset = 0;
  const uint grid_vert_len = gridsize * gridsize;
  for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
    uint v0, v1, v2, v3;

    const BoundedBitSpan gh = grid_hidden.is_empty() ? BoundedBitSpan() :
                                                       grid_hidden[grid_indices[i]];

    for (int y = 0; y < gridsize - skip; y += skip) {
      for (int x = 0; x < gridsize - skip; x += skip) {
        /* Skip hidden grid face */
        if (!gh.is_empty() && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
          continue;
        }
        /* Indices in a Clockwise QUAD disposition. */
        v0 = offset + CCG_grid_xy_to_index(gridsize, x, y);
        v1 = offset + CCG_grid_xy_to_index(gridsize, x + skip, y);
        v2 = offset + CCG_grid_xy_to_index(gridsize, x + skip, y + skip);
        v3 = offset + CCG_grid_xy_to_index(gridsize, x, y + skip);

        GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
        GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
      }
    }
  }
}

static void create_tri_index_grids_flat_layout(const Span<int> grid_indices,
                                               const BitGroupVector<> &grid_hidden,
                                               const int gridsize,
                                               const int skip,
                                               const int totgrid,
                                               GPUIndexBufBuilder &elb)
{
  uint offset = 0;
  const uint grid_vert_len = square_uint(gridsize - 1) * 4;

  for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
    const BoundedBitSpan gh = grid_hidden.is_empty() ? BoundedBitSpan() :
                                                       grid_hidden[grid_indices[i]];

    uint v0, v1, v2, v3;
    for (int y = 0; y < gridsize - skip; y += skip) {
      for (int x = 0; x < gridsize - skip; x += skip) {
        /* Skip hidden grid face */
        if (!gh.is_empty() && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
          continue;
        }

        v0 = (y * (gridsize - 1) + x) * 4;

        if (skip > 1) {
          v1 = (y * (gridsize - 1) + x + skip - 1) * 4;
          v2 = ((y + skip - 1) * (gridsize - 1) + x + skip - 1) * 4;
          v3 = ((y + skip - 1) * (gridsize - 1) + x) * 4;
        }
        else {
          v1 = v2 = v3 = v0;
        }

        /* VBO data are in a Clockwise QUAD disposition. Note
         * that vertices might be in different quads if we're
         * building a coarse index buffer.
         */
        v0 += offset;
        v1 += offset + 1;
        v2 += offset + 2;
        v3 += offset + 3;

        GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
        GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
      }
    }
  }
}

static void create_lines_index_grids(const Span<int> grid_indices,
                                     int display_gridsize,
                                     const BitGroupVector<> &grid_hidden,
                                     const int gridsize,
                                     const int skip,
                                     const int totgrid,
                                     GPUIndexBufBuilder &elb_lines)
{
  uint offset = 0;
  const uint grid_vert_len = gridsize * gridsize;
  for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
    uint v0, v1, v2, v3;
    bool grid_visible = false;

    const BoundedBitSpan gh = grid_hidden.is_empty() ? BoundedBitSpan() :
                                                       grid_hidden[grid_indices[i]];

    for (int y = 0; y < gridsize - skip; y += skip) {
      for (int x = 0; x < gridsize - skip; x += skip) {
        /* Skip hidden grid face */
        if (!gh.is_empty() && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
          continue;
        }
        /* Indices in a Clockwise QUAD disposition. */
        v0 = offset + CCG_grid_xy_to_index(gridsize, x, y);
        v1 = offset + CCG_grid_xy_to_index(gridsize, x + skip, y);
        v2 = offset + CCG_grid_xy_to_index(gridsize, x + skip, y + skip);
        v3 = offset + CCG_grid_xy_to_index(gridsize, x, y + skip);

        GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
        GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);

        if (y / skip + 2 == display_gridsize) {
          GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
        }
        grid_visible = true;
      }

      if (grid_visible) {
        GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
      }
    }
  }
}

static void create_lines_index_grids_flat_layout(const Span<int> grid_indices,
                                                 int display_gridsize,
                                                 const BitGroupVector<> &grid_hidden,
                                                 const int gridsize,
                                                 const int skip,
                                                 const int totgrid,
                                                 GPUIndexBufBuilder &elb_lines)
{
  uint offset = 0;
  const uint grid_vert_len = square_uint(gridsize - 1) * 4;

  for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
    bool grid_visible = false;
    const BoundedBitSpan gh = grid_hidden.is_empty() ? BoundedBitSpan() :
                                                       grid_hidden[grid_indices[i]];

    uint v0, v1, v2, v3;
    for (int y = 0; y < gridsize - skip; y += skip) {
      for (int x = 0; x < gridsize - skip; x += skip) {
        /* Skip hidden grid face */
        if (!gh.is_empty() && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
          continue;
        }

        v0 = (y * (gridsize - 1) + x) * 4;

        if (skip > 1) {
          v1 = (y * (gridsize - 1) + x + skip - 1) * 4;
          v2 = ((y + skip - 1) * (gridsize - 1) + x + skip - 1) * 4;
          v3 = ((y + skip - 1) * (gridsize - 1) + x) * 4;
        }
        else {
          v1 = v2 = v3 = v0;
        }

        /* VBO data are in a Clockwise QUAD disposition. Note
         * that vertices might be in different quads if we're
         * building a coarse index buffer.
         */
        v0 += offset;
        v1 += offset + 1;
        v2 += offset + 2;
        v3 += offset + 3;

        GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
        GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);

        if (y / skip + 2 == display_gridsize) {
          GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
        }
        grid_visible = true;
      }

      if (grid_visible) {
        GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
      }
    }
  }
}

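/* Every node is drawn with a single material, so it is enough to read the "material_index"
 * attribute for the first face (or the face of the first grid) in each node. */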
static Array<int> calc_material_indices(const Object &object, const OrigMeshData &orig_mesh_data)
{
  const SculptSession &ss = *object.sculpt;
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  switch (pbvh.type()) {
    case bke::pbvh::Type::Mesh: {
      const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
      const Mesh &mesh = *static_cast<const Mesh *>(object.data);
      const bke::AttributeAccessor attributes = mesh.attributes();
      const VArray material_indices = *attributes.lookup<int>("material_index",
                                                              bke::AttrDomain::Face);
      if (!material_indices) {
        return {};
      }
      Array<int> node_materials(nodes.size());
      threading::parallel_for(nodes.index_range(), 64, [&](const IndexRange range) {
        for (const int i : range) {
          const Span<int> face_indices = nodes[i].faces();
          if (face_indices.is_empty()) {
            continue;
          }
          node_materials[i] = material_indices[face_indices.first()];
        }
      });
      return node_materials;
    }
    case bke::pbvh::Type::Grids: {
      const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
      /* Use original mesh data because evaluated mesh is empty. */
      const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
      const VArray material_indices = *attributes.lookup<int>("material_index",
                                                              bke::AttrDomain::Face);
      if (!material_indices) {
        return {};
      }
      Array<int> node_materials(nodes.size());
      const SubdivCCG &subdiv_ccg = *ss.subdiv_ccg;
      const Span<int> grid_faces = subdiv_ccg.grid_to_face_map;
      threading::parallel_for(nodes.index_range(), 64, [&](const IndexRange range) {
        for (const int i : range) {
          const Span<int> grids = nodes[i].grids();
          if (grids.is_empty()) {
            continue;
          }
          node_materials[i] = material_indices[grid_faces[grids.first()]];
        }
      });
      return node_materials;
    }
    case bke::pbvh::Type::BMesh:
      return {};
  }
  return {};
}

static BitVector<> calc_use_flat_layout(const Object &object, const OrigMeshData &orig_mesh_data)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  switch (pbvh.type()) {
    case bke::pbvh::Type::Mesh:
      /* NOTE: Theoretically it would be possible to use vertex indexed buffers if there are no
       * face corner attributes, sharp faces, or face sets. */
      return {};
    case bke::pbvh::Type::Grids: {
      const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
      const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
      const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face", bke::AttrDomain::Face);
      if (sharp_faces.is_empty()) {
        return BitVector<>(nodes.size(), false);
      }

      const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
      const Span<int> grid_to_face_map = subdiv_ccg.grid_to_face_map;

      /* Use boolean array instead of #BitVector for parallelized writing. */
      Array<bool> use_flat_layout(nodes.size());
      threading::parallel_for(nodes.index_range(), 4, [&](const IndexRange range) {
        for (const int i : range) {
          const Span<int> grids = nodes[i].grids();
          if (grids.is_empty()) {
            continue;
          }
          use_flat_layout[i] = std::any_of(grids.begin(), grids.end(), [&](const int grid) {
            return sharp_faces[grid_to_face_map[grid]];
          });
        }
      });
      return BitVector<>(use_flat_layout);
    }
    case bke::pbvh::Type::BMesh:
      return {};
  }
  return {};
}

static gpu::IndexBuf *create_tri_index_mesh(const OffsetIndices<int> faces,
                                            const Span<int3> corner_tris,
                                            const Span<bool> hide_poly,
                                            const bke::pbvh::MeshNode &node)
{
  const Span<int> face_indices = node.faces();
  int tris_num = 0;
  if (hide_poly.is_empty()) {
    tris_num = poly_to_tri_count(face_indices.size(), node.corners_num());
  }
  else {
    for (const int face : face_indices) {
      if (hide_poly[face]) {
        continue;
      }
      tris_num += bke::mesh::face_triangles_num(faces[face].size());
    }
  }

  GPUIndexBufBuilder builder;
  GPU_indexbuf_init(&builder, GPU_PRIM_TRIS, tris_num, INT_MAX);
  MutableSpan<uint3> data = GPU_indexbuf_get_data(&builder).cast<uint3>();

  int tri_index = 0;
  int node_corner_offset = 0;
  for (const int face_index : face_indices) {
    const IndexRange face = faces[face_index];
    if (!hide_poly.is_empty() && hide_poly[face_index]) {
      node_corner_offset += face.size();
      continue;
    }
    for (const int3 &tri : corner_tris.slice(bke::mesh::face_triangles_range(faces, face_index))) {
      for (int i : IndexRange(3)) {
        const int corner = tri[i];
        const int index_in_face = corner - face.first();
        data[tri_index][i] = node_corner_offset + index_in_face;
      }
      tri_index++;
    }
    node_corner_offset += face.size();
  }

  gpu::IndexBuf *ibo = GPU_indexbuf_calloc();
  GPU_indexbuf_build_in_place_ex(&builder, 0, node_corner_offset, false, ibo);
  return ibo;
}

static gpu::IndexBuf *create_tri_index_grids(const CCGKey &key,
                                             const BitGroupVector<> &grid_hidden,
                                             const bool do_coarse,
                                             const Span<int> grid_indices,
                                             const bool use_flat_layout)
{
  int gridsize = key.grid_size;
  int display_gridsize = gridsize;
  int totgrid = grid_indices.size();
  int skip = 1;

  const int display_level = do_coarse ? 0 : key.level;

  if (display_level < key.level) {
    display_gridsize = (1 << display_level) + 1;
    skip = 1 << (key.level - display_level - 1);
  }

  GPUIndexBufBuilder elb;

  uint visible_quad_len = bke::pbvh::count_grid_quads(
      grid_hidden, grid_indices, key.grid_size, display_gridsize);

  GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * visible_quad_len, INT_MAX);

  if (use_flat_layout) {
    create_tri_index_grids_flat_layout(grid_indices, grid_hidden, gridsize, skip, totgrid, elb);
  }
  else {
    create_tri_index_grids(grid_indices, grid_hidden, gridsize, skip, totgrid, elb);
  }

  return GPU_indexbuf_build(&elb);
}

static gpu::IndexBuf *create_lines_index_grids(const CCGKey &key,
                                               const BitGroupVector<> &grid_hidden,
                                               const bool do_coarse,
                                               const Span<int> grid_indices,
                                               const bool use_flat_layout)
{
  int gridsize = key.grid_size;
  int display_gridsize = gridsize;
  int totgrid = grid_indices.size();
  int skip = 1;

  const int display_level = do_coarse ? 0 : key.level;

  if (display_level < key.level) {
    display_gridsize = (1 << display_level) + 1;
    skip = 1 << (key.level - display_level - 1);
  }

  GPUIndexBufBuilder elb;
  GPU_indexbuf_init(
      &elb, GPU_PRIM_LINES, 2 * totgrid * display_gridsize * (display_gridsize - 1), INT_MAX);

  if (use_flat_layout) {
    create_lines_index_grids_flat_layout(
        grid_indices, display_gridsize, grid_hidden, gridsize, skip, totgrid, elb);
  }
  else {
    create_lines_index_grids(
        grid_indices, display_gridsize, grid_hidden, gridsize, skip, totgrid, elb);
  }

  return GPU_indexbuf_build(&elb);
}

Span<gpu::IndexBuf *> DrawCacheImpl::ensure_lines_indices(const Object &object,
                                                          const OrigMeshData &orig_mesh_data,
                                                          const IndexMask &node_mask,
                                                          const bool coarse)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  Vector<gpu::IndexBuf *> &ibos = coarse ? lines_ibos_coarse_ : lines_ibos_;
  ibos.resize(pbvh.nodes_num(), nullptr);

  IndexMaskMemory memory;
  const IndexMask nodes_to_calculate = IndexMask::from_predicate(
      node_mask, GrainSize(8196), memory, [&](const int i) { return !ibos[i]; });

  switch (pbvh.type()) {
    case bke::pbvh::Type::Mesh: {
      const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
      const Mesh &mesh = *static_cast<const Mesh *>(object.data);
      const OffsetIndices<int> faces = mesh.faces();
      const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
      const VArraySpan hide_poly = *attributes.lookup<bool>(".hide_poly", bke::AttrDomain::Face);
      nodes_to_calculate.foreach_index(GrainSize(1), [&](const int i) {
        ibos[i] = create_index_faces(faces, hide_poly, nodes[i].faces());
      });
      break;
    }
    case bke::pbvh::Type::Grids: {
      const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
      nodes_to_calculate.foreach_index(GrainSize(1), [&](const int i) {
        const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
        const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
        ibos[i] = create_lines_index_grids(
            key, subdiv_ccg.grid_hidden, coarse, nodes[i].grids(), use_flat_layout_[i]);
      });
      break;
    }
    case bke::pbvh::Type::BMesh: {
      const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();
      nodes_to_calculate.foreach_index(GrainSize(1), [&](const int i) {
        const Set<BMFace *, 0> &faces = BKE_pbvh_bmesh_node_faces(
            &const_cast<bke::pbvh::BMeshNode &>(nodes[i]));
        const int visible_faces_num = count_visible_tris_bmesh(faces);
        ibos[i] = create_index_bmesh(faces, visible_faces_num);
      });
      break;
    }
  }

  return ibos;
}

BitSpan DrawCacheImpl::ensure_use_flat_layout(const Object &object,
                                              const OrigMeshData &orig_mesh_data)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  if (use_flat_layout_.size() != pbvh.nodes_num()) {
    use_flat_layout_ = calc_use_flat_layout(object, orig_mesh_data);
  }
  return use_flat_layout_;
}

static void ensure_vbos_allocated_mesh(const Object &object,
                                       const GPUVertFormat &format,
                                       const IndexMask &node_mask,
                                       const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
  node_mask.foreach_index(GrainSize(64), [&](const int i) {
    if (!vbos[i]) {
      vbos[i] = GPU_vertbuf_create_with_format(format);
    }
    GPU_vertbuf_data_alloc(*vbos[i], nodes[i].corners_num());
  });
}

static void ensure_vbos_allocated_grids(const Object &object,
                                        const GPUVertFormat &format,
                                        const BitSpan use_flat_layout,
                                        const IndexMask &node_mask,
                                        const MutableSpan<gpu::VertBuf *> vbos)
{
  const SculptSession &ss = *object.sculpt;
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
  const SubdivCCG &subdiv_ccg = *ss.subdiv_ccg;
  const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
  node_mask.foreach_index(GrainSize(64), [&](const int i) {
    if (!vbos[i]) {
      vbos[i] = GPU_vertbuf_create_with_format(format);
    }
    const int verts_per_grid = use_flat_layout[i] ? square_i(key.grid_size - 1) * 4 :
                                                    square_i(key.grid_size);
    const int verts_num = nodes[i].grids().size() * verts_per_grid;
    GPU_vertbuf_data_alloc(*vbos[i], verts_num);
  });
}

static void ensure_vbos_allocated_bmesh(const Object &object,
                                        const GPUVertFormat &format,
                                        const IndexMask &node_mask,
                                        const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();

  node_mask.foreach_index(GrainSize(64), [&](const int i) {
    if (!vbos[i]) {
      vbos[i] = GPU_vertbuf_create_with_format(format);
    }
    const Set<BMFace *, 0> &faces = BKE_pbvh_bmesh_node_faces(
        &const_cast<bke::pbvh::BMeshNode &>(nodes[i]));
    const int verts_num = count_visible_tris_bmesh(faces) * 3;
    GPU_vertbuf_data_alloc(*vbos[i], verts_num);
  });
}

static void flush_vbo_data(const Span<gpu::VertBuf *> vbos, const IndexMask &node_mask)
{
  node_mask.foreach_index([&](const int i) { GPU_vertbuf_use(vbos[i]); });
}

Span<gpu::VertBuf *> DrawCacheImpl::ensure_attribute_data(const Object &object,
                                                          const OrigMeshData &orig_mesh_data,
                                                          const AttributeRequest &attr,
                                                          const IndexMask &node_mask)
{
  if (!pbvh_attr_supported(attr)) {
    return {};
  }
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  AttributeData &data = attribute_vbos_.lookup_or_add_default(attr);
  Vector<gpu::VertBuf *> &vbos = data.vbos;
  vbos.resize(pbvh.nodes_num(), nullptr);

  /* The nodes we recompute here are a combination of:
   * 1. Null VBOs, which correspond to nodes that either haven't been drawn before, or have been
   *    cleared completely by #free_nodes_with_changed_topology.
   * 2. Nodes that have been tagged dirty because their values changed.
   * We also only process a subset of the nodes referenced by the caller, for example to only
   * recompute visible nodes. */
  IndexMaskMemory memory;
  const IndexMask empty_mask = IndexMask::from_predicate(
      node_mask, GrainSize(8196), memory, [&](const int i) { return !vbos[i]; });
  const IndexMask dirty_mask = IndexMask::from_bits(
      node_mask.slice_content(data.dirty_nodes.index_range()), data.dirty_nodes, memory);
  const IndexMask mask = IndexMask::from_union(empty_mask, dirty_mask, memory);

  const GPUVertFormat format = format_for_request(orig_mesh_data, attr);

  switch (pbvh.type()) {
    case bke::pbvh::Type::Mesh: {
      ensure_vbos_allocated_mesh(object, format, mask, vbos);
      fill_vbos_mesh(object, orig_mesh_data, mask, attr, vbos);
      break;
    }
    case bke::pbvh::Type::Grids: {
      ensure_vbos_allocated_grids(object, format, use_flat_layout_, mask, vbos);
      fill_vbos_grids(object, orig_mesh_data, use_flat_layout_, mask, attr, vbos);
      break;
    }
    case bke::pbvh::Type::BMesh: {
      ensure_vbos_allocated_bmesh(object, format, mask, vbos);
      fill_vbos_bmesh(object, orig_mesh_data, mask, attr, vbos);
      break;
    }
  }

  /* TODO: It would be good to deallocate the bit vector if all of the bits have been reset to
   * avoid unnecessary processing in subsequent redraws. */
  dirty_mask.foreach_index_optimized<int>([&](const int i) { data.dirty_nodes[i].reset(); });

  flush_vbo_data(vbos, mask);

  return vbos;
}

Span<gpu::IndexBuf *> DrawCacheImpl::ensure_tri_indices(const Object &object,
                                                        const OrigMeshData &orig_mesh_data,
                                                        const IndexMask &node_mask,
                                                        const bool coarse)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  switch (pbvh.type()) {
    case bke::pbvh::Type::Mesh: {
      const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();

      Vector<gpu::IndexBuf *> &ibos = tris_ibos_;
      ibos.resize(nodes.size(), nullptr);

      /* Whenever a node's visible triangle count has changed the index buffers are freed, so we
       * only recalculate null IBOs here. A new mask is recalculated for more even task
       * distribution between threads. */
      IndexMaskMemory memory;
      const IndexMask nodes_to_calculate = IndexMask::from_predicate(
          node_mask, GrainSize(8196), memory, [&](const int i) { return !ibos[i]; });

      const Mesh &mesh = *static_cast<const Mesh *>(object.data);
      const OffsetIndices<int> faces = mesh.faces();
      const Span<int3> corner_tris = mesh.corner_tris();
      const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
      const VArraySpan hide_poly = *attributes.lookup<bool>(".hide_poly", bke::AttrDomain::Face);
      nodes_to_calculate.foreach_index(GrainSize(1), [&](const int i) {
        ibos[i] = create_tri_index_mesh(faces, corner_tris, hide_poly, nodes[i]);
      });
      return ibos;
    }
    case bke::pbvh::Type::Grids: {
      /* Unlike the other geometry types, multires grids use indexed vertex buffers because when
       * there are no flat faces, vertices can be shared between neighboring quads. This results
       * in a 4x decrease in the amount of data uploaded. Theoretically it also means freeing
       * VBOs because of visibility changes is unnecessary.
       *
       * TODO: With the "flat layout" and no hidden faces, the index buffers are unnecessary; we
       * should avoid creating them in that case. */
      const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();

      Vector<gpu::IndexBuf *> &ibos = coarse ? tris_ibos_coarse_ : tris_ibos_;
      ibos.resize(nodes.size(), nullptr);

      /* Whenever a node's visible triangle count has changed the index buffers are freed, so we
       * only recalculate null IBOs here. A new mask is recalculated for more even task
       * distribution between threads. */
      IndexMaskMemory memory;
      const IndexMask nodes_to_calculate = IndexMask::from_predicate(
          node_mask, GrainSize(8196), memory, [&](const int i) { return !ibos[i]; });

      const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
      const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);

      nodes_to_calculate.foreach_index(GrainSize(1), [&](const int i) {
        ibos[i] = create_tri_index_grids(
            key, subdiv_ccg.grid_hidden, coarse, nodes[i].grids(), use_flat_layout_[i]);
      });
      return ibos;
    }
    case bke::pbvh::Type::BMesh:
      return {};
  }
  return {};
}

1925 const ViewportRequest &request,
1926 const IndexMask &nodes_to_update)
1927{
1928 const Object &object_orig = *DEG_get_original_object(&const_cast<Object &>(object));
1929 const OrigMeshData orig_mesh_data{*static_cast<const Mesh *>(object_orig.data)};
1930 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1931
1932 this->ensure_use_flat_layout(object, orig_mesh_data);
1933 this->free_nodes_with_changed_topology(pbvh);
1934
1935 const Span<gpu::IndexBuf *> ibos = this->ensure_tri_indices(
1936 object, orig_mesh_data, nodes_to_update, request.use_coarse_grids);
1937
1938 for (const AttributeRequest &attr : request.attributes) {
1939 this->ensure_attribute_data(object, orig_mesh_data, attr, nodes_to_update);
1940 }
1941
1942 /* Collect VBO spans in a different loop because #ensure_attribute_data invalidates the allocated
1943 * arrays when its map is changed. */
1944 Vector<Span<gpu::VertBuf *>> attr_vbos;
1945 for (const AttributeRequest &attr : request.attributes) {
1946 const Span<gpu::VertBuf *> vbos = attribute_vbos_.lookup(attr).vbos;
1947 if (!vbos.is_empty()) {
1948 attr_vbos.append(vbos);
1949 }
1950 }
1951
1952 /* Except for the first iteration of the draw loop, we only need to rebuild batches for nodes
1953 * with changed topology (visible triangle count). */
1954 Vector<gpu::Batch *> &batches = tris_batches_.lookup_or_add_default(request);
1955 batches.resize(pbvh.nodes_num(), nullptr);
1956 nodes_to_update.foreach_index(GrainSize(64), [&](const int i) {
1957 if (!batches[i]) {
1958 batches[i] = GPU_batch_create(GPU_PRIM_TRIS, nullptr, ibos.is_empty() ? nullptr : ibos[i]);
1959 for (const Span<gpu::VertBuf *> vbos : attr_vbos) {
1960 GPU_batch_vertbuf_add(batches[i], vbos[i], false);
1961 }
1962 }
1963 });
1964
1965 return batches;
1966}
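A minimal standalone illustration (plain C++, with a hypothetical FlatMap type) of the invalidation hazard the two separate loops above avoid: in an open-addressing map like blender::Map, inserting a key can grow the table and move existing values, so spans pointing into those values dangle. Finishing all insertions before collecting any spans sidesteps this:

#include <array>
#include <cstdio>
#include <span>
#include <string>
#include <vector>

struct Entry {
  std::string key;
  std::array<float, 4> values; /* Inline storage: moves with the entry. */
};

/* Toy flat map: growth reallocates `entries`, moving every Entry. */
struct FlatMap {
  std::vector<Entry> entries;

  std::array<float, 4> &lookup_or_add(const std::string &key)
  {
    for (Entry &e : entries) {
      if (e.key == key) {
        return e.values;
      }
    }
    entries.push_back({key, {}}); /* May reallocate, moving earlier entries. */
    return entries.back().values;
  }
};

int main()
{
  FlatMap map;
  const std::vector<std::string> requests = {"position", "normal", "mask"};

  /* First pass: perform every insertion; each one may move earlier values. */
  for (const std::string &r : requests) {
    map.lookup_or_add(r);
  }

  /* Second pass: only now take spans; no further growth can invalidate them. */
  std::vector<std::span<const float>> spans;
  for (const std::string &r : requests) {
    spans.push_back(map.lookup_or_add(r));
  }
  std::printf("collected %zu stable spans\n", spans.size());
  return 0;
}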
1967
1968Span<gpu::Batch *> DrawCacheImpl::ensure_lines_batches(const Object &object,
1969 const ViewportRequest &request,
1970 const IndexMask &nodes_to_update)
1971{
1972 const Object &object_orig = *DEG_get_original_object(&const_cast<Object &>(object));
1973 const OrigMeshData orig_mesh_data(*static_cast<const Mesh *>(object_orig.data));
1974 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
1975
1976 this->ensure_use_flat_layout(object, orig_mesh_data);
1977 this->free_nodes_with_changed_topology(pbvh);
1978
1979 const Span<gpu::VertBuf *> position = this->ensure_attribute_data(
1980 object, orig_mesh_data, CustomRequest::Position, nodes_to_update);
1981 const Span<gpu::IndexBuf *> lines = this->ensure_lines_indices(
1982 object, orig_mesh_data, nodes_to_update, request.use_coarse_grids);
1983
1984 /* Except for the first iteration of the draw loop, we only need to rebuild batches for nodes
1985 * with changed topology (visible triangle count). */
1986 Vector<gpu::Batch *> &batches = request.use_coarse_grids ? lines_batches_coarse_ :
1987 lines_batches_;
1988 batches.resize(pbvh.nodes_num(), nullptr);
1989 nodes_to_update.foreach_index(GrainSize(64), [&](const int i) {
1990 if (!batches[i]) {
1991 batches[i] = GPU_batch_create(GPU_PRIM_LINES, nullptr, lines[i]);
1992 GPU_batch_vertbuf_add(batches[i], position[i], false);
1993 }
1994 });
1995
1996 return batches;
1997}
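A hedged sketch of how a draw engine might drive this cache per frame, within the Blender codebase. The cache calls match the interface in this file, but gather_nodes_in_view() and submit_batch() are hypothetical placeholders for the engine's own culling and submission logic, and the ViewportRequest setup is simplified:

void draw_sculpt_wireframe(const Object &object, DrawCache &cache)
{
  /* Hypothetical: an IndexMask of nodes whose bounds intersect the viewport. */
  IndexMaskMemory memory;
  const IndexMask nodes_to_update = gather_nodes_in_view(object, memory);

  /* Lines batches only need positions, so no extra attributes are requested. */
  ViewportRequest request;
  request.use_coarse_grids = false;

  /* Cached entries are reused; only null (new or freed) batches are rebuilt. */
  const Span<gpu::Batch *> batches = cache.ensure_lines_batches(object, request, nodes_to_update);
  nodes_to_update.foreach_index([&](const int i) { submit_batch(*batches[i]); }); /* Hypothetical. */
}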
1998
1999Span<int> DrawCacheImpl::ensure_material_indices(const Object &object)
2000{
2001 const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
2002 if (material_indices_.size() != pbvh.nodes_num()) {
2003 const Object &object_orig = *DEG_get_original_object(&const_cast<Object &>(object));
2004 const OrigMeshData orig_mesh_data(*static_cast<const Mesh *>(object_orig.data));
2005 material_indices_ = calc_material_indices(object, orig_mesh_data);
2006 }
2007 return material_indices_;
2008}
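The per-node material indices returned above let callers group node draws by material. A minimal standalone sketch (plain C++; illustrative names only, not Blender API) of that consumer-side pattern, which keeps material rebinds to one per group of nodes:

#include <cstdio>
#include <map>
#include <vector>

int main()
{
  /* One material index per BVH node, as ensure_material_indices() would return. */
  const std::vector<int> node_materials = {0, 2, 0, 1, 2, 0};

  /* Group node indices by material; multimap iteration is sorted by key. */
  std::multimap<int, int> nodes_by_material;
  for (int node = 0; node < int(node_materials.size()); node++) {
    nodes_by_material.emplace(node_materials[node], node);
  }

  int bound_material = -1;
  for (const auto &[material, node] : nodes_by_material) {
    if (material != bound_material) {
      std::printf("bind material %d\n", material); /* One bind per material group. */
      bound_material = material;
    }
    std::printf("  draw node %d\n", node);
  }
  return 0;
}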
2009
2010} // namespace blender::draw::pbvh