Blender V5.0
draw_cache_impl_subdivision.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2021 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
6#include "draw_subdivision.hh"
7
8#include "DNA_mesh_types.h"
9#include "DNA_object_types.h"
10#include "DNA_scene_types.h"
11
12#include "BKE_attribute.hh"
13#include "BKE_editmesh.hh"
14#include "BKE_mesh.hh"
15#include "BKE_mesh_mapping.hh"
16#include "BKE_object.hh"
17#include "BKE_subdiv.hh"
18#include "BKE_subdiv_eval.hh"
19#include "BKE_subdiv_foreach.hh"
20#include "BKE_subdiv_mesh.hh"
22
23#include "BLI_linklist.h"
24#include "BLI_mutex.hh"
25#include "BLI_virtual_array.hh"
26
27#include "DRW_engine.hh"
28#include "DRW_render.hh"
29
30#include "GPU_capabilities.hh"
31#include "GPU_compute.hh"
32#include "GPU_index_buffer.hh"
33#include "GPU_state.hh"
34#include "GPU_uniform_buffer.hh"
35#include "GPU_vertex_buffer.hh"
36
39#ifdef WITH_OPENSUBDIV
42#endif
43
44#include "draw_cache_extract.hh"
45#include "draw_cache_impl.hh"
46#include "draw_cache_inline.hh"
47#include "draw_common_c.hh"
48#include "draw_shader.hh"
51
52namespace blender::draw {
53
54/* -------------------------------------------------------------------- */
59
60#ifdef WITH_OPENSUBDIV
61/* Vertex format used for the `PatchTable::PatchHandle`. */
62static const GPUVertFormat &get_patch_handle_format()
63{
64 static const GPUVertFormat format = [&]() {
65 GPUVertFormat format{};
66 GPU_vertformat_attr_add(&format, "vertex_index", gpu::VertAttrType::SINT_32);
67 GPU_vertformat_attr_add(&format, "array_index", gpu::VertAttrType::SINT_32);
68 GPU_vertformat_attr_add(&format, "patch_index", gpu::VertAttrType::SINT_32);
69 return format;
70 }();
71 return format;
72}
73
74/* Vertex format used for the quad-tree nodes of the PatchMap. */
75static const GPUVertFormat &get_quadtree_format()
76{
77 static const GPUVertFormat format = [&]() {
78 GPUVertFormat format{};
79 GPU_vertformat_attr_add(&format, "child", gpu::VertAttrType::UINT_32_32_32_32);
80 return format;
81 }();
82 return format;
83}
84
/* Compact patch coordinate uploaded to the GPU. Layout must stay in sync with
 * #get_blender_patch_coords_format() (two 32-bit attributes). */
struct CompressedPatchCoord {
  /* Index of the ptex face this coordinate lies on. */
  int ptex_face_index;
  /* UV coordinate encoded as u << 16 | v, where u and v are quantized on 16-bits. */
  uint encoded_uv;
};
90
91MINLINE CompressedPatchCoord make_patch_coord(int ptex_face_index, float u, float v)
92{
93 CompressedPatchCoord patch_coord = {
94 ptex_face_index,
95 (uint(u * 65535.0f) << 16) | uint(v * 65535.0f),
96 };
97 return patch_coord;
98}
99
100/* Vertex format used for the #CompressedPatchCoord. */
101static const GPUVertFormat &get_blender_patch_coords_format()
102{
103 static const GPUVertFormat format = [&]() {
104 GPUVertFormat format{};
105 /* WARNING! Adjust #CompressedPatchCoord accordingly. */
106 GPU_vertformat_attr_add(&format, "ptex_face_index", gpu::VertAttrType::UINT_32);
107 GPU_vertformat_attr_add(&format, "uv", gpu::VertAttrType::UINT_32);
108 return format;
109 }();
110 return format;
111}
112
113#endif
114
116{
117 static const GPUVertFormat format = [&]() {
119 GPU_vertformat_attr_add(&format, "index", gpu::VertAttrType::SINT_32);
120 return format;
121 }();
122 return format;
123}
124
126
127// --------------------------------------------------------
128
/* Subdivided faces are all quads (4 loops each), and each quad is drawn as 2 triangles. */
static uint tris_count_from_number_of_loops(const uint number_of_loops)
{
  return (number_of_loops / 4) * 2;
}
134
135/* -------------------------------------------------------------------- */
138
140 uint num_loops,
141 uint loose_len)
142{
145 GPU_vertbuf_data_alloc(*buffer, num_loops + loose_len);
146
147 buffer->data<int32_t>().take_front(num_loops).copy_from({vert_origindex, num_loops});
148 return buffer;
149}
150
152{
153 return draw_subdiv_init_origindex_buffer(vert_origindex, num_loops, 0).release();
154}
155
157{
160 GPU_vertbuf_data_alloc(*buffer, vert_origindex.size() + loose_len);
161
162 buffer->data<int32_t>().take_front(vert_origindex.size()).copy_from(vert_origindex);
163 return buffer;
164}
165
167{
168 return draw_subdiv_init_origindex_buffer(vert_origindex, 0).release();
169}
170
172
173/* -------------------------------------------------------------------- */
176
177#ifdef WITH_OPENSUBDIV
178
179static void draw_patch_map_build(DRWPatchMap *gpu_patch_map, bke::subdiv::Subdiv *subdiv)
180{
181 gpu::VertBuf *patch_map_handles = GPU_vertbuf_calloc();
182 GPU_vertbuf_init_with_format_ex(*patch_map_handles, get_patch_handle_format(), GPU_USAGE_STATIC);
183
184 gpu::VertBuf *patch_map_quadtree = GPU_vertbuf_calloc();
185 GPU_vertbuf_init_with_format_ex(*patch_map_quadtree, get_quadtree_format(), GPU_USAGE_STATIC);
186
187 int min_patch_face = 0;
188 int max_patch_face = 0;
189 int max_depth = 0;
190 int patches_are_triangular = 0;
191
192 OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
193 evaluator->eval_output->getPatchMap(patch_map_handles,
194 patch_map_quadtree,
195 &min_patch_face,
196 &max_patch_face,
197 &max_depth,
198 &patches_are_triangular);
199
200 gpu_patch_map->patch_map_handles = patch_map_handles;
201 gpu_patch_map->patch_map_quadtree = patch_map_quadtree;
202 gpu_patch_map->min_patch_face = min_patch_face;
203 gpu_patch_map->max_patch_face = max_patch_face;
204 gpu_patch_map->max_depth = max_depth;
205 gpu_patch_map->patches_are_triangular = patches_are_triangular;
206}
207
208#endif
209
210static void draw_patch_map_free(DRWPatchMap *gpu_patch_map)
211{
214 gpu_patch_map->min_patch_face = 0;
215 gpu_patch_map->max_patch_face = 0;
216 gpu_patch_map->max_depth = 0;
217 gpu_patch_map->patches_are_triangular = false;
218}
219
221
222/* -------------------------------------------------------------------- */
225
227{
228 return cache.subdiv && cache.subdiv->evaluator && cache.num_subdiv_loops != 0;
229}
230
237
245
276
277/* Flags used in #DRWSubdivCache.extra_coarse_face_data. The flags are packed in the upper bits of
278 * each uint (one per coarse face), #SUBDIV_COARSE_FACE_FLAG_OFFSET tells where they are in the
279 * packed bits. */
280#define SUBDIV_COARSE_FACE_FLAG_SMOOTH 1u
281#define SUBDIV_COARSE_FACE_FLAG_SELECT 2u
282#define SUBDIV_COARSE_FACE_FLAG_ACTIVE 4u
283#define SUBDIV_COARSE_FACE_FLAG_HIDDEN 8u
284
285#define SUBDIV_COARSE_FACE_FLAG_OFFSET 28u
286
287#define SUBDIV_COARSE_FACE_FLAG_SMOOTH_MASK \
288 (SUBDIV_COARSE_FACE_FLAG_SMOOTH << SUBDIV_COARSE_FACE_FLAG_OFFSET)
289#define SUBDIV_COARSE_FACE_FLAG_SELECT_MASK \
290 (SUBDIV_COARSE_FACE_FLAG_SELECT << SUBDIV_COARSE_FACE_FLAG_OFFSET)
291#define SUBDIV_COARSE_FACE_FLAG_ACTIVE_MASK \
292 (SUBDIV_COARSE_FACE_FLAG_ACTIVE << SUBDIV_COARSE_FACE_FLAG_OFFSET)
293#define SUBDIV_COARSE_FACE_FLAG_HIDDEN_MASK \
294 (SUBDIV_COARSE_FACE_FLAG_HIDDEN << SUBDIV_COARSE_FACE_FLAG_OFFSET)
295
296#define SUBDIV_COARSE_FACE_LOOP_START_MASK \
297 ~((SUBDIV_COARSE_FACE_FLAG_SMOOTH | SUBDIV_COARSE_FACE_FLAG_SELECT | \
298 SUBDIV_COARSE_FACE_FLAG_ACTIVE | SUBDIV_COARSE_FACE_FLAG_HIDDEN) \
299 << SUBDIV_COARSE_FACE_FLAG_OFFSET)
300
301static uint32_t compute_coarse_face_flag_bm(BMFace *f, BMFace *efa_act)
302{
303 uint32_t flag = 0;
306 }
309 }
310 if (f == efa_act) {
312 }
313 return flag;
314}
315
317 BMFace *efa_act,
318 MutableSpan<uint32_t> flags_data)
319{
320 BMFace *f;
321 BMIter iter;
322
323 BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
324 const int index = BM_elem_index_get(f);
325 uint32_t flag = compute_coarse_face_flag_bm(f, efa_act);
328 }
329 const int loopstart = BM_elem_index_get(f->l_first);
330 flags_data[index] = uint(loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
331 }
332}
333
335 const Mesh *mesh,
336 MutableSpan<uint32_t> flags_data)
337{
338 const OffsetIndices faces = mesh->faces();
339 for (const int i : faces.index_range()) {
340 uint32_t flag = 0;
342 (!mr.sharp_faces.is_empty() && mr.sharp_faces[i])))
343 {
345 }
346 if (!mr.select_poly.is_empty() && mr.select_poly[i]) {
348 }
349 if (!mr.hide_poly.is_empty() && mr.hide_poly[i]) {
351 }
352 flags_data[i] = uint(faces[i].start()) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
353 }
354}
355
357 BMesh *bm,
358 MeshRenderData &mr,
359 MutableSpan<uint32_t> flags_data)
360{
361 if (bm == nullptr) {
363 return;
364 }
365
366 const OffsetIndices faces = mesh->faces();
367 for (const int i : faces.index_range()) {
368 BMFace *f = bm_original_face_get(mr, i);
369 /* Selection and hiding from bmesh. */
370 uint32_t flag = (f) ? compute_coarse_face_flag_bm(f, mr.efa_act) : 0;
371 /* Smooth from mesh. */
373 (!mr.sharp_faces.is_empty() && mr.sharp_faces[i])))
374 {
376 }
377 flags_data[i] = uint(faces[i].start()) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
378 }
379}
380
382 const Mesh *mesh,
383 MeshRenderData &mr)
384{
385 if (cache.extra_coarse_face_data == nullptr) {
387 static const GPUVertFormat format = []() {
389 GPU_vertformat_attr_add(&format, "data", gpu::VertAttrType::UINT_32);
390 return format;
391 }();
395 mesh->faces_num);
396 }
397
398 MutableSpan<uint32_t> flags_data = cache.extra_coarse_face_data->data<uint32_t>();
399
402 }
403 else if (mr.orig_index_face != nullptr) {
404 draw_subdiv_cache_extra_coarse_face_data_mapped(mesh, cache.bm, mr, flags_data);
405 }
406 else {
408 }
409
410 /* Make sure updated data is re-uploaded. */
412}
413
415{
416 DRWSubdivCache *subdiv_cache = mbc.subdiv_cache;
417 if (subdiv_cache == nullptr) {
418 subdiv_cache = MEM_new<DRWSubdivCache>(__func__);
419 }
420 mbc.subdiv_cache = subdiv_cache;
421 return *subdiv_cache;
422}
423
424#ifdef WITH_OPENSUBDIV
425
426static void draw_subdiv_invalidate_evaluator_for_orco(bke::subdiv::Subdiv *subdiv,
427 const Mesh *mesh)
428{
429 if (!(subdiv && subdiv->evaluator)) {
430 return;
431 }
432
433 const bool has_orco = CustomData_has_layer(&mesh->vert_data, CD_ORCO);
434 if (has_orco && !subdiv->evaluator->eval_output->hasVertexData()) {
435 /* If we suddenly have/need original coordinates, recreate the evaluator if the extra
436 * source was not created yet. The refiner also has to be recreated as refinement for source
437 * and vertex data is done only once. */
438 delete subdiv->evaluator;
439 subdiv->evaluator = nullptr;
440
441 delete subdiv->topology_refiner;
442 subdiv->topology_refiner = nullptr;
443 }
444}
445
447
448/* -------------------------------------------------------------------- */
458
/* Transient state shared between the subdivision foreach-callbacks while building a
 * #DRWSubdivCache. Set up in the topology-info callback, consumed by the per-element
 * callbacks, and cleaned up once traversal is done. */
struct DRWCacheBuildingContext {
  const Mesh *coarse_mesh;
  const bke::subdiv::Subdiv *subdiv;
  const bke::subdiv::ToMeshSettings *settings;

  DRWSubdivCache *cache;

  /* Pointers into #DRWSubdivCache buffers for easier access during traversal. */
  CompressedPatchCoord *patch_coords;
  int *subdiv_loop_vert_index;
  int *subdiv_loop_subdiv_vert_index;
  int *subdiv_loop_edge_index;
  int *subdiv_loop_edge_draw_flag;
  int *subdiv_loop_subdiv_edge_index;
  int *subdiv_loop_face_index;

  /* Temporary buffers used during traversal. */
  int *vert_origindex_map;
  int *edge_draw_flag_map;
  int *edge_origindex_map;

  /* #CD_ORIGINDEX layers from the mesh to directly look up during traversal the original-index
   * from the base mesh for edit data so that we do not have to handle yet another GPU buffer and
   * do this in the shaders. */
  const int *orig_index_vert;
  const int *orig_index_edge;
};
486
487static bool draw_subdiv_topology_info_cb(const bke::subdiv::ForeachContext *foreach_context,
488 const int num_verts,
489 const int num_edges,
490 const int num_loops,
491 const int num_faces,
492 const int *subdiv_face_offset)
493{
494 /* num_loops does not take into account meshes with only loose geometry, which might be meshes
495 * used as custom bone shapes, so let's check the num_verts also. */
496 if (num_verts == 0 && num_loops == 0) {
497 return false;
498 }
499
500 DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
501 DRWSubdivCache *cache = ctx->cache;
502
503 /* Set topology information only if we have loops. */
504 if (num_loops != 0) {
505 cache->num_subdiv_edges = uint(num_edges);
506 cache->num_subdiv_loops = uint(num_loops);
507 cache->num_subdiv_verts = uint(num_verts);
508 cache->num_subdiv_quads = uint(num_faces);
509 cache->subdiv_face_offset = static_cast<int *>(MEM_dupallocN(subdiv_face_offset));
510 }
511
512 cache->may_have_loose_geom = num_verts != 0 || num_edges != 0;
513
514 /* Initialize cache buffers, prefer dynamic usage so we can reuse memory on the host even after
515 * it was sent to the device, since we may use the data while building other buffers on the CPU
516 * side.
517 *
518 * These VBOs are created even when there are no faces and only loose geometry. This avoids the
519 * need for many null checks. Binding them must be avoided if they are empty though. */
520 cache->patch_coords = GPU_vertbuf_calloc();
522 *cache->patch_coords, get_blender_patch_coords_format(), GPU_USAGE_DYNAMIC);
523 GPU_vertbuf_data_alloc(*cache->patch_coords, cache->num_subdiv_loops);
524
525 cache->corner_patch_coords = GPU_vertbuf_calloc();
527 *cache->corner_patch_coords, get_blender_patch_coords_format(), GPU_USAGE_DYNAMIC);
528 GPU_vertbuf_data_alloc(*cache->corner_patch_coords, cache->num_subdiv_loops);
529
530 cache->verts_orig_index = GPU_vertbuf_calloc();
532 *cache->verts_orig_index, get_origindex_format(), GPU_USAGE_DYNAMIC);
533 GPU_vertbuf_data_alloc(*cache->verts_orig_index, cache->num_subdiv_loops);
534
535 cache->edges_orig_index = GPU_vertbuf_calloc();
537 *cache->edges_orig_index, get_origindex_format(), GPU_USAGE_DYNAMIC);
538 GPU_vertbuf_data_alloc(*cache->edges_orig_index, cache->num_subdiv_loops);
539
540 cache->edges_draw_flag = GPU_vertbuf_calloc();
542 *cache->edges_draw_flag, get_origindex_format(), GPU_USAGE_DYNAMIC);
543 GPU_vertbuf_data_alloc(*cache->edges_draw_flag, cache->num_subdiv_loops);
544
545 cache->subdiv_loop_subdiv_vert_index = MEM_malloc_arrayN<int>(cache->num_subdiv_loops,
546 "subdiv_loop_subdiv_vert_index");
547
548 cache->subdiv_loop_subdiv_edge_index = MEM_malloc_arrayN<int>(cache->num_subdiv_loops,
549 "subdiv_loop_subdiv_edge_index");
550
551 cache->subdiv_loop_face_index = MEM_malloc_arrayN<int>(cache->num_subdiv_loops,
552 "subdiv_loop_face_index");
553
554 /* Initialize context pointers and temporary buffers. */
555 ctx->patch_coords = cache->patch_coords->data<CompressedPatchCoord>().data();
556 ctx->subdiv_loop_vert_index = cache->verts_orig_index->data<int>().data();
557 ctx->subdiv_loop_edge_index = cache->edges_orig_index->data<int>().data();
558 ctx->subdiv_loop_edge_draw_flag = cache->edges_draw_flag->data<int>().data();
559 ctx->subdiv_loop_subdiv_vert_index = cache->subdiv_loop_subdiv_vert_index;
560 ctx->subdiv_loop_subdiv_edge_index = cache->subdiv_loop_subdiv_edge_index;
561 ctx->subdiv_loop_face_index = cache->subdiv_loop_face_index;
562
563 ctx->orig_index_vert = static_cast<const int *>(
564 CustomData_get_layer(&ctx->coarse_mesh->vert_data, CD_ORIGINDEX));
565
566 ctx->orig_index_edge = static_cast<const int *>(
567 CustomData_get_layer(&ctx->coarse_mesh->edge_data, CD_ORIGINDEX));
568
569 if (cache->num_subdiv_verts) {
570 ctx->vert_origindex_map = MEM_malloc_arrayN<int>(cache->num_subdiv_verts,
571 "subdiv_vert_origindex_map");
572 for (int i = 0; i < num_verts; i++) {
573 ctx->vert_origindex_map[i] = -1;
574 }
575 }
576
577 if (cache->num_subdiv_edges) {
578 ctx->edge_origindex_map = MEM_malloc_arrayN<int>(cache->num_subdiv_edges,
579 "subdiv_edge_origindex_map");
580 for (int i = 0; i < num_edges; i++) {
581 ctx->edge_origindex_map[i] = -1;
582 }
583 ctx->edge_draw_flag_map = MEM_calloc_arrayN<int>(cache->num_subdiv_edges,
584 "subdiv_edge_draw_flag_map");
585 }
586
587 return true;
588}
589
590static void draw_subdiv_vertex_corner_cb(const bke::subdiv::ForeachContext *foreach_context,
591 void * /*tls*/,
592 const int /*ptex_face_index*/,
593 const float /*u*/,
594 const float /*v*/,
595 const int coarse_vertex_index,
596 const int /*coarse_face_index*/,
597 const int /*coarse_corner*/,
598 const int subdiv_vertex_index)
599{
600 BLI_assert(coarse_vertex_index != ORIGINDEX_NONE);
601 DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
602 ctx->vert_origindex_map[subdiv_vertex_index] = coarse_vertex_index;
603}
604
/* Intentionally empty callback for vertices created on edges. */
static void draw_subdiv_vertex_edge_cb(const bke::subdiv::ForeachContext * /*foreach_context*/,
                                       void * /*tls_v*/,
                                       const int /*ptex_face_index*/,
                                       const float /*u*/,
                                       const float /*v*/,
                                       const int /*coarse_edge_index*/,
                                       const int /*coarse_face_index*/,
                                       const int /*coarse_corner*/,
                                       const int /*subdiv_vertex_index*/)
{
  /* Required if bke::subdiv::ForeachContext.vertex_corner is also set. */
}
617
618static void draw_subdiv_edge_cb(const bke::subdiv::ForeachContext *foreach_context,
619 void * /*tls*/,
620 const int coarse_edge_index,
621 const int subdiv_edge_index,
622 const bool /*is_loose*/,
623 const int /*subdiv_v1*/,
624 const int /*subdiv_v2*/)
625{
626 DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
627
628 if (!ctx->edge_origindex_map) {
629 return;
630 }
631
632 if (coarse_edge_index == ORIGINDEX_NONE) {
633 /* Not mapped to edge in the subdivision base mesh. */
634 ctx->edge_origindex_map[subdiv_edge_index] = ORIGINDEX_NONE;
635 if (!ctx->cache->optimal_display) {
636 ctx->edge_draw_flag_map[subdiv_edge_index] = 1;
637 }
638 }
639 else {
640 if (ctx->orig_index_edge) {
641 const int origindex = ctx->orig_index_edge[coarse_edge_index];
642 ctx->edge_origindex_map[subdiv_edge_index] = origindex;
643 if (!(origindex == ORIGINDEX_NONE && ctx->cache->hide_unmapped_edges)) {
644 /* Not mapped to edge in original mesh (generated by a preceding modifier). */
645 ctx->edge_draw_flag_map[subdiv_edge_index] = 1;
646 }
647 }
648 else {
649 ctx->edge_origindex_map[subdiv_edge_index] = coarse_edge_index;
650 ctx->edge_draw_flag_map[subdiv_edge_index] = 1;
651 }
652 }
653}
654
655static void draw_subdiv_loop_cb(const bke::subdiv::ForeachContext *foreach_context,
656 void * /*tls_v*/,
657 const int ptex_face_index,
658 const float u,
659 const float v,
660 const int /*coarse_loop_index*/,
661 const int coarse_face_index,
662 const int /*coarse_corner*/,
663 const int subdiv_loop_index,
664 const int subdiv_vertex_index,
665 const int subdiv_edge_index)
666{
667 DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
668 ctx->patch_coords[subdiv_loop_index] = make_patch_coord(ptex_face_index, u, v);
669
670 int coarse_vertex_index = ctx->vert_origindex_map[subdiv_vertex_index];
671
672 ctx->subdiv_loop_subdiv_vert_index[subdiv_loop_index] = subdiv_vertex_index;
673 ctx->subdiv_loop_subdiv_edge_index[subdiv_loop_index] = subdiv_edge_index;
674 ctx->subdiv_loop_face_index[subdiv_loop_index] = coarse_face_index;
675 ctx->subdiv_loop_vert_index[subdiv_loop_index] = coarse_vertex_index;
676}
677
678static void draw_subdiv_foreach_callbacks(bke::subdiv::ForeachContext *foreach_context)
679{
680 *foreach_context = {};
681 foreach_context->topology_info = draw_subdiv_topology_info_cb;
682 foreach_context->loop = draw_subdiv_loop_cb;
683 foreach_context->edge = draw_subdiv_edge_cb;
684 foreach_context->vertex_corner = draw_subdiv_vertex_corner_cb;
685 foreach_context->vertex_edge = draw_subdiv_vertex_edge_cb;
686}
687
688static void do_subdiv_traversal(DRWCacheBuildingContext *cache_building_context,
689 bke::subdiv::Subdiv *subdiv)
690{
691 bke::subdiv::ForeachContext foreach_context;
692 draw_subdiv_foreach_callbacks(&foreach_context);
693 foreach_context.user_data = cache_building_context;
694
696 &foreach_context,
697 cache_building_context->settings,
698 cache_building_context->coarse_mesh);
699
700 /* Now that traversal is done, we can set up the right original indices for the
701 * subdiv-loop-to-coarse-edge map.
702 */
703 for (int i = 0; i < cache_building_context->cache->num_subdiv_loops; i++) {
704 const int edge_index = cache_building_context->subdiv_loop_subdiv_edge_index[i];
705 cache_building_context->subdiv_loop_edge_index[i] =
706 cache_building_context->edge_origindex_map[edge_index];
707 cache_building_context->subdiv_loop_edge_draw_flag[i] =
708 cache_building_context->edge_draw_flag_map[edge_index];
709 }
710}
711
712static gpu::VertBuf *gpu_vertbuf_create_from_format(const GPUVertFormat &format, uint len)
713{
714 gpu::VertBuf *verts = GPU_vertbuf_calloc();
717 return verts;
718}
719
720/* Build maps to hold enough information to tell which face is adjacent to which vertex; those will
721 * be used for computing normals if limit surfaces are unavailable. */
722static void build_vertex_face_adjacency_maps(DRWSubdivCache &cache)
723{
724 /* +1 so that we do not require a special case for the last vertex, this extra offset will
725 * contain the total number of adjacent faces. */
726 cache.subdiv_vertex_face_adjacency_offsets = gpu_vertbuf_create_from_format(
727 get_origindex_format(), cache.num_subdiv_verts + 1);
728
729 MutableSpan<int> vertex_offsets = cache.subdiv_vertex_face_adjacency_offsets->data<int>();
730 vertex_offsets.fill(0);
731
733 {cache.subdiv_loop_subdiv_vert_index, cache.num_subdiv_loops}, vertex_offsets);
734
735 cache.subdiv_vertex_face_adjacency = gpu_vertbuf_create_from_format(get_origindex_format(),
736 cache.num_subdiv_loops);
737 MutableSpan<int> adjacent_faces = cache.subdiv_vertex_face_adjacency->data<int>();
738 int *tmp_set_faces = MEM_calloc_arrayN<int>(cache.num_subdiv_verts, "tmp subdiv vertex offset");
739
740 for (int i = 0; i < cache.num_subdiv_loops / 4; i++) {
741 for (int j = 0; j < 4; j++) {
742 const int subdiv_vertex = cache.subdiv_loop_subdiv_vert_index[i * 4 + j];
743 int first_face_offset = vertex_offsets[subdiv_vertex] + tmp_set_faces[subdiv_vertex];
744 adjacent_faces[first_face_offset] = i;
745 tmp_set_faces[subdiv_vertex] += 1;
746 }
747 }
748
749 MEM_freeN(tmp_set_faces);
750}
751
752static bool draw_subdiv_build_cache(DRWSubdivCache &cache,
753 bke::subdiv::Subdiv *subdiv,
754 const Mesh *mesh_eval,
755 const SubsurfRuntimeData *runtime_data)
756{
757 bke::subdiv::ToMeshSettings to_mesh_settings;
758 to_mesh_settings.resolution = runtime_data->resolution;
759 to_mesh_settings.use_optimal_display = false;
760
761 if (cache.resolution != to_mesh_settings.resolution) {
762 /* Resolution changed, we need to rebuild, free any existing cached data. */
764 }
765
766 /* If the resolution between the cache and the settings match for some reason, check if the patch
767 * coordinates were not already generated. Those coordinates are specific to the resolution, so
768 * they should be null either after initialization, or after freeing if the resolution (or some
769 * other subdivision setting) changed.
770 */
771 if (cache.patch_coords != nullptr) {
772 return true;
773 }
774
775 DRWCacheBuildingContext cache_building_context;
776 memset(&cache_building_context, 0, sizeof(DRWCacheBuildingContext));
777 cache_building_context.coarse_mesh = mesh_eval;
778 cache_building_context.settings = &to_mesh_settings;
779 cache_building_context.cache = &cache;
780
781 do_subdiv_traversal(&cache_building_context, subdiv);
782 if (cache.num_subdiv_loops == 0 && cache.num_subdiv_verts == 0 && !cache.may_have_loose_geom) {
783 /* Either the traversal failed, or we have an empty mesh, either way we cannot go any further.
784 * The subdiv_face_offset cannot then be reliably stored in the cache, so free it directly.
785 */
786 MEM_SAFE_FREE(cache.subdiv_face_offset);
787 return false;
788 }
789
790 /* Only build face related data if we have polygons. */
791 const OffsetIndices faces = mesh_eval->faces();
792 if (cache.num_subdiv_loops != 0) {
793 /* Build buffers for the PatchMap. */
794 draw_patch_map_build(&cache.gpu_patch_map, subdiv);
795
796 cache.face_ptex_offset = bke::subdiv::face_ptex_offset_get(subdiv);
797
798 /* Build patch coordinates for all the face dots. */
799 cache.fdots_patch_coords = gpu_vertbuf_create_from_format(get_blender_patch_coords_format(),
800 mesh_eval->faces_num);
801 CompressedPatchCoord *blender_fdots_patch_coords =
802 cache.fdots_patch_coords->data<CompressedPatchCoord>().data();
803 for (int i = 0; i < mesh_eval->faces_num; i++) {
804 const int ptex_face_index = cache.face_ptex_offset[i];
805 if (faces[i].size() == 4) {
806 /* For quads, the center coordinate of the coarse face has `u = v = 0.5`. */
807 blender_fdots_patch_coords[i] = make_patch_coord(ptex_face_index, 0.5f, 0.5f);
808 }
809 else {
810 /* For N-gons, since they are split into quads from the center, and since the center is
811 * chosen to be the top right corner of each quad, the center coordinate of the coarse face
812 * is any one of those top right corners with `u = v = 1.0`. */
813 blender_fdots_patch_coords[i] = make_patch_coord(ptex_face_index, 1.0f, 1.0f);
814 }
815 }
816
817 cache.subdiv_face_offset_buffer = draw_subdiv_build_origindex_buffer(cache.subdiv_face_offset,
818 faces.size());
819
820 cache.face_ptex_offset_buffer = draw_subdiv_build_origindex_buffer(cache.face_ptex_offset);
821
822 build_vertex_face_adjacency_maps(cache);
823 }
824
825 cache.resolution = to_mesh_settings.resolution;
826 cache.num_coarse_faces = faces.size();
827
828 /* To avoid floating point precision issues when evaluating patches at patch boundaries,
829 * ensure that all loops sharing a vertex use the same patch coordinate. This could cause
830 * the mesh to not be watertight, leading to shadowing artifacts (see #97877). */
831 Vector<int> first_loop_index(cache.num_subdiv_verts, -1);
832
833 /* Save coordinates for corners, as attributes may vary for each loop connected to the same
834 * vertex. */
835 if (cache.num_subdiv_loops > 0) {
836 memcpy(cache.corner_patch_coords->data<CompressedPatchCoord>().data(),
837 cache_building_context.patch_coords,
838 sizeof(CompressedPatchCoord) * cache.num_subdiv_loops);
839
840 for (int i = 0; i < cache.num_subdiv_loops; i++) {
841 const int vertex = cache_building_context.subdiv_loop_subdiv_vert_index[i];
842 if (first_loop_index[vertex] != -1) {
843 continue;
844 }
845 first_loop_index[vertex] = i;
846 }
847
848 for (int i = 0; i < cache.num_subdiv_loops; i++) {
849 const int vertex = cache_building_context.subdiv_loop_subdiv_vert_index[i];
850 cache_building_context.patch_coords[i] =
851 cache_building_context.patch_coords[first_loop_index[vertex]];
852 }
853 }
854
855 /* Cleanup. */
856 MEM_SAFE_FREE(cache_building_context.vert_origindex_map);
857 MEM_SAFE_FREE(cache_building_context.edge_origindex_map);
858 MEM_SAFE_FREE(cache_building_context.edge_draw_flag_map);
859
860 return true;
861}
862
863#endif
864
866
867/* -------------------------------------------------------------------- */
872
875 const int src_offset,
876 const int dst_offset,
877 const uint total_dispatch_size,
878 const bool has_sculpt_mask,
879 const uint edge_loose_offset)
880{
881 ubo->src_offset = src_offset;
882 ubo->dst_offset = dst_offset;
885 ubo->max_depth = cache.gpu_patch_map.max_depth;
889 ubo->edge_loose_offset = edge_loose_offset;
890 ubo->has_sculpt_mask = has_sculpt_mask;
896 ubo->total_dispatch_size = total_dispatch_size;
897 ubo->is_edit_mode = cache.is_edit_mode;
898 ubo->use_hide = cache.use_hide;
899}
900
902 const int src_offset,
903 const int dst_offset,
904 const uint total_dispatch_size,
905 const bool has_sculpt_mask = false,
906 const uint edge_loose_offset = 0)
907{
908 DRWSubdivUboStorage storage;
910 &storage,
911 src_offset,
912 dst_offset,
913 total_dispatch_size,
914 has_sculpt_mask,
915 edge_loose_offset);
916
917 if (!cache.ubo) {
918 const_cast<DRWSubdivCache *>(&cache)->ubo = GPU_uniformbuf_create_ex(
919 sizeof(DRWSubdivUboStorage), &storage, "DRWSubdivUboStorage");
920 }
921
922 GPU_uniformbuf_update(cache.ubo, &storage);
924}
925
927
928// --------------------------------------------------------
929
930#define SUBDIV_LOCAL_WORK_GROUP_SIZE 64
932{
934}
935
942 gpu::Shader *shader,
943 const int src_offset,
944 const int dst_offset,
945 uint total_dispatch_size,
946 const bool has_sculpt_mask = false,
947 const uint edge_loose_offset = 0)
948{
949 const uint max_res_x = uint(GPU_max_work_group_count(0));
950
951 const uint dispatch_size = get_dispatch_size(total_dispatch_size);
952 uint dispatch_rx = dispatch_size;
953 uint dispatch_ry = 1u;
954 if (dispatch_rx > max_res_x) {
955 /* Since there are some limitations with regards to the maximum work group size (could be as
956 * low as 64k elements per call), we split the number elements into a "2d" number, with the
957 * final index being computed as `res_x + res_y * max_work_group_size`. Even with a maximum
958 * work group size of 64k, that still leaves us with roughly `64k * 64k = 4` billion elements
959 * total, which should be enough. If not, we could also use the 3rd dimension. */
960 /* TODO(fclem): We could dispatch fewer groups if we compute the prime factorization and
961 * get the smallest rect fitting the requirements. */
962 dispatch_rx = dispatch_ry = ceilf(sqrtf(dispatch_size));
963 /* Avoid a completely empty dispatch line caused by rounding. */
964 if ((dispatch_rx * (dispatch_ry - 1)) >= dispatch_size) {
965 dispatch_ry -= 1;
966 }
967 }
968
969 /* X and Y dimensions may have different limits so the above computation may not be right, but
970 * even with the standard 64k minimum on all dimensions we still have a lot of room. Therefore,
971 * we presume it all fits. */
972 BLI_assert(dispatch_ry < uint(GPU_max_work_group_count(1)));
973
975 cache, src_offset, dst_offset, total_dispatch_size, has_sculpt_mask, edge_loose_offset);
976
977 GPU_compute_dispatch(shader, dispatch_rx, dispatch_ry, 1);
978}
979
981{
982#ifdef WITH_OPENSUBDIV
984 /* Happens on meshes with only loose geometry. */
985 return;
986 }
987
988 bke::subdiv::Subdiv *subdiv = cache.subdiv;
989 OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
990
991 gpu::VertBuf *src_buffer = evaluator->eval_output->get_source_buf();
992 gpu::VertBuf *src_extra_buffer = nullptr;
993 if (orco) {
994 src_extra_buffer = evaluator->eval_output->get_source_data_buf();
995 }
996
997 gpu::StorageBuf *patch_arrays_buffer = evaluator->eval_output->create_patch_arrays_buf();
998 gpu::StorageBuf *patch_index_buffer = evaluator->eval_output->get_patch_index_buf();
999 gpu::StorageBuf *patch_param_buffer = evaluator->eval_output->get_patch_param_buf();
1000
1003 GPU_shader_bind(shader);
1004
1015 if (orco) {
1016 GPU_vertbuf_bind_as_ssbo(src_extra_buffer,
1019 }
1020
1021 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1022
1023 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1024 * We also need it for subsequent compute shaders, so a barrier on the shader storage is also
1025 * needed. */
1027
1028 /* Cleanup. */
1030
1031 GPU_storagebuf_free(patch_arrays_buffer);
1032#else
1033 UNUSED_VARS(cache, pos, orco);
1034#endif
1035}
1036
/* Evaluate one face-varying (UV) channel on the GPU via the OpenSubdiv patch
 * tables and write it into `uvs` at `dst_offset`. Without OpenSubdiv this is a
 * no-op. NOTE(review): doxygen-rendered fragment — the function header, shader
 * lookup and buffer-binding lines were elided by the extraction; the leading
 * numbers are the original file's line numbers. */
1038 gpu::VertBuf *uvs,
1039 const int face_varying_channel,
1040 const int dst_offset)
1041{
1042#ifdef WITH_OPENSUBDIV
1044 /* Happens on meshes with only loose geometry. */
1045 return;
1046 }
1047
1048 bke::subdiv::Subdiv *subdiv = cache.subdiv;
1049 OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
1050
1051 gpu::VertBuf *src_buffer = evaluator->eval_output->get_face_varying_source_buf(
1052 face_varying_channel);
1053 int src_buffer_offset = evaluator->eval_output->get_face_varying_source_offset(
1054 face_varying_channel);
1055
1056 gpu::StorageBuf *patch_arrays_buffer =
1057 evaluator->eval_output->create_face_varying_patch_array_buf(face_varying_channel);
1058 gpu::StorageBuf *patch_index_buffer = evaluator->eval_output->get_face_varying_patch_index_buf(
1059 face_varying_channel);
1060 gpu::StorageBuf *patch_param_buffer = evaluator->eval_output->get_face_varying_patch_param_buf(
1061 face_varying_channel);
1062
1064 GPU_shader_bind(shader);
1065
1076
1077 /* The buffer offset has the stride baked in (which is 2 as we have UVs) so remove the stride by
1078 * dividing by 2. */
1080 cache, shader, src_buffer_offset / 2, dst_offset, cache.num_subdiv_quads);
1081
1082 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1083 * Since it may also be used for computing UV stretches, we also need a barrier on the shader
1084 * storage. */
1086
1087 /* Cleanup. */
1089
1090 GPU_storagebuf_free(patch_arrays_buffer);
1091#else
1092 UNUSED_VARS(cache, uvs, face_varying_channel, dst_offset);
1093#endif
1094}
1095
/* Interpolate a generic custom-data layer (`comp_type` x `dimensions`) from
 * `src_data` into `dst_data` with a compute shader, one dispatch over the
 * subdivided quads. NOTE(review): doxygen extraction — the function header and
 * the SSBO binding lines are elided here. */
1097 gpu::VertBuf &src_data,
1098 gpu::VertBuf &dst_data,
1099 GPUVertCompType comp_type,
1100 int dimensions,
1101 int dst_offset)
1102{
1104 /* Happens on meshes with only loose geometry. */
1105 return;
1106 }
1107
1108 gpu::Shader *shader = DRW_shader_subdiv_custom_data_get(comp_type, dimensions);
1109 GPU_shader_bind(shader);
1110
1111 /* subdiv_face_offset is always at binding point 0 for each shader using it. */
1119
1120 drw_subdiv_compute_dispatch(cache, shader, 0, dst_offset, cache.num_subdiv_quads);
1121
1122 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. Put
1123 * a barrier on the shader storage as we may use the result in another compute shader. */
1125
1126 /* Cleanup. */
1128}
1129
/* Interpolate per-corner normals from `src_data` into `dst_data`; one compute
 * dispatch over the subdivided quads. NOTE(review): doxygen extraction — the
 * header, shader lookup and binding lines are elided. */
1131 gpu::VertBuf &src_data,
1132 gpu::VertBuf &dst_data)
1133{
1135 /* Happens on meshes with only loose geometry. */
1136 return;
1137 }
1138
1140 GPU_shader_bind(shader);
1141
1142 /* subdiv_face_offset is always at binding point 0 for each shader using it. */
1150
1151 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1152
1153 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. Put
1154 * a barrier on the shader storage as we may use the result in another compute shader. */
1156
1157 /* Cleanup. */
1159}
1160
/* Pack sculpt overlay data (optional mask, face sets) into `sculpt_data`.
 * `mask_vbo` may be null; the dispatch passes `mask_vbo != nullptr` through so
 * the shader knows whether a mask is bound. NOTE(review): doxygen extraction —
 * header and most binding lines elided. */
1162 gpu::VertBuf *mask_vbo,
1163 gpu::VertBuf *face_set_vbo,
1164 gpu::VertBuf *sculpt_data)
1165{
1167 GPU_shader_bind(shader);
1168
1169 /* Mask VBO is always at binding point 0. */
1170 if (mask_vbo) {
1172 }
1175
1176 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads, mask_vbo != nullptr);
1177
1178 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1180
1181 /* Cleanup. */
1183}
1184
/* Accumulate vertex normals using the face-adjacency maps; note the dispatch
 * is sized by `num_subdiv_verts`, not quads. NOTE(review): doxygen extraction —
 * header and several binding lines elided. */
1187 gpu::VertBuf *face_adjacency_offsets,
1188 gpu::VertBuf *face_adjacency_lists,
1189 gpu::VertBuf *vertex_loop_map,
1190 gpu::VertBuf *vert_normals)
1191{
1193 GPU_shader_bind(shader);
1194
1196 GPU_vertbuf_bind_as_ssbo(face_adjacency_offsets,
1201
1202 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_verts);
1203
1204 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1205 * We also need it for subsequent compute shaders, so a barrier on the shader storage is also
1206 * needed. */
1208
1209 /* Cleanup. */
1211}
1212
/* Build the triangle index buffer; picks a single-material shader variant when
 * `material_count <= 1`, otherwise binds the per-face material offsets as
 * well. NOTE(review): doxygen extraction — header, shader lookup and binding
 * lines elided. */
1214 gpu::IndexBuf *subdiv_tris,
1215 const int material_count)
1216{
1218 /* Happens on meshes with only loose geometry. */
1219 return;
1220 }
1221
1222 const bool do_single_material = material_count <= 1;
1223
1225 do_single_material ? SubdivShaderType::BUFFER_TRIS :
1227 GPU_shader_bind(shader);
1228
1229 /* subdiv_face_offset is always at binding point 0 for each shader using it. */
1232 if (!do_single_material) {
1234 }
1235
1236 /* Outputs */
1238
1239 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1240
1241 /* This generates an index buffer, so we need to put a barrier on the element array. */
1243
1244 /* Cleanup. */
1246}
1247
/* Evaluate face-dot positions (and optionally normals) plus their index
 * buffer through OpenSubdiv patch evaluation; dispatch is per coarse face.
 * No-op without OpenSubdiv. NOTE(review): doxygen extraction — header and
 * binding lines elided. */
1249 gpu::VertBuf *fdots_pos,
1250 gpu::VertBuf *fdots_nor,
1251 gpu::IndexBuf *fdots_indices)
1252{
1253#ifdef WITH_OPENSUBDIV
1255 /* Happens on meshes with only loose geometry. */
1256 return;
1257 }
1258
1259 bke::subdiv::Subdiv *subdiv = cache.subdiv;
1260 OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
1261
1262 gpu::VertBuf *src_buffer = evaluator->eval_output->get_source_buf();
1263 gpu::StorageBuf *patch_arrays_buffer = evaluator->eval_output->create_patch_arrays_buf();
1264 gpu::StorageBuf *patch_index_buffer = evaluator->eval_output->get_patch_index_buf();
1265 gpu::StorageBuf *patch_param_buffer = evaluator->eval_output->get_patch_param_buf();
1266
1270 GPU_shader_bind(shader);
1271
1284 /* F-dots normals may not be requested, still reserve the binding point. */
1285 if (fdots_nor) {
1287 }
1291
1292 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_coarse_faces);
1293
1294 /* This generates two vertex buffers and an index buffer, so we need to put a barrier on the
1295 * vertex attributes and element arrays. */
1297
1298 /* Cleanup. */
1300
1301 GPU_storagebuf_free(patch_arrays_buffer);
1302#else
1303 UNUSED_VARS(cache, fdots_pos, fdots_nor, fdots_indices);
1304#endif
1305}
1306
1325
/* Append loose-edge line indices into `lines_indices` starting at
 * `edge_loose_offset`; dispatch is sized by `num_loose_edges`. NOTE(review):
 * doxygen extraction — header and binding lines elided. */
1327 gpu::IndexBuf *lines_indices,
1328 gpu::VertBuf *lines_flags,
1329 uint edge_loose_offset,
1330 uint num_loose_edges)
1331{
1333 GPU_shader_bind(shader);
1334
1337
1338 drw_subdiv_compute_dispatch(cache, shader, 0, 0, num_loose_edges, false, edge_loose_offset);
1339
1340 /* This generates an index buffer, so we need to put a barrier on the element array. */
1342
1343 /* Cleanup. */
1345}
1346
/* Compute the edge-factor VBO used for wireframe display; one dispatch over
 * the subdivided quads. NOTE(review): doxygen extraction — header and binding
 * lines elided. */
1349 gpu::VertBuf *edge_draw_flag,
1350 gpu::VertBuf *poly_other_map,
1351 gpu::VertBuf *edge_fac)
1352{
1354 GPU_shader_bind(shader);
1355
1360
1361 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1362
1363 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1365
1366 /* Cleanup. */
1368}
1369
/* Build the loop-normal (`lnor`) VBO from positions, vertex normals and the
 * corner-vertex map; skipped for meshes with only loose geometry.
 * NOTE(review): doxygen extraction — header and binding lines elided. */
1372 gpu::VertBuf *vert_normals,
1373 gpu::VertBuf *subdiv_corner_verts,
1374 gpu::VertBuf *lnor)
1375{
1377 /* Happens on meshes with only loose geometry. */
1378 return;
1379 }
1380
1382 GPU_shader_bind(shader);
1383
1384 /* Inputs */
1391
1392 /* Outputs */
1394
1395 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1396
1397 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1399
1400 /* Cleanup. */
1402}
1403
/* Build the paint-overlay flag VBO; same dispatch/barrier pattern as the
 * other per-quad builders. NOTE(review): doxygen extraction — the signature
 * and all binding lines are elided; only the control-flow skeleton remains. */
1405{
1407 /* Happens on meshes with only loose geometry. */
1408 return;
1409 }
1410
1412 GPU_shader_bind(shader);
1413
1414 /* Inputs */
1419
1420 /* Outputs */
1422
1423 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1424
1425 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1427
1428 /* Cleanup. */
1430}
1431
/* Build the edit-UV stretch-area overlay buffer from coarse per-face data.
 * NOTE(review): doxygen extraction — header and binding lines elided. */
1433 gpu::VertBuf *coarse_data,
1434 gpu::VertBuf *subdiv_data)
1435{
1437 GPU_shader_bind(shader);
1438
1439 /* Inputs */
1440 /* subdiv_face_offset is always at binding point 0 for each shader using it. */
1443 /* Outputs */
1445
1446 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1447
1448 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1450
1451 /* Cleanup. */
1453}
1454
/* Build the edit-UV stretch-angle overlay buffer; `uvs_offset` is forwarded as
 * the dispatch's source offset. NOTE(review): doxygen extraction — header and
 * binding lines elided. */
1457 gpu::VertBuf *uvs,
1458 int uvs_offset,
1459 gpu::VertBuf *stretch_angles)
1460{
1462 GPU_shader_bind(shader);
1463
1464 /* Inputs */
1467 /* Outputs */
1469
1470 drw_subdiv_compute_dispatch(cache, shader, uvs_offset, 0, cache.num_subdiv_quads);
1471
1472 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1474
1475 /* Cleanup. */
1477}
1478
1479/* -------------------------------------------------------------------- */
1480
/* Compute per-material quad ranges (`mat_start`/`mat_end`) and the per-face
 * offset buffer used to re-sort subdivided quads by material. For a single
 * material everything lands in one range. Material indices out of range are
 * clamped to slot 0. NOTE(review): doxygen extraction — the signature's first
 * line and one statement after the brace are elided. */
1515 const Mesh *mesh_eval,
1516 uint mat_len)
1517{
1519
1520 const int number_of_quads = cache.num_subdiv_loops / 4;
1521
1522 if (mat_len == 1) {
1523 cache.mat_start = MEM_callocN<int>("subdiv mat_end");
1524 cache.mat_end = MEM_callocN<int>("subdiv mat_end");
1525 cache.mat_start[0] = 0;
1526 cache.mat_end[0] = number_of_quads;
1527 return;
1528 }
1529
1530 const bke::AttributeAccessor attributes = mesh_eval->attributes();
1531 const VArraySpan<int> material_indices = *attributes.lookup_or_default<int>(
1532 "material_index", bke::AttrDomain::Face, 0);
1533
1534 /* Count number of subdivided polygons for each material. */
1535 int *mat_start = MEM_calloc_arrayN<int>(mat_len, "subdiv mat_start");
1536 int *subdiv_face_offset = cache.subdiv_face_offset;
1537
1538 /* TODO: parallel_reduce? */
1539 for (int i = 0; i < mesh_eval->faces_num; i++) {
1540 const int next_offset = (i == mesh_eval->faces_num - 1) ? number_of_quads :
1541 subdiv_face_offset[i + 1];
1542 const int quad_count = next_offset - subdiv_face_offset[i];
1543 const uint mat_index = uint(material_indices[i]) < mat_len ? uint(material_indices[i]) : 0;
1544 mat_start[mat_index] += quad_count;
1545 }
1546
1547 /* Accumulate offsets. */
/* Exclusive prefix sum: counts become start offsets per material. */
1548 int ofs = mat_start[0];
1549 mat_start[0] = 0;
1550 for (uint i = 1; i < mat_len; i++) {
1551 int tmp = mat_start[i];
1552 mat_start[i] = ofs;
1553 ofs += tmp;
1554 }
1555
1556 /* Compute per face offsets. */
/* `mat_end` starts as a copy of `mat_start` and is advanced as faces are
 * assigned, ending up as the exclusive end of each material's range. */
1557 int *mat_end = static_cast<int *>(MEM_dupallocN(mat_start));
1558 int *per_face_mat_offset = MEM_malloc_arrayN<int>(mesh_eval->faces_num, "per_face_mat_offset");
1559
1560 for (int i = 0; i < mesh_eval->faces_num; i++) {
1561 const uint mat_index = uint(material_indices[i]) < mat_len ? uint(material_indices[i]) : 0;
1562 const int single_material_index = subdiv_face_offset[i];
1563 const int material_offset = mat_end[mat_index];
1564 const int next_offset = (i == mesh_eval->faces_num - 1) ? number_of_quads :
1565 subdiv_face_offset[i + 1];
1566 const int quad_count = next_offset - subdiv_face_offset[i];
1567 mat_end[mat_index] += quad_count;
1568
/* Delta between the material-sorted position and the unsorted position. */
1569 per_face_mat_offset[i] = material_offset - single_material_index;
1570 }
1571
1572 cache.face_mat_offset = draw_subdiv_build_origindex_buffer(per_face_mat_offset,
1573 mesh_eval->faces_num);
1574 cache.mat_start = mat_start;
1575 cache.mat_end = mat_end;
1576
1577 MEM_freeN(per_face_mat_offset);
1578}
1579
1587/* The evaluator cache is global, so we cannot allow concurrent usage and need synchronization. */
1589
/* Main entry for GPU-subdivision buffer extraction: validates the subsurf
 * runtime data, sets up the DRWSubdivCache, then delegates to
 * mesh_buffer_cache_create_requested_subdiv. Returns false when subdivision
 * could not run (level 0, no descriptor, or topology rejected) so the caller
 * can fall back. The whole evaluation holds `g_subdiv_eval_mutex` because the
 * evaluator cache is global. NOTE(review): doxygen extraction — the signature
 * head and several call lines are elided. */
1591 Mesh &mesh,
1592 MeshBatchCache &batch_cache,
1593 MeshBufferCache &mbc,
1594 const Span<IBOType> ibo_requests,
1595 const Span<VBOType> vbo_requests,
1596 const bool is_editmode,
1597 const bool is_paint_mode,
1598 const bool do_final,
1599 const bool do_uvedit,
1600 const bool do_cage,
1601 const ToolSettings *ts,
1602 const bool use_hide)
1603{
1604 SubsurfRuntimeData *runtime_data = mesh.runtime->subsurf_runtime_data;
1605 BLI_assert(runtime_data && runtime_data->has_gpu_subdiv);
1606
1607 if (runtime_data->settings.level == 0) {
1608 return false;
1609 }
1610
1611 const Mesh *mesh_eval = &mesh;
1612 BMesh *bm = nullptr;
1613 if (mesh.runtime->edit_mesh) {
1614 mesh_eval = BKE_object_get_editmesh_eval_final(&ob);
1615 bm = mesh.runtime->edit_mesh->bm;
1616 }
1617
1618#ifdef WITH_OPENSUBDIV
1619 draw_subdiv_invalidate_evaluator_for_orco(runtime_data->subdiv_gpu, mesh_eval);
1620#endif
1621
1623 runtime_data, mesh_eval, true);
1624 if (!subdiv) {
1625 return false;
1626 }
1627
1628 /* Lock the entire evaluation to avoid concurrent usage of shader objects in evaluator cache. */
1629 std::scoped_lock lock(g_subdiv_eval_mutex);
1630
1631 if (g_subdiv_evaluator_cache == nullptr) {
1633 }
1634
1635 /* Increment evaluator cache reference if an evaluator has been assigned to it. */
1636 bool evaluator_might_be_assigned = subdiv->evaluator == nullptr;
1637 auto maybe_increment_cache_ref = [evaluator_might_be_assigned](bke::subdiv::Subdiv *subdiv) {
1638 if (evaluator_might_be_assigned && subdiv->evaluator != nullptr) {
1639 /* An evaluator was assigned. */
1641 }
1642 };
1643
1646 {
1647 /* This could happen in two situations:
1648 * - OpenSubdiv is disabled.
1649 * - Something totally bad happened, and OpenSubdiv rejected our topology.
1650 * In either way, we can't safely continue. However, we still have to handle potential loose
1651 * geometry, which is done separately. */
1652 if (mesh_eval->faces_num) {
1653 maybe_increment_cache_ref(subdiv);
1654 return false;
1655 }
1656 }
1657
1658 DRWSubdivCache &draw_cache = mesh_batch_cache_ensure_subdiv_cache(batch_cache);
1659
1660 draw_cache.optimal_display = runtime_data->use_optimal_display;
1661 /* If there is no distinct cage, hide unmapped edges that can't be selected. */
1662 draw_cache.hide_unmapped_edges = is_editmode && !do_cage;
1663 draw_cache.bm = bm;
1664 draw_cache.mesh = mesh_eval;
1665 draw_cache.subdiv = subdiv;
1666
1667#ifdef WITH_OPENSUBDIV
1668 if (!draw_subdiv_build_cache(draw_cache, subdiv, mesh_eval, runtime_data)) {
1669 maybe_increment_cache_ref(subdiv);
1670 return false;
1671 }
1672#endif
1673
1675
1676 /* Copy topology information for stats display. */
1677 runtime_data->stats_totvert = draw_cache.num_subdiv_verts;
1678 runtime_data->stats_totedge = draw_cache.num_subdiv_edges;
1679 runtime_data->stats_faces_num = draw_cache.num_subdiv_quads;
1680 runtime_data->stats_totloop = draw_cache.num_subdiv_loops;
1681
1682 draw_cache.use_custom_loop_normals = (runtime_data->use_loop_normals) &&
1683 mesh_eval->attributes().contains("custom_normal");
1684
1685 if (ibo_requests.contains(IBOType::Tris)) {
1686 draw_subdiv_cache_ensure_mat_offsets(draw_cache, mesh_eval, batch_cache.mat_len);
1687 }
1688
1690 ob, mesh, is_editmode, is_paint_mode, do_final, do_uvedit, use_hide, ts);
1691 draw_cache.use_hide = use_hide;
1692
1693 /* Used for setting loop normals flags. Mapped extraction is only used during edit mode.
1694 * See comments in #extract_lnor_iter_face_mesh.
1695 */
1696 draw_cache.is_edit_mode = mr.edit_bmesh != nullptr;
1697
1698 draw_subdiv_cache_update_extra_coarse_face_data(draw_cache, mesh_eval, mr);
1699
1701 batch_cache, mbc, ibo_requests, vbo_requests, draw_cache, mr);
1702
1703 maybe_increment_cache_ref(subdiv);
1704 return true;
1705}
1706
/* CPU-side subdivision of loose edges: sample each loose coarse edge at
 * `resolution` evenly spaced parameters and cache the positions in
 * `subdiv_cache.loose_edge_positions`. Idempotent — returns early when the
 * positions were already computed. NOTE(review): doxygen extraction — the
 * signature line itself is elided (see DRW_subdivide_loose_geom declaration). */
1708{
1709 const Span<int> loose_edges = cache.loose_geom.edges;
1710 if (loose_edges.is_empty()) {
1711 return;
1712 }
1713
1714 if (!subdiv_cache.loose_edge_positions.is_empty()) {
1715 /* Already processed. */
1716 return;
1717 }
1718
1719 const Mesh *coarse_mesh = subdiv_cache.mesh;
1720 const bool is_simple = subdiv_cache.subdiv->settings.is_simple;
1721 const int resolution = subdiv_cache.resolution;
1722 const int resolution_1 = resolution - 1;
1723 const float inv_resolution_1 = 1.0f / float(resolution_1);
1724
1725 const Span<float3> coarse_positions = coarse_mesh->vert_positions();
1726 const Span<int2> coarse_edges = coarse_mesh->edges();
1727
1728 Array<int> vert_to_edge_offsets;
1729 Array<int> vert_to_edge_indices;
1730 const GroupedSpan<int> vert_to_edge_map = bke::mesh::build_vert_to_edge_map(
1731 coarse_edges, coarse_mesh->verts_num, vert_to_edge_offsets, vert_to_edge_indices);
1732
1733 /* Also store the last vertex to simplify copying the positions to the VBO. */
1734 subdiv_cache.loose_edge_positions.reinitialize(loose_edges.size() * resolution);
1735 MutableSpan<float3> edge_positions = subdiv_cache.loose_edge_positions;
1736
/* Each loose edge gets `resolution` samples at parameters j / (resolution-1). */
1737 threading::parallel_for(loose_edges.index_range(), 1024, [&](const IndexRange range) {
1738 for (const int i : range) {
1739 const int coarse_edge = loose_edges[i];
1740 MutableSpan positions = edge_positions.slice(i * resolution, resolution);
1741 for (const int j : positions.index_range()) {
1742 positions[j] = bke::subdiv::mesh_interpolate_position_on_edge(coarse_positions,
1743 coarse_edges,
1744 vert_to_edge_map,
1745 coarse_edge,
1746 is_simple,
1747 j * inv_resolution_1);
1748 }
1749 }
1750 });
1751}
1752
1762
/* Public wrapper around draw_subdiv_create_requested_buffers with optional
 * wall-clock timing behind the (disabled) TIME_SUBDIV define. NOTE(review):
 * doxygen extraction — the signature head and the wrapped call's first line
 * are elided. */
1764 Mesh &mesh,
1765 MeshBatchCache &batch_cache,
1766 MeshBufferCache &mbc,
1767 const Span<IBOType> ibo_requests,
1768 const Span<VBOType> vbo_requests,
1769 const bool is_editmode,
1770 const bool is_paint_mode,
1771 const bool do_final,
1772 const bool do_uvedit,
1773 const bool do_cage,
1774 const ToolSettings *ts,
1775 const bool use_hide)
1776{
1777
1778#undef TIME_SUBDIV
1779
1780#ifdef TIME_SUBDIV
1781 const double begin_time = BLI_time_now_seconds();
1782#endif
1783
1785 mesh,
1786 batch_cache,
1787 mbc,
1788 ibo_requests,
1789 vbo_requests,
1790 is_editmode,
1791 is_paint_mode,
1792 do_final,
1793 do_uvedit,
1794 do_cage,
1795 ts,
1796 use_hide))
1797 {
1798 /* Did not run. */
1799 return;
1800 }
1801
1802#ifdef TIME_SUBDIV
1803 const double end_time = BLI_time_now_seconds();
1804 fprintf(stderr, "Time to update subdivision: %f\n", end_time - begin_time);
1805 fprintf(stderr, "Maximum FPS: %f\n", 1.0 / (end_time - begin_time));
1806#endif
1807}
1808
1814
/* Drain the deferred-free queue of Subdiv descriptors (guarded by
 * `gpu_subdiv_queue_mutex`), releasing each evaluator under
 * `g_subdiv_eval_mutex`, then free the global evaluator cache once no users
 * remain. NOTE(review): doxygen extraction — the signature and a few
 * statements (queue pop, evaluator release, cache delete) are elided. */
1816{
1817 {
1818 std::scoped_lock lock(gpu_subdiv_queue_mutex);
1819
1820 while (gpu_subdiv_free_queue != nullptr) {
1821 bke::subdiv::Subdiv *subdiv = static_cast<bke::subdiv::Subdiv *>(
1823
1824 {
1825 std::scoped_lock lock(g_subdiv_eval_mutex);
1826 if (subdiv->evaluator != nullptr) {
1828 }
1829 }
1830#ifdef WITH_OPENSUBDIV
1831 /* Set the type to CPU so that we do actually free the cache. */
1833#endif
1834 bke::subdiv::free(subdiv);
1835 }
1836 }
1837
1838 {
1839 std::scoped_lock lock(g_subdiv_eval_mutex);
1840 /* Free evaluator cache if there is no more reference to it. */
1841 if (g_subdiv_evaluator_users == 0) {
1843 g_subdiv_evaluator_cache = nullptr;
1844 }
1845 }
1846}
1847
1848} // namespace blender::draw
const void * CustomData_get_layer(const CustomData *data, eCustomDataType type)
#define ORIGINDEX_NONE
bool CustomData_has_layer(const CustomData *data, eCustomDataType type)
General operations, lookup, etc. for blender objects.
const Mesh * BKE_object_get_editmesh_eval_final(const Object *object)
blender::bke::subdiv::Subdiv * BKE_subsurf_modifier_subdiv_descriptor_ensure(SubsurfRuntimeData *runtime_data, const Mesh *mesh, bool for_draw_code)
#define BLI_assert(a)
Definition BLI_assert.h:46
MINLINE uint divide_ceil_u(uint a, uint b)
#define MINLINE
unsigned int uint
double BLI_time_now_seconds(void)
Definition time.cc:113
#define UNUSED_VARS(...)
struct Mesh Mesh
Object is a sort of wrapper for general info.
int GPU_max_work_group_count(int index)
void GPU_compute_dispatch(blender::gpu::Shader *shader, uint groups_x_len, uint groups_y_len, uint groups_z_len, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
void GPU_indexbuf_bind_as_ssbo(blender::gpu::IndexBuf *elem, int binding)
void GPU_shader_bind(blender::gpu::Shader *shader, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
void GPU_shader_unbind()
@ GPU_BARRIER_SHADER_STORAGE
Definition GPU_state.hh:48
@ GPU_BARRIER_ELEMENT_ARRAY
Definition GPU_state.hh:52
@ GPU_BARRIER_VERTEX_ATTRIB_ARRAY
Definition GPU_state.hh:50
void GPU_memory_barrier(GPUBarrier barrier)
Definition gpu_state.cc:326
void GPU_storagebuf_free(blender::gpu::StorageBuf *ssbo)
void GPU_storagebuf_bind(blender::gpu::StorageBuf *ssbo, int slot)
void GPU_uniformbuf_free(blender::gpu::UniformBuf *ubo)
void GPU_uniformbuf_bind(blender::gpu::UniformBuf *ubo, int slot)
blender::gpu::UniformBuf * GPU_uniformbuf_create_ex(size_t size, const void *data, const char *name)
void GPU_uniformbuf_update(blender::gpu::UniformBuf *ubo, const void *data)
blender::gpu::VertBuf * GPU_vertbuf_create_with_format_ex(const GPUVertFormat &format, GPUUsageType usage)
void GPU_vertbuf_tag_dirty(blender::gpu::VertBuf *verts)
#define GPU_vertbuf_init_with_format(verts, format)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
blender::gpu::VertBuf * GPU_vertbuf_calloc()
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_bind_as_ssbo(blender::gpu::VertBuf *verts, int binding)
void GPU_vertbuf_init_with_format_ex(blender::gpu::VertBuf &verts, const GPUVertFormat &format, GPUUsageType)
@ GPU_USAGE_STATIC
@ GPU_USAGE_DYNAMIC
uint GPU_vertformat_attr_add(GPUVertFormat *format, blender::StringRef name, blender::gpu::VertAttrType type)
GPUVertCompType
#define MEM_SAFE_FREE(v)
volatile int lock
@ BM_ELEM_HIDDEN
@ BM_ELEM_SELECT
@ BM_ELEM_SMOOTH
#define BM_elem_index_get(ele)
#define BM_elem_flag_test(ele, hflag)
#define BM_ITER_MESH(ele, iter, bm, itype)
@ BM_FACES_OF_MESH
BMesh const char void * data
BMesh * bm
ATTR_WARN_UNUSED_RESULT const BMVert * v
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
AttributeSet attributes
constexpr void fill(const T &value) const
Definition BLI_span.hh:517
constexpr bool is_empty() const
Definition BLI_span.hh:260
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
constexpr bool is_empty() const
Definition BLI_span.hh:260
constexpr bool contains(const T &value) const
Definition BLI_span.hh:277
GAttributeReader lookup_or_default(StringRef attribute_id, AttrDomain domain, AttrType data_type, const void *default_value=nullptr) const
MutableSpan< T > data()
gpu::VertBuf * get_face_varying_source_buf(const int face_varying_channel)
gpu::StorageBuf * get_face_varying_patch_index_buf(const int face_varying_channel)
gpu::StorageBuf * create_patch_arrays_buf()
gpu::StorageBuf * create_face_varying_patch_array_buf(const int face_varying_channel)
int get_face_varying_source_offset(const int face_varying_channel) const
gpu::StorageBuf * get_face_varying_patch_param_buf(const int face_varying_channel)
void getPatchMap(blender::gpu::VertBuf *patch_map_handles, blender::gpu::VertBuf *patch_map_quadtree, int *min_patch_face, int *max_patch_face, int *max_depth, int *patches_are_triangular)
nullptr float
#define SUBDIV_COARSE_FACE_FLAG_HIDDEN
#define SUBDIV_COARSE_FACE_FLAG_HIDDEN_MASK
#define SUBDIV_COARSE_FACE_FLAG_ACTIVE_MASK
#define SUBDIV_COARSE_FACE_FLAG_OFFSET
#define SUBDIV_COARSE_FACE_FLAG_SMOOTH_MASK
#define SUBDIV_COARSE_FACE_FLAG_SMOOTH
#define SUBDIV_COARSE_FACE_FLAG_ACTIVE
#define SUBDIV_COARSE_FACE_FLAG_SELECT_MASK
#define SUBDIV_LOCAL_WORK_GROUP_SIZE
#define SUBDIV_COARSE_FACE_FLAG_SELECT
#define SUBDIV_COARSE_FACE_LOOP_START_MASK
blender::gpu::Shader * DRW_shader_subdiv_get(SubdivShaderType shader_type)
blender::gpu::Shader * DRW_shader_subdiv_interp_corner_normals_get()
blender::gpu::Shader * DRW_shader_subdiv_custom_data_get(GPUVertCompType comp_type, int dimensions)
@ PATCH_EVALUATION_FACE_DOTS_WITH_NORMALS
#define LOOP_NORMALS_POS_SLOT
#define LOOP_NORMALS_VERT_NORMALS_BUF_SLOT
#define EDGE_FAC_EDGE_FAC_BUF_SLOT
#define LINES_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define TRIS_OUTPUT_TRIS_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_NORMALS_BUF_SLOT
#define NORMALS_ACCUMULATE_POS_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_POS_BUF_SLOT
#define PAINT_OVERLAY_EXTRA_INPUT_VERT_ORIG_INDEX_SLOT
#define PATCH_EVALUATION_PATCH_ARRAY_BUFFER_BUF_SLOT
#define CUSTOM_DATA_SOURCE_DATA_BUF_SLOT
#define CUSTOM_DATA_FACE_PTEX_OFFSET_BUF_SLOT
#define LOOP_NORMALS_OUTPUT_LNOR_BUF_SLOT
#define LINES_OUTPUT_LINES_BUF_SLOT
#define PATCH_EVALUATION_INPUT_VERTEX_ORIG_INDEX_BUF_SLOT
#define SCULPT_DATA_SCULPT_DATA_BUF_SLOT
#define SHADER_DATA_BUF_SLOT
#define PATCH_EVALUATION_QUAD_NODES_BUF_SLOT
#define PATCH_EVALUATION_INPUT_PATCH_HANDLES_BUF_SLOT
#define PATCH_EVALUATION_SOURCE_EXTRA_VERTEX_BUFFER_BUF_SLOT
#define STRETCH_ANGLE_UV_STRETCHES_BUF_SLOT
#define NORMALS_ACCUMULATE_FACE_ADJACENCY_LISTS_BUF_SLOT
#define PATCH_EVALUATION_PATCH_PARAM_BUFFER_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_FDOTS_VERTEX_BUFFER_BUF_SLOT
#define LINES_LINES_LOOSE_FLAGS
#define EDGE_FAC_POS_BUF_SLOT
#define EDGE_FAC_POLY_OTHER_MAP_BUF_SLOT
#define LINES_INPUT_EDGE_DRAW_FLAG_BUF_SLOT
#define TRIS_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define PATCH_EVALUATION_PATCH_COORDS_BUF_SLOT
#define PATCH_EVALUATION_PATCH_INDEX_BUFFER_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_INDICES_BUF_SLOT
#define NORMALS_ACCUMULATE_NORMALS_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_ORCOS_BUF_SLOT
#define SCULPT_DATA_SCULPT_FACE_SET_COLOR_BUF_SLOT
#define LOOP_NORMALS_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define SCULPT_DATA_SCULPT_MASK_BUF_SLOT
#define PAINT_OVERLAY_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define EDGE_FAC_EDGE_DRAW_FLAG_BUF_SLOT
#define STRETCH_ANGLE_POS_BUF_SLOT
#define PATCH_EVALUATION_SOURCE_VERTEX_BUFFER_BUF_SLOT
#define TRIS_FACE_MAT_OFFSET
#define STRETCH_AREA_COARSE_STRETCH_AREA_BUF_SLOT
#define CUSTOM_DATA_DESTINATION_DATA_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_FVAR_BUF_SLOT
#define SUBDIV_FACE_OFFSET_BUF_SLOT
#define CUSTOM_DATA_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define NORMALS_ACCUMULATE_VERTEX_LOOP_MAP_BUF_SLOT
#define STRETCH_AREA_SUBDIV_STRETCH_AREA_BUF_SLOT
#define PATCH_EVALUATION_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define NORMALS_ACCUMULATE_FACE_ADJACENCY_OFFSETS_BUF_SLOT
#define LOOP_NORMALS_VERTEX_LOOP_MAP_BUF_SLOT
#define PAINT_OVERLAY_OUTPUT_FLAG_SLOT
#define STRETCH_ANGLE_UVS_BUF_SLOT
#define CUSTOM_DATA_PATCH_COORDS_BUF_SLOT
void openSubdiv_deleteEvaluatorCache(OpenSubdiv_EvaluatorCache *evaluator_cache)
OpenSubdiv_EvaluatorCache * openSubdiv_createEvaluatorCache(eOpenSubdivEvaluator evaluator_type)
Extraction of Mesh data into VBO to feed to GPU.
static float verts[][3]
uint pos
format
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
void * MEM_dupallocN(const void *vmemh)
Definition mallocn.cc:143
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
static char faces[256]
GroupedSpan< int > build_vert_to_edge_map(Span< int2 > edges, int verts_num, Array< int > &r_offsets, Array< int > &r_indices)
void free(Subdiv *subdiv)
Definition subdiv.cc:190
bool eval_begin_from_mesh(Subdiv *subdiv, const Mesh *mesh, eSubdivEvaluatorType evaluator_type, Span< float3 > coarse_vert_positions={}, OpenSubdiv_EvaluatorCache *evaluator_cache=nullptr)
bool foreach_subdiv_geometry(Subdiv *subdiv, const ForeachContext *context, const ToMeshSettings *mesh_settings, const Mesh *coarse_mesh)
Span< int > face_ptex_offset_get(Subdiv *subdiv)
Definition subdiv.cc:214
BLI_INLINE BMFace * bm_original_face_get(const MeshRenderData &mr, int idx)
static void draw_subdiv_init_ubo_storage(const DRWSubdivCache &cache, DRWSubdivUboStorage *ubo, const int src_offset, const int dst_offset, const uint total_dispatch_size, const bool has_sculpt_mask, const uint edge_loose_offset)
static void draw_subdiv_cache_free_material_data(DRWSubdivCache &cache)
void draw_subdiv_build_edge_fac_buffer(const DRWSubdivCache &cache, gpu::VertBuf *pos, gpu::VertBuf *edge_draw_flag, gpu::VertBuf *poly_other_map, gpu::VertBuf *edge_fac)
static void draw_subdiv_free_edit_mode_cache(DRWSubdivCache &cache)
static bool draw_subdiv_cache_need_face_data(const DRWSubdivCache &cache)
void draw_subdiv_extract_uvs(const DRWSubdivCache &cache, gpu::VertBuf *uvs, const int face_varying_channel, const int dst_offset)
gpu::VertBufPtr draw_subdiv_init_origindex_buffer(int32_t *vert_origindex, uint num_loops, uint loose_len)
void draw_subdiv_build_edituv_stretch_area_buffer(const DRWSubdivCache &cache, gpu::VertBuf *coarse_data, gpu::VertBuf *subdiv_data)
static void draw_patch_map_free(DRWPatchMap *gpu_patch_map)
void draw_subdiv_build_lnor_buffer(const DRWSubdivCache &cache, gpu::VertBuf *pos, gpu::VertBuf *vert_normals, gpu::VertBuf *subdiv_corner_verts, gpu::VertBuf *lnor)
static void draw_subdiv_cache_ensure_mat_offsets(DRWSubdivCache &cache, const Mesh *mesh_eval, uint mat_len)
void draw_subdiv_build_fdots_buffers(const DRWSubdivCache &cache, gpu::VertBuf *fdots_pos, gpu::VertBuf *fdots_nor, gpu::IndexBuf *fdots_indices)
void draw_subdiv_interp_corner_normals(const DRWSubdivCache &cache, gpu::VertBuf &src_data, gpu::VertBuf &dst_data)
void DRW_subdivide_loose_geom(DRWSubdivCache &subdiv_cache, const MeshBufferCache &cache)
void draw_subdiv_interp_custom_data(const DRWSubdivCache &cache, gpu::VertBuf &src_data, gpu::VertBuf &dst_data, GPUVertCompType comp_type, int dimensions, int dst_offset)
static blender::Mutex gpu_subdiv_queue_mutex
void draw_subdiv_cache_free(DRWSubdivCache &cache)
void draw_subdiv_build_paint_overlay_flag_buffer(const DRWSubdivCache &cache, gpu::VertBuf &flags)
void draw_subdiv_build_tris_buffer(const DRWSubdivCache &cache, gpu::IndexBuf *subdiv_tris, const int material_count)
static LinkNode * gpu_subdiv_free_queue
void DRW_subdiv_cache_free(bke::subdiv::Subdiv *subdiv)
void draw_subdiv_build_lines_buffer(const DRWSubdivCache &cache, gpu::IndexBuf *lines_indices)
static void drw_subdiv_compute_dispatch(const DRWSubdivCache &cache, gpu::Shader *shader, const int src_offset, const int dst_offset, uint total_dispatch_size, const bool has_sculpt_mask=false, const uint edge_loose_offset=0)
static uint get_dispatch_size(uint elements)
void draw_subdiv_extract_pos(const DRWSubdivCache &cache, gpu::VertBuf *pos, gpu::VertBuf *orco)
static bool draw_subdiv_create_requested_buffers(Object &ob, Mesh &mesh, MeshBatchCache &batch_cache, MeshBufferCache &mbc, const Span< IBOType > ibo_requests, const Span< VBOType > vbo_requests, const bool is_editmode, const bool is_paint_mode, const bool do_final, const bool do_uvedit, const bool do_cage, const ToolSettings *ts, const bool use_hide)
void mesh_buffer_cache_create_requested_subdiv(MeshBatchCache &cache, MeshBufferCache &mbc, Span< IBOType > ibo_requests, Span< VBOType > vbo_requests, DRWSubdivCache &subdiv_cache, MeshRenderData &mr)
static DRWSubdivCache & mesh_batch_cache_ensure_subdiv_cache(MeshBatchCache &mbc)
void draw_subdiv_build_lines_loose_buffer(const DRWSubdivCache &cache, gpu::IndexBuf *lines_indices, gpu::VertBuf *lines_flags, uint edge_loose_offset, uint num_loose_edges)
void draw_subdiv_build_edituv_stretch_angle_buffer(const DRWSubdivCache &cache, gpu::VertBuf *pos, gpu::VertBuf *uvs, int uvs_offset, gpu::VertBuf *stretch_angles)
static uint32_t compute_coarse_face_flag_bm(BMFace *f, BMFace *efa_act)
static void draw_subdiv_cache_extra_coarse_face_data_mesh(const MeshRenderData &mr, const Mesh *mesh, MutableSpan< uint32_t > flags_data)
static void draw_subdiv_ubo_update_and_bind(const DRWSubdivCache &cache, const int src_offset, const int dst_offset, const uint total_dispatch_size, const bool has_sculpt_mask=false, const uint edge_loose_offset=0)
static uint tris_count_from_number_of_loops(const uint number_of_loops)
void draw_subdiv_build_sculpt_data_buffer(const DRWSubdivCache &cache, gpu::VertBuf *mask_vbo, gpu::VertBuf *face_set_vbo, gpu::VertBuf *sculpt_data)
void DRW_create_subdivision(Object &ob, Mesh &mesh, MeshBatchCache &batch_cache, MeshBufferCache &mbc, const Span< IBOType > ibo_requests, const Span< VBOType > vbo_requests, const bool is_editmode, const bool is_paint_mode, const bool do_final, const bool do_uvedit, const bool do_cage, const ToolSettings *ts, const bool use_hide)
gpu::VertBuf * draw_subdiv_build_origindex_buffer(int *vert_origindex, uint num_loops)
static void draw_subdiv_cache_extra_coarse_face_data_mapped(const Mesh *mesh, BMesh *bm, MeshRenderData &mr, MutableSpan< uint32_t > flags_data)
static void draw_subdiv_cache_update_extra_coarse_face_data(DRWSubdivCache &cache, const Mesh *mesh, MeshRenderData &mr)
MeshRenderData mesh_render_data_create(Object &object, Mesh &mesh, const bool is_editmode, const bool is_paint_mode, const bool do_final, const bool do_uvedit, const bool use_hide, const ToolSettings *ts)
static const GPUVertFormat & get_origindex_format()
void draw_subdiv_accumulate_normals(const DRWSubdivCache &cache, gpu::VertBuf *pos, gpu::VertBuf *face_adjacency_offsets, gpu::VertBuf *face_adjacency_lists, gpu::VertBuf *vertex_loop_map, gpu::VertBuf *vert_normals)
static OpenSubdiv_EvaluatorCache * g_subdiv_evaluator_cache
static uint64_t g_subdiv_evaluator_users
static void draw_subdiv_cache_extra_coarse_face_data_bm(BMesh *bm, BMFace *efa_act, MutableSpan< uint32_t > flags_data)
std::unique_ptr< gpu::VertBuf, gpu::VertBufDeleter > VertBufPtr
void build_reverse_offsets(Span< int > indices, MutableSpan< int > offsets)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
std::mutex Mutex
Definition BLI_mutex.hh:47
@ OPENSUBDIV_EVALUATOR_GPU
@ OPENSUBDIV_EVALUATOR_CPU
#define sqrtf
#define ceilf
BMLoop * l_first
int totface
MeshRuntimeHandle * runtime
CustomData vert_data
int faces_num
int verts_num
blender::opensubdiv::EvalOutputAPI * eval_output
eOpenSubdivEvaluator type
blender::bke::subdiv::Settings settings
blender::bke::subdiv::Subdiv * subdiv_gpu
OpenSubdiv_Evaluator * evaluator
blender::opensubdiv::TopologyRefinerImpl * topology_refiner
gpu::VertBuf * subdiv_vertex_face_adjacency_offsets
VArraySpan< bool > sharp_faces
VArraySpan< bool > select_poly
bke::MeshNormalDomain normals_domain
i
Definition text_draw.cc:230
uint len
uint8_t flag
Definition wm_window.cc:145