Blender V4.3
extract_mesh_vbo_pos.cc
/* SPDX-FileCopyrightText: 2021 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup draw
 */

#include "BLI_array_utils.hh"

#include "extract_mesh.hh"

#include "draw_subdivision.hh"
namespace blender::draw {

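/* Fill the position VBO from mesh arrays: corner positions first, then the two endpoints of
 * each loose edge, then loose vertex positions, matching the layout allocated in
 * `extract_positions`. */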
static void extract_positions_mesh(const MeshRenderData &mr, MutableSpan<float3> vbo_data)
{
  MutableSpan corners_data = vbo_data.take_front(mr.corners_num);
  MutableSpan loose_edge_data = vbo_data.slice(mr.corners_num, mr.loose_edges.size() * 2);
  MutableSpan loose_vert_data = vbo_data.take_back(mr.loose_verts.size());

  threading::memory_bandwidth_bound_task(
      mr.vert_positions.size_in_bytes() + mr.corner_verts.size_in_bytes() +
          vbo_data.size_in_bytes() + mr.loose_edges.size(),
      [&]() {
        array_utils::gather(mr.vert_positions, mr.corner_verts, corners_data);
        extract_mesh_loose_edge_data(mr.vert_positions, mr.edges, mr.loose_edges, loose_edge_data);
        array_utils::gather(mr.vert_positions, mr.loose_verts, loose_vert_data);
      });
}

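/* Same layout as the mesh path, but reading positions from an edit-mode BMesh. */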
static void extract_positions_bm(const MeshRenderData &mr, MutableSpan<float3> vbo_data)
{
  const BMesh &bm = *mr.bm;
  MutableSpan corners_data = vbo_data.take_front(mr.corners_num);
  MutableSpan loose_edge_data = vbo_data.slice(mr.corners_num, mr.loose_edges.size() * 2);
  MutableSpan loose_vert_data = vbo_data.take_back(mr.loose_verts.size());

  threading::parallel_for(IndexRange(bm.totface), 2048, [&](const IndexRange range) {
    for (const int face_index : range) {
      const BMFace &face = *BM_face_at_index(&const_cast<BMesh &>(bm), face_index);
      const BMLoop *loop = BM_FACE_FIRST_LOOP(&face);
      for ([[maybe_unused]] const int i : IndexRange(face.len)) {
        const int index = BM_elem_index_get(loop);
        corners_data[index] = bm_vert_co_get(mr, loop->v);
        loop = loop->next;
      }
    }
  });

  const Span<int> loose_edges = mr.loose_edges;
  threading::parallel_for(loose_edges.index_range(), 4096, [&](const IndexRange range) {
    for (const int i : range) {
      const BMEdge &edge = *BM_edge_at_index(&const_cast<BMesh &>(bm), loose_edges[i]);
      loose_edge_data[i * 2 + 0] = bm_vert_co_get(mr, edge.v1);
      loose_edge_data[i * 2 + 1] = bm_vert_co_get(mr, edge.v2);
    }
  });

  const Span<int> loose_verts = mr.loose_verts;
  threading::parallel_for(loose_verts.index_range(), 2048, [&](const IndexRange range) {
    for (const int i : range) {
      const BMVert &vert = *BM_vert_at_index(&const_cast<BMesh &>(bm), loose_verts[i]);
      loose_vert_data[i] = bm_vert_co_get(mr, &vert);
    }
  });
}

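/* Entry point: allocate the VBO (corners, then loose edge endpoints, then loose vertices) and
 * dispatch to the mesh or BMesh extraction path. */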
void extract_positions(const MeshRenderData &mr, gpu::VertBuf &vbo)
{
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }
  GPU_vertbuf_init_with_format(vbo, format);
  GPU_vertbuf_data_alloc(vbo, mr.corners_num + mr.loose_edges.size() * 2 + mr.loose_verts.size());

  MutableSpan vbo_data = vbo.data<float3>();
  if (mr.extract_type == MR_EXTRACT_MESH) {
    extract_positions_mesh(mr, vbo_data);
  }
  else {
    extract_positions_bm(mr, vbo_data);
  }
}

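/* Format for GPU-computed vertex normals used by the subdivision path. */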
static const GPUVertFormat &get_normals_format()
{
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
    GPU_vertformat_alias_add(&format, "lnor");
  }
  return format;
}

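/* Format for custom loop normals interpolated from the coarse mesh. */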
static const GPUVertFormat &get_custom_normals_format()
{
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "lnor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    GPU_vertformat_alias_add(&format, "nor");
  }
  return format;
}

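/* Per-vertex overlay flags: -1 for hidden or non-original vertices, 1 for selected, 0
 * otherwise. */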
static void extract_vertex_flags(const MeshRenderData &mr, char *flags)
{
  for (int i = 0; i < mr.verts_num; i++) {
    char *flag = &flags[i];
    const bool vert_hidden = !mr.hide_vert.is_empty() && mr.hide_vert[i];
    /* Flag for paint mode overlay. */
    if (vert_hidden || ((mr.orig_index_vert) && (mr.orig_index_vert[i] == ORIGINDEX_NONE))) {
      *flag = -1;
    }
    else if (!mr.select_vert.is_empty() && mr.select_vert[i]) {
      *flag = 1;
    }
    else {
      *flag = 0;
    }
  }
}

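/* Upload positions for loose geometry, appended after the subdivided face corners. Loose edge
 * positions come pre-subdivided from the cache; loose vertices use the coarse positions. */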
static void extract_loose_positions_subdiv(const DRWSubdivCache &subdiv_cache,
                                           const MeshRenderData &mr,
                                           gpu::VertBuf &vbo)
{
  const Span<int> loose_verts = mr.loose_verts;
  const int loose_edges_num = mr.loose_edges.size();
  if (loose_verts.is_empty() && loose_edges_num == 0) {
    return;
  }

  /* TODO(@kevindietrich): replace this when compressed normals are supported. */
  struct SubdivPosNorLoop {
    float pos[3];
    float nor[3];
    float flag;
  };

  /* Make sure buffer is active for sending loose data. */
  GPU_vertbuf_use(&vbo);

  const int resolution = subdiv_cache.resolution;
  const Span<float3> cached_positions = subdiv_cache.loose_edge_positions;
  const int verts_per_edge = subdiv_verts_per_coarse_edge(subdiv_cache);
  const int edges_per_edge = subdiv_edges_per_coarse_edge(subdiv_cache);

  const int loose_geom_start = subdiv_cache.num_subdiv_loops;

  SubdivPosNorLoop edge_data[2];
  memset(edge_data, 0, sizeof(SubdivPosNorLoop) * 2);
  for (const int i : IndexRange(loose_edges_num)) {
    const int edge_offset = loose_geom_start + i * verts_per_edge;
    const Span<float3> positions = cached_positions.slice(i * resolution, resolution);
    for (const int edge : IndexRange(edges_per_edge)) {
      copy_v3_v3(edge_data[0].pos, positions[edge + 0]);
      copy_v3_v3(edge_data[1].pos, positions[edge + 1]);
      GPU_vertbuf_update_sub(&vbo,
                             (edge_offset + edge * 2) * sizeof(SubdivPosNorLoop),
                             sizeof(SubdivPosNorLoop) * 2,
                             &edge_data);
    }
  }

  const int loose_verts_start = loose_geom_start + loose_edges_num * verts_per_edge;
  const Span<float3> positions = mr.vert_positions;

  SubdivPosNorLoop vert_data;
  memset(&vert_data, 0, sizeof(SubdivPosNorLoop));
  for (const int i : loose_verts.index_range()) {
    copy_v3_v3(vert_data.pos, positions[loose_verts[i]]);
    GPU_vertbuf_update_sub(&vbo,
                           (loose_verts_start + i) * sizeof(SubdivPosNorLoop),
                           sizeof(SubdivPosNorLoop),
                           &vert_data);
  }
}

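/* Subdivision path: positions and normals are computed on the GPU from the subdivision cache
 * rather than gathered on the CPU. */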
void extract_positions_subdiv(const DRWSubdivCache &subdiv_cache,
                              const MeshRenderData &mr,
                              gpu::VertBuf &vbo,
                              gpu::VertBuf *orco_vbo)
{
  GPU_vertbuf_init_build_on_device(
      vbo, draw_subdiv_get_pos_nor_format(), subdiv_full_vbo_size(mr, subdiv_cache));

  if (subdiv_cache.num_subdiv_loops == 0) {
    extract_loose_positions_subdiv(subdiv_cache, mr, vbo);
    return;
  }

  gpu::VertBuf *flags_buffer = GPU_vertbuf_calloc();
  static GPUVertFormat flag_format = {0};
  if (flag_format.attr_len == 0) {
    GPU_vertformat_attr_add(&flag_format, "data", GPU_COMP_I32, 1, GPU_FETCH_INT);
    GPU_vertformat_alias_add(&flag_format, "flag");
  }
  GPU_vertbuf_init_with_format(*flags_buffer, flag_format);
  GPU_vertbuf_data_alloc(*flags_buffer, divide_ceil_u(mr.verts_num, 4));
  char *flags = flags_buffer->data<char>().data();
  extract_vertex_flags(mr, flags);
  GPU_vertbuf_tag_dirty(flags_buffer);

  if (orco_vbo) {
    static GPUVertFormat format = {0};
    if (format.attr_len == 0) {
      /* FIXME(fclem): We use the last component as a way to differentiate from generic vertex
       * attributes. This is a substantial waste of video-ram and should be done another way.
       * Unfortunately, at the time of writing, I did not find any other "non-disruptive"
       * alternative. */
      GPU_vertformat_attr_add(&format, "orco", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
    }
    GPU_vertbuf_init_build_on_device(*orco_vbo, format, subdiv_cache.num_subdiv_loops);
  }

  draw_subdiv_extract_pos_nor(subdiv_cache, flags_buffer, &vbo, orco_vbo);

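  /* The compute pass above fills positions; normals are then finalized either from interpolated
   * custom loop normals or by accumulating and averaging face normals. */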
  if (subdiv_cache.use_custom_loop_normals) {
    const Mesh *coarse_mesh = subdiv_cache.mesh;

    gpu::VertBuf *src_custom_normals = GPU_vertbuf_calloc();
    GPU_vertbuf_init_with_format(*src_custom_normals, get_custom_normals_format());
    GPU_vertbuf_data_alloc(*src_custom_normals, coarse_mesh->corners_num);

    src_custom_normals->data<float3>().copy_from(coarse_mesh->corner_normals());

    gpu::VertBuf *dst_custom_normals = GPU_vertbuf_calloc();
    GPU_vertbuf_init_build_on_device(
        *dst_custom_normals, get_custom_normals_format(), subdiv_cache.num_subdiv_loops);

    draw_subdiv_interp_custom_data(
        subdiv_cache, *src_custom_normals, *dst_custom_normals, GPU_COMP_F32, 3, 0);

    draw_subdiv_finalize_custom_normals(subdiv_cache, dst_custom_normals, &vbo);

    GPU_vertbuf_discard(src_custom_normals);
    GPU_vertbuf_discard(dst_custom_normals);
  }
  else {
    /* We cannot evaluate vertex normals using the limit surface, so compute them manually. */
    gpu::VertBuf *subdiv_loop_subdiv_vert_index = draw_subdiv_build_origindex_buffer(
        subdiv_cache.subdiv_loop_subdiv_vert_index, subdiv_cache.num_subdiv_loops);

    gpu::VertBuf *vert_normals = GPU_vertbuf_calloc();
    GPU_vertbuf_init_build_on_device(
        *vert_normals, get_normals_format(), subdiv_cache.num_subdiv_verts);

    draw_subdiv_accumulate_normals(subdiv_cache,
                                   &vbo,
                                   subdiv_cache.subdiv_vertex_face_adjacency_offsets,
                                   subdiv_cache.subdiv_vertex_face_adjacency,
                                   subdiv_loop_subdiv_vert_index,
                                   vert_normals);

    draw_subdiv_finalize_normals(subdiv_cache, vert_normals, subdiv_loop_subdiv_vert_index, &vbo);

    GPU_vertbuf_discard(vert_normals);
    GPU_vertbuf_discard(subdiv_loop_subdiv_vert_index);
  }

  GPU_vertbuf_discard(flags_buffer);

  extract_loose_positions_subdiv(subdiv_cache, mr, vbo);
}

}  // namespace blender::draw