Blender v4.5 — source listing of mesh_calc_edges.cc
(Extracted from a generated documentation page; the canonical file lives in the
Blender source repository under source/blender/blenkernel/intern/.)
/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include "BLI_array_utils.hh"
#include "BLI_math_base.h"
#include "BLI_ordered_edge.hh"
#include "BLI_task.hh"
#include "BLI_threads.h"
#include "BLI_vector_set.hh"

#include "BKE_attribute.hh"
#include "BKE_customdata.hh"
#include "BKE_mesh.hh"

20namespace blender::bke {
21
22namespace calc_edges {
23
28static uint64_t edge_hash_2(const OrderedEdge &edge)
29{
30 return edge.v_low;
31}
32
34 16,
40
41static void reserve_hash_maps(const Mesh &mesh,
42 const bool keep_existing_edges,
43 MutableSpan<EdgeMap> edge_maps)
44{
45 const int totedge_guess = std::max(keep_existing_edges ? mesh.edges_num : 0, mesh.faces_num * 2);
47 edge_maps, [&](EdgeMap &edge_map) { edge_map.reserve(totedge_guess / edge_maps.size()); });
48}
49
51 const uint32_t parallel_mask,
52 MutableSpan<EdgeMap> edge_maps)
53{
54 /* Assume existing edges are valid. */
55 const Span<int2> edges = mesh.edges();
56 threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
57 const int task_index = &edge_map - edge_maps.data();
58 for (const int2 edge : edges) {
59 const OrderedEdge ordered_edge(edge);
60 /* Only add the edge when it belongs into this map. */
61 if (task_index == (parallel_mask & edge_hash_2(ordered_edge))) {
62 edge_map.add(ordered_edge);
63 }
64 }
65 });
66}
67
69 const uint32_t parallel_mask,
70 MutableSpan<EdgeMap> edge_maps)
71{
72 const OffsetIndices<int> faces = mesh.faces();
73 const Span<int> corner_verts = mesh.corner_verts();
74 threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
75 const int task_index = &edge_map - edge_maps.data();
76 for (const int face_i : faces.index_range()) {
77 const IndexRange face = faces[face_i];
78 for (const int corner : face) {
79 const int vert = corner_verts[corner];
80 const int vert_prev = corner_verts[bke::mesh::face_corner_prev(face, corner)];
81 /* Can only be the same when the mesh data is invalid. */
82 if (LIKELY(vert_prev != vert)) {
83 const OrderedEdge ordered_edge(vert_prev, vert);
84 /* Only add the edge when it belongs into this map. */
85 if (task_index == (parallel_mask & edge_hash_2(ordered_edge))) {
86 edge_map.add(ordered_edge);
87 }
88 }
89 }
90 }
91 });
92}
93
95 const OffsetIndices<int> edge_offsets,
96 MutableSpan<int2> new_edges)
97{
98 threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
99 const int task_index = &edge_map - edge_maps.data();
100 if (edge_offsets[task_index].is_empty()) {
101 return;
102 }
103
104 MutableSpan<int2> result_edges = new_edges.slice(edge_offsets[task_index]);
105 result_edges.copy_from(edge_map.as_span().cast<int2>());
106 });
107}
108
110 const Span<int> corner_verts,
111 const Span<EdgeMap> edge_maps,
112 const uint32_t parallel_mask,
113 const OffsetIndices<int> edge_offsets,
114 MutableSpan<int> corner_edges)
115{
116 threading::parallel_for(faces.index_range(), 100, [&](IndexRange range) {
117 for (const int face_index : range) {
118 const IndexRange face = faces[face_index];
119 for (const int corner : face) {
120 const int vert = corner_verts[corner];
121 const int vert_prev = corner_verts[bke::mesh::face_corner_next(face, corner)];
122 if (UNLIKELY(vert == vert_prev)) {
123 /* This is an invalid edge; normally this does not happen in Blender,
124 * but it can be part of an imported mesh with invalid geometry. See
125 * #76514. */
126 corner_edges[corner] = 0;
127 continue;
128 }
129
130 const OrderedEdge ordered_edge(vert_prev, vert);
131 const int task_index = parallel_mask & edge_hash_2(ordered_edge);
132 const EdgeMap &edge_map = edge_maps[task_index];
133 const int edge_i = edge_map.index_of(ordered_edge);
134 const int edge_index = edge_offsets[task_index][edge_i];
135 corner_edges[corner] = edge_index;
136 }
137 }
138 });
139}
140
142{
143 /* Don't use parallelization when the mesh is small. */
144 if (mesh.faces_num < 1000) {
145 return 1;
146 }
147 /* Use at most 8 separate hash tables. Using more threads has diminishing returns. These threads
148 * are better off doing something more useful instead. */
149 const int system_thread_count = BLI_system_thread_count();
150 return power_of_2_min_i(std::min(8, system_thread_count));
151}
152
154{
155 threading::parallel_for_each(edge_maps, [](EdgeMap &edge_map) { edge_map.clear(); });
156}
157
158static void deselect_known_edges(const OffsetIndices<int> edge_offsets,
159 const Span<EdgeMap> edge_maps,
160 const uint32_t parallel_mask,
161 const Span<int2> known_edges,
162 MutableSpan<bool> selection)
163{
164 threading::parallel_for(known_edges.index_range(), 2048, [&](const IndexRange range) {
165 for (const int2 original_edge : known_edges.slice(range)) {
166 const OrderedEdge ordered_edge(original_edge);
167 const int task_index = parallel_mask & edge_hash_2(ordered_edge);
168 const EdgeMap &edge_map = edge_maps[task_index];
169 const int edge_i = edge_map.index_of(ordered_edge);
170 const int edge_index = edge_offsets[task_index][edge_i];
171 selection[edge_index] = false;
172 }
173 });
174}
175
176} // namespace calc_edges
177
178void mesh_calc_edges(Mesh &mesh, bool keep_existing_edges, const bool select_new_edges)
179{
180 /* Parallelization is achieved by having multiple hash tables for different subsets of edges.
181 * Each edge is assigned to one of the hash maps based on the lower bits of a hash value. */
182 const int parallel_maps = calc_edges::get_parallel_maps_count(mesh);
183 BLI_assert(is_power_of_2_i(parallel_maps));
184 const uint32_t parallel_mask = uint32_t(parallel_maps) - 1;
185 Array<calc_edges::EdgeMap> edge_maps(parallel_maps);
186 calc_edges::reserve_hash_maps(mesh, keep_existing_edges, edge_maps);
187
188 /* Add all edges. */
189 if (keep_existing_edges) {
190 calc_edges::add_existing_edges_to_hash_maps(mesh, parallel_mask, edge_maps);
191 }
192 calc_edges::add_face_edges_to_hash_maps(mesh, parallel_mask, edge_maps);
193
194 Array<int> edge_sizes(edge_maps.size() + 1);
195 for (const int i : edge_maps.index_range()) {
196 edge_sizes[i] = edge_maps[i].size();
197 }
199
200 /* Create new edges. */
201 MutableAttributeAccessor attributes = mesh.attributes_for_write();
202 attributes.add<int>(".corner_edge", AttrDomain::Corner, AttributeInitConstruct());
203 MutableSpan<int2> new_edges(MEM_calloc_arrayN<int2>(edge_offsets.total_size(), __func__),
204 edge_offsets.total_size());
205 calc_edges::serialize_and_initialize_deduplicated_edges(edge_maps, edge_offsets, new_edges);
207 mesh.corner_verts(),
208 edge_maps,
209 parallel_mask,
210 edge_offsets,
211 mesh.corner_edges_for_write());
212
213 Array<int2> original_edges;
214 if (keep_existing_edges && select_new_edges) {
215 original_edges.reinitialize(mesh.edges_num);
216 array_utils::copy(mesh.edges(), original_edges.as_mutable_span());
217 }
218
219 /* Free old CustomData and assign new one. */
220 CustomData_free(&mesh.edge_data);
221 CustomData_reset(&mesh.edge_data);
222 mesh.edges_num = edge_offsets.total_size();
223 attributes.add<int2>(".edge_verts", AttrDomain::Edge, AttributeInitMoveArray(new_edges.data()));
224
225 if (select_new_edges) {
226 MutableAttributeAccessor attributes = mesh.attributes_for_write();
227 SpanAttributeWriter<bool> select_edge = attributes.lookup_or_add_for_write_span<bool>(
228 ".select_edge", AttrDomain::Edge);
229 if (select_edge) {
230 select_edge.span.fill(true);
231 if (!original_edges.is_empty()) {
233 edge_offsets, edge_maps, parallel_mask, original_edges, select_edge.span);
234 }
235 select_edge.finish();
236 }
237 }
238
239 if (!keep_existing_edges) {
240 /* All edges are rebuilt from the faces, so there are no loose edges. */
241 mesh.tag_loose_edges_none();
242 }
243
244 /* Explicitly clear edge maps, because that way it can be parallelized. */
246}
247
248} // namespace blender::bke
/* ---------------------------------------------------------------------------
 * Generated Doxygen cross-reference listing (extraction residue, retained for
 * reference; not part of the original source file):
 *
 * CustomData interface, see also DNA_customdata_types.h.
 * void CustomData_reset(CustomData *data)
 * void CustomData_free(CustomData *data)
 * #define BLI_assert(a)
 * Definition BLI_assert.h:46
 * MINLINE int power_of_2_min_i(int n)
 * MINLINE int is_power_of_2_i(int n)
 * int BLI_system_thread_count(void)
 * Definition threads.cc:253
 * #define LIKELY(x)
 * unsigned long long int uint64_t
 * int64_t size() const
 * Definition BLI_array.hh:245
 * MutableSpan< T > as_mutable_span()
 * Definition BLI_array.hh:237
 * IndexRange index_range() const
 * Definition BLI_array.hh:349
 * void reinitialize(const int64_t new_size)
 * Definition BLI_array.hh:398
 * bool is_empty() const
 * Definition BLI_array.hh:253
 * constexpr int64_t size() const
 * Definition BLI_span.hh:493
 * constexpr MutableSpan slice(const int64_t start, const int64_t size) const
 * Definition BLI_span.hh:573
 * constexpr T * data() const
 * Definition BLI_span.hh:539
 * constexpr void copy_from(Span< T > values) const
 * Definition BLI_span.hh:739
 * constexpr IndexRange index_range() const
 * Definition BLI_span.hh:401
 * bool add(const Key &key)
 * void reserve(const int64_t n)
 * Span< Key > as_span() const
 * GSpanAttributeWriter lookup_or_add_for_write_span(StringRef attribute_id, AttrDomain domain, eCustomDataType data_type, const AttributeInit &initializer=AttributeInitDefaultValue())
 * bool add(const StringRef attribute_id, const AttrDomain domain, const eCustomDataType data_type, const AttributeInit &initializer)
 * void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
 * Definition mallocn.cc:123
 * static char faces[256]
 * void copy(const GVArray &src, GMutableSpan dst, int64_t grain_size=4096)
 * static void add_existing_edges_to_hash_maps(const Mesh &mesh, const uint32_t parallel_mask, MutableSpan< EdgeMap > edge_maps)
 * static uint64_t edge_hash_2(const OrderedEdge &edge)
 * static void update_edge_indices_in_face_loops(const OffsetIndices< int > faces, const Span< int > corner_verts, const Span< EdgeMap > edge_maps, const uint32_t parallel_mask, const OffsetIndices< int > edge_offsets, MutableSpan< int > corner_edges)
 * static void serialize_and_initialize_deduplicated_edges(MutableSpan< EdgeMap > edge_maps, const OffsetIndices< int > edge_offsets, MutableSpan< int2 > new_edges)
 * static int get_parallel_maps_count(const Mesh &mesh)
 * static void deselect_known_edges(const OffsetIndices< int > edge_offsets, const Span< EdgeMap > edge_maps, const uint32_t parallel_mask, const Span< int2 > known_edges, MutableSpan< bool > selection)
 * static void clear_hash_tables(MutableSpan< EdgeMap > edge_maps)
 * static void reserve_hash_maps(const Mesh &mesh, const bool keep_existing_edges, MutableSpan< EdgeMap > edge_maps)
 * static void add_face_edges_to_hash_maps(const Mesh &mesh, const uint32_t parallel_mask, MutableSpan< EdgeMap > edge_maps)
 * VectorSet< OrderedEdge, 16, DefaultProbingStrategy, DefaultHash< OrderedEdge >, DefaultEquality< OrderedEdge >, SimpleVectorSetSlot< OrderedEdge, int >, GuardedAllocator > EdgeMap
 * int face_corner_prev(const IndexRange face, const int corner)
 * Definition BKE_mesh.hh:290
 * void mesh_calc_edges(Mesh &mesh, bool keep_existing_edges, bool select_new_edges)
 * OffsetIndices< int > accumulate_counts_to_offsets(MutableSpan< int > counts_to_offsets, int start_offset=0)
 * void parallel_for_each(Range &&range, const Function &function)
 * Definition BLI_task.hh:56
 * void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
 * Definition BLI_task.hh:93
 * VecBase< int32_t, 2 > int2
 * PythonProbingStrategy<> DefaultProbingStrategy
 * i
 * Definition text_draw.cc:230
 * ------------------------------------------------------------------------- */