Blender V5.0
evaluator_impl.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2018 Blender Foundation
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later
4 *
5 * Author: Sergey Sharybin. */
6
7#include <cassert>
8
9#ifdef _MSC_VER
10# include <iso646.h>
11#endif
12
13#include <opensubdiv/far/patchMap.h>
14#include <opensubdiv/far/patchTable.h>
15#include <opensubdiv/far/patchTableFactory.h>
16#include <opensubdiv/osd/mesh.h>
17#include <opensubdiv/osd/types.h>
18#include <opensubdiv/version.h>
19
27
28using OpenSubdiv::Far::PatchTable;
29using OpenSubdiv::Far::PatchTableFactory;
30using OpenSubdiv::Far::StencilTable;
31using OpenSubdiv::Far::StencilTableFactory;
32using OpenSubdiv::Far::StencilTableReal;
33using OpenSubdiv::Far::TopologyRefiner;
34using OpenSubdiv::Osd::PatchArray;
35using OpenSubdiv::Osd::PatchCoord;
36
37namespace blender::opensubdiv {
38
39// Array implementation which stores small data on stack (or, rather, in the class itself).
40template<typename T, int kNumMaxElementsOnStack> class StackOrHeapArray {
 41 public:
// NOTE(review): the default constructor lines are missing from this capture.
49
// Sized constructor: allocates room for `size` elements immediately.
// NOTE(review): the signature line is missing from this capture.
51 {
 52 resize(size);
 53 }
 54
// Destructor: only the heap buffer is owned dynamically; deleting a null
// heap_elements_ is a no-op, and stack_elements_ lives inside the object.
// NOTE(review): the destructor signature line is missing from this capture.
56 {
 57 delete[] heap_elements_;
 58 }
 59
 60 int size() const
 61 {
 62 return num_elements_;
 63 };
 64
// Accessor for the effective element buffer.
// NOTE(review): the signature and body lines are missing from this capture;
// the index lists `T *data()` returning effective_elements_ -- confirm.
66 {
 68 }
 69
// Grow (never shrink) the underlying storage to hold `num_elements` items.
// Shrinking only updates the logical size; capacity is retained.
 70 void resize(int num_elements)
 71 {
 72 const int old_num_elements = num_elements_;
 73 num_elements_ = num_elements;
 74 // Early output if allocation size did not change, or allocation size is smaller.
 75 // We never re-allocate, sacrificing some memory over performance.
 76 if (old_num_elements >= num_elements) {
 77 return;
 78 }
 79 // Simple case: no previously allocated buffer, can simply do one allocation.
 80 if (effective_elements_ == nullptr) {
 81 effective_elements_ = allocate(num_elements);
 82 return;
 83 }
 84 // Make new allocation, and copy elements if needed.
 85 T *old_buffer = effective_elements_;
 86 effective_elements_ = allocate(num_elements);
 87 if (old_buffer != effective_elements_) {
 88 memcpy(
 89 effective_elements_, old_buffer, sizeof(T) * std::min(old_num_elements, num_elements));
 90 }
 91 if (old_buffer != stack_elements_) {
 92 delete[] old_buffer;
 93 }
 94 }
 95
 96 protected:
// Choose stack or heap storage for the requested element count.
// NOTE(review): `<` (not `<=`) means a request of exactly
// kNumMaxElementsOnStack elements goes to the heap even though the stack
// buffer could hold it -- harmless, but possibly unintended; confirm upstream.
 97 T *allocate(int num_elements)
 98 {
 99 if (num_elements < kNumMaxElementsOnStack) {
 100 return stack_elements_;
 101 }
 102 heap_elements_ = new T[num_elements];
 103 return heap_elements_;
 104 }
 105
 106 // Number of elements in the buffer.
// NOTE(review): the member declaration line (num_elements_) is missing from this capture.
108
 109 // Elements which are allocated on a stack (or, rather, in the same allocation as the buffer
 110 // itself).
 111 // Is used as long as buffer is smaller than kNumMaxElementsOnStack.
 112 T stack_elements_[kNumMaxElementsOnStack];
 113
 114 // Heap storage for buffer larger than kNumMaxElementsOnStack.
// NOTE(review): the member declaration line (heap_elements_) is missing from this capture.
117
 118 // Depending on the current buffer size points to either stack_elements_ or heap_elements_.
// NOTE(review): the member declaration line (effective_elements_) is missing from this capture.
120};
121
122// 32 is the number of inner vertices along a side of the patch at subdivision level 6.
124
// Resolve an array of external OpenSubdiv_PatchCoord values into OpenSubdiv's
// internal PatchCoord representation by looking up, for each (ptex_face, u, v)
// triple, the corresponding patch handle in the patch map.
// NOTE(review): the final parameter line is missing from this capture; the
// signature index lists it as `StackOrHeapPatchCoordArray *array` -- confirm.
125static void convertPatchCoordsToArray(const OpenSubdiv_PatchCoord *patch_coords,
 126 const int num_patch_coords,
 127 const PatchMap *patch_map,
 129{
 130 array->resize(num_patch_coords);
 131 for (int i = 0; i < num_patch_coords; ++i) {
// FindPatch() returns the sub-patch handle for the (u, v) location within
// the given ptex face; the handle is dereferenced without a null check.
 132 const PatchTable::PatchHandle *handle = patch_map->FindPatch(
 133 patch_coords[i].ptex_face, patch_coords[i].u, patch_coords[i].v);
 134 (array->data())[i] = PatchCoord(*handle, patch_coords[i].u, patch_coords[i].v);
 135 }
 136}
137
139// Evaluator wrapper for anonymous API.
 140
// EvalOutputAPI constructor: stores the patch map and implementation pointers.
// NOTE(review): the signature line is missing from this capture; the index
// lists it as `EvalOutputAPI(EvalOutput *implementation, PatchMap *patch_map)`.
142 : patch_map_(patch_map), implementation_(implementation)
 143{
 144}
 145
// NOTE(review): destructor body lines are missing from this capture.
150
// Forward evaluator settings to the underlying implementation.
// NOTE(review): the setSettings() signature line is missing from this capture.
152{
 153 implementation_->updateSettings(settings);
 154}
155
156void EvalOutputAPI::setCoarsePositions(const float *positions,
157 const int start_vertex_index,
158 const int num_vertices)
159{
160 // TODO(sergey): Add sanity check on indices.
161 implementation_->updateData(positions, start_vertex_index, num_vertices);
162}
163
164void EvalOutputAPI::setVaryingData(const float *varying_data,
165 const int start_vertex_index,
166 const int num_vertices)
167{
168 // TODO(sergey): Add sanity check on indices.
169 implementation_->updateVaryingData(varying_data, start_vertex_index, num_vertices);
170}
171
172void EvalOutputAPI::setVertexData(const float *vertex_data,
173 const int start_vertex_index,
174 const int num_vertices)
175{
176 // TODO(sergey): Add sanity check on indices.
177 implementation_->updateVertexData(vertex_data, start_vertex_index, num_vertices);
178}
179
180void EvalOutputAPI::setFaceVaryingData(const int face_varying_channel,
181 const float *face_varying_data,
182 const int start_vertex_index,
183 const int num_vertices)
184{
185 // TODO(sergey): Add sanity check on indices.
186 implementation_->updateFaceVaryingData(
187 face_varying_channel, face_varying_data, start_vertex_index, num_vertices);
188}
189
// Strided upload of coarse positions: one vertex worth of floats is read
// every `stride` bytes, beginning `start_offset` bytes into the buffer, and
// forwarded to the implementation one vertex at a time.
// NOTE(review): the first signature line (taking `const void *buffer`) is
// missing from this capture.
191 const int start_offset,
 192 const int stride,
 193 const int start_vertex_index,
 194 const int num_vertices)
 195{
 196 // TODO(sergey): Add sanity check on indices.
 197 const unsigned char *current_buffer = (unsigned char *)buffer;
 198 current_buffer += start_offset;
 199 for (int i = 0; i < num_vertices; ++i) {
 200 const int current_vertex_index = start_vertex_index + i;
 201 implementation_->updateData(
 202 reinterpret_cast<const float *>(current_buffer), current_vertex_index, 1);
 203 current_buffer += stride;
 204 }
 205}
206
// Strided upload of varying data: one vertex worth of floats is read every
// `stride` bytes, beginning `start_offset` bytes into the buffer, and
// forwarded to the implementation one vertex at a time.
// NOTE(review): the first signature line (taking `const void *buffer`) is
// missing from this capture.
208 const int start_offset,
 209 const int stride,
 210 const int start_vertex_index,
 211 const int num_vertices)
 212{
 213 // TODO(sergey): Add sanity check on indices.
 214 const unsigned char *current_buffer = (unsigned char *)buffer;
 215 current_buffer += start_offset;
 216 for (int i = 0; i < num_vertices; ++i) {
 217 const int current_vertex_index = start_vertex_index + i;
 218 implementation_->updateVaryingData(
 219 reinterpret_cast<const float *>(current_buffer), current_vertex_index, 1);
 220 current_buffer += stride;
 221 }
 222}
223
224void EvalOutputAPI::setFaceVaryingDataFromBuffer(const int face_varying_channel,
225 const void *buffer,
226 const int start_offset,
227 const int stride,
228 const int start_vertex_index,
229 const int num_vertices)
230{
231 // TODO(sergey): Add sanity check on indices.
232 const unsigned char *current_buffer = (unsigned char *)buffer;
233 current_buffer += start_offset;
234 for (int i = 0; i < num_vertices; ++i) {
235 const int current_vertex_index = start_vertex_index + i;
236 implementation_->updateFaceVaryingData(face_varying_channel,
237 reinterpret_cast<const float *>(current_buffer),
238 current_vertex_index,
239 1);
240 current_buffer += stride;
241 }
242}
243
// Run the refinement/update step on the implementation after coarse data has
// been (re)uploaded.
// NOTE(review): the refine() signature line is missing from this capture.
245{
 246 implementation_->refine();
 247}
248
249void EvalOutputAPI::evaluateLimit(const int ptex_face_index,
250 float face_u,
251 float face_v,
252 float P[3],
253 float dPdu[3],
254 float dPdv[3])
255{
256 assert(face_u >= 0.0f);
257 assert(face_u <= 1.0f);
258 assert(face_v >= 0.0f);
259 assert(face_v <= 1.0f);
260 const PatchTable::PatchHandle *handle = patch_map_->FindPatch(ptex_face_index, face_u, face_v);
261 PatchCoord patch_coord(*handle, face_u, face_v);
262 if (dPdu != nullptr || dPdv != nullptr) {
263 implementation_->evalPatchesWithDerivatives(&patch_coord, 1, P, dPdu, dPdv);
264 }
265 else {
266 implementation_->evalPatches(&patch_coord, 1, P);
267 }
268}
269
270void EvalOutputAPI::evaluateVarying(const int ptex_face_index,
271 float face_u,
272 float face_v,
273 float varying[3])
274{
275 assert(face_u >= 0.0f);
276 assert(face_u <= 1.0f);
277 assert(face_v >= 0.0f);
278 assert(face_v <= 1.0f);
279 const PatchTable::PatchHandle *handle = patch_map_->FindPatch(ptex_face_index, face_u, face_v);
280 PatchCoord patch_coord(*handle, face_u, face_v);
281 implementation_->evalPatchesVarying(&patch_coord, 1, varying);
282}
283
284void EvalOutputAPI::evaluateVertexData(const int ptex_face_index,
285 float face_u,
286 float face_v,
287 float vertex_data[])
288{
289 assert(face_u >= 0.0f);
290 assert(face_u <= 1.0f);
291 assert(face_v >= 0.0f);
292 assert(face_v <= 1.0f);
293 const PatchTable::PatchHandle *handle = patch_map_->FindPatch(ptex_face_index, face_u, face_v);
294 PatchCoord patch_coord(*handle, face_u, face_v);
295 implementation_->evalPatchesVertexData(&patch_coord, 1, vertex_data);
296}
297
298void EvalOutputAPI::evaluateFaceVarying(const int face_varying_channel,
299 const int ptex_face_index,
300 float face_u,
301 float face_v,
302 float face_varying[2])
303{
304 assert(face_u >= 0.0f);
305 assert(face_u <= 1.0f);
306 assert(face_v >= 0.0f);
307 assert(face_v <= 1.0f);
308 const PatchTable::PatchHandle *handle = patch_map_->FindPatch(ptex_face_index, face_u, face_v);
309 PatchCoord patch_coord(*handle, face_u, face_v);
310 implementation_->evalPatchesFaceVarying(face_varying_channel, &patch_coord, 1, face_varying);
311}
312
// Batch limit-surface evaluation for an array of patch coordinates. The
// derivative outputs dPdu/dPdv are optional; when both are null only
// positions are computed.
// NOTE(review): the first signature line (taking
// `const OpenSubdiv_PatchCoord *patch_coords`) is missing from this capture.
314 const int num_patch_coords,
 315 float *P,
 316 float *dPdu,
 317 float *dPdv)
 318{
// Translate external patch coords into OpenSubdiv handles; the array uses
// stack storage for small batches (see StackOrHeapArray above).
 319 StackOrHeapPatchCoordArray patch_coords_array;
 320 convertPatchCoordsToArray(patch_coords, num_patch_coords, patch_map_, &patch_coords_array);
 321 if (dPdu != nullptr || dPdv != nullptr) {
 322 implementation_->evalPatchesWithDerivatives(
 323 patch_coords_array.data(), num_patch_coords, P, dPdu, dPdv);
 324 }
 325 else {
 326 implementation_->evalPatches(patch_coords_array.data(), num_patch_coords, P);
 327 }
 328}
329
// Export the CPU-side PatchMap into GPU vertex buffers (patch handles and
// quadtree nodes) along with its scalar metadata, so the map can be queried
// on the GPU.
// NOTE(review): the first signature line (taking the handles VertBuf) and the
// declaration line of `buffer_handles` (between the alloc and the first
// memcpy) are missing from this capture.
331 blender::gpu::VertBuf *patch_map_quadtree,
 332 int *min_patch_face,
 333 int *max_patch_face,
 334 int *max_depth,
 335 int *patches_are_triangular)
 336{
 337 *min_patch_face = patch_map_->getMinPatchFace();
 338 *max_patch_face = patch_map_->getMaxPatchFace();
 339 *max_depth = patch_map_->getMaxDepth();
 340 *patches_are_triangular = patch_map_->getPatchesAreTriangular();
 341
 342 const std::vector<PatchTable::PatchHandle> &handles = patch_map_->getHandles();
 343 // TODO(jbakker): should these be SSBO's they are never bound as vertex buffers.
 344 GPU_vertbuf_data_alloc(*patch_map_handles, handles.size());
 346 patch_map_handles->data<PatchTable::PatchHandle>();
 347 memcpy(buffer_handles.data(), handles.data(), sizeof(PatchTable::PatchHandle) * handles.size());
 348
 349 const std::vector<PatchMap::QuadNode> &quadtree = patch_map_->nodes();
 350 GPU_vertbuf_data_alloc(*patch_map_quadtree, quadtree.size());
 351 MutableSpan<PatchMap::QuadNode> buffer_nodes = patch_map_quadtree->data<PatchMap::QuadNode>();
 352 memcpy(buffer_nodes.data(), quadtree.data(), sizeof(PatchMap::QuadNode) * quadtree.size());
 353}
354
// The following thin wrappers expose the implementation's GPU-side buffers
// (patch arrays, index/param buffers, source data) to callers.
// NOTE(review): every wrapper's signature line is missing from this capture;
// the intended member names match the delegated calls below.
356{
 357 return implementation_->create_patch_arrays_buf();
 358}
 359
// Patch index buffer accessor.
361{
 362 return implementation_->get_patch_index_buf();
 363}
 364
// Patch param buffer accessor.
366{
 367 return implementation_->get_patch_param_buf();
 368}
 369
// Source (control vertex) buffer accessor.
371{
 372 return implementation_->get_source_buf();
 373}
 374
// Source per-vertex data buffer accessor.
376{
 377 return implementation_->get_source_data_buf();
 378}
 379
// Face-varying patch array buffer for one channel.
381{
 382 return implementation_->create_face_varying_patch_array_buf(face_varying_channel);
 383}
 384
// Face-varying patch index buffer for one channel.
386{
 387 return implementation_->get_face_varying_patch_index_buf(face_varying_channel);
 388}
 389
// Face-varying patch param buffer for one channel.
391{
 392 return implementation_->get_face_varying_patch_param_buf(face_varying_channel);
 393}
 394
// Face-varying source buffer for one channel.
396{
 397 return implementation_->get_face_varying_source_buf(face_varying_channel);
 398}
399
400int EvalOutputAPI::get_face_varying_source_offset(const int face_varying_channel) const
401{
402 return implementation_->get_face_varying_source_offset(face_varying_channel);
403}
404
// True when the implementation carries generic per-vertex data (uploaded via
// setVertexData()).
// NOTE(review): the hasVertexData() signature line is missing from this capture.
406{
 407 return implementation_->hasVertexData();
 408}
409
410} // namespace blender::opensubdiv
411
416
// Tear-down: releases the evaluator wrapper, the patch map and the patch
// table (matching the members assigned in the factory function below).
// NOTE(review): the enclosing signature lines are missing from this capture
// -- presumably the evaluator-impl destructor or delete function; confirm
// against upstream before relying on this.
418{
 419 delete eval_output;
 420 delete patch_map;
 421 delete patch_table;
 422}
423
// Build a complete evaluator from a topology refiner: refine the topology
// (adaptive or uniform), create vertex/varying/face-varying stencil tables
// and the bi-cubic patch table, append local-point stencils, then wrap
// everything in an evaluator object with either a CPU or GPU backend.
// Returns nullptr when the refiner is missing (bad topology).
// NOTE(review): several declaration lines are missing from this capture
// (function signature opening, and the declarations of `eval_output`,
// `evaluator_cache`, `patch_map` and `evaluator`); the remaining code below
// is otherwise intact.
426 eOpenSubdivEvaluator evaluator_type,
 427 OpenSubdiv_EvaluatorCache *evaluator_cache_descr)
 428{
 429 TopologyRefiner *refiner = topology_refiner->topology_refiner;
 430 if (refiner == nullptr) {
 431 // Happens on bad topology.
 432 return nullptr;
 433 }
 434 // TODO(sergey): Base this on actual topology.
 435 const bool has_varying_data = false;
 436 const int num_face_varying_channels = refiner->GetNumFVarChannels();
 437 const bool has_face_varying_data = (num_face_varying_channels != 0);
 438 const int level = topology_refiner->settings.level;
 439 const bool is_adaptive = topology_refiner->settings.is_adaptive;
 440 // Common settings for stencils and patches.
 441 const bool stencil_generate_intermediate_levels = is_adaptive;
 442 const bool stencil_generate_offsets = true;
 443 const bool use_inf_sharp_patch = true;
 444 // Refine the topology with given settings.
 445 // TODO(sergey): What if topology is already refined?
 446 if (is_adaptive) {
 447 TopologyRefiner::AdaptiveOptions options(level);
 448 options.considerFVarChannels = has_face_varying_data;
 449 options.useInfSharpPatch = use_inf_sharp_patch;
 450 refiner->RefineAdaptive(options);
 451 }
 452 else {
 453 TopologyRefiner::UniformOptions options(level);
 454 refiner->RefineUniform(options);
 455 }
 456
 457 // Work around ASAN warnings, due to OpenSubdiv pretending to have an actual StencilTable
 458 // instance while it's really its base class.
 459 auto delete_stencil_table = [](const StencilTable *table) {
 460 static_assert(std::is_base_of_v<StencilTableReal<float>, StencilTable>);
 461 delete reinterpret_cast<const StencilTableReal<float> *>(table);
 462 };
 463
 464 // Generate stencil table to update the bi-cubic patches control vertices
 465 // after they have been re-posed (both for vertex & varying interpolation).
 466 //
 467 // Vertex stencils.
 468 StencilTableFactory::Options vertex_stencil_options;
 469 vertex_stencil_options.generateOffsets = stencil_generate_offsets;
 470 vertex_stencil_options.generateIntermediateLevels = stencil_generate_intermediate_levels;
 471 const StencilTable *vertex_stencils = StencilTableFactory::Create(*refiner,
 472 vertex_stencil_options);
 473 // Varying stencils.
 474 //
 475 // TODO(sergey): Seems currently varying stencils are always required in
 476 // OpenSubdiv itself.
 477 const StencilTable *varying_stencils = nullptr;
 478 if (has_varying_data) {
 479 StencilTableFactory::Options varying_stencil_options;
 480 varying_stencil_options.generateOffsets = stencil_generate_offsets;
 481 varying_stencil_options.generateIntermediateLevels = stencil_generate_intermediate_levels;
 482 varying_stencil_options.interpolationMode = StencilTableFactory::INTERPOLATE_VARYING;
 483 varying_stencils = StencilTableFactory::Create(*refiner, varying_stencil_options);
 484 }
 485 // Face varying stencils, one table per channel.
 486 std::vector<const StencilTable *> all_face_varying_stencils;
 487 all_face_varying_stencils.reserve(num_face_varying_channels);
 488 for (int face_varying_channel = 0; face_varying_channel < num_face_varying_channels;
 489 ++face_varying_channel)
 490 {
 491 StencilTableFactory::Options face_varying_stencil_options;
 492 face_varying_stencil_options.generateOffsets = stencil_generate_offsets;
 493 face_varying_stencil_options.generateIntermediateLevels = stencil_generate_intermediate_levels;
 494 face_varying_stencil_options.interpolationMode = StencilTableFactory::INTERPOLATE_FACE_VARYING;
 495 face_varying_stencil_options.fvarChannel = face_varying_channel;
 496 all_face_varying_stencils.push_back(
 497 StencilTableFactory::Create(*refiner, face_varying_stencil_options));
 498 }
 499 // Generate bi-cubic patch table for the limit surface.
 500 PatchTableFactory::Options patch_options(level);
 501 patch_options.SetEndCapType(PatchTableFactory::Options::ENDCAP_GREGORY_BASIS);
 502 patch_options.useInfSharpPatch = use_inf_sharp_patch;
 503 patch_options.generateFVarTables = has_face_varying_data;
 504 patch_options.generateFVarLegacyLinearPatches = false;
 505 const PatchTable *patch_table = PatchTableFactory::Create(*refiner, patch_options);
 506 // Append local points stencils.
 507 // Point stencils.
 508 const StencilTable *local_point_stencil_table = patch_table->GetLocalPointStencilTable();
 509 if (local_point_stencil_table != nullptr) {
 510 const StencilTable *table = StencilTableFactory::AppendLocalPointStencilTable(
 511 *refiner, vertex_stencils, local_point_stencil_table);
 512 delete_stencil_table(vertex_stencils);
 513 vertex_stencils = table;
 514 }
 515 // Varying stencils.
 516 if (has_varying_data) {
 517 const StencilTable *local_point_varying_stencil_table =
 518 patch_table->GetLocalPointVaryingStencilTable();
 519 if (local_point_varying_stencil_table != nullptr) {
 520 const StencilTable *table = StencilTableFactory::AppendLocalPointStencilTable(
 521 *refiner, varying_stencils, local_point_varying_stencil_table);
 522 delete_stencil_table(varying_stencils);
 523 varying_stencils = table;
 524 }
 525 }
 526 for (int face_varying_channel = 0; face_varying_channel < num_face_varying_channels;
 527 ++face_varying_channel)
 528 {
 529 const StencilTable *table = StencilTableFactory::AppendLocalPointStencilTableFaceVarying(
 530 *refiner,
 531 all_face_varying_stencils[face_varying_channel],
 532 patch_table->GetLocalPointFaceVaryingStencilTable(face_varying_channel),
 533 face_varying_channel);
 534 if (table != nullptr) {
 535 delete_stencil_table(all_face_varying_stencils[face_varying_channel]);
 536 all_face_varying_stencils[face_varying_channel] = table;
 537 }
 538 }
 539 // Create OpenSubdiv's CPU side evaluator.
// NOTE(review): declaration of `eval_output` is missing from this capture.
541
 542 const bool use_gpu_evaluator = evaluator_type == OPENSUBDIV_EVALUATOR_GPU;
 543 if (use_gpu_evaluator) {
// NOTE(review): declaration of `evaluator_cache` is missing from this capture.
545 if (evaluator_cache_descr) {
 546 evaluator_cache = static_cast<blender::opensubdiv::GpuEvalOutput::EvaluatorCache *>(
 547 evaluator_cache_descr->impl->eval_cache);
 548 }
 549
// NOTE(review): the `2` argument presumably is the face-varying data width
// (e.g. UV coordinates) -- confirm against the EvalOutput constructors.
 550 eval_output = new blender::opensubdiv::GpuEvalOutput(vertex_stencils,
 551 varying_stencils,
 552 all_face_varying_stencils,
 553 2,
 554 patch_table,
 555 evaluator_cache);
 556 }
 557 else {
 558 eval_output = new blender::opensubdiv::CpuEvalOutput(
 559 vertex_stencils, varying_stencils, all_face_varying_stencils, 2, patch_table);
 560 }
 561
// NOTE(review): the declarations/creation of `patch_map` and `evaluator`
// are missing from this capture.
563 // Wrap everything we need into an object which we control from our side.
565 evaluator->type = evaluator_type;
 566
 567 evaluator->eval_output = new blender::opensubdiv::EvalOutputAPI(eval_output, patch_map);
 568 evaluator->patch_map = patch_map;
 569 evaluator->patch_table = patch_table;
 570 // TODO(sergey): Look into whether we've got duplicated stencils arrays.
// The EvalOutput constructors copy what they need, so the intermediate
// stencil tables are released here (see the ASAN note on delete_stencil_table
// above for why a plain `delete` is not used).
 571 delete_stencil_table(vertex_stencils);
 572 delete_stencil_table(varying_stencils);
 573 for (const StencilTable *table : all_face_varying_stencils) {
 574 delete_stencil_table(table);
 575 }
 576
 577 return evaluator;
 578}
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
ATTR_WARN_UNUSED_RESULT const BMVert * v
T * resize(const size_t newsize)
constexpr T * data() const
Definition BLI_span.hh:539
MutableSpan< T > data()
gpu::VertBuf * get_face_varying_source_buf(const int face_varying_channel)
gpu::StorageBuf * get_face_varying_patch_index_buf(const int face_varying_channel)
void setVaryingData(const float *varying_data, const int start_vertex_index, const int num_vertices)
void evaluatePatchesLimit(const OpenSubdiv_PatchCoord *patch_coords, const int num_patch_coords, float *P, float *dPdu, float *dPdv)
gpu::StorageBuf * create_patch_arrays_buf()
void setSettings(const OpenSubdiv_EvaluatorSettings *settings)
void evaluateVarying(const int ptex_face_index, float face_u, float face_v, float varying[3])
void evaluateLimit(const int ptex_face_index, float face_u, float face_v, float P[3], float dPdu[3], float dPdv[3])
void setFaceVaryingData(const int face_varying_channel, const float *varying_data, const int start_vertex_index, const int num_vertices)
gpu::StorageBuf * create_face_varying_patch_array_buf(const int face_varying_channel)
void evaluateVertexData(const int ptex_face_index, float face_u, float face_v, float data[])
void evaluateFaceVarying(const int face_varying_channel, const int ptex_face_index, float face_u, float face_v, float face_varying[2])
int get_face_varying_source_offset(const int face_varying_channel) const
void setCoarsePositionsFromBuffer(const void *buffer, const int start_offset, const int stride, const int start_vertex_index, const int num_vertices)
void setVaryingDataFromBuffer(const void *buffer, const int start_offset, const int stride, const int start_vertex_index, const int num_vertices)
EvalOutputAPI(EvalOutput *implementation, PatchMap *patch_map)
gpu::StorageBuf * get_face_varying_patch_param_buf(const int face_varying_channel)
void setFaceVaryingDataFromBuffer(const int face_varying_channel, const void *buffer, const int start_offset, const int stride, const int start_vertex_index, const int num_vertices)
void setVertexData(const float *data, const int start_vertex_index, const int num_vertices)
void getPatchMap(blender::gpu::VertBuf *patch_map_handles, blender::gpu::VertBuf *patch_map_quadtree, int *min_patch_face, int *max_patch_face, int *max_depth, int *patches_are_triangular)
void setCoarsePositions(const float *positions, const int start_vertex_index, const int num_vertices)
An quadtree-based map connecting coarse faces to their sub-patches.
Definition patch_map.h:25
Handle const * FindPatch(int patchFaceId, double u, double v) const
Returns a handle to the sub-patch of the face at the given (u,v). Note that the patch face ID corresp...
Definition patch_map.h:201
OpenSubdiv::Far::TopologyRefiner * topology_refiner
CCL_NAMESPACE_BEGIN struct Options options
OpenSubdiv_Evaluator * openSubdiv_createEvaluatorFromTopologyRefiner(blender::opensubdiv::TopologyRefinerImpl *topology_refiner, eOpenSubdivEvaluator evaluator_type, OpenSubdiv_EvaluatorCache *evaluator_cache_descr)
#define assert(assertion)
#define T
static void convertPatchCoordsToArray(const OpenSubdiv_PatchCoord *patch_coords, const int num_patch_coords, const PatchMap *patch_map, StackOrHeapPatchCoordArray *array)
StackOrHeapArray< PatchCoord, 32 *32 > StackOrHeapPatchCoordArray
eOpenSubdivEvaluator
@ OPENSUBDIV_EVALUATOR_GPU
OpenSubdiv_EvaluatorCacheImpl * impl
const OpenSubdiv::Far::PatchTable * patch_table
blender::opensubdiv::EvalOutputAPI * eval_output
const blender::opensubdiv::PatchMap * patch_map
eOpenSubdivEvaluator type
i
Definition text_draw.cc:230
ParamHandle ** handles