Blender V5.0
volume_grid.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Foundation
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#include "BKE_volume_grid.hh"
8
9#include "BLI_index_mask.hh"
10#include "BLI_memory_counter.hh"
11#include "BLI_task.hh"
12
13#ifdef WITH_OPENVDB
14# include <openvdb/Grid.h>
15# include <openvdb/tools/Prune.h>
16#endif
17
19
20#ifdef WITH_OPENVDB
21
22VolumeGridData::VolumeGridData()
23{
24 tree_access_token_ = std::make_shared<AccessToken>(*this);
25}
26
/* Functor dispatched by #BKE_volume_grid_type_operation; instantiated with the static
 * grid type to create a new empty grid of that type. */
struct CreateGridOp {
  template<typename GridT> openvdb::GridBase::Ptr operator()() const
  {
    return GridT::create();
  }
};
33
/* Create a new empty OpenVDB grid matching the given #VolumeGridType. */
static openvdb::GridBase::Ptr create_grid_for_type(const VolumeGridType grid_type)
{
  return BKE_volume_grid_type_operation(grid_type, CreateGridOp{});
}
38
/* Construct grid data owning a newly created, empty grid of the given type. */
VolumeGridData::VolumeGridData(const VolumeGridType grid_type)
    : VolumeGridData(create_grid_for_type(grid_type))
{
}
43
/* Take ownership of an existing grid. The caller must hand over the only reference to
 * the grid and its tree must be unique, so this object gets exclusive ownership. */
VolumeGridData::VolumeGridData(std::shared_ptr<openvdb::GridBase> grid)
    : grid_(std::move(grid)), tree_loaded_(true), transform_loaded_(true), meta_data_loaded_(true)
{
  BLI_assert(grid_);
  BLI_assert(grid_.use_count() == 1);
  BLI_assert(grid_->isTreeUnique());

  tree_sharing_info_ = OpenvdbTreeSharingInfo::make(grid_->baseTreePtr());
  tree_access_token_ = std::make_shared<AccessToken>(*this);
}
54
/* Construct lazily loaded grid data: the tree is only created by calling
 * `lazy_load_grid` on first access. Optionally, a grid containing only meta-data and
 * the transform can be provided so those can be queried without triggering the load. */
VolumeGridData::VolumeGridData(std::function<LazyLoadedGrid()> lazy_load_grid,
                               std::shared_ptr<openvdb::GridBase> meta_data_and_transform_grid)
    : grid_(std::move(meta_data_and_transform_grid)), lazy_load_grid_(std::move(lazy_load_grid))
{
  if (grid_) {
    /* Meta-data and transform are available right away; only the tree stays unloaded. */
    transform_loaded_ = true;
    meta_data_loaded_ = true;
  }
  tree_access_token_ = std::make_shared<AccessToken>(*this);
}
65
66VolumeGridData::~VolumeGridData() = default;
67
/* Destroy this object; invoked by the implicit-sharing machinery when the last user
 * releases it. */
void VolumeGridData::delete_self()
{
  MEM_delete(this);
}
72
/* Reference variant of #grid_ptr. */
const openvdb::GridBase &VolumeGridData::grid(VolumeTreeAccessToken &r_token) const
{
  return *this->grid_ptr(r_token);
}
77
/* Reference variant of #grid_ptr_for_write. */
openvdb::GridBase &VolumeGridData::grid_for_write(VolumeTreeAccessToken &r_token)
{
  return *this->grid_ptr_for_write(r_token);
}
82
/* Get the fully loaded grid. `r_token` is made to reference the access token so the
 * tree cannot be unloaded while the caller still uses it. */
std::shared_ptr<const openvdb::GridBase> VolumeGridData::grid_ptr(
    VolumeTreeAccessToken &r_token) const
{
  std::lock_guard lock{mutex_};
  this->ensure_grid_loaded();
  r_token.token_ = tree_access_token_;
  return grid_;
}
91
/* Like #grid_ptr, but guarantees that the returned grid's tree is mutable
 * (copy-on-write: a shared tree is replaced by a private copy first). */
std::shared_ptr<openvdb::GridBase> VolumeGridData::grid_ptr_for_write(
    VolumeTreeAccessToken &r_token)
{
  BLI_assert(this->is_mutable());
  std::lock_guard lock{mutex_};
  this->ensure_grid_loaded();
  r_token.token_ = tree_access_token_;
  if (tree_sharing_info_->is_mutable()) {
    tree_sharing_info_->tag_ensured_mutable();
  }
  else {
    /* The tree is shared with others; work on a private copy from now on. */
    auto tree_copy = grid_->baseTree().copy();
    grid_->setTree(tree_copy);
    tree_sharing_info_ = OpenvdbTreeSharingInfo::make(std::move(tree_copy));
  }
  /* Can't reload the grid anymore if it has been changed. */
  lazy_load_grid_ = {};
  return grid_;
}
111
/* Get the grid's index-to-world transform, loading the grid on demand if necessary. */
const openvdb::math::Transform &VolumeGridData::transform() const
{
  std::lock_guard lock{mutex_};
  if (!transform_loaded_) {
    this->ensure_grid_loaded();
  }
  return grid_->transform();
}
120
/* Mutable access to the transform. Requires that this grid data is mutable. */
openvdb::math::Transform &VolumeGridData::transform_for_write()
{
  BLI_assert(this->is_mutable());
  std::lock_guard lock{mutex_};
  if (!transform_loaded_) {
    this->ensure_grid_loaded();
  }
  return grid_->transform();
}
130
/* Get the grid's name, loading meta-data on demand if necessary. */
std::string VolumeGridData::name() const
{
  std::lock_guard lock{mutex_};
  if (!meta_data_loaded_) {
    this->ensure_grid_loaded();
  }
  return grid_->getName();
}
139
/* Rename the grid. Requires that this grid data is mutable. */
void VolumeGridData::set_name(const StringRef name)
{
  BLI_assert(this->is_mutable());
  std::lock_guard lock{mutex_};
  if (!meta_data_loaded_) {
    this->ensure_grid_loaded();
  }
  grid_->setName(name);
}
149
/* Get the grid's value type, loading meta-data on demand if necessary. */
VolumeGridType VolumeGridData::grid_type() const
{
  std::lock_guard lock{mutex_};
  if (!meta_data_loaded_) {
    this->ensure_grid_loaded();
  }
  return get_type(*grid_);
}
158
/* Like #grid_type but never triggers a load; returns none when meta-data is not
 * available yet. */
std::optional<VolumeGridType> VolumeGridData::grid_type_without_load() const
{
  std::lock_guard lock{mutex_};
  if (!meta_data_loaded_) {
    return std::nullopt;
  }
  return get_type(*grid_);
}
167
/* Get the OpenVDB grid class (e.g. fog volume vs. level set), loading meta-data on
 * demand if necessary. */
openvdb::GridClass VolumeGridData::grid_class() const
{
  std::lock_guard lock{mutex_};
  if (!meta_data_loaded_) {
    this->ensure_grid_loaded();
  }
  return grid_->getGridClass();
}
176
177bool VolumeGridData::is_reloadable() const
178{
179 return bool(lazy_load_grid_);
180}
181
/* Invalidate all lazily computed statistics; must be called after the tree changed. */
void VolumeGridData::tag_tree_modified() const
{
  active_voxels_mutex_.tag_dirty();
  active_leaf_voxels_mutex_.tag_dirty();
  active_tiles_mutex_.tag_dirty();
  size_in_bytes_mutex_.tag_dirty();
  active_bounds_mutex_.tag_dirty();
}
190
/* True when tree, transform and meta-data are all available without further loading. */
bool VolumeGridData::is_loaded() const
{
  std::lock_guard lock{mutex_};
  return tree_loaded_ && transform_loaded_ && meta_data_loaded_;
}
196
/* Account this grid's memory usage. The tree is registered as shared so that trees
 * used by multiple grids are only counted once. */
void VolumeGridData::count_memory(MemoryCounter &memory) const
{
  std::lock_guard lock{mutex_};
  if (!tree_loaded_) {
    /* Don't trigger a load just to measure memory; an unloaded tree is not counted. */
    return;
  }
  memory.add_shared(tree_sharing_info_.get(), [&](MemoryCounter &shared_memory) {
    shared_memory.add(this->size_in_bytes());
  });
}
207
/* Number of active voxels; computed lazily and cached until #tag_tree_modified. */
int64_t VolumeGridData::active_voxels() const
{
  active_voxels_mutex_.ensure([&]() {
    VolumeTreeAccessToken token;
    const openvdb::GridBase &grid = this->grid(token);
    const openvdb::TreeBase &tree = grid.baseTree();
    active_voxels_ = tree.activeVoxelCount();
  });
  return active_voxels_;
}
218
/* Number of active voxels stored in leaf nodes; computed lazily and cached. */
int64_t VolumeGridData::active_leaf_voxels() const
{
  active_leaf_voxels_mutex_.ensure([&]() {
    VolumeTreeAccessToken token;
    const openvdb::GridBase &grid = this->grid(token);
    const openvdb::TreeBase &tree = grid.baseTree();
    active_leaf_voxels_ = tree.activeLeafVoxelCount();
  });
  return active_leaf_voxels_;
}
229
/* Number of active tiles; computed lazily and cached. */
int64_t VolumeGridData::active_tiles() const
{
  active_tiles_mutex_.ensure([&]() {
    VolumeTreeAccessToken token;
    const openvdb::GridBase &grid = this->grid(token);
    const openvdb::TreeBase &tree = grid.baseTree();
    active_tiles_ = tree.activeTileCount();
  });
  return active_tiles_;
}
240
/* Memory used by the tree in bytes; computed lazily and cached. */
int64_t VolumeGridData::size_in_bytes() const
{
  size_in_bytes_mutex_.ensure([&]() {
    VolumeTreeAccessToken token;
    const openvdb::GridBase &grid = this->grid(token);
    const openvdb::TreeBase &tree = grid.baseTree();
    size_in_bytes_ = tree.memUsage();
  });
  return size_in_bytes_;
}
251
/* Bounding box of the active voxels (in index space); computed lazily and cached. */
const openvdb::CoordBBox &VolumeGridData::active_bounds() const
{
  active_bounds_mutex_.ensure([&]() {
    VolumeTreeAccessToken token;
    const openvdb::GridBase &grid = this->grid(token);
    const openvdb::TreeBase &tree = grid.baseTree();
    tree.evalActiveVoxelBoundingBox(active_bounds_);
  });
  return active_bounds_;
}
262
/* Error produced by the last load attempt, or an empty string. Returned by value
 * because the stored message may change once the lock is released. */
std::string VolumeGridData::error_message() const
{
  std::lock_guard lock{mutex_};
  return error_message_;
}
268
/* Free the tree to save memory, but only when it is reloadable and no access token is
 * currently handed out. */
void VolumeGridData::unload_tree_if_possible() const
{
  std::lock_guard lock{mutex_};
  if (!grid_) {
    return;
  }
  if (!tree_loaded_) {
    return;
  }
  if (!this->is_reloadable()) {
    return;
  }
  if (tree_access_token_.use_count() != 1) {
    /* Some code is using the tree currently, so it can't be freed. */
    return;
  }
  /* Replace the tree with a new empty one and drop the sharing info that owned it. */
  grid_->newTree();
  tree_loaded_ = false;
  tree_sharing_info_.reset();
}
289
/* Create a copy of this grid data that deep-copies meta-data and transform but shares
 * the tree (copy-on-write via the sharing info). */
GVolumeGrid VolumeGridData::copy() const
{
  std::lock_guard lock{mutex_};
  this->ensure_grid_loaded();
  /* Can't use #MEM_new because the default constructor is private. */
  VolumeGridData *new_copy = new (MEM_mallocN(sizeof(VolumeGridData), __func__)) VolumeGridData();
  /* Makes a deep copy of the meta-data but shares the tree. */
  new_copy->grid_ = grid_->copyGrid();
  new_copy->tree_sharing_info_ = tree_sharing_info_;
  new_copy->tree_loaded_ = tree_loaded_;
  new_copy->transform_loaded_ = transform_loaded_;
  new_copy->meta_data_loaded_ = meta_data_loaded_;
  return GVolumeGrid(new_copy);
}
304
/* Make sure tree, transform and meta-data are all available, invoking the lazy-load
 * callback if necessary. On failure, the error is stored in `error_message_` and a
 * dummy (empty) grid is substituted so callers never see a null grid. The caller must
 * hold `mutex_`. */
void VolumeGridData::ensure_grid_loaded() const
{
  /* Assert that the mutex is locked. */
  BLI_assert(!mutex_.try_lock());

  if (tree_loaded_ && transform_loaded_ && meta_data_loaded_) {
    return;
  }
  BLI_assert(lazy_load_grid_);
  LazyLoadedGrid loaded_grid;
  /* Isolate because a mutex is locked. */
  threading::isolate_task([&]() {
    error_message_.clear();
    try {
      loaded_grid = lazy_load_grid_();
    }
    catch (const openvdb::IoError &e) {
      error_message_ = e.what();
    }
    catch (...) {
      error_message_ = "Unknown error reading VDB file";
    }
  });
  if (!loaded_grid.grid) {
    BLI_assert(!loaded_grid.tree_sharing_info);
    if (grid_) {
      const openvdb::Name &grid_type = grid_->type();
      if (openvdb::GridBase::isRegistered(grid_type)) {
        /* Create a dummy grid of the expected type. */
        loaded_grid.grid = openvdb::GridBase::createGrid(grid_type);
      }
    }
  }
  if (!loaded_grid.grid) {
    /* Create a dummy grid. We can't really know the expected data type here. */
    loaded_grid.grid = openvdb::FloatGrid::create();
  }
  BLI_assert(loaded_grid.grid);
  BLI_assert(loaded_grid.grid.use_count() == 1);

  if (!loaded_grid.tree_sharing_info) {
    BLI_assert(loaded_grid.grid->isTreeUnique());
    loaded_grid.tree_sharing_info = OpenvdbTreeSharingInfo::make(loaded_grid.grid->baseTreePtr());
  }

  if (grid_) {
    /* Keep the existing grid pointer and just insert the newly loaded data. */
    BLI_assert(!tree_loaded_);
    BLI_assert(meta_data_loaded_);
    grid_->setTree(loaded_grid.grid->baseTreePtr());
    if (!transform_loaded_) {
      grid_->setTransform(loaded_grid.grid->transformPtr());
    }
  }
  else {
    grid_ = std::move(loaded_grid.grid);
  }

  BLI_assert(!tree_sharing_info_);
  BLI_assert(loaded_grid.tree_sharing_info);
  tree_sharing_info_ = std::move(loaded_grid.tree_sharing_info);

  tree_loaded_ = true;
  transform_loaded_ = true;
  meta_data_loaded_ = true;
}
371
/* Wrap an existing OpenVDB grid in new, uniquely owned grid data. */
GVolumeGrid::GVolumeGrid(std::shared_ptr<openvdb::GridBase> grid)
{
  data_ = ImplicitSharingPtr(MEM_new<VolumeGridData>(__func__, std::move(grid)));
}
376
/* Create a new empty grid of the given type. */
GVolumeGrid::GVolumeGrid(const VolumeGridType grid_type)
    : GVolumeGrid(create_grid_for_type(grid_type))
{
}
381
/* Get mutable grid data, making a copy first when the data is shared (copy-on-write). */
VolumeGridData &GVolumeGrid::get_for_write()
{
  BLI_assert(*this);
  if (data_->is_mutable()) {
    data_->tag_ensured_mutable();
  }
  else {
    *this = data_->copy();
  }
  /* Safe because mutability was just ensured above. */
  return const_cast<VolumeGridData &>(*data_);
}
393
394VolumeGridType get_type(const openvdb::TreeBase &tree)
395{
396 if (tree.isType<openvdb::FloatTree>()) {
397 return VOLUME_GRID_FLOAT;
398 }
399 if (tree.isType<openvdb::Vec3fTree>()) {
401 }
402 if (tree.isType<openvdb::BoolTree>()) {
403 return VOLUME_GRID_BOOLEAN;
404 }
405 if (tree.isType<openvdb::DoubleTree>()) {
406 return VOLUME_GRID_DOUBLE;
407 }
408 if (tree.isType<openvdb::Int32Tree>()) {
409 return VOLUME_GRID_INT;
410 }
411 if (tree.isType<openvdb::Int64Tree>()) {
412 return VOLUME_GRID_INT64;
413 }
414 if (tree.isType<openvdb::Vec3ITree>()) {
416 }
417 if (tree.isType<openvdb::Vec3dTree>()) {
419 }
420 if (tree.isType<openvdb::MaskTree>()) {
421 return VOLUME_GRID_MASK;
422 }
423 if (tree.isType<openvdb::points::PointDataTree>()) {
424 return VOLUME_GRID_POINTS;
425 }
426 return VOLUME_GRID_UNKNOWN;
427}
428
/* Convenience overload: determine the grid type from the grid's tree. */
VolumeGridType get_type(const openvdb::GridBase &grid)
{
  return get_type(grid.baseTree());
}
433
/* Create sharing info that owns the given tree and integrates it with Blender's
 * implicit-sharing system. */
ImplicitSharingPtr<> OpenvdbTreeSharingInfo::make(std::shared_ptr<openvdb::tree::TreeBase> tree)
{
  return ImplicitSharingPtr<>{MEM_new<OpenvdbTreeSharingInfo>(__func__, std::move(tree))};
}
438
/* Take (shared) ownership of the tree. */
OpenvdbTreeSharingInfo::OpenvdbTreeSharingInfo(std::shared_ptr<openvdb::tree::TreeBase> tree)
    : tree_(std::move(tree))
{
}
443
/* Destroy the sharing info together with its tree reference. */
void OpenvdbTreeSharingInfo::delete_self_with_data()
{
  MEM_delete(this);
}
448
/* Release only the tree reference; the sharing-info object itself stays alive. */
void OpenvdbTreeSharingInfo::delete_data_only()
{
  tree_.reset();
}
453
/* Releasing a token may make the grid's tree unloadable again. */
VolumeTreeAccessToken::~VolumeTreeAccessToken()
{
  const VolumeGridData *grid = token_ ? &token_->grid : nullptr;
  /* Reset first so this token no longer counts as a user of the tree. */
  token_.reset();
  if (grid) {
    /* Unload immediately when the value is not used anymore. However, the tree may still be cached
     * at a deeper level and thus usually does not have to be loaded from disk again. */
    grid->unload_tree_if_possible();
  }
}
464
465#endif /* WITH_OPENVDB */
466
/* Get the grid's name; returns a fixed default when built without OpenVDB support. */
std::string get_name(const VolumeGridData &grid)
{
#ifdef WITH_OPENVDB
  return grid.name();
#else
  UNUSED_VARS(grid);
  return "density";
#endif
}
476
/* Get the grid's value type; unknown when built without OpenVDB support. */
VolumeGridType get_type(const VolumeGridData &grid)
{
#ifdef WITH_OPENVDB
  return grid.grid_type();
#else
  UNUSED_VARS(grid);
  return VOLUME_GRID_UNKNOWN;
#endif
}
486
488{
489 switch (type) {
493 case VOLUME_GRID_INT:
495 case VOLUME_GRID_MASK:
496 return 1;
500 return 3;
503 return 0;
504 }
505 return 0;
506}
507
/* Get the grid's transform as a Blender 4x4 matrix; identity without OpenVDB. */
float4x4 get_transform_matrix(const VolumeGridData &grid)
{
#ifdef WITH_OPENVDB
  return BKE_volume_transform_to_blender(grid.transform());
#else
  UNUSED_VARS(grid);
  return float4x4::identity();
#endif
}
517
/* Replace the grid's transform with the given Blender matrix; no-op without OpenVDB. */
void set_transform_matrix(VolumeGridData &grid, const float4x4 &matrix)
{
#ifdef WITH_OPENVDB
  grid.transform_for_write() = BKE_volume_transform_to_openvdb(matrix);
#else
  UNUSED_VARS(grid, matrix);
#endif
}
526
/* Remove all voxels/tiles from the grid's tree and invalidate cached statistics. */
void clear_tree(VolumeGridData &grid)
{
#ifdef WITH_OPENVDB
  VolumeTreeAccessToken tree_token;
  grid.grid_for_write(tree_token).clear();
  grid.tag_tree_modified();
#else
  UNUSED_VARS(grid);
#endif
}
537
/* True when the grid is fully loaded; always false without OpenVDB support. */
bool is_loaded(const VolumeGridData &grid)
{
#ifdef WITH_OPENVDB
  return grid.is_loaded();
#else
  UNUSED_VARS(grid);
  return false;
#endif
}
547
/* Account the grid's memory usage; no-op without OpenVDB support. */
void count_memory(const VolumeGridData &grid, MemoryCounter &memory)
{
#ifdef WITH_OPENVDB
  grid.count_memory(memory);
#else
  UNUSED_VARS(grid, memory);
#endif
}
556
/* Force the grid to load now instead of on first access. */
void load(const VolumeGridData &grid)
{
#ifdef WITH_OPENVDB
  VolumeTreeAccessToken tree_token;
  /* Just "touch" the grid, so that it is loaded. */
  grid.grid(tree_token);
#else
  UNUSED_VARS(grid);
#endif
}
567
/* Error from the last load attempt, or an empty string when there was none. */
std::string error_message_from_load(const VolumeGridData &grid)
{
#ifdef WITH_OPENVDB
  return grid.error_message();
#else
  UNUSED_VARS(grid);
  return "";
#endif
}
577
578#ifdef WITH_OPENVDB
579
584template<typename LeafNodeT>
585static void parallel_grid_topology_tasks_leaf_node(const LeafNodeT &node,
586 const ProcessLeafFn process_leaf_fn,
588{
589 using NodeMaskT = typename LeafNodeT::NodeMaskType;
590
591 const int on_count = node.onVoxelCount();
592 /* This number is somewhat arbitrary. 64 is a 1/8th of the number of voxels in a standard leaf
593 * which is 8x8x8. It's a trade-off between benefiting from the better performance of
594 * leaf-processing vs. processing more voxels in a batch. */
595 const int on_count_threshold = 64;
596 if (on_count <= on_count_threshold) {
597 /* The leaf contains only a few active voxels. It's beneficial to process them in a batch with
598 * active voxels from other leafs. So only gather them here for later processing. */
599 for (auto value_iter = node.cbeginValueOn(); value_iter.test(); ++value_iter) {
600 const openvdb::Coord coord = value_iter.getCoord();
601 r_coords.append(coord);
602 }
603 return;
604 }
605 /* Process entire leaf at once. This is especially beneficial when very many of the voxels in
606 * the leaf are active. In that case, one can work on the openvdb arrays stored in the leafs
607 * directly. */
608 const NodeMaskT &value_mask = node.getValueMask();
609 const openvdb::CoordBBox bbox = node.getNodeBoundingBox();
610 process_leaf_fn(value_mask, bbox, [&](MutableSpan<openvdb::Coord> r_voxels) {
611 for (auto value_iter = node.cbeginValueOn(); value_iter.test(); ++value_iter) {
612 r_voxels[value_iter.pos()] = value_iter.getCoord();
613 }
614 });
615}
616
/* Recursively process one internal node: child nodes are handled in parallel (leafs
 * via #parallel_grid_topology_tasks_leaf_node, deeper internal nodes by recursion),
 * and the node's own active tiles are gathered and processed in one batch at the end. */
template<typename InternalNodeT>
static void parallel_grid_topology_tasks_internal_node(const InternalNodeT &node,
                                                       const ProcessLeafFn process_leaf_fn,
                                                       const ProcessVoxelsFn process_voxels_fn,
                                                       const ProcessTilesFn process_tiles_fn)
{
  using ChildNodeT = typename InternalNodeT::ChildNodeType;
  using LeafNodeT = typename InternalNodeT::LeafNodeType;
  using NodeMaskT = typename InternalNodeT::NodeMaskType;
  using UnionT = typename InternalNodeT::UnionType;

  /* Gather the active sub-nodes first, to be able to parallelize over them more easily. */
  const NodeMaskT &child_mask = node.getChildMask();
  const UnionT *table = node.getTable();
  Vector<int, 512> child_indices;
  for (auto child_mask_iter = child_mask.beginOn(); child_mask_iter.test(); ++child_mask_iter) {
    child_indices.append(child_mask_iter.pos());
  }

  threading::parallel_for(child_indices.index_range(), 8, [&](const IndexRange range) {
    /* Voxels collected from potentially multiple leaf nodes to be processed in one batch. This
     * inline buffer size is sufficient to avoid an allocation in all cases (a single standard leaf
     * has 512 voxels). */
    Vector<openvdb::Coord, 1024> gathered_voxels;
    for (const int child_index : child_indices.as_span().slice(range)) {
      const ChildNodeT &child = *table[child_index].getChild();
      if constexpr (std::is_same_v<ChildNodeT, LeafNodeT>) {
        parallel_grid_topology_tasks_leaf_node(child, process_leaf_fn, gathered_voxels);
        /* If enough voxels have been gathered, process them in one batch. */
        if (gathered_voxels.size() >= 512) {
          process_voxels_fn(gathered_voxels);
          gathered_voxels.clear();
        }
      }
      else {
        /* Recurse into lower-level internal nodes. */
        parallel_grid_topology_tasks_internal_node(
            child, process_leaf_fn, process_voxels_fn, process_tiles_fn);
      }
    }
    /* Process any remaining voxels. */
    if (!gathered_voxels.is_empty()) {
      process_voxels_fn(gathered_voxels);
      gathered_voxels.clear();
    }
  });

  /* Process the active tiles within the internal node. Note that these are not processed above
   * already because there only sub-nodes are handled, but tiles are "inlined" into internal nodes.
   * All tiles are first gathered and then processed in one batch. */
  const NodeMaskT &value_mask = node.getValueMask();
  Vector<openvdb::CoordBBox> tile_bboxes;
  for (auto value_mask_iter = value_mask.beginOn(); value_mask_iter.test(); ++value_mask_iter) {
    const openvdb::Index32 index = value_mask_iter.pos();
    const openvdb::Coord tile_origin = node.offsetToGlobalCoord(index);
    const openvdb::CoordBBox tile_bbox = openvdb::CoordBBox::createCube(tile_origin,
                                                                        ChildNodeT::DIM);
    tile_bboxes.append(tile_bbox);
  }
  if (!tile_bboxes.is_empty()) {
    process_tiles_fn(tile_bboxes);
  }
}
683
/* Call the process functions on all active tiles and voxels in the given tree. */
void parallel_grid_topology_tasks(const openvdb::MaskTree &mask_tree,
                                  const ProcessLeafFn process_leaf_fn,
                                  const ProcessVoxelsFn process_voxels_fn,
                                  const ProcessTilesFn process_tiles_fn)
{
  /* Iterate over the root internal nodes. */
  for (auto root_child_iter = mask_tree.cbeginRootChildren(); root_child_iter.test();
       ++root_child_iter)
  {
    const auto &internal_node = *root_child_iter;
    parallel_grid_topology_tasks_internal_node(
        internal_node, process_leaf_fn, process_voxels_fn, process_tiles_fn);
  }
}
699
/* Create a grid of the given type whose active-voxel topology is copied from
 * `topology`, with all values initialized to the type's default background value. */
openvdb::GridBase::Ptr create_grid_with_topology(const openvdb::MaskTree &topology,
                                                 const openvdb::math::Transform &transform,
                                                 const VolumeGridType grid_type)
{
  openvdb::GridBase::Ptr grid;
  BKE_volume_grid_type_to_static_type(grid_type, [&](auto type_tag) {
    using GridT = typename decltype(type_tag)::type;
    using TreeT = typename GridT::TreeType;
    using ValueType = typename TreeT::ValueType;
    const ValueType background{};
    auto tree = std::make_shared<TreeT>(topology, background, openvdb::TopologyCopy());
    grid = openvdb::createGrid(std::move(tree));
    grid->setTransform(transform.copy());
  });
  return grid;
}
716
/* Set individual voxel values. `values` must contain elements of the grid's value
 * type, matching `voxels` index for index. */
void set_grid_values(openvdb::GridBase &grid_base,
                     const GSpan values,
                     const Span<openvdb::Coord> voxels)
{
  BLI_assert(values.size() == voxels.size());
  to_typed_grid(grid_base, [&](auto &grid) {
    using GridT = std::decay_t<decltype(grid)>;
    using ValueType = typename GridT::ValueType;
    const ValueType *data = static_cast<const ValueType *>(values.data());

    /* The unsafe accessor avoids per-access synchronization; callers must not use the
     * grid concurrently while this runs. */
    auto accessor = grid.getUnsafeAccessor();
    for (const int64_t i : voxels.index_range()) {
      accessor.setValue(voxels[i], data[i]);
    }
  });
}
733
734void set_tile_values(openvdb::GridBase &grid_base,
735 const GSpan values,
737{
738 BLI_assert(values.size() == tiles.size());
739 to_typed_grid(grid_base, [&](auto &grid) {
740 using GridT = typename std::decay_t<decltype(grid)>;
741 using TreeT = typename GridT::TreeType;
742 using ValueType = typename GridT::ValueType;
743 auto &tree = grid.tree();
744
745 const ValueType *computed_values = static_cast<const ValueType *>(values.data());
746
747 const auto set_tile_value = [&](auto &node, const openvdb::Coord &coord_in_tile, auto value) {
748 const openvdb::Index n = node.coordToOffset(coord_in_tile);
749 BLI_assert(node.isChildMaskOff(n));
750 /* TODO: Figure out how to do this without const_cast, although the same is done in
751 * `openvdb_ax/openvdb_ax/compiler/VolumeExecutable.cc` which has a similar purpose.
752 * It seems like OpenVDB generally allows that, but it does not have a proper public
753 * API for this yet. */
754 using UnionType = typename std::decay_t<decltype(node)>::UnionType;
755 auto *table = const_cast<UnionType *>(node.getTable());
756 table[n].setValue(value);
757 };
758
759 for (const int i : tiles.index_range()) {
760 const openvdb::CoordBBox tile = tiles[i];
761 const openvdb::Coord coord_in_tile = tile.min();
762 const auto &computed_value = computed_values[i];
763 using InternalNode1 = typename TreeT::RootNodeType::ChildNodeType;
764 using InternalNode2 = typename InternalNode1::ChildNodeType;
765 /* Find the internal node that contains the tile and update the value in there. */
766 if (auto *node = tree.template probeNode<InternalNode2>(coord_in_tile)) {
767 set_tile_value(*node, coord_in_tile, computed_value);
768 }
769 else if (auto *node = tree.template probeNode<InternalNode1>(coord_in_tile)) {
770 set_tile_value(*node, coord_in_tile, computed_value);
771 }
772 else {
774 }
775 }
776 });
777}
778
/* Set voxel values of a bool grid from `values`, for the voxel coordinates selected by
 * `index_mask`. */
void set_mask_leaf_buffer_from_bools(openvdb::BoolGrid &grid,
                                     const Span<bool> values,
                                     const IndexMask &index_mask,
                                     const Span<openvdb::Coord> voxels)
{
  auto accessor = grid.getUnsafeAccessor();
  /* Could probably use int16_t for the iteration index. Double check this. */
  index_mask.foreach_index_optimized<int>([&](const int i) {
    const openvdb::Coord &coord = voxels[i];
    accessor.setValue(coord, values[i]);
  });
}
791
/* Replace the grid's background (inactive-voxel) value. `value` must point to a value
 * of the grid's value type. */
void set_grid_background(openvdb::GridBase &grid_base, const GPointer value)
{
  to_typed_grid(grid_base, [&](auto &grid) {
    using GridT = std::decay_t<decltype(grid)>;
    using ValueType = typename GridT::ValueType;
    auto &tree = grid.tree();

    BLI_assert(value.type()->size == sizeof(ValueType));
    tree.root().setBackground(*static_cast<const ValueType *>(value.get()), true);
  });
}
803
/* Reduce memory usage by collapsing inactive nodes/tiles of the tree. */
void prune_inactive(openvdb::GridBase &grid_base)
{
  to_typed_grid(grid_base, [&](auto &grid) { openvdb::tools::pruneInactive(grid.tree()); });
}
808
809#endif /* WITH_OPENVDB */
810
811} // namespace blender::bke::volume_grid
VolumeGridType
@ VOLUME_GRID_VECTOR_FLOAT
@ VOLUME_GRID_MASK
@ VOLUME_GRID_VECTOR_DOUBLE
@ VOLUME_GRID_VECTOR_INT
@ VOLUME_GRID_UNKNOWN
@ VOLUME_GRID_DOUBLE
@ VOLUME_GRID_BOOLEAN
@ VOLUME_GRID_INT
@ VOLUME_GRID_INT64
@ VOLUME_GRID_POINTS
@ VOLUME_GRID_FLOAT
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
#define UNUSED_VARS(...)
volatile int lock
BMesh const char void * data
return true
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
SIMD_FORCE_INLINE btVector3 transform(const btVector3 &point) const
long long int int64_t
SIMD_FORCE_INLINE btVector3 operator()(const btVector3 &x) const
Return the transform of the vector.
Definition btTransform.h:90
void foreach_index_optimized(Fn &&fn) const
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
void append(const T &value)
bool is_empty() const
IndexRange index_range() const
void append(const T &value)
KDTree_3d * tree
ccl_gpu_kernel_postfix ccl_global KernelWorkTile * tiles
const ccl_global KernelWorkTile * tile
void * MEM_mallocN(size_t len, const char *str)
Definition mallocn.cc:128
float4x4 get_transform_matrix(const VolumeGridData &grid)
std::string get_name(const VolumeGridData &grid)
void count_memory(const VolumeGridData &grid, MemoryCounter &memory)
int get_channels_num(VolumeGridType type)
std::string error_message_from_load(const VolumeGridData &grid)
void load(const VolumeGridData &grid)
void clear_tree(VolumeGridData &grid)
VolumeGridType get_type(const VolumeGridData &grid)
void set_transform_matrix(VolumeGridData &grid, const float4x4 &matrix)
bool is_loaded(const VolumeGridData &grid)
MatBase< float, 4, 4 > float4x4
const char * name
i
Definition text_draw.cc:230