Blender V4.3
editmesh_undo.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
9#include <variant>
10
11#include "MEM_guardedalloc.h"
12
13#include "CLG_log.h"
14
15#include "DNA_key_types.h"
16#include "DNA_layer_types.h"
17#include "DNA_mesh_types.h"
18#include "DNA_meshdata_types.h"
19#include "DNA_object_types.h"
20#include "DNA_scene_types.h"
21
22#include "BLI_array_utils.h"
24#include "BLI_listbase.h"
25#include "BLI_string.h"
26#include "BLI_task.hh"
27#include "BLI_vector.hh"
28
29#include "BKE_context.hh"
30#include "BKE_customdata.hh"
31#include "BKE_editmesh.hh"
32#include "BKE_key.hh"
33#include "BKE_layer.hh"
34#include "BKE_lib_id.hh"
35#include "BKE_main.hh"
36#include "BKE_mesh.hh"
37#include "BKE_object.hh"
38#include "BKE_undo_system.hh"
39
40#include "DEG_depsgraph.hh"
41
42#include "ED_mesh.hh"
43#include "ED_object.hh"
44#include "ED_undo.hh"
45#include "ED_util.hh"
46
47#include "WM_api.hh"
48#include "WM_types.hh"
49
50#define USE_ARRAY_STORE
51
52#ifdef USE_ARRAY_STORE
53// # define DEBUG_PRINT
54// # define DEBUG_TIME
55# ifdef DEBUG_TIME
56# include "BLI_time_utildefines.h"
57# endif
58
59# include "BLI_array_store.h"
68# define ARRAY_CHUNK_SIZE_IN_BYTES 65536
69# define ARRAY_CHUNK_NUM_MIN 256
70
71# define USE_ARRAY_STORE_THREAD
72#endif
73
74#ifdef USE_ARRAY_STORE_THREAD
75# include "BLI_task.h"
76#endif
77
79static CLG_LogRef LOG = {"ed.undo.mesh"};
80
81/* -------------------------------------------------------------------- */
85#ifdef USE_ARRAY_STORE
86
87static size_t array_chunk_size_calc(const size_t stride)
88{
89 /* Return a chunk size that targets a size in bytes,
90 * this is done so boolean arrays don't add so much overhead and
91 * larger arrays aren't so big as to waste memory, see: #105205. */
93}
94
95/* Single linked list of layers stored per type */
101
102#endif
103
104struct UndoMesh {
110
114
127
128#ifdef USE_ARRAY_STORE
129 /* Null arrays are considered empty. */
130 struct { /* most data is stored as 'custom' data */
136#endif /* USE_ARRAY_STORE */
137
138 size_t undo_size;
139};
140
141#ifdef USE_ARRAY_STORE
142
143/* -------------------------------------------------------------------- */
151enum {
159};
160# define ARRAY_STORE_INDEX_NUM (ARRAY_STORE_INDEX_MSEL + 1)
161
162static struct {
164 int users;
165
171
172# ifdef USE_ARRAY_STORE_THREAD
174# endif
175
176} um_arraystore = {{{nullptr}}};
177
179 const size_t data_len,
180 const bool create,
181 const int bs_index,
182 const BArrayCustomData *bcd_reference,
183 BArrayCustomData **r_bcd_first)
184{
185 using namespace blender;
186 if (data_len == 0) {
187 if (create) {
188 *r_bcd_first = nullptr;
189 }
190 }
191
192 const BArrayCustomData *bcd_reference_current = bcd_reference;
193 BArrayCustomData *bcd = nullptr, *bcd_first = nullptr, *bcd_prev = nullptr;
194 for (int layer_start = 0, layer_end; layer_start < cdata->totlayer; layer_start = layer_end) {
195 const eCustomDataType type = eCustomDataType(cdata->layers[layer_start].type);
196
197 /* Perform a full copy on dynamic layers.
198 *
199 * Unfortunately we can't compare dynamic layer types as they contain allocated pointers,
200 * which burns CPU cycles looking for duplicate data that doesn't exist.
201 * The array data isn't comparable once copied from the mesh,
202 * this bottlenecks on high poly meshes, see #84114.
203 *
204 * Ideally the data would be expanded into a format that could be de-duplicated effectively,
205 * this would require a flat representation of each dynamic custom-data layer.
206 *
207 * Instead, these non-trivial custom data layer are stored in the undo system using implicit
208 * sharing, to avoid the copy from the undo mesh.
209 */
210 const bool layer_type_is_dynamic = CustomData_layertype_is_dynamic(type);
211
212 layer_end = layer_start + 1;
213 while ((layer_end < cdata->totlayer) && (type == cdata->layers[layer_end].type)) {
214 layer_end++;
215 }
216
217 const int stride = CustomData_sizeof(type);
218 BArrayStore *bs = create ? BLI_array_store_at_size_ensure(&um_arraystore.bs_stride[bs_index],
219 stride,
220 array_chunk_size_calc(stride)) :
221 nullptr;
222 const int layer_len = layer_end - layer_start;
223
224 if (create) {
225 if (bcd_reference_current && (bcd_reference_current->type == type)) {
226 /* common case, the reference is aligned */
227 }
228 else {
229 bcd_reference_current = nullptr;
230
231 /* Do a full lookup when unaligned. */
232 if (bcd_reference) {
233 const BArrayCustomData *bcd_iter = bcd_reference;
234 while (bcd_iter) {
235 if (bcd_iter->type == type) {
236 bcd_reference_current = bcd_iter;
237 break;
238 }
239 bcd_iter = bcd_iter->next;
240 }
241 }
242 }
243 }
244
245 if (create) {
246 bcd = MEM_new<BArrayCustomData>(__func__);
247 bcd->next = nullptr;
248 bcd->type = type;
249 bcd->states.reinitialize(layer_end - layer_start);
250
251 if (bcd_prev) {
252 bcd_prev->next = bcd;
253 bcd_prev = bcd;
254 }
255 else {
256 bcd_first = bcd;
257 bcd_prev = bcd;
258 }
259 }
260
261 CustomDataLayer *layer = &cdata->layers[layer_start];
262 for (int i = 0; i < layer_len; i++, layer++) {
263 if (create) {
264 if (layer->data) {
265 if (layer_type_is_dynamic) {
266 /* See comment on `layer_type_is_dynamic` above. */
267 const ImplicitSharingInfo *sharing_info;
268 if (layer->sharing_info) {
269 sharing_info = layer->sharing_info;
270 sharing_info->add_user();
271 }
272 else {
273 sharing_info = implicit_sharing::info_for_mem_free(layer->data);
274 }
275 bcd->states[i] = ImplicitSharingInfoAndData{sharing_info, layer->data};
276 }
277 else {
278 BArrayState *state_reference = nullptr;
279 if (bcd_reference_current && i < bcd_reference_current->states.size()) {
280 state_reference = std::get<BArrayState *>(bcd_reference_current->states[i]);
281 }
282
284 bs, layer->data, size_t(data_len) * stride, state_reference);
285 }
286 }
287 else {
288 bcd->states[i] = nullptr;
289 }
290 }
291
292 if (layer->data) {
293 if (layer->sharing_info) {
294 layer->sharing_info->remove_user_and_delete_if_last();
295 layer->sharing_info = nullptr;
296 layer->data = nullptr;
297 }
298 else {
299 MEM_SAFE_FREE(layer->data);
300 }
301 }
302 }
303
304 if (create) {
305 if (bcd_reference_current) {
306 bcd_reference_current = bcd_reference_current->next;
307 }
308 }
309 }
310
311 if (create) {
312 *r_bcd_first = bcd_first;
313 }
314}
315
321 CustomData *cdata,
322 const size_t data_len)
323{
324 using namespace blender;
325 CustomDataLayer *layer = cdata->layers;
326 while (bcd) {
327 const int stride = CustomData_sizeof(bcd->type);
328 for (int i = 0; i < bcd->states.size(); i++) {
329 BLI_assert(bcd->type == layer->type);
330 if (std::holds_alternative<BArrayState *>(bcd->states[i])) {
331 BArrayState *state = std::get<BArrayState *>(bcd->states[i]);
332 if (state) {
333 size_t state_len;
334 layer->data = BLI_array_store_state_data_get_alloc(state, &state_len);
335 BLI_assert(stride * data_len == state_len);
336 UNUSED_VARS_NDEBUG(stride, data_len);
337 }
338 else {
339 layer->data = nullptr;
340 }
341 }
342 else {
343 ImplicitSharingInfoAndData state = std::get<ImplicitSharingInfoAndData>(bcd->states[i]);
344 layer->data = const_cast<void *>(state.data);
345 layer->sharing_info = state.sharing_info;
346 layer->sharing_info->add_user();
347 }
348 layer++;
349 }
350 bcd = bcd->next;
351 }
352}
353
/* Free a linked chain of per-type custom-data states (`bcd` list) belonging to
 * the domain selected by `bs_index`, releasing whichever representation each
 * layer state holds: a de-duplicated `BArrayState` in the shared array-store,
 * or an implicitly-shared data block (see `um_arraystore_cd_compact`). */
354static void um_arraystore_cd_free(BArrayCustomData *bcd, const int bs_index)
355{
356 using namespace blender;
357 while (bcd) {
358 BArrayCustomData *bcd_next = bcd->next;
359 const int stride = CustomData_sizeof(bcd->type);
360 BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride[bs_index], stride);
361 for (int i = 0; i < bcd->states.size(); i++) {
362 if (std::holds_alternative<BArrayState *>(bcd->states[i])) {
363 if (BArrayState *state = std::get<BArrayState *>(bcd->states[i])) {
/* NOTE(review): the extraction dropped a line here (orig line 364); presumably
 * the state is removed from `bs` (e.g. `BLI_array_store_state_remove(bs, state)`)
 * — confirm against the original source. */
365 }
366 }
367 else {
/* Implicitly-shared layer data: drop our user reference, freeing if last. */
368 ImplicitSharingInfoAndData state = std::get<ImplicitSharingInfoAndData>(bcd->states[i]);
369 state.sharing_info->remove_user_and_delete_if_last();
370 }
371 }
/* `bcd_next` was saved above, so deleting the node here is safe. */
372 MEM_delete(bcd);
373 bcd = bcd_next;
374 }
375}
376
382static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool create)
383{
384 Mesh *mesh = &um->mesh;
385
386 /* Compacting can be time consuming, run in parallel.
387 *
388 * NOTE(@ideasman42): this could be further parallelized with every custom-data layer
389 * running in its own thread. If this is a bottleneck it's worth considering.
390 * At the moment it seems fast enough to split by domain.
391 * Since this is itself a background thread, using too many threads here could
392 * interfere with foreground tasks. */
394 4096 < (mesh->verts_num + mesh->edges_num + mesh->corners_num + mesh->faces_num),
395 [&]() {
396 um_arraystore_cd_compact(&mesh->vert_data,
397 mesh->verts_num,
398 create,
399 ARRAY_STORE_INDEX_VERT,
400 um_ref ? um_ref->store.vdata : nullptr,
401 &um->store.vdata);
402 },
403 [&]() {
404 um_arraystore_cd_compact(&mesh->edge_data,
405 mesh->edges_num,
406 create,
407 ARRAY_STORE_INDEX_EDGE,
408 um_ref ? um_ref->store.edata : nullptr,
409 &um->store.edata);
410 },
411 [&]() {
412 um_arraystore_cd_compact(&mesh->corner_data,
413 mesh->corners_num,
414 create,
415 ARRAY_STORE_INDEX_LOOP,
416 um_ref ? um_ref->store.ldata : nullptr,
417 &um->store.ldata);
418 },
419 [&]() {
420 um_arraystore_cd_compact(&mesh->face_data,
421 mesh->faces_num,
422 create,
423 ARRAY_STORE_INDEX_POLY,
424 um_ref ? um_ref->store.pdata : nullptr,
425 &um->store.pdata);
426 },
427 [&]() {
428 if (mesh->face_offset_indices) {
429 BLI_assert(create == (um->store.face_offset_indices == nullptr));
430 if (create) {
431 BArrayState *state_reference = um_ref ? um_ref->store.face_offset_indices : nullptr;
432 const size_t stride = sizeof(*mesh->face_offset_indices);
433 BArrayStore *bs = BLI_array_store_at_size_ensure(
434 &um_arraystore.bs_stride[ARRAY_STORE_INDEX_POLY_OFFSETS],
435 stride,
436 array_chunk_size_calc(stride));
437 um->store.face_offset_indices = BLI_array_store_state_add(bs,
438 mesh->face_offset_indices,
439 size_t(mesh->faces_num + 1) *
440 stride,
441 state_reference);
442 }
443 blender::implicit_sharing::free_shared_data(&mesh->face_offset_indices,
444 &mesh->runtime->face_offsets_sharing_info);
445 }
446 },
447 [&]() {
448 if (mesh->key && mesh->key->totkey) {
449 const size_t stride = mesh->key->elemsize;
452 stride,
453 array_chunk_size_calc(stride)) :
454 nullptr;
455 if (create) {
456 um->store.keyblocks = static_cast<BArrayState **>(
457 MEM_mallocN(mesh->key->totkey * sizeof(*um->store.keyblocks), __func__));
458 }
459 KeyBlock *keyblock = static_cast<KeyBlock *>(mesh->key->block.first);
460 for (int i = 0; i < mesh->key->totkey; i++, keyblock = keyblock->next) {
461 if (create) {
462 BArrayState *state_reference = (um_ref && um_ref->mesh.key &&
463 (i < um_ref->mesh.key->totkey)) ?
464 um_ref->store.keyblocks[i] :
465 nullptr;
466 um->store.keyblocks[i] = BLI_array_store_state_add(
467 bs, keyblock->data, size_t(keyblock->totelem) * stride, state_reference);
468 }
469
470 if (keyblock->data) {
471 MEM_freeN(keyblock->data);
472 keyblock->data = nullptr;
473 }
474 }
475 }
476 },
477 [&]() {
478 if (mesh->mselect && mesh->totselect) {
479 BLI_assert(create == (um->store.mselect == nullptr));
480 if (create) {
481 BArrayState *state_reference = um_ref ? um_ref->store.mselect : nullptr;
482 const size_t stride = sizeof(*mesh->mselect);
485 stride,
486 array_chunk_size_calc(stride));
487 um->store.mselect = BLI_array_store_state_add(
488 bs, mesh->mselect, size_t(mesh->totselect) * stride, state_reference);
489 }
490
491 /* keep mesh->totselect for validation */
492 MEM_freeN(mesh->mselect);
493 mesh->mselect = nullptr;
494 }
495 });
496
497 if (create) {
498 um_arraystore.users += 1;
499 }
500}
501
/* Compact the undo mesh `um` into the shared array-store (create=true),
 * de-duplicating array data against the optional reference step `um_ref`.
 * Thin wrapper over #um_arraystore_compact_ex. */
505static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
506{
507 um_arraystore_compact_ex(um, um_ref, true);
508}
509
510static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
511{
512# ifdef DEBUG_PRINT
513 size_t size_expanded_prev = 0, size_compacted_prev = 0;
514
515 for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
516 size_t size_expanded_prev_iter, size_compacted_prev_iter;
518 &um_arraystore.bs_stride[bs_index], &size_expanded_prev_iter, &size_compacted_prev_iter);
519 size_expanded_prev += size_expanded_prev_iter;
520 size_compacted_prev += size_compacted_prev_iter;
521 }
522# endif
523
524# ifdef DEBUG_TIME
525 TIMEIT_START(mesh_undo_compact);
526# endif
527
528 um_arraystore_compact(um, um_ref);
529
530# ifdef DEBUG_TIME
531 TIMEIT_END(mesh_undo_compact);
532# endif
533
534# ifdef DEBUG_PRINT
535 {
536 size_t size_expanded = 0, size_compacted = 0;
537
538 for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
539 size_t size_expanded_iter, size_compacted_iter;
541 &um_arraystore.bs_stride[bs_index], &size_expanded_iter, &size_compacted_iter);
542 size_expanded += size_expanded_iter;
543 size_compacted += size_compacted_iter;
544 }
545
546 const double percent_total = size_expanded ?
547 ((double(size_compacted) / double(size_expanded)) * 100.0) :
548 -1.0;
549
550 size_t size_expanded_step = size_expanded - size_expanded_prev;
551 size_t size_compacted_step = size_compacted - size_compacted_prev;
552 const double percent_step = size_expanded_step ?
553 ((double(size_compacted_step) / double(size_expanded_step)) *
554 100.0) :
555 -1.0;
556
557 printf("overall memory use: %.8f%% of expanded size\n", percent_total);
558 printf("step memory use: %.8f%% of expanded size\n", percent_step);
559 }
560# endif
561}
562
563# ifdef USE_ARRAY_STORE_THREAD
564
567 const UndoMesh *um_ref; /* can be nullptr */
568};
/* TaskPool callback: unpack the #UMArrayData payload and run the compaction
 * (with optional DEBUG_PRINT/DEBUG_TIME reporting) for this undo step.
 * The pool argument is unused. */
569static void um_arraystore_compact_cb(TaskPool *__restrict /*pool*/, void *taskdata)
570{
571 UMArrayData *um_data = static_cast<UMArrayData *>(taskdata);
572 um_arraystore_compact_with_info(um_data->um, um_data->um_ref);
573}
574
575# endif /* USE_ARRAY_STORE_THREAD */
576
581{
582 um_arraystore_compact_ex(um, nullptr, false);
583}
584
586{
587 Mesh *mesh = &um->mesh;
588
589 um_arraystore_cd_expand(um->store.vdata, &mesh->vert_data, mesh->verts_num);
590 um_arraystore_cd_expand(um->store.edata, &mesh->edge_data, mesh->edges_num);
591 um_arraystore_cd_expand(um->store.ldata, &mesh->corner_data, mesh->corners_num);
592 um_arraystore_cd_expand(um->store.pdata, &mesh->face_data, mesh->faces_num);
593
594 if (um->store.keyblocks) {
595 const size_t stride = mesh->key->elemsize;
596 KeyBlock *keyblock = static_cast<KeyBlock *>(mesh->key->block.first);
597 for (int i = 0; i < mesh->key->totkey; i++, keyblock = keyblock->next) {
599 size_t state_len;
600 keyblock->data = BLI_array_store_state_data_get_alloc(state, &state_len);
601 BLI_assert(keyblock->totelem == (state_len / stride));
602 UNUSED_VARS_NDEBUG(stride);
603 }
604 }
605
606 if (um->store.face_offset_indices) {
607 const size_t stride = sizeof(*mesh->face_offset_indices);
609 size_t state_len;
610 mesh->face_offset_indices = static_cast<int *>(
612 mesh->runtime->face_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
613 mesh->face_offset_indices);
614 BLI_assert((mesh->faces_num + 1) == (state_len / stride));
615 UNUSED_VARS_NDEBUG(stride);
616 }
617 if (um->store.mselect) {
618 const size_t stride = sizeof(*mesh->mselect);
620 size_t state_len;
621 mesh->mselect = static_cast<MSelect *>(
623 BLI_assert(mesh->totselect == (state_len / stride));
624 UNUSED_VARS_NDEBUG(stride);
625 }
626}
627
629{
630 Mesh *mesh = &um->mesh;
631
636
637 if (um->store.keyblocks) {
638 const size_t stride = mesh->key->elemsize;
640 &um_arraystore.bs_stride[ARRAY_STORE_INDEX_SHAPE], stride);
641 for (int i = 0; i < mesh->key->totkey; i++) {
644 }
646 um->store.keyblocks = nullptr;
647 }
648
649 if (um->store.face_offset_indices) {
650 const size_t stride = sizeof(*mesh->face_offset_indices);
655 um->store.face_offset_indices = nullptr;
656 }
657 if (um->store.mselect) {
658 const size_t stride = sizeof(*mesh->mselect);
660 stride);
663 um->store.mselect = nullptr;
664 }
665
666 um_arraystore.users -= 1;
667
668 BLI_assert(um_arraystore.users >= 0);
669
670 if (um_arraystore.users == 0) {
671# ifdef DEBUG_PRINT
672 printf("mesh undo store: freeing all data!\n");
673# endif
674 for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
675 BLI_array_store_at_size_clear(&um_arraystore.bs_stride[bs_index]);
676 }
677# ifdef USE_ARRAY_STORE_THREAD
679 um_arraystore.task_pool = nullptr;
680# endif
681 }
682}
683
686/* -------------------------------------------------------------------- */
702{
703 /* Map: `Mesh.id.session_uid` -> `UndoMesh`. */
704 GHash *uuid_map = BLI_ghash_ptr_new_ex(__func__, object_len);
705 UndoMesh **um_references = static_cast<UndoMesh **>(
706 MEM_callocN(sizeof(UndoMesh *) * object_len, __func__));
707 for (int i = 0; i < object_len; i++) {
708 const Mesh *mesh = static_cast<const Mesh *>(object[i]->data);
709 BLI_ghash_insert(uuid_map, POINTER_FROM_INT(mesh->id.session_uid), &um_references[i]);
710 }
711 int uuid_map_len = object_len;
712
713 /* Loop backwards over all previous mesh undo data until either:
714 * - All elements have been found (where `um_references` we'll have every element set).
715 * - There are no undo steps left to look for. */
716 UndoMesh *um_iter = static_cast<UndoMesh *>(um_arraystore.local_links.last);
717 while (um_iter && (uuid_map_len != 0)) {
718 UndoMesh **um_p;
719 if ((um_p = static_cast<UndoMesh **>(
720 BLI_ghash_popkey(uuid_map, POINTER_FROM_INT(um_iter->mesh.id.session_uid), nullptr))))
721 {
722 *um_p = um_iter;
723 uuid_map_len--;
724 }
725 um_iter = um_iter->local_prev;
726 }
727 BLI_assert(uuid_map_len == BLI_ghash_len(uuid_map));
728 BLI_ghash_free(uuid_map, nullptr, nullptr);
729 if (uuid_map_len == object_len) {
730 MEM_freeN(um_references);
731 um_references = nullptr;
732 }
733 return um_references;
734}
735
738#endif /* USE_ARRAY_STORE */
739
740/* for callbacks */
741/* undo simply makes copies of a bmesh */
745static void *undomesh_from_editmesh(UndoMesh *um, BMEditMesh *em, Key *key, UndoMesh *um_ref)
746{
748#ifdef USE_ARRAY_STORE_THREAD
749 /* changes this waits is low, but must have finished */
750 if (um_arraystore.task_pool) {
752 }
753#endif
754 /* make sure shape keys work */
755 if (key != nullptr) {
756 um->mesh.key = (Key *)BKE_id_copy_ex(
757 nullptr, &key->id, nullptr, LIB_ID_COPY_LOCALIZE | LIB_ID_COPY_NO_ANIMDATA);
758 }
759 else {
760 um->mesh.key = nullptr;
761 }
762
763 /* Uncomment for troubleshooting. */
764 // BM_mesh_validate(em->bm);
765
766 /* Copy the ID name characters to the mesh so code that depends on accessing the ID type can work
767 * on it. Necessary to use the attribute API. */
768 STRNCPY(um->mesh.id.name, "MEundomesh_from_editmesh");
769
770 /* Runtime data is necessary for some asserts in other code, and the overhead of creating it for
771 * undo meshes should be low. */
772 BLI_assert(um->mesh.runtime == nullptr);
774
775 CustomData_MeshMasks cd_mask_extra{};
776 cd_mask_extra.vmask = CD_MASK_SHAPE_KEYINDEX;
778 /* Undo code should not be manipulating 'G_MAIN->object' hooks/vertex-parent. */
779 params.calc_object_remap = false;
780 params.update_shapekey_indices = false;
781 params.cd_mask_extra = cd_mask_extra;
782 params.active_shapekey_to_mvert = true;
783 BM_mesh_bm_to_me(nullptr, em->bm, &um->mesh, &params);
784
785 um->selectmode = em->selectmode;
786 um->shapenr = em->bm->shapenr;
787
788#ifdef USE_ARRAY_STORE
789 {
790 /* Add ourselves. */
791 BLI_addtail(&um_arraystore.local_links, um);
792
793# ifdef USE_ARRAY_STORE_THREAD
794 if (um_arraystore.task_pool == nullptr) {
796 }
797
798 UMArrayData *um_data = static_cast<UMArrayData *>(MEM_mallocN(sizeof(*um_data), __func__));
799 um_data->um = um;
800 um_data->um_ref = um_ref;
801
802 BLI_task_pool_push(um_arraystore.task_pool, um_arraystore_compact_cb, um_data, true, nullptr);
803# else
805# endif
806 }
807#else
808 UNUSED_VARS(um_ref);
809#endif
810
811 return um;
812}
813
815{
816 BMEditMesh *em_tmp;
817 BMesh *bm;
818
819#ifdef USE_ARRAY_STORE
820# ifdef USE_ARRAY_STORE_THREAD
821 /* changes this waits is low, but must have finished */
823# endif
824
825# ifdef DEBUG_TIME
826 TIMEIT_START(mesh_undo_expand);
827# endif
828
830
831# ifdef DEBUG_TIME
832 TIMEIT_END(mesh_undo_expand);
833# endif
834#endif /* USE_ARRAY_STORE */
835
836 const BMAllocTemplate allocsize = BMALLOC_TEMPLATE_FROM_ME(&um->mesh);
837
838 em->bm->shapenr = um->shapenr;
839
841
842 BMeshCreateParams create_params{};
843 create_params.use_toolflags = true;
844 bm = BM_mesh_create(&allocsize, &create_params);
845
846 BMeshFromMeshParams convert_params{};
847 /* Handled with tessellation. */
848 convert_params.calc_face_normal = false;
849 convert_params.calc_vert_normal = false;
850 convert_params.active_shapekey = um->shapenr;
851 BM_mesh_bm_from_me(bm, &um->mesh, &convert_params);
852
853 em_tmp = BKE_editmesh_create(bm);
854 *em = *em_tmp;
855
856 /* Calculate face normals and tessellation at once since it's multi-threaded. */
858
859 em->selectmode = um->selectmode;
860 bm->selectmode = um->selectmode;
861
863
864 ob->shapenr = um->shapenr;
865
866 MEM_delete(em_tmp);
867
868#ifdef USE_ARRAY_STORE
870#endif
871}
872
874{
875 Mesh *mesh = &um->mesh;
876
877#ifdef USE_ARRAY_STORE
878
879# ifdef USE_ARRAY_STORE_THREAD
880 /* changes this waits is low, but must have finished */
882# endif
883
884 /* we need to expand so any allocations in custom-data are freed with the mesh */
886
887 BLI_assert(BLI_findindex(&um_arraystore.local_links, um) != -1);
888 BLI_remlink(&um_arraystore.local_links, um);
889
891#endif
892
893 if (mesh->key) {
894 BKE_key_free_data(mesh->key);
895 MEM_freeN(mesh->key);
896 }
897
899}
900
902{
903 Scene *scene = CTX_data_scene(C);
904 ViewLayer *view_layer = CTX_data_view_layer(C);
905 BKE_view_layer_synced_ensure(scene, view_layer);
906 Object *obedit = BKE_view_layer_edit_object_get(view_layer);
907 if (obedit && obedit->type == OB_MESH) {
908 const Mesh *mesh = static_cast<Mesh *>(obedit->data);
909 if (mesh->runtime->edit_mesh != nullptr) {
910 return obedit;
911 }
912 }
913 return nullptr;
914}
915
918/* -------------------------------------------------------------------- */
925 UndoRefID_Object obedit_ref;
927};
928
936
938{
939 return editmesh_object_from_context(C) != nullptr;
940}
941
942static bool mesh_undosys_step_encode(bContext *C, Main *bmain, UndoStep *us_p)
943{
944 MeshUndoStep *us = (MeshUndoStep *)us_p;
945
946 /* Important not to use the 3D view when getting objects because all objects
947 * outside of this list will be moved out of edit-mode when reading back undo steps. */
948 Scene *scene = CTX_data_scene(C);
949 ViewLayer *view_layer = CTX_data_view_layer(C);
950 const ToolSettings *ts = scene->toolsettings;
952
953 us->scene_ref.ptr = scene;
954 us->elems = static_cast<MeshUndoStep_Elem *>(
955 MEM_callocN(sizeof(*us->elems) * objects.size(), __func__));
956 us->elems_len = objects.size();
957
958 UndoMesh **um_references = nullptr;
959
960#ifdef USE_ARRAY_STORE
961 um_references = mesh_undostep_reference_elems_from_objects(objects.data(), objects.size());
962#endif
963
964 for (uint i = 0; i < objects.size(); i++) {
965 Object *ob = objects[i];
966 MeshUndoStep_Elem *elem = &us->elems[i];
967
968 elem->obedit_ref.ptr = ob;
969 Mesh *mesh = static_cast<Mesh *>(elem->obedit_ref.ptr->data);
970 BMEditMesh *em = mesh->runtime->edit_mesh.get();
971 undomesh_from_editmesh(&elem->data, em, mesh->key, um_references ? um_references[i] : nullptr);
972 em->needs_flush_to_id = 1;
973 us->step.data_size += elem->data.undo_size;
974 elem->data.uv_selectmode = ts->uv_selectmode;
975
976#ifdef USE_ARRAY_STORE
978 elem->data.mesh.id.session_uid = mesh->id.session_uid;
979#endif
980 }
981
982 if (um_references != nullptr) {
983 MEM_freeN(um_references);
984 }
985
986 bmain->is_memfile_undo_flush_needed = true;
987
988 return true;
989}
990
992 bContext *C, Main *bmain, UndoStep *us_p, const eUndoStepDir /*dir*/, bool /*is_final*/)
993{
994 MeshUndoStep *us = (MeshUndoStep *)us_p;
995 Scene *scene = CTX_data_scene(C);
996 ViewLayer *view_layer = CTX_data_view_layer(C);
997
999 CTX_wm_manager(C), us->scene_ref.ptr, &scene, &view_layer);
1001 scene, view_layer, &us->elems[0].obedit_ref.ptr, us->elems_len, sizeof(*us->elems));
1002
1004
1005 for (uint i = 0; i < us->elems_len; i++) {
1006 MeshUndoStep_Elem *elem = &us->elems[i];
1007 Object *obedit = elem->obedit_ref.ptr;
1008 Mesh *mesh = static_cast<Mesh *>(obedit->data);
1009 if (mesh->runtime->edit_mesh == nullptr) {
1010 /* Should never fail, may not crash but can give odd behavior. */
1011 CLOG_ERROR(&LOG,
1012 "name='%s', failed to enter edit-mode for object '%s', undo state invalid",
1013 us_p->name,
1014 obedit->id.name);
1015 continue;
1016 }
1017 BMEditMesh *em = mesh->runtime->edit_mesh.get();
1018 undomesh_to_editmesh(&elem->data, obedit, em);
1019 em->needs_flush_to_id = 1;
1021 }
1022
1023 /* The first element is always active */
1025 scene, view_layer, us->elems[0].obedit_ref.ptr, us_p->name, &LOG);
1026
1027 /* Check after setting active (unless undoing into another scene). */
1028 BLI_assert(mesh_undosys_poll(C) || (scene != CTX_data_scene(C)));
1029
1030 scene->toolsettings->selectmode = us->elems[0].data.selectmode;
1031 scene->toolsettings->uv_selectmode = us->elems[0].data.uv_selectmode;
1032
1033 bmain->is_memfile_undo_flush_needed = true;
1034
1035 WM_event_add_notifier(C, NC_GEOM | ND_DATA, nullptr);
1036}
1037
1039{
1040 MeshUndoStep *us = (MeshUndoStep *)us_p;
1041
1042 for (uint i = 0; i < us->elems_len; i++) {
1043 MeshUndoStep_Elem *elem = &us->elems[i];
1044 undomesh_free_data(&elem->data);
1045 }
1046 MEM_freeN(us->elems);
1047}
1048
1050 UndoTypeForEachIDRefFn foreach_ID_ref_fn,
1051 void *user_data)
1052{
1053 MeshUndoStep *us = (MeshUndoStep *)us_p;
1054
1055 foreach_ID_ref_fn(user_data, ((UndoRefID *)&us->scene_ref));
1056 for (uint i = 0; i < us->elems_len; i++) {
1057 MeshUndoStep_Elem *elem = &us->elems[i];
1058 foreach_ID_ref_fn(user_data, ((UndoRefID *)&elem->obedit_ref));
1059 }
1060}
1061
1076
Scene * CTX_data_scene(const bContext *C)
wmWindowManager * CTX_wm_manager(const bContext *C)
ViewLayer * CTX_data_view_layer(const bContext *C)
CustomData interface, see also DNA_customdata_types.h.
int CustomData_sizeof(eCustomDataType type)
bool CustomData_layertype_is_dynamic(eCustomDataType type)
BMEditMesh * BKE_editmesh_create(BMesh *bm)
Definition editmesh.cc:34
void BKE_editmesh_looptris_and_normals_calc(BMEditMesh *em)
Definition editmesh.cc:83
void BKE_key_free_data(Key *key)
Definition key.cc:241
void BKE_view_layer_synced_ensure(const Scene *scene, ViewLayer *view_layer)
Object * BKE_view_layer_edit_object_get(const ViewLayer *view_layer)
@ LIB_ID_COPY_LOCALIZE
@ LIB_ID_COPY_NO_ANIMDATA
ID * BKE_id_copy_ex(Main *bmain, const ID *id, ID **new_id_p, int flag)
Definition lib_id.cc:760
void BKE_mesh_free_data_for_undo(Mesh *mesh)
General operations, lookup, etc. for blender objects.
bool BKE_object_is_in_editmode(const Object *ob)
@ UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE
void(*)(void *user_data, UndoRefID *id_ref) UndoTypeForEachIDRefFn
eUndoStepDir
Efficient in-memory storage of multiple similar arrays.
BArrayState * BLI_array_store_state_add(BArrayStore *bs, const void *data, size_t data_len, const BArrayState *state_reference)
void BLI_array_store_state_remove(BArrayStore *bs, BArrayState *state)
void * BLI_array_store_state_data_get_alloc(BArrayState *state, size_t *r_data_len)
void BLI_array_store_at_size_clear(struct BArrayStore_AtSize *bs_stride)
void BLI_array_store_at_size_calc_memory_usage(const struct BArrayStore_AtSize *bs_stride, size_t *r_size_expanded, size_t *r_size_compacted)
BArrayStore * BLI_array_store_at_size_get(struct BArrayStore_AtSize *bs_stride, int stride)
BArrayStore * BLI_array_store_at_size_ensure(struct BArrayStore_AtSize *bs_stride, int stride, int chunk_size)
Generic array manipulation API.
#define BLI_array_is_zeroed(arr, arr_len)
#define BLI_assert(a)
Definition BLI_assert.h:50
void * BLI_ghash_popkey(GHash *gh, const void *key, GHashKeyFreeFP keyfreefp) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.c:802
GHash * BLI_ghash_ptr_new_ex(const char *info, unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
unsigned int BLI_ghash_len(const GHash *gh) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.c:702
void BLI_ghash_insert(GHash *gh, void *key, void *val)
Definition BLI_ghash.c:707
void BLI_ghash_free(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition BLI_ghash.c:860
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:110
void BLI_remlink(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:130
int BLI_findindex(const struct ListBase *listbase, const void *vlink) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
MINLINE int power_of_2_max_i(int n)
#define STRNCPY(dst, src)
Definition BLI_string.h:593
unsigned int uint
@ TASK_PRIORITY_LOW
Definition BLI_task.h:56
void BLI_task_pool_work_and_wait(TaskPool *pool)
Definition task_pool.cc:471
TaskPool * BLI_task_pool_create_background(void *userdata, eTaskPriority priority)
Definition task_pool.cc:399
void BLI_task_pool_free(TaskPool *pool)
Definition task_pool.cc:431
void BLI_task_pool_push(TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskFreeFunction freedata)
Definition task_pool.cc:450
Utility defines for timing/benchmarks.
#define TIMEIT_START(var)
#define TIMEIT_END(var)
#define UNUSED_VARS(...)
#define POINTER_FROM_INT(i)
#define UNUSED_VARS_NDEBUG(...)
typedef double(DMatrix)[4][4]
#define CLOG_ERROR(clg_ref,...)
Definition CLG_log.h:182
void DEG_id_tag_update(ID *id, unsigned int flags)
@ ID_RECALC_GEOMETRY
Definition DNA_ID.h:1041
#define CD_MASK_SHAPE_KEYINDEX
Object is a sort of wrapper for general info.
@ OB_MESH
void EDBM_mesh_free_data(BMEditMesh *em)
void ED_undo_object_set_active_or_warn(Scene *scene, ViewLayer *view_layer, Object *ob, const char *info, CLG_LogRef *log)
Definition ed_undo.cc:790
void ED_undo_object_editmode_restore_helper(Scene *scene, ViewLayer *view_layer, Object **object_array, uint object_array_len, uint object_array_stride)
Definition ed_undo.cc:826
blender::Vector< Object * > ED_undo_editmode_objects_from_view_layer(const Scene *scene, ViewLayer *view_layer)
Definition ed_undo.cc:871
void ED_undo_object_editmode_validate_scene_from_windows(wmWindowManager *wm, const Scene *scene_ref, Scene **scene_p, ViewLayer **view_layer_p)
Definition ed_undo.cc:809
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
#define NC_GEOM
Definition WM_types.hh:360
#define ND_DATA
Definition WM_types.hh:475
@ BM_SPACEARR_DIRTY_ALL
ATTR_WARN_UNUSED_RESULT BMesh * bm
BMesh * BM_mesh_create(const BMAllocTemplate *allocsize, const BMeshCreateParams *params)
BMesh Make Mesh.
#define BMALLOC_TEMPLATE_FROM_ME(...)
void BM_mesh_bm_from_me(BMesh *bm, const Mesh *mesh, const BMeshFromMeshParams *params)
void BM_mesh_bm_to_me(Main *bmain, BMesh *bm, Mesh *mesh, const BMeshToMeshParams *params)
int64_t size() const
Definition BLI_array.hh:245
const T * data() const
Definition BLI_array.hh:301
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:388
#define printf
static Object * editmesh_object_from_context(bContext *C)
static void um_arraystore_expand(UndoMesh *um)
static void um_arraystore_compact_cb(TaskPool *__restrict, void *taskdata)
static void um_arraystore_free(UndoMesh *um)
static void undomesh_free_data(UndoMesh *um)
static void um_arraystore_cd_free(BArrayCustomData *bcd, const int bs_index)
#define ARRAY_CHUNK_NUM_MIN
static bool mesh_undosys_step_encode(bContext *C, Main *bmain, UndoStep *us_p)
#define ARRAY_CHUNK_SIZE_IN_BYTES
BArrayStore_AtSize bs_stride[ARRAY_STORE_INDEX_NUM]
void ED_mesh_undosys_type(UndoType *ut)
static struct @443 um_arraystore
static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
#define ARRAY_STORE_INDEX_NUM
static size_t array_chunk_size_calc(const size_t stride)
TaskPool * task_pool
static void um_arraystore_expand_clear(UndoMesh *um)
static void undomesh_to_editmesh(UndoMesh *um, Object *ob, BMEditMesh *em)
ListBase local_links
static void um_arraystore_cd_expand(const BArrayCustomData *bcd, CustomData *cdata, const size_t data_len)
static bool mesh_undosys_poll(bContext *C)
static void mesh_undosys_foreach_ID_ref(UndoStep *us_p, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
@ ARRAY_STORE_INDEX_LOOP
@ ARRAY_STORE_INDEX_VERT
@ ARRAY_STORE_INDEX_POLY_OFFSETS
@ ARRAY_STORE_INDEX_SHAPE
@ ARRAY_STORE_INDEX_MSEL
@ ARRAY_STORE_INDEX_EDGE
@ ARRAY_STORE_INDEX_POLY
static void * undomesh_from_editmesh(UndoMesh *um, BMEditMesh *em, Key *key, UndoMesh *um_ref)
int users
static CLG_LogRef LOG
static void mesh_undosys_step_free(UndoStep *us_p)
static void mesh_undosys_step_decode(bContext *C, Main *bmain, UndoStep *us_p, const eUndoStepDir, bool)
static UndoMesh ** mesh_undostep_reference_elems_from_objects(Object **object, int object_len)
static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
static void um_arraystore_cd_compact(CustomData *cdata, const size_t data_len, const bool create, const int bs_index, const BArrayCustomData *bcd_reference, BArrayCustomData **r_bcd_first)
static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool create)
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
void *(* MEM_mallocN)(size_t len, const char *str)
Definition mallocn.cc:44
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void *(* MEM_callocN)(size_t len, const char *str)
Definition mallocn.cc:42
static ulong state[N]
const ImplicitSharingInfo * info_for_mem_free(void *data)
void parallel_invoke(Functions &&...functions)
Definition BLI_task.hh:199
BArrayCustomData * next
blender::Array< std::variant< BArrayState *, blender::ImplicitSharingInfoAndData > > states
eCustomDataType type
short selectmode
char needs_flush_to_id
int shapenr
short selectmode
char spacearr_dirty
CustomDataLayer * layers
char name[66]
Definition DNA_ID.h:425
unsigned int session_uid
Definition DNA_ID.h:454
struct KeyBlock * next
void * data
bool is_memfile_undo_flush_needed
Definition BKE_main.hh:165
UndoRefID_Object obedit_ref
MeshUndoStep_Elem * elems
UndoRefID_Scene scene_ref
MeshRuntimeHandle * runtime
struct Key * key
const UndoMesh * um_ref
UndoMesh * um
size_t undo_size
UndoMesh * local_prev
BArrayState * mselect
BArrayCustomData * ldata
BArrayState * face_offset_indices
BArrayCustomData * vdata
struct UndoMesh::@444 store
BArrayState ** keyblocks
UndoMesh * local_next
BArrayCustomData * pdata
char uv_selectmode
BArrayCustomData * edata
size_t data_size
char name[64]
void(* step_foreach_ID_ref)(UndoStep *us, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
const char * name
void(* step_free)(UndoStep *us)
bool(* poll)(struct bContext *C)
void(* step_decode)(bContext *C, Main *bmain, UndoStep *us, eUndoStepDir dir, bool is_final)
bool(* step_encode)(bContext *C, Main *bmain, UndoStep *us)
void WM_event_add_notifier(const bContext *C, uint type, void *reference)