Blender V5.0
editmesh_undo.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include <algorithm>
10#include <variant>
11
12#include "MEM_guardedalloc.h"
13
14#include "CLG_log.h"
15
16#include "DNA_key_types.h"
17#include "DNA_layer_types.h"
18#include "DNA_mesh_types.h"
19#include "DNA_meshdata_types.h"
20#include "DNA_object_types.h"
21#include "DNA_scene_types.h"
22
23#include "BLI_array_utils.h"
25#include "BLI_listbase.h"
26#include "BLI_math_base.h"
27#include "BLI_string.h"
28#include "BLI_task.hh"
29#include "BLI_vector.hh"
30
31#include "BKE_context.hh"
32#include "BKE_customdata.hh"
33#include "BKE_deform.hh"
34#include "BKE_editmesh.hh"
35#include "BKE_key.hh"
36#include "BKE_layer.hh"
37#include "BKE_lib_id.hh"
38#include "BKE_main.hh"
39#include "BKE_mesh.hh"
40#include "BKE_object.hh"
41#include "BKE_undo_system.hh"
42
43#include "DEG_depsgraph.hh"
44
45#include "ED_mesh.hh"
46#include "ED_object.hh"
47#include "ED_undo.hh"
48#include "ED_util.hh"
49
50#include "WM_api.hh"
51#include "WM_types.hh"
52
53#define USE_ARRAY_STORE
54
55#ifdef USE_ARRAY_STORE
56// # define DEBUG_PRINT
57// # define DEBUG_TIME
58# ifdef DEBUG_TIME
59# include "BLI_time_utildefines.h"
60# endif
61
62# include "BLI_array_store.h"
71# define ARRAY_CHUNK_SIZE_IN_BYTES 65536
72# define ARRAY_CHUNK_NUM_MIN 256
73
74# define USE_ARRAY_STORE_THREAD
75
98# define USE_ARRAY_STORE_RLE
99#endif
100
101#ifdef USE_ARRAY_STORE_THREAD
102# include "BLI_task.h"
103#endif
104
/* Log category for mesh edit-mode undo; used by CLOG_ERROR in the decode step below. */
106static CLG_LogRef LOG = {"undo.mesh"};
107
108/* -------------------------------------------------------------------- */
111
112#ifdef USE_ARRAY_STORE
113
/* Compute the array-store chunk size (in elements) for a layer of the given byte stride.
 * NOTE(review): this is a generated listing with elided lines — the return expression
 * (listing line 119) is missing here; presumably derived from ARRAY_CHUNK_SIZE_IN_BYTES /
 * ARRAY_CHUNK_NUM_MIN defined above — confirm against the full source. */
114static size_t array_chunk_size_calc(const size_t stride)
115{
116 /* Return a chunk size that targets a size in bytes,
117 * this is done so boolean arrays don't add so much overhead and
118 * larger arrays aren't so big as to waste memory, see: #105205. */
120}
121
122/* Single linked list of layers stored per type */
128
129# ifdef USE_ARRAY_STORE_RLE
/* Return true when this custom-data layer group should be run-length encoded before
 * being handed to the array store.
 * NOTE(review): the signature (listing line 130) is elided in this extract; from the
 * call sites below it is presumably `static bool um_customdata_layer_use_rle(const
 * BArrayCustomData *bcd)` — confirm against the full source. */
131{
132 /* NOTE(@ideasman42): This could be enabled for all byte sized layers.
133 * for now only use for boolean layers to address: #136737. */
134 if (bcd->type == CD_PROP_BOOL) {
136 return true;
137 }
138 return false;
139}
140# endif
141
142#endif
143
/* Per-object snapshot of an edit-mesh for one undo step.
 * NOTE(review): most members (listing lines 145-174) are elided in this extract; the
 * code below references `um->mesh`, `um->selectmode`, `um->shapenr`, `um->local_prev`
 * and the nested `store` members (vdata/edata/ldata/pdata, keyblocks,
 * face_offset_indices, mselect) — confirm the full declaration upstream. */
144struct UndoMesh {
150
153
166
167#ifdef USE_ARRAY_STORE
168 /* Null arrays are considered empty. */
169 struct { /* most data is stored as 'custom' data */
175#endif /* USE_ARRAY_STORE */
176
177 size_t undo_size;
178};
179
180#ifdef USE_ARRAY_STORE
181
182/* -------------------------------------------------------------------- */
185
/* Indices into `um_arraystore.bs_stride`, one array-store group per data domain.
 * NOTE(review): the enumerators (listing lines 191-197) are elided here; the code below
 * uses ARRAY_STORE_INDEX_VERT/EDGE/LOOP/POLY/POLY_OFFSETS/SHAPE/MSEL. */
190enum {
198};
199# define ARRAY_STORE_INDEX_NUM (ARRAY_STORE_INDEX_MSEL + 1)
200
/* Process-wide shared storage for all mesh undo steps: de-duplicated array stores,
 * a user count, the list of live UndoMesh steps (`local_links`), and, when threaded,
 * a background `task_pool`. Several members are elided in this extract. */
201static struct {
203 int users;
204
210
211# ifdef USE_ARRAY_STORE_THREAD
213# endif
214
215} um_arraystore = {{{nullptr}}};
216
/* Compact the layers of `cdata` into the shared array store (when `create`), then strip
 * the layer data from the mesh in all cases. Layers are grouped by type; each group is
 * matched against the same-typed group in `bcd_reference` (the previous undo step) so
 * the array store can de-duplicate against it.
 * NOTE(review): the first line of the signature (listing line 217) is elided here; from
 * the call sites in um_arraystore_compact_ex it is presumably
 * `static void um_arraystore_cd_compact(CustomData *cdata, ...)` — confirm upstream. */
218 const size_t data_len,
219 const bool create,
220 const int bs_index,
221 const BArrayCustomData *bcd_reference,
222 BArrayCustomData **r_bcd_first)
223{
224 using namespace blender;
225 if (data_len == 0) {
226 if (create) {
227 *r_bcd_first = nullptr;
228 }
229 }
230
231 const BArrayCustomData *bcd_reference_current = bcd_reference;
232 BArrayCustomData *bcd = nullptr, *bcd_first = nullptr, *bcd_prev = nullptr;
 /* Walk layers in runs of identical type: [layer_start, layer_end). */
233 for (int layer_start = 0, layer_end; layer_start < cdata->totlayer; layer_start = layer_end) {
234 const eCustomDataType type = eCustomDataType(cdata->layers[layer_start].type);
235
236 /* Perform a full copy on dynamic layers.
237 *
238 * Unfortunately we can't compare dynamic layer types as they contain allocated pointers,
239 * which burns CPU cycles looking for duplicate data that doesn't exist.
240 * The array data isn't comparable once copied from the mesh,
241 * this bottlenecks on high poly meshes, see #84114.
242 *
243 * Ideally the data would be expanded into a format that could be de-duplicated effectively,
244 * this would require a flat representation of each dynamic custom-data layer.
245 *
246 * Instead, these non-trivial custom data layer are stored in the undo system using implicit
247 * sharing, to avoid the copy from the undo mesh.
248 */
249 const bool layer_type_is_dynamic = CustomData_layertype_is_dynamic(type);
250
251 layer_end = layer_start + 1;
252 while ((layer_end < cdata->totlayer) && (type == cdata->layers[layer_end].type)) {
253 layer_end++;
254 }
255
256 const int stride = CustomData_sizeof(type);
257 BArrayStore *bs = create ? BLI_array_store_at_size_ensure(&um_arraystore.bs_stride[bs_index],
258 stride,
259 array_chunk_size_calc(stride)) :
260 nullptr;
261 const int layer_len = layer_end - layer_start;
262
263 if (create) {
264 if (bcd_reference_current && (bcd_reference_current->type == type)) {
265 /* common case, the reference is aligned */
266 }
267 else {
268 bcd_reference_current = nullptr;
269
270 /* Do a full lookup when unaligned. */
271 if (bcd_reference) {
272 const BArrayCustomData *bcd_iter = bcd_reference;
273 while (bcd_iter) {
274 if (bcd_iter->type == type) {
275 bcd_reference_current = bcd_iter;
276 break;
277 }
278 bcd_iter = bcd_iter->next;
279 }
280 }
281 }
282 }
283
 /* Append a new per-type node to the single-linked BArrayCustomData list. */
284 if (create) {
285 bcd = MEM_new<BArrayCustomData>(__func__);
286 bcd->next = nullptr;
287 bcd->type = type;
288 bcd->states.reinitialize(layer_end - layer_start);
289
290 if (bcd_prev) {
291 bcd_prev->next = bcd;
292 bcd_prev = bcd;
293 }
294 else {
295 bcd_first = bcd;
296 bcd_prev = bcd;
297 }
298 }
299
300 CustomDataLayer *layer = &cdata->layers[layer_start];
301 for (int i = 0; i < layer_len; i++, layer++) {
302 if (create) {
303 if (layer->data) {
304 if (layer_type_is_dynamic) {
305 /* See comment on `layer_type_is_dynamic` above. */
306 const ImplicitSharingInfo *sharing_info = layer->sharing_info;
307 sharing_info->add_user();
308 bcd->states[i] = ImplicitSharingInfoAndData{sharing_info, layer->data};
309 }
310 else {
311 const BArrayState *state_reference = nullptr;
312 if (bcd_reference_current && i < bcd_reference_current->states.size()) {
313 state_reference = std::get<BArrayState *>(bcd_reference_current->states[i]);
314 }
315
316 void *data_final = layer->data;
317 size_t data_final_size = size_t(data_len) * stride;
318
319# ifdef USE_ARRAY_STORE_RLE
320 const bool use_rle = um_customdata_layer_use_rle(bcd);
321 uint8_t *data_enc = nullptr;
322 if (use_rle) {
323 /* Store the size in the encoded data (for convenience). */
324 size_t data_enc_extra_size = sizeof(size_t);
325 size_t data_enc_len;
326 data_enc = BLI_array_store_rle_encode(reinterpret_cast<const uint8_t *>(data_final),
327 data_final_size,
328 data_enc_extra_size,
329 &data_enc_len);
 /* The decoded size is written into the reserved prefix so expansion
 * can allocate before decoding. */
330 memcpy(data_enc, &data_final_size, data_enc_extra_size);
331 data_final = data_enc;
332 data_final_size = data_enc_extra_size + data_enc_len;
333 }
334# endif
335
336 bcd->states[i] = {
337 BLI_array_store_state_add(bs, data_final, data_final_size, state_reference),
338 };
339
340# ifdef USE_ARRAY_STORE_RLE
341 if (use_rle) {
342 MEM_freeN(data_enc);
343 }
344# endif
345 }
346 }
347 else {
348 bcd->states[i] = nullptr;
349 }
350 }
351
 /* Whether creating states or clearing only: drop the mesh's own copy of the layer. */
352 if (layer->data) {
353 layer->sharing_info->remove_user_and_delete_if_last();
354 layer->sharing_info = nullptr;
355 layer->data = nullptr;
356 }
357 }
358
359 if (create) {
360 if (bcd_reference_current) {
361 bcd_reference_current = bcd_reference_current->next;
362 }
363 }
364 }
365
366 if (create) {
367 *r_bcd_first = bcd_first;
368 }
369}
370
/* Restore custom-data layer arrays of `cdata` from their compacted/shared states,
 * the inverse of um_arraystore_cd_compact.
 * NOTE(review): the first line of the signature (listing line 375) is elided; the loop
 * below reads `bcd`, so presumably `static void um_arraystore_cd_expand(const
 * BArrayCustomData *bcd, ...)` — confirm upstream. */
376 CustomData *cdata,
377 const size_t data_len)
378{
379 using namespace blender;
380 CustomDataLayer *layer = cdata->layers;
381 while (bcd) {
382 const int stride = CustomData_sizeof(bcd->type);
383 for (int i = 0; i < bcd->states.size(); i++) {
384 BLI_assert(bcd->type == layer->type);
385 if (std::holds_alternative<BArrayState *>(bcd->states[i])) {
386 const BArrayState *state = std::get<BArrayState *>(bcd->states[i]);
387 if (state) {
388 size_t state_len;
 /* NOTE(review): listing line 389 is elided here; `data` used below is
 * presumably `void *data = BLI_array_store_state_data_get_alloc(state,
 * &state_len);` — confirm upstream. */
390
391# ifdef USE_ARRAY_STORE_RLE
392 const bool use_rle = um_customdata_layer_use_rle(bcd);
393 if (use_rle) {
394 /* Store the size in the encoded data (for convenience). */
395 size_t data_enc_extra_size = sizeof(size_t);
396 const uint8_t *data_enc = reinterpret_cast<uint8_t *>(data);
397 size_t data_dec_len;
 /* Decoded size was stored as a size_t prefix by the compact pass. */
398 memcpy(&data_dec_len, data_enc, sizeof(size_t));
399 uint8_t *data_dec = MEM_malloc_arrayN<uint8_t>(data_dec_len, __func__);
400 BLI_array_store_rle_decode(data_enc + data_enc_extra_size,
401 state_len - data_enc_extra_size,
402 data_dec,
403 data_dec_len);
 /* NOTE(review): listing line 404 is elided; presumably frees the encoded
 * buffer (`MEM_freeN(data)`) before reassigning — confirm upstream. */
405 data = static_cast<void *>(data_dec);
406 /* Just for the assert to succeed. */
407 state_len = data_dec_len;
408 }
409# endif
410
411 layer->data = data;
 /* NOTE(review): listing line 412 is elided; presumably assigns
 * `layer->sharing_info` for the newly allocated array — confirm upstream. */
413 BLI_assert(stride * data_len == state_len);
414 UNUSED_VARS_NDEBUG(stride, data_len);
415 }
416 else {
417 layer->data = nullptr;
418 }
419 }
420 else {
 /* Implicitly-shared (dynamic) layer: re-reference the stored data. */
421 ImplicitSharingInfoAndData state = std::get<ImplicitSharingInfoAndData>(bcd->states[i]);
422 layer->data = const_cast<void *>(state.data);
423 layer->sharing_info = state.sharing_info;
424 layer->sharing_info->add_user();
425 }
426 layer++;
427 }
428 bcd = bcd->next;
429 }
430}
431
/* Free the BArrayCustomData list for one domain: release array-store states (or drop
 * implicit-sharing users for dynamic layers) and delete each node. */
432static void um_arraystore_cd_free(BArrayCustomData *bcd, const int bs_index)
433{
434 using namespace blender;
435 while (bcd) {
436 BArrayCustomData *bcd_next = bcd->next;
437 const int stride = CustomData_sizeof(bcd->type);
438 BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride[bs_index], stride);
439 for (int i = 0; i < bcd->states.size(); i++) {
440 if (std::holds_alternative<BArrayState *>(bcd->states[i])) {
441 if (BArrayState *state = std::get<BArrayState *>(bcd->states[i])) {
 /* NOTE(review): listing line 442 is elided; given `bs` is otherwise unused,
 * presumably `BLI_array_store_state_remove(bs, state);` — confirm upstream. */
443 }
444 }
445 else {
446 ImplicitSharingInfoAndData state = std::get<ImplicitSharingInfoAndData>(bcd->states[i]);
447 state.sharing_info->remove_user_and_delete_if_last();
448 }
449 }
450 MEM_delete(bcd);
451 bcd = bcd_next;
452 }
453}
454
/* Compact (or, with create=false, merely strip) every storable part of `um->mesh` into
 * the shared array store, de-duplicating against `um_ref` (the previous step) when
 * given: per-domain custom data, face offsets, shape-key blocks and selection history. */
460static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool create)
461{
462 Mesh *mesh = um->mesh;
463
464 /* Compacting can be time consuming, run in parallel.
465 *
466 * NOTE(@ideasman42): this could be further parallelized with every custom-data layer
467 * running in its own thread. If this is a bottleneck it's worth considering.
468 * At the moment it seems fast enough to split by domain.
469 * Since this is itself a background thread, using too many threads here could
470 * interfere with foreground tasks. */
 /* NOTE(review): listing line 471 is elided; given the lambda list and the element-count
 * threshold below, presumably `blender::threading::parallel_invoke(` — confirm. */
472 4096 < (mesh->verts_num + mesh->edges_num + mesh->corners_num + mesh->faces_num),
473 [&]() {
474 um_arraystore_cd_compact(&mesh->vert_data,
475 mesh->verts_num,
476 create,
477 ARRAY_STORE_INDEX_VERT,
478 um_ref ? um_ref->store.vdata : nullptr,
479 &um->store.vdata);
480 },
481 [&]() {
482 um_arraystore_cd_compact(&mesh->edge_data,
483 mesh->edges_num,
484 create,
485 ARRAY_STORE_INDEX_EDGE,
486 um_ref ? um_ref->store.edata : nullptr,
487 &um->store.edata);
488 },
489 [&]() {
490 um_arraystore_cd_compact(&mesh->corner_data,
491 mesh->corners_num,
492 create,
493 ARRAY_STORE_INDEX_LOOP,
494 um_ref ? um_ref->store.ldata : nullptr,
495 &um->store.ldata);
496 },
497 [&]() {
498 um_arraystore_cd_compact(&mesh->face_data,
499 mesh->faces_num,
500 create,
501 ARRAY_STORE_INDEX_POLY,
502 um_ref ? um_ref->store.pdata : nullptr,
503 &um->store.pdata);
504 },
505 [&]() {
506 if (mesh->face_offset_indices) {
507 BLI_assert(create == (um->store.face_offset_indices == nullptr));
508 if (create) {
509 const BArrayState *state_reference = um_ref ? um_ref->store.face_offset_indices :
510 nullptr;
511 const size_t stride = sizeof(*mesh->face_offset_indices);
512 BArrayStore *bs = BLI_array_store_at_size_ensure(
513 &um_arraystore.bs_stride[ARRAY_STORE_INDEX_POLY_OFFSETS],
514 stride,
515 array_chunk_size_calc(stride));
 /* faces_num + 1: offsets arrays carry a trailing end-sentinel. */
516 um->store.face_offset_indices = BLI_array_store_state_add(bs,
517 mesh->face_offset_indices,
518 size_t(mesh->faces_num + 1) *
519 stride,
520 state_reference);
521 }
522 blender::implicit_sharing::free_shared_data(&mesh->face_offset_indices,
523 &mesh->runtime->face_offsets_sharing_info);
524 }
525 },
526 [&]() {
527 if (mesh->key && mesh->key->totkey) {
528 const size_t stride = mesh->key->elemsize;
 /* NOTE(review): listing lines 529-530 are elided; presumably
 * `BArrayStore *bs = create ? BLI_array_store_at_size_ensure(...` for
 * ARRAY_STORE_INDEX_SHAPE — confirm upstream. */
531 stride,
532 array_chunk_size_calc(stride)) :
533 nullptr;
534 if (create) {
535 um->store.keyblocks = static_cast<BArrayState **>(
536 MEM_mallocN(mesh->key->totkey * sizeof(*um->store.keyblocks), __func__));
537 }
538 KeyBlock *keyblock = static_cast<KeyBlock *>(mesh->key->block.first);
539 for (int i = 0; i < mesh->key->totkey; i++, keyblock = keyblock->next) {
540 if (create) {
541 const BArrayState *state_reference = (um_ref && um_ref->mesh->key &&
542 (i < um_ref->mesh->key->totkey)) ?
543 um_ref->store.keyblocks[i] :
544 nullptr;
545 um->store.keyblocks[i] = BLI_array_store_state_add(
546 bs, keyblock->data, size_t(keyblock->totelem) * stride, state_reference);
547 }
548
549 if (keyblock->data) {
550 MEM_freeN(keyblock->data);
551 keyblock->data = nullptr;
552 }
553 }
554 }
555 },
556 [&]() {
557 if (mesh->mselect && mesh->totselect) {
558 BLI_assert(create == (um->store.mselect == nullptr));
559 if (create) {
560 const BArrayState *state_reference = um_ref ? um_ref->store.mselect : nullptr;
561 const size_t stride = sizeof(*mesh->mselect);
 /* NOTE(review): listing lines 562-563 are elided; presumably
 * `BArrayStore *bs = BLI_array_store_at_size_ensure(...` for
 * ARRAY_STORE_INDEX_MSEL — confirm upstream. */
564 stride,
565 array_chunk_size_calc(stride));
566 um->store.mselect = BLI_array_store_state_add(
567 bs, mesh->mselect, size_t(mesh->totselect) * stride, state_reference);
568 }
569
570 /* keep mesh->totselect for validation */
571 MEM_freeN(mesh->mselect);
572 mesh->mselect = nullptr;
573 }
574 });
575
576 if (create) {
577 if (create) {
578 um_arraystore.users += 1;
579}
580
/* Compact `um` into the shared store, de-duplicating against `um_ref` (may be null);
 * thin wrapper that fixes `create=true` on um_arraystore_compact_ex. */
584static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
585{
586 um_arraystore_compact_ex(um, um_ref, true);
587}
588
/* Compact `um` and, in DEBUG_PRINT/DEBUG_TIME builds, report memory de-duplication
 * ratios (overall and for this step) and timing around the compaction. */
589static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
590{
591# ifdef DEBUG_PRINT
592 size_t size_expanded_prev = 0, size_compacted_prev = 0;
593
594 for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
595 size_t size_expanded_prev_iter, size_compacted_prev_iter;
 /* NOTE(review): listing line 596 is elided; presumably
 * `BLI_array_store_at_size_calc_memory_usage(` — confirm upstream. */
597 &um_arraystore.bs_stride[bs_index], &size_expanded_prev_iter, &size_compacted_prev_iter);
598 size_expanded_prev += size_expanded_prev_iter;
599 size_compacted_prev += size_compacted_prev_iter;
600 }
601# endif
602
603# ifdef DEBUG_TIME
604 TIMEIT_START(mesh_undo_compact);
605# endif
606
607 um_arraystore_compact(um, um_ref);
608
609# ifdef DEBUG_TIME
610 TIMEIT_END(mesh_undo_compact);
611# endif
612
613# ifdef DEBUG_PRINT
614 {
615 size_t size_expanded = 0, size_compacted = 0;
616
617 for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
618 size_t size_expanded_iter, size_compacted_iter;
 /* NOTE(review): listing line 619 is elided; same memory-usage query as above. */
620 &um_arraystore.bs_stride[bs_index], &size_expanded_iter, &size_compacted_iter);
621 size_expanded += size_expanded_iter;
622 size_compacted += size_compacted_iter;
623 }
624
 /* -1.0 signals "no expanded data" to avoid division by zero. */
625 const double percent_total = size_expanded ?
626 ((double(size_compacted) / double(size_expanded)) * 100.0) :
627 -1.0;
628
629 size_t size_expanded_step = size_expanded - size_expanded_prev;
630 size_t size_compacted_step = size_compacted - size_compacted_prev;
631 const double percent_step = size_expanded_step ?
632 ((double(size_compacted_step) / double(size_expanded_step)) *
633 100.0) :
634 -1.0;
635
636 printf("overall memory use: %.8f%% of expanded size\n", percent_total);
637 printf("step memory use: %.8f%% of expanded size\n", percent_step);
638 }
639# endif
640}
641
642# ifdef USE_ARRAY_STORE_THREAD
643
/* Task payload for background compaction.
 * NOTE(review): the struct header and `um` member (listing lines 644-645) are elided;
 * usage below shows members `um` and `um_ref` — confirm upstream. */
646 const UndoMesh *um_ref; /* can be nullptr */
647};
/* TaskPool callback: compact one undo step on the background thread; `taskdata` is a
 * UMArrayData, freed by the pool (pushed with free_taskdata=true). */
648static void um_arraystore_compact_cb(TaskPool *__restrict /*pool*/, void *taskdata)
649{
650 UMArrayData *um_data = static_cast<UMArrayData *>(taskdata);
651 um_arraystore_compact_with_info(um_data->um, um_data->um_ref);
652}
653
654# endif /* USE_ARRAY_STORE_THREAD */
655
/* Strip array data from `um->mesh` without creating array-store states (create=false
 * pass of um_arraystore_compact_ex).
 * NOTE(review): the signature (listing lines 656-659) is elided; presumably
 * `static void um_arraystore_expand_clear(UndoMesh *um)` — confirm upstream. */
660{
661 um_arraystore_compact_ex(um, nullptr, false);
662}
663
/* Re-materialize `um->mesh`'s arrays from the shared store (custom data, shape keys,
 * face offsets, selection history) so the mesh can be used normally again.
 * NOTE(review): the signature (listing lines 663-664) is elided; presumably
 * `static void um_arraystore_expand(UndoMesh *um)` — confirm upstream. */
665{
666 Mesh *mesh = um->mesh;
667
 /* NOTE(review): listing lines 668-671 are elided; presumably the per-domain
 * `um_arraystore_cd_expand(...)` calls for vert/edge/corner/face data — confirm. */
672
673 if (um->store.keyblocks) {
674 const size_t stride = mesh->key->elemsize;
675 KeyBlock *keyblock = static_cast<KeyBlock *>(mesh->key->block.first);
676 for (int i = 0; i < mesh->key->totkey; i++, keyblock = keyblock->next) {
677 const BArrayState *state = um->store.keyblocks[i];
678 size_t state_len;
679 keyblock->data = BLI_array_store_state_data_get_alloc(state, &state_len);
680 BLI_assert(keyblock->totelem == (state_len / stride));
681 UNUSED_VARS_NDEBUG(stride);
682 }
683 }
684
685 if (um->store.face_offset_indices) {
686 const size_t stride = sizeof(*mesh->face_offset_indices);
 /* NOTE(review): listing line 687 is elided; presumably fetches the stored state,
 * e.g. `const BArrayState *state = um->store.face_offset_indices;` — confirm. */
688 size_t state_len;
689 mesh->face_offset_indices = static_cast<int *>(
 /* NOTE(review): listing line 690 is elided; presumably
 * `BLI_array_store_state_data_get_alloc(state, &state_len));` — confirm. */
691 mesh->runtime->face_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
692 mesh->face_offset_indices);
693 BLI_assert((mesh->faces_num + 1) == (state_len / stride));
694 UNUSED_VARS_NDEBUG(stride);
695 }
696 if (um->store.mselect) {
697 const size_t stride = sizeof(*mesh->mselect);
698 const BArrayState *state = um->store.mselect;
699 size_t state_len;
700 mesh->mselect = static_cast<MSelect *>(
 /* NOTE(review): listing line 701 is elided; presumably the same
 * `BLI_array_store_state_data_get_alloc(state, &state_len));` call — confirm. */
702 BLI_assert(mesh->totselect == (state_len / stride));
703 UNUSED_VARS_NDEBUG(stride);
704 }
705}
706
/* Release everything `um` holds in the shared store; when the last user is gone, clear
 * all array stores and (when threaded) the task pool.
 * NOTE(review): the signature (listing lines 706-707) is elided; presumably
 * `static void um_arraystore_free(UndoMesh *um)` — confirm upstream. */
708{
709 Mesh *mesh = um->mesh;
710
 /* NOTE(review): listing lines 711-714 are elided; presumably the per-domain
 * `um_arraystore_cd_free(...)` calls — confirm upstream. */
715
716 if (um->store.keyblocks) {
717 const size_t stride = mesh->key->elemsize;
 /* NOTE(review): listing line 718 is elided; presumably
 * `BArrayStore *bs = BLI_array_store_at_size_get(` — confirm upstream. */
719 &um_arraystore.bs_stride[ARRAY_STORE_INDEX_SHAPE], stride);
720 for (int i = 0; i < mesh->key->totkey; i++) {
 /* NOTE(review): listing lines 721-722 are elided; presumably removes each
 * state via `BLI_array_store_state_remove(bs, ...)` — confirm upstream. */
723 }
 /* NOTE(review): listing line 724 is elided; presumably frees the
 * `um->store.keyblocks` array itself — confirm upstream. */
725 um->store.keyblocks = nullptr;
726 }
727
728 if (um->store.face_offset_indices) {
729 const size_t stride = sizeof(*mesh->face_offset_indices);
 /* NOTE(review): listing lines 730-733 are elided; presumably the matching
 * store lookup + state removal for POLY_OFFSETS — confirm upstream. */
734 um->store.face_offset_indices = nullptr;
735 }
736 if (um->store.mselect) {
737 const size_t stride = sizeof(*mesh->mselect);
 /* NOTE(review): listing line 738 is elided; presumably the MSEL store lookup. */
739 stride);
 /* NOTE(review): listing lines 740-741 are elided; presumably the state removal. */
742 um->store.mselect = nullptr;
743 }
744
745 um_arraystore.users -= 1;
746
747 BLI_assert(um_arraystore.users >= 0);
748
749 if (um_arraystore.users == 0) {
750# ifdef DEBUG_PRINT
751 printf("mesh undo store: freeing all data!\n");
752# endif
753 for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
754 BLI_array_store_at_size_clear(&um_arraystore.bs_stride[bs_index]);
755 }
756# ifdef USE_ARRAY_STORE_THREAD
 /* NOTE(review): listing line 757 is elided; presumably
 * `BLI_task_pool_free(um_arraystore.task_pool);` — confirm upstream. */
758 um_arraystore.task_pool = nullptr;
759# endif
760 }
761}
762
764
765/* -------------------------------------------------------------------- */
768
/* For each edit-mode object, find its most recent UndoMesh (matched by the mesh's
 * `id.session_uid`) among previously stored steps, to use as de-duplication reference.
 * Returns a calloc'd array parallel to `object` (entries may be null), or null when no
 * object had a previous step (caller must free a non-null result).
 * NOTE(review): the signature (listing lines 769-780) is elided; from the call site it
 * is presumably `static UndoMesh **mesh_undostep_reference_elems_from_objects(
 * Object **object, int object_len)` — confirm upstream. */
781{
782 /* Map: `Mesh.id.session_uid` -> `UndoMesh`. */
783 GHash *uuid_map = BLI_ghash_ptr_new_ex(__func__, object_len);
784 UndoMesh **um_references = MEM_calloc_arrayN<UndoMesh *>(object_len, __func__);
785 for (int i = 0; i < object_len; i++) {
786 const Mesh *mesh = static_cast<const Mesh *>(object[i]->data);
787 BLI_ghash_insert(uuid_map, POINTER_FROM_INT(mesh->id.session_uid), &um_references[i]);
788 }
789 int uuid_map_len = object_len;
790
791 /* Loop backwards over all previous mesh undo data until either:
792 * - All elements have been found (where `um_references` we'll have every element set).
793 * - There are no undo steps left to look for. */
794 UndoMesh *um_iter = static_cast<UndoMesh *>(um_arraystore.local_links.last);
795 while (um_iter && (uuid_map_len != 0)) {
796 UndoMesh **um_p;
797 if ((um_p = static_cast<UndoMesh **>(BLI_ghash_popkey(
798 uuid_map, POINTER_FROM_INT(um_iter->mesh->id.session_uid), nullptr))))
799 {
800 *um_p = um_iter;
801 uuid_map_len--;
802 }
803 um_iter = um_iter->local_prev;
804 }
805 BLI_assert(uuid_map_len == BLI_ghash_len(uuid_map));
806 BLI_ghash_free(uuid_map, nullptr, nullptr);
 /* Nothing matched: return null rather than an all-null array. */
807 if (uuid_map_len == object_len) {
808 MEM_freeN(um_references);
809 um_references = nullptr;
810 }
811 return um_references;
812}
813
815
816#endif /* USE_ARRAY_STORE */
817
818/* for callbacks */
819/* undo simply makes copies of a bmesh */
/* Build an UndoMesh snapshot from an edit-mesh: copy the BMesh into `um->mesh`
 * (plus a localized shape-key copy and the vertex-group list), then schedule (or run)
 * array-store compaction against `um_ref`.
 * NOTE(review): the signature head (listing lines 820-828) is elided; presumably
 * `static UndoMesh *undomesh_from_editmesh(UndoMesh *um, ...)` — confirm upstream. */
829 BMEditMesh *em,
830 Key *key,
831 const ListBase *vertex_group_names,
832 const int vertex_group_active_index,
833 UndoMesh *um_ref)
834{
 /* NOTE(review): listing line 835 is elided; `um` is used below without a visible
 * declaration/initialization — confirm upstream. */
836#ifdef USE_ARRAY_STORE_THREAD
837 /* changes this waits is low, but must have finished */
838 if (um_arraystore.task_pool) {
 /* NOTE(review): listing line 839 is elided; presumably
 * `BLI_task_pool_work_and_wait(um_arraystore.task_pool);` — confirm upstream. */
840 }
841#endif
842
 /* NOTE(review): listing lines 843-844 are elided; presumably allocate/initialize
 * `um->mesh` before it is written below — confirm upstream. */
844
845 /* make sure shape keys work */
846 if (key != nullptr) {
847 um->mesh->key = (Key *)BKE_id_copy_ex(
848 nullptr, &key->id, nullptr, LIB_ID_COPY_LOCALIZE | LIB_ID_COPY_NO_ANIMDATA);
849 }
850 else {
851 um->mesh->key = nullptr;
852 }
853
854 /* Uncomment for troubleshooting. */
855 if (false) {
856 BM_mesh_is_valid(em->bm);
857
858 /* Ensure UV's are in a valid state. */
859 if (em->bm->uv_select_sync_valid) {
860 const int cd_loop_uv_offset = CustomData_get_offset(&em->bm->ldata, CD_PROP_FLOAT2);
861 bool check_flush = true;
862 /* This should check the sticky mode too (currently the scene isn't available). */
863 bool check_contiguous = (cd_loop_uv_offset != -1);
 /* NOTE(review): listing line 864 is elided; `info` passed below is declared
 * there — confirm upstream. */
865 bool is_valid = BM_mesh_uvselect_is_valid(
866 em->bm, cd_loop_uv_offset, true, check_flush, check_contiguous, &info);
867 if (is_valid == false) {
868 fprintf(stderr, "ERROR: UV sync check failed!\n");
869 }
870 // BLI_assert(is_valid);
871 }
872 }
873
874 CustomData_MeshMasks cd_mask_extra{};
875 cd_mask_extra.vmask = CD_MASK_SHAPE_KEYINDEX;
 /* NOTE(review): listing line 876 is elided; `params` used below is presumably
 * `BMeshToMeshParams params{};` — confirm upstream. */
877 /* Undo code should not be manipulating 'G_MAIN->object' hooks/vertex-parent. */
878 params.calc_object_remap = false;
879 params.update_shapekey_indices = false;
880 params.cd_mask_extra = cd_mask_extra;
881 params.active_shapekey_to_mvert = true;
882 BM_mesh_bm_to_me(nullptr, em->bm, um->mesh, &params);
883 BKE_defgroup_copy_list(&um->mesh->vertex_group_names, vertex_group_names);
884 um->mesh->vertex_group_active_index = vertex_group_active_index;
885
886 um->selectmode = em->selectmode;
887 um->shapenr = em->bm->shapenr;
888
889#ifdef USE_ARRAY_STORE
890 {
891 /* Add ourselves. */
892 BLI_addtail(&um_arraystore.local_links, um);
893
894# ifdef USE_ARRAY_STORE_THREAD
895 if (um_arraystore.task_pool == nullptr) {
 /* NOTE(review): listing line 896 is elided; presumably creates the pool via
 * `BLI_task_pool_create_background(...)` — confirm upstream. */
897 }
898
899 UMArrayData *um_data = MEM_mallocN<UMArrayData>(__func__);
900 um_data->um = um;
901 um_data->um_ref = um_ref;
902
903 BLI_task_pool_push(um_arraystore.task_pool, um_arraystore_compact_cb, um_data, true, nullptr);
904# else
 /* NOTE(review): listing line 905 is elided; presumably the synchronous
 * `um_arraystore_compact_with_info(um, um_ref);` fallback — confirm upstream. */
906# endif
907 }
908#else
909 UNUSED_VARS(um_ref);
910#endif
911
912 return um;
913}
914
/* Restore an edit-mesh from an UndoMesh snapshot: expand the stored arrays, rebuild a
 * BMesh from `um->mesh`, restore vertex groups / select mode / active shape key, then
 * strip the expanded arrays again.
 * NOTE(review): the signature head (listing lines 915-923) is elided; presumably
 * `static void undomesh_to_editmesh(UndoMesh *um, ...)` — confirm upstream. */
924 BMEditMesh *em,
925 ListBase *vertex_group_names,
926 int *vertex_group_active_index)
927{
928 BMEditMesh *em_tmp;
929 BMesh *bm;
930
931#ifdef USE_ARRAY_STORE
932# ifdef USE_ARRAY_STORE_THREAD
933 /* changes this waits is low, but must have finished */
 /* NOTE(review): listing line 934 is elided; presumably
 * `BLI_task_pool_work_and_wait(um_arraystore.task_pool);` — confirm upstream. */
935# endif
936
937# ifdef DEBUG_TIME
938 TIMEIT_START(mesh_undo_expand);
939# endif
940
 /* NOTE(review): listing line 941 is elided; presumably the
 * `um_arraystore_expand(um);` call being timed — confirm upstream. */
942
943# ifdef DEBUG_TIME
944 TIMEIT_END(mesh_undo_expand);
945# endif
946#endif /* USE_ARRAY_STORE */
947
948 const BMAllocTemplate allocsize = BMALLOC_TEMPLATE_FROM_ME(um->mesh);
949
950 em->bm->shapenr = um->shapenr;
951
 /* NOTE(review): listing line 952 is elided; presumably frees the current
 * edit-mesh data (e.g. `EDBM_mesh_free_data(em);`) — confirm upstream. */
953
954 BMeshCreateParams create_params{};
955 create_params.use_toolflags = true;
956 bm = BM_mesh_create(&allocsize, &create_params);
957
958 BMeshFromMeshParams convert_params{};
959 /* Handled with tessellation. */
960 convert_params.calc_face_normal = false;
961 convert_params.calc_vert_normal = false;
962 convert_params.active_shapekey = um->shapenr;
963 BM_mesh_bm_from_me(bm, um->mesh, &convert_params);
964 BLI_freelistN(vertex_group_names);
965 BKE_defgroup_copy_list(vertex_group_names, &um->mesh->vertex_group_names);
966 *vertex_group_active_index = um->mesh->vertex_group_active_index;
967
 /* Shallow-copy the freshly created edit-mesh over the caller's, then delete the
 * temporary wrapper (the BMesh itself is kept via `em`). */
968 em_tmp = BKE_editmesh_create(bm);
969 *em = *em_tmp;
970
971 /* Calculate face normals and tessellation at once since it's multi-threaded. */
 /* NOTE(review): listing line 972 is elided; presumably
 * `BKE_editmesh_looptris_and_normals_calc(em);` — confirm upstream. */
973
974 em->selectmode = um->selectmode;
975 bm->selectmode = um->selectmode;
976
977 bm->spacearr_dirty = BM_SPACEARR_DIRTY_ALL;
978
979 MEM_delete(em_tmp);
980
981#ifdef USE_ARRAY_STORE
 /* NOTE(review): listing line 982 is elided; presumably re-strips the expanded
 * arrays (the `create=false` pass) — confirm upstream. */
983#endif
984}
985
/* Free everything owned by one UndoMesh element: release its array-store references,
 * unlink it from the live-steps list, then free the localized key and mesh IDs.
 * NOTE(review): the signature (listing line 986) is elided; from the call in the
 * step-free callback it is presumably `static void undomesh_free_data(UndoMesh *um)`. */
987{
988 Mesh *mesh = um->mesh;
989
990#ifdef USE_ARRAY_STORE
991
992# ifdef USE_ARRAY_STORE_THREAD
993 /* Chances this waits is low, but must have finished. */
 /* NOTE(review): listing line 994 is elided; presumably
 * `BLI_task_pool_work_and_wait(um_arraystore.task_pool);` — confirm upstream. */
995# endif
996
997 /* We need to expand so any allocations in custom-data are freed with the mesh. */
 /* NOTE(review): listing line 998 is elided; presumably the expand call referred to
 * by the comment above — confirm upstream. */
999
1000 BLI_assert(BLI_findindex(&um_arraystore.local_links, um) != -1);
1001 BLI_remlink(&um_arraystore.local_links, um);
1002
 /* NOTE(review): listing line 1003 is elided; presumably the
 * `um_arraystore_free(um);` call — confirm upstream. */
1004#endif
1005
1006 if (mesh->key) {
1007 BKE_id_free(nullptr, mesh->key);
1008 mesh->key = nullptr;
1009 }
1010
1011 BKE_id_free(nullptr, mesh);
1012 um->mesh = nullptr;
1013}
1014
/* Return the active edit-mode mesh object from the context, or null when the active
 * object is not a mesh in edit-mode.
 * NOTE(review): the signature (listing line 1015) is elided; from the poll callback's
 * use it is presumably `static Object *editmesh_object_from_context(bContext *C)`. */
1016{
1017 Scene *scene = CTX_data_scene(C);
1018 ViewLayer *view_layer = CTX_data_view_layer(C);
1019 BKE_view_layer_synced_ensure(scene, view_layer);
1020 Object *obedit = BKE_view_layer_edit_object_get(view_layer);
1021 if (obedit && obedit->type == OB_MESH) {
1022 const Mesh *mesh = static_cast<Mesh *>(obedit->data);
1023 if (mesh->runtime->edit_mesh != nullptr) {
1024 return obedit;
1025 }
1026 }
1027 return nullptr;
1028}
1029
1031
1032/* -------------------------------------------------------------------- */
1037
1039 UndoRefID_Object obedit_ref;
1041};
1042
1053
1063
/* Undo-system poll: this undo type applies only while a mesh object is in edit-mode.
 * NOTE(review): the signature (listing line 1064) is elided; presumably
 * `static bool mesh_undosys_poll(bContext *C)` — confirm upstream. */
1065{
1066 return editmesh_object_from_context(C) != nullptr;
1067}
1068
/* Undo-system encode: snapshot scene selection settings plus every edit-mode mesh
 * object into the step, re-using prior steps as de-duplication references.
 * NOTE(review): the signature (listing line 1069) is elided; presumably
 * `static bool mesh_undosys_step_encode(bContext *C, Main *bmain, UndoStep *us_p)`. */
1070{
1071 MeshUndoStep *us = (MeshUndoStep *)us_p;
1072
1073 /* Important not to use the 3D view when getting objects because all objects
1074 * outside of this list will be moved out of edit-mode when reading back undo steps. */
1075 Scene *scene = CTX_data_scene(C);
1076 ViewLayer *view_layer = CTX_data_view_layer(C);
1077 const ToolSettings *ts = scene->toolsettings;
 /* NOTE(review): listing line 1078 is elided; `objects` used below is presumably
 * gathered via `ED_undo_editmode_objects_from_view_layer(scene, view_layer)`. */
1079
1080 us->scene_ref.ptr = scene;
1081 us->elems = MEM_calloc_arrayN<MeshUndoStep_Elem>(objects.size(), __func__);
1082 us->elems_len = objects.size();
1083
1084 UndoMesh **um_references = nullptr;
1085
1086#ifdef USE_ARRAY_STORE
1087 um_references = mesh_undostep_reference_elems_from_objects(objects.data(), objects.size());
1088#endif
1089
 /* Record tool settings so decode can restore a matching selection mode. */
1090 {
1091 MeshUndoStep_SceneData &scene_data = us->scene_data;
1092 scene_data.selectmode = ts->selectmode;
1093 scene_data.uv_selectmode = ts->uv_selectmode;
1094 scene_data.uv_sticky = ts->uv_sticky;
1095 scene_data.uv_flag = ts->uv_flag;
1096 }
1097
1098 for (uint i = 0; i < objects.size(); i++) {
1099 Object *obedit = objects[i];
1100 MeshUndoStep_Elem *elem = &us->elems[i];
1101
1102 elem->obedit_ref.ptr = obedit;
1103 Mesh *mesh = static_cast<Mesh *>(elem->obedit_ref.ptr->data);
1104 BMEditMesh *em = mesh->runtime->edit_mesh.get();
 /* NOTE(review): listing line 1105 is elided; presumably the head of the
 * snapshot call (e.g. `undomesh_from_editmesh(&elem->data,`) — confirm. */
1106 em,
1107 mesh->key,
1108 &mesh->vertex_group_names,
 /* NOTE(review): listing line 1109 is elided; presumably passes
 * `mesh->vertex_group_active_index,` — confirm upstream. */
1110 um_references ? um_references[i] : nullptr);
1111
1112 em->needs_flush_to_id = 1;
1113 us->step.data_size += elem->data.undo_size;
1114
1115#ifdef USE_ARRAY_STORE
 /* NOTE(review): listing line 1116 is elided (presumably a comment). The stored
 * mesh inherits the original's session UID for later reference matching. */
1117 elem->data.mesh->id.session_uid = mesh->id.session_uid;
1118#endif
1119 }
1120
1121 if (um_references != nullptr) {
1122 MEM_freeN(um_references);
1123 }
1124
1125 bmain->is_memfile_undo_flush_needed = true;
1126
1127 return true;
1128}
1129
/* Undo-system decode: restore every stored mesh into its object's edit-mesh and bring
 * scene selection settings back in sync with the step.
 * NOTE(review): the first line of the signature (listing line 1130) is elided;
 * presumably `static void mesh_undosys_step_decode(` — confirm upstream. */
1131 bContext *C, Main *bmain, UndoStep *us_p, const eUndoStepDir /*dir*/, bool /*is_final*/)
1132{
1133 MeshUndoStep *us = (MeshUndoStep *)us_p;
1134 Scene *scene = CTX_data_scene(C);
1135 ViewLayer *view_layer = CTX_data_view_layer(C);
1136
 /* NOTE(review): listing lines 1137 and 1139 are elided; from the argument lists,
 * presumably a scene/view-layer validation call followed by
 * `ED_undo_object_editmode_restore_helper(` — confirm upstream. */
1138 CTX_wm_manager(C), us->scene_ref.ptr, &scene, &view_layer);
1140 scene, view_layer, &us->elems[0].obedit_ref.ptr, us->elems_len, sizeof(*us->elems));
1141
 /* NOTE(review): listing line 1142 is elided (presumably an assertion or comment). */
1143
1144 for (uint i = 0; i < us->elems_len; i++) {
1145 MeshUndoStep_Elem *elem = &us->elems[i];
1146 Object *obedit = elem->obedit_ref.ptr;
1147 Mesh *mesh = static_cast<Mesh *>(obedit->data);
1148 if (mesh->runtime->edit_mesh == nullptr) {
1149 /* Should never fail, may not crash but can give odd behavior. */
1150 CLOG_ERROR(&LOG,
1151 "name='%s', failed to enter edit-mode for object '%s', undo state invalid",
1152 us_p->name,
1153 obedit->id.name);
1154 continue;
1155 }
1156 BMEditMesh *em = mesh->runtime->edit_mesh.get();
 /* NOTE(review): listing line 1157 is elided; presumably the head of the restore
 * call (e.g. `undomesh_to_editmesh(`) matching the argument list below. */
1158 &elem->data, em, &mesh->vertex_group_names, &mesh->vertex_group_active_index);
1159
1160 obedit->shapenr = em->bm->shapenr;
1161
1162 em->needs_flush_to_id = 1;
 /* NOTE(review): listing line 1163 is elided (presumably blank/comment). */
1164 /* The object update tag is necessary to cause modifiers to reevaluate after vertex group
1165 * changes. */
 /* NOTE(review): listing line 1166 is elided; presumably
 * `DEG_id_tag_update(&obedit->id, ID_RECALC_GEOMETRY);` — confirm upstream. */
1167 }
1168
1169 /* The first element is always active */
 /* NOTE(review): listing line 1170 is elided; presumably
 * `ED_undo_object_set_active_or_warn(` matching the argument list below. */
1171 scene, view_layer, us->elems[0].obedit_ref.ptr, us_p->name, &LOG);
1172
1173 /* Check after setting active (unless undoing into another scene). */
 /* NOTE(review): listing line 1174 is elided; presumably an assertion such as
 * `BLI_assert(BKE_object_is_in_editmode(...))` — confirm upstream. */
1175
1176 {
1177 /* Follow settings related to selection.
1178 * While other flags could be included too: it's important the user doesn't
1179 * undo into a state where the scene settings would show a different selection
1180 * to the selection the user was previously editing. */
1181 constexpr char uv_flag_undo = UV_FLAG_SELECT_SYNC | UV_FLAG_SELECT_ISLAND;
1182
1183 ToolSettings *ts = scene->toolsettings;
1184 const MeshUndoStep_SceneData &scene_data = us->scene_data;
1185 ts->selectmode = scene_data.selectmode;
1186 ts->uv_selectmode = scene_data.uv_selectmode;
1187 ts->uv_sticky = scene_data.uv_sticky;
 /* Only the selection-related UV flags are restored; other bits keep their
 * current values. */
1188 ts->uv_flag = (ts->uv_flag & ~uv_flag_undo) | (scene_data.uv_flag & uv_flag_undo);
1189 }
1190
1191 bmain->is_memfile_undo_flush_needed = true;
1192
 /* NOTE(review): listing line 1193 is elided; presumably a notifier/redraw call
 * before the function closes — confirm upstream. */
1194}
1195
/* Undo-system free: release every element's UndoMesh data, then the element array.
 * NOTE(review): the signature (listing line 1196) is elided; presumably
 * `static void mesh_undosys_step_free(UndoStep *us_p)` — confirm upstream. */
1197{
1198 MeshUndoStep *us = (MeshUndoStep *)us_p;
1199
1200 for (uint i = 0; i < us->elems_len; i++) {
1201 MeshUndoStep_Elem *elem = &us->elems[i];
1202 undomesh_free_data(&elem->data);
1203 }
1204 MEM_freeN(us->elems);
1205}
1206
/* Undo-system ID-reference visitor: report the scene reference and every object
 * reference so the undo system can remap them on file reload.
 * NOTE(review): the first line of the signature (listing line 1207) is elided;
 * presumably `static void mesh_undosys_foreach_ID_ref(UndoStep *us_p,` — confirm. */
1208 UndoTypeForEachIDRefFn foreach_ID_ref_fn,
1209 void *user_data)
1210{
1211 MeshUndoStep *us = (MeshUndoStep *)us_p;
1212
1213 foreach_ID_ref_fn(user_data, ((UndoRefID *)&us->scene_ref));
1214 for (uint i = 0; i < us->elems_len; i++) {
1215 MeshUndoStep_Elem *elem = &us->elems[i];
1216 foreach_ID_ref_fn(user_data, ((UndoRefID *)&elem->obedit_ref));
1217 }
1218}
1219
1234
Scene * CTX_data_scene(const bContext *C)
wmWindowManager * CTX_wm_manager(const bContext *C)
ViewLayer * CTX_data_view_layer(const bContext *C)
CustomData interface, see also DNA_customdata_types.h.
int CustomData_sizeof(eCustomDataType type)
int CustomData_get_offset(const CustomData *data, eCustomDataType type)
bool CustomData_layertype_is_dynamic(eCustomDataType type)
support for deformation groups and hooks.
void BKE_defgroup_copy_list(ListBase *outbase, const ListBase *inbase)
Definition deform.cc:73
BMEditMesh * BKE_editmesh_create(BMesh *bm)
Definition editmesh.cc:32
void BKE_editmesh_looptris_and_normals_calc(BMEditMesh *em)
Definition editmesh.cc:95
void BKE_view_layer_synced_ensure(const Scene *scene, ViewLayer *view_layer)
Object * BKE_view_layer_edit_object_get(const ViewLayer *view_layer)
void BKE_id_free(Main *bmain, void *idv)
ID * BKE_id_copy_ex(Main *bmain, const ID *id, ID **new_id_p, int flag)
Definition lib_id.cc:777
@ LIB_ID_COPY_LOCALIZE
@ LIB_ID_COPY_NO_ANIMDATA
General operations, lookup, etc. for blender objects.
bool BKE_object_is_in_editmode(const Object *ob)
@ UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE
void(*)(void *user_data, UndoRefID *id_ref) UndoTypeForEachIDRefFn
eUndoStepDir
Efficient in-memory storage of multiple similar arrays.
BArrayState * BLI_array_store_state_add(BArrayStore *bs, const void *data, size_t data_len, const BArrayState *state_reference)
void BLI_array_store_state_remove(BArrayStore *bs, BArrayState *state)
void * BLI_array_store_state_data_get_alloc(const BArrayState *state, size_t *r_data_len)
void BLI_array_store_rle_decode(const uint8_t *data_enc, const size_t data_enc_len, void *data_dec_v, const size_t data_dec_len)
uint8_t * BLI_array_store_rle_encode(const uint8_t *data_dec, size_t data_dec_len, size_t data_enc_extra_size, size_t *r_data_enc_len)
struct BArrayStore * BLI_array_store_at_size_ensure(struct BArrayStore_AtSize *bs_stride, int stride, int chunk_size)
void BLI_array_store_at_size_clear(struct BArrayStore_AtSize *bs_stride)
void BLI_array_store_at_size_calc_memory_usage(const struct BArrayStore_AtSize *bs_stride, size_t *r_size_expanded, size_t *r_size_compacted)
struct BArrayStore * BLI_array_store_at_size_get(struct BArrayStore_AtSize *bs_stride, int stride)
Generic array manipulation API.
#define BLI_array_is_zeroed(arr, arr_len)
#define BLI_assert(a)
Definition BLI_assert.h:46
void * BLI_ghash_popkey(GHash *gh, const void *key, GHashKeyFreeFP keyfreefp) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.cc:802
GHash * BLI_ghash_ptr_new_ex(const char *info, unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
unsigned int BLI_ghash_len(const GHash *gh) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.cc:702
void BLI_ghash_insert(GHash *gh, void *key, void *val)
Definition BLI_ghash.cc:707
void BLI_ghash_free(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition BLI_ghash.cc:860
int BLI_findindex(const ListBase *listbase, const void *vlink) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:586
void void BLI_freelistN(ListBase *listbase) ATTR_NONNULL(1)
Definition listbase.cc:497
void BLI_addtail(ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:111
void BLI_remlink(ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:131
MINLINE int power_of_2_max_i(int n)
unsigned int uint
@ TASK_PRIORITY_LOW
Definition BLI_task.h:52
void BLI_task_pool_work_and_wait(TaskPool *pool)
Definition task_pool.cc:535
TaskPool * BLI_task_pool_create_background(void *userdata, eTaskPriority priority)
Definition task_pool.cc:489
void BLI_task_pool_free(TaskPool *pool)
Definition task_pool.cc:521
void BLI_task_pool_push(TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskFreeFunction freedata)
Definition task_pool.cc:526
Utility defines for timing/benchmarks.
#define TIMEIT_START(var)
#define TIMEIT_END(var)
#define UNUSED_VARS(...)
#define POINTER_FROM_INT(i)
#define UNUSED_VARS_NDEBUG(...)
#define CLOG_ERROR(clg_ref,...)
Definition CLG_log.h:188
void DEG_id_tag_update(ID *id, unsigned int flags)
@ ID_RECALC_GEOMETRY
Definition DNA_ID.h:1074
#define CD_MASK_SHAPE_KEYINDEX
@ CD_PROP_FLOAT2
Object is a sort of wrapper for general info.
@ OB_MESH
@ UV_FLAG_SELECT_ISLAND
@ UV_FLAG_SELECT_SYNC
void EDBM_mesh_free_data(BMEditMesh *em)
void ED_undo_object_set_active_or_warn(Scene *scene, ViewLayer *view_layer, Object *ob, const char *info, CLG_LogRef *log)
Definition ed_undo.cc:775
void ED_undo_object_editmode_restore_helper(Scene *scene, ViewLayer *view_layer, Object **object_array, uint object_array_len, uint object_array_stride)
Definition ed_undo.cc:811
blender::Vector< Object * > ED_undo_editmode_objects_from_view_layer(const Scene *scene, ViewLayer *view_layer)
Definition ed_undo.cc:856
void ED_undo_object_editmode_validate_scene_from_windows(wmWindowManager *wm, const Scene *scene_ref, Scene **scene_p, ViewLayer **view_layer_p)
Definition ed_undo.cc:794
Read Guarded memory(de)allocation.
#define C
Definition RandGen.cpp:29
#define NC_GEOM
Definition WM_types.hh:393
#define ND_DATA
Definition WM_types.hh:509
@ BM_SPACEARR_DIRTY_ALL
BMesh const char void * data
BMesh * bm
BMesh * BM_mesh_create(const BMAllocTemplate *allocsize, const BMeshCreateParams *params)
BMesh Make Mesh.
#define BMALLOC_TEMPLATE_FROM_ME(...)
void BM_mesh_bm_from_me(BMesh *bm, const Mesh *mesh, const BMeshFromMeshParams *params)
void BM_mesh_bm_to_me(Main *bmain, BMesh *bm, Mesh *mesh, const BMeshToMeshParams *params)
bool BM_mesh_is_valid(BMesh *bm)
bool BM_mesh_uvselect_is_valid(BMesh *bm, const int cd_loop_uv_offset, const bool check_sync, const bool check_flush, const bool check_contiguous, UVSelectValidateInfo *info_p)
int64_t size() const
Definition BLI_array.hh:256
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:419
int64_t size() const
ListBase local_links
int users
BArrayStore_AtSize bs_stride
static Object * editmesh_object_from_context(bContext *C)
static void um_arraystore_expand(UndoMesh *um)
static void um_arraystore_compact_cb(TaskPool *__restrict, void *taskdata)
static void um_arraystore_free(UndoMesh *um)
static void undomesh_free_data(UndoMesh *um)
static void * undomesh_from_editmesh(UndoMesh *um, BMEditMesh *em, Key *key, const ListBase *vertex_group_names, const int vertex_group_active_index, UndoMesh *um_ref)
static void undomesh_to_editmesh(UndoMesh *um, BMEditMesh *em, ListBase *vertex_group_names, int *vertex_group_active_index)
static void um_arraystore_cd_free(BArrayCustomData *bcd, const int bs_index)
#define ARRAY_CHUNK_NUM_MIN
static bool mesh_undosys_step_encode(bContext *C, Main *bmain, UndoStep *us_p)
#define ARRAY_CHUNK_SIZE_IN_BYTES
void ED_mesh_undosys_type(UndoType *ut)
static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
@ ARRAY_STORE_INDEX_LOOP
@ ARRAY_STORE_INDEX_VERT
@ ARRAY_STORE_INDEX_POLY_OFFSETS
@ ARRAY_STORE_INDEX_SHAPE
@ ARRAY_STORE_INDEX_MSEL
@ ARRAY_STORE_INDEX_EDGE
@ ARRAY_STORE_INDEX_POLY
#define ARRAY_STORE_INDEX_NUM
static size_t array_chunk_size_calc(const size_t stride)
TaskPool * task_pool
static void um_arraystore_expand_clear(UndoMesh *um)
static void um_arraystore_cd_expand(const BArrayCustomData *bcd, CustomData *cdata, const size_t data_len)
static bool mesh_undosys_poll(bContext *C)
static void mesh_undosys_foreach_ID_ref(UndoStep *us_p, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
static bool um_customdata_layer_use_rle(const BArrayCustomData *bcd)
static struct @337111344364020116106255322323117072035066202234 um_arraystore
static void mesh_undosys_step_free(UndoStep *us_p)
static void mesh_undosys_step_decode(bContext *C, Main *bmain, UndoStep *us_p, const eUndoStepDir, bool)
static UndoMesh ** mesh_undostep_reference_elems_from_objects(Object **object, int object_len)
static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
static void um_arraystore_cd_compact(CustomData *cdata, const size_t data_len, const bool create, const int bs_index, const BArrayCustomData *bcd_reference, BArrayCustomData **r_bcd_first)
static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool create)
#define printf(...)
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
#define LOG(level)
Definition log.h:97
void * MEM_mallocN(size_t len, const char *str)
Definition mallocn.cc:128
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
static ulong state[N]
std::unique_ptr< IDProperty, IDPropertyDeleter > create(StringRef prop_name, int32_t value, eIDPropertyFlag flags={})
Allocate a new IDProperty of type IDP_INT, set its name and value.
Mesh * mesh_new_no_attributes(int verts_num, int edges_num, int faces_num, int corners_num)
const ImplicitSharingInfo * info_for_mem_free(void *data)
void parallel_invoke(Functions &&...functions)
Definition BLI_task.hh:221
BArrayCustomData * next
blender::Array< std::variant< BArrayState *, blender::ImplicitSharingInfoAndData > > states
eCustomDataType type
short selectmode
char needs_flush_to_id
bool uv_select_sync_valid
int shapenr
CustomData ldata
const ImplicitSharingInfoHandle * sharing_info
CustomDataLayer * layers
char name[258]
Definition DNA_ID.h:432
unsigned int session_uid
Definition DNA_ID.h:462
struct KeyBlock * next
void * data
int totkey
int elemsize
ListBase block
void * first
bool is_memfile_undo_flush_needed
Definition BKE_main.hh:213
UndoRefID_Object obedit_ref
MeshUndoStep_Elem * elems
UndoRefID_Scene scene_ref
MeshUndoStep_SceneData scene_data
int corners_num
CustomData edge_data
int edges_num
MeshRuntimeHandle * runtime
CustomData corner_data
CustomData face_data
ListBase vertex_group_names
int * face_offset_indices
CustomData vert_data
int vertex_group_active_index
struct Key * key
int totselect
int faces_num
struct MSelect * mselect
int verts_num
struct ToolSettings * toolsettings
const UndoMesh * um_ref
UndoMesh * um
size_t undo_size
UndoMesh * local_prev
BArrayState * mselect
BArrayCustomData * ldata
BArrayState * face_offset_indices
BArrayCustomData * vdata
BArrayState ** keyblocks
UndoMesh * local_next
struct UndoMesh::@125065304153060141020032116057354024373376324373 store
BArrayCustomData * pdata
BArrayCustomData * edata
size_t data_size
char name[64]
void(* step_foreach_ID_ref)(UndoStep *us, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
const char * name
void(* step_free)(UndoStep *us)
bool(* poll)(struct bContext *C)
void(* step_decode)(bContext *C, Main *bmain, UndoStep *us, eUndoStepDir dir, bool is_final)
bool(* step_encode)(bContext *C, Main *bmain, UndoStep *us)
i
Definition text_draw.cc:230
void WM_event_add_notifier(const bContext *C, uint type, void *reference)