Blender V4.3
undo_system.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
11#include <cstdio>
12#include <cstring>
13
14#include "CLG_log.h"
15
16#include "BLI_listbase.h"
17#include "BLI_string.h"
18#include "BLI_sys_types.h"
19#include "BLI_utildefines.h"
20
21#include "BLT_translation.hh"
22
23#include "DNA_listBase.h"
25
26#include "BKE_context.hh"
27#include "BKE_global.hh"
28#include "BKE_lib_id.hh"
29#include "BKE_lib_override.hh"
30#include "BKE_main.hh"
31#include "BKE_undo_system.hh"
32
33#include "RNA_access.hh"
34
35#include "MEM_guardedalloc.h"
36
37/* Header to pull symbols from the file which otherwise might get stripped away. */
38#include "BKE_blender_undo.hh"
39
40#define undo_stack _wm_undo_stack_disallow /* pass in as a variable always. */
41
43#define WITH_GLOBAL_UNDO_KEEP_ONE
44
46#define WITH_GLOBAL_UNDO_ENSURE_UPDATED
47
52#define WITH_GLOBAL_UNDO_CORRECT_ORDER
53
55static CLG_LogRef LOG = {"bke.undosys"};
56
57/* -------------------------------------------------------------------- */
67
68static ListBase g_undo_types = {nullptr, nullptr};
69
70/* An unused function with public linkage just to ensure symbols from the blender_undo.cc are not
71 * stripped. */
78
80{
81 LISTBASE_FOREACH (const UndoType *, ut, &g_undo_types) {
82 /* No poll means we don't check context. */
83 if (ut->poll && ut->poll(C)) {
84 return ut;
85 }
86 }
87 return nullptr;
88}
89
92/* -------------------------------------------------------------------- */
100#define WITH_NESTED_UNDO_CHECK
101
102#ifdef WITH_NESTED_UNDO_CHECK
103static bool g_undo_callback_running = false;
104# define UNDO_NESTED_ASSERT(state) BLI_assert(g_undo_callback_running == state)
105# define UNDO_NESTED_CHECK_BEGIN \
106 { \
107 UNDO_NESTED_ASSERT(false); \
108 g_undo_callback_running = true; \
109 } \
110 ((void)0)
111# define UNDO_NESTED_CHECK_END \
112 { \
113 UNDO_NESTED_ASSERT(true); \
114 g_undo_callback_running = false; \
115 } \
116 ((void)0)
117#else
118# define UNDO_NESTED_ASSERT(state) ((void)0)
119# define UNDO_NESTED_CHECK_BEGIN ((void)0)
120# define UNDO_NESTED_CHECK_END ((void)0)
121#endif
122
125/* -------------------------------------------------------------------- */
/* Convert a direct ID pointer held by an #UndoRefID into a stored name so the
 * reference survives Main being freed/reloaded; the pointer itself is cleared.
 * NOTE(review): this is a garbled doc-extraction — original line 139 (which
 * presumably stored the library filepath for linked IDs into
 * `id_ref->library_filepath_abs`) is missing here; confirm against upstream. */
133static void undosys_id_ref_store(void * /*user_data*/, UndoRefID *id_ref)
134{
135 BLI_assert(id_ref->name[0] == '\0');
136 if (id_ref->ptr) {
137 STRNCPY(id_ref->name, id_ref->ptr->name);
138 if (id_ref->ptr->lib) {
140 }
141 else {
142 id_ref->library_filepath_abs[0] = '\0';
143 }
144 /* Not needed, just prevents stale data access. */
145 id_ref->ptr = nullptr;
146 }
147}
148
/* Resolve a stored ID name (plus optional library filepath) back to a live ID
 * pointer in the given Main (`user_data`).
 * NOTE(review): garbled extraction — original line 154 is missing; judging by
 * the trailing argument list it assigned
 * `id_ref->ptr = BKE_libblock_find_name_and_library_filepath(...)` — confirm. */
149static void undosys_id_ref_resolve(void *user_data, UndoRefID *id_ref)
150{
151 /* NOTE: we could optimize this,
152 * for now it's not too bad since it only runs when we access undo! */
153 Main *bmain = static_cast<Main *>(user_data);
155 bmain,
156 GS(id_ref->name),
157 id_ref->name + 2,
158 (id_ref->library_filepath_abs[0] ? id_ref->library_filepath_abs : nullptr));
159}
160
/* Run a step's encode callback; on success, convert its ID references to
 * names (so they survive Main reloads) and track the memfile-active step.
 * Returns true when the callback actually produced an undo step.
 * NOTE(review): garbled extraction — original lines 164/166 (likely the
 * UNDO_NESTED_CHECK_BEGIN/END pair around the encode call) and 171 (the
 * `step_foreach_ID_ref(us, undosys_id_ref_store, ...)` call) are missing. */
161static bool undosys_step_encode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us)
162{
163 CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
165 bool ok = us->type->step_encode(C, bmain, us);
167 if (ok) {
168 if (us->type->step_foreach_ID_ref != nullptr) {
169 /* Don't use from context yet because sometimes context is fake and
170 * not all members are filled in. */
172 }
173
174#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
175 if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
176 ustack->step_active_memfile = us;
177 }
178#endif
179 }
180 if (ok == false) {
181 CLOG_INFO(&LOG, 2, "encode callback didn't create undo step");
182 }
183 return ok;
184}
185
/* Decode (apply) an undo step. For non-memfile steps that hold ID references,
 * first make sure the nearest previous memfile state is loaded so those
 * references resolve correctly (see #56163), then resolve IDs and run the
 * step's decode callback. Tracks `step_active_memfile` for memfile steps.
 * NOTE(review): garbled extraction — the signature line (static void
 * undosys_step_decode(bContext *C, ...) per the recursive call below) and
 * original lines 217 (likely the `step_foreach_ID_ref(us,
 * undosys_id_ref_resolve, bmain)` call) and 220/222 (likely the
 * UNDO_NESTED_CHECK_BEGIN/END pair) are missing. */
187 Main *bmain,
188 UndoStack *ustack,
189 UndoStep *us,
190 const eUndoStepDir dir,
191 bool is_final)
192{
193 CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
194
195 if (us->type->step_foreach_ID_ref) {
196#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
197 if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
198 for (UndoStep *us_iter = us->prev; us_iter; us_iter = us_iter->prev) {
199 if (us_iter->type == BKE_UNDOSYS_TYPE_MEMFILE) {
200 if (us_iter == ustack->step_active_memfile) {
201 /* Common case, we're already using the last memfile state. */
202 }
203 else {
204 /* Load the previous memfile state so any ID's referenced in this
205 * undo step will be correctly resolved, see: #56163. */
206 undosys_step_decode(C, bmain, ustack, us_iter, dir, false);
207 /* May have been freed on memfile read. */
208 bmain = G_MAIN;
209 }
210 break;
211 }
212 }
213 }
214#endif
215 /* Don't use from context yet because sometimes context is fake and
216 * not all members are filled in. */
218 }
219
221 us->type->step_decode(C, bmain, us, dir, is_final);
223
224#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
225 if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
226 ustack->step_active_memfile = us;
227 }
228#endif
229}
230
/* Free a step via its type's free callback, unlink it from the stack, and
 * clear the memfile-active tracker if it pointed at this step.
 * NOTE(review): garbled extraction — the signature line (static void
 * undosys_step_free_and_unlink(UndoStack *ustack, UndoStep *us) per the
 * symbol index) and lines 234/236 (likely UNDO_NESTED_CHECK_BEGIN/END) are
 * missing. */
232{
233 CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
235 us->type->step_free(us);
237
238 BLI_remlink(&ustack->steps, us);
239 MEM_freeN(us);
240
241#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
242 if (ustack->step_active_memfile == us) {
243 ustack->step_active_memfile = nullptr;
244 }
245#endif
246}
247
250/* -------------------------------------------------------------------- */
254#ifndef NDEBUG
255static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty)
256{
257 if (ustack->step_active != nullptr) {
259 BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
260 }
261 if (expect_non_empty) {
263 }
264}
265#else
266static void undosys_stack_validate(UndoStack * /*ustack*/, bool /*expect_non_empty*/) {}
267#endif
268
270{
271 UndoStack *ustack = MEM_cnew<UndoStack>(__func__);
272 return ustack;
273}
274
276{
278 MEM_freeN(ustack);
279}
280
/* Free every step on the stack (including a pending `step_init`) and reset
 * the list and active pointers.
 * NOTE(review): garbled extraction — the signature line (void
 * BKE_undosys_stack_clear(UndoStack *ustack) per the symbol index) and
 * original lines 287/290 (presumably the undosys_step_free_and_unlink /
 * free calls inside the loop and the `step_init` branch) are missing. */
282{
283 UNDO_NESTED_ASSERT(false);
284 CLOG_INFO(&LOG, 1, "steps=%d", BLI_listbase_count(&ustack->steps));
285 for (UndoStep *us = static_cast<UndoStep *>(ustack->steps.last), *us_prev; us; us = us_prev) {
286 us_prev = us->prev;
288 }
289 if (UndoStep *us = ustack->step_init) {
291 ustack->step_init = nullptr;
292 }
293 BLI_listbase_clear(&ustack->steps);
294 ustack->step_active = nullptr;
295}
296
298{
299 /* Remove active and all following undo-steps. */
300 UndoStep *us = ustack->step_active;
301
302 if (us) {
303 ustack->step_active = us->prev;
304 bool is_not_empty = ustack->step_active != nullptr;
305
306 while (ustack->steps.last != ustack->step_active) {
307 UndoStep *us_iter = static_cast<UndoStep *>(ustack->steps.last);
308 undosys_step_free_and_unlink(ustack, us_iter);
309 undosys_stack_validate(ustack, is_not_empty);
310 }
311 }
312}
313
314/* Caller is responsible for handling active. */
316{
317 if (us) {
318 bool is_not_empty = true;
319 UndoStep *us_iter;
320 do {
321 us_iter = static_cast<UndoStep *>(ustack->steps.last);
322 BLI_assert(us_iter != ustack->step_active);
323 undosys_step_free_and_unlink(ustack, us_iter);
324 undosys_stack_validate(ustack, is_not_empty);
325 } while (us != us_iter);
326 }
327}
328
329static void undosys_stack_clear_all_first(UndoStack *ustack, UndoStep *us, UndoStep *us_exclude)
330{
331 if (us && us == us_exclude) {
332 us = us->prev;
333 }
334
335 if (us) {
336 bool is_not_empty = true;
337 UndoStep *us_iter;
338 do {
339 us_iter = static_cast<UndoStep *>(ustack->steps.first);
340 if (us_iter == us_exclude) {
341 us_iter = us_iter->next;
342 }
343 BLI_assert(us_iter != ustack->step_active);
344 undosys_step_free_and_unlink(ustack, us_iter);
345 undosys_stack_validate(ustack, is_not_empty);
346 } while (us != us_iter);
347 }
348}
349
350static bool undosys_stack_push_main(UndoStack *ustack, const char *name, Main *bmain)
351{
352 UNDO_NESTED_ASSERT(false);
353 BLI_assert(ustack->step_init == nullptr);
354 CLOG_INFO(&LOG, 1, "'%s'", name);
355 bContext *C_temp = CTX_create();
356 CTX_data_main_set(C_temp, bmain);
358 ustack, C_temp, name, BKE_UNDOSYS_TYPE_MEMFILE);
359 CTX_free(C_temp);
360 return (ret & UNDO_PUSH_RET_SUCCESS);
361}
362
364{
365 UNDO_NESTED_ASSERT(false);
366 undosys_stack_push_main(ustack, IFACE_("Original"), bmain);
367}
368
370{
372 if (!ELEM(ut, nullptr, BKE_UNDOSYS_TYPE_MEMFILE)) {
373 BKE_undosys_step_push_with_type(ustack, C, IFACE_("Original Mode"), ut);
374 }
375}
376
377bool BKE_undosys_stack_has_undo(const UndoStack *ustack, const char *name)
378{
379 if (name) {
380 const UndoStep *us = static_cast<UndoStep *>(
381 BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name)));
382 return us && us->prev;
383 }
384
385 return !BLI_listbase_is_empty(&ustack->steps);
386}
387
389{
390 UndoStep *us = ustack->step_active;
391 while (us && (us->type != ut)) {
392 us = us->prev;
393 }
394 return us;
395}
396
398{
399 UNDO_NESTED_ASSERT(false);
400 CLOG_INFO(&LOG, 1, "type='%s'", ut->name);
401 if (ustack->step_init && (ustack->step_init->type == ut)) {
402 return ustack->step_init;
403 }
404 return BKE_undosys_stack_active_with_type(ustack, ut);
405}
406
/* Trim old undo steps so the stack respects both a step-count limit (`steps`,
 * -1 disables) and a memory budget (`memory_limit`, 0 disables). Walks from
 * the newest step backwards to find the cutoff, keeps at least one memfile
 * step (marked skip when outside the limit), then frees everything older.
 * NOTE(review): garbled extraction — the signature line (void
 * BKE_undosys_stack_limit_steps_and_memory(UndoStack *ustack, int steps,
 * size_t memory_limit) per the symbol index) and original line 429
 * (presumably the `memory_limit);` closing argument of the CLOG_INFO call)
 * are missing. */
408{
409 UNDO_NESTED_ASSERT(false);
410 if ((steps == -1) && (memory_limit == 0)) {
411 return;
412 }
413
414 CLOG_INFO(&LOG, 1, "steps=%d, memory_limit=%zu", steps, memory_limit);
415 UndoStep *us;
416 UndoStep *us_exclude = nullptr;
417 /* keep at least two (original + other) */
418 size_t data_size_all = 0;
419 size_t us_count = 0;
420 for (us = static_cast<UndoStep *>(ustack->steps.last); us && us->prev; us = us->prev) {
421 if (memory_limit) {
422 data_size_all += us->data_size;
423 if (data_size_all > memory_limit) {
424 CLOG_INFO(&LOG,
425 1,
426 "At step %zu: data_size_all=%zu >= memory_limit=%zu",
427 us_count,
428 data_size_all,
430 break;
431 }
432 }
433 if (steps != -1) {
434 if (us_count == steps) {
435 break;
436 }
437 if (us->skip == false) {
438 us_count += 1;
439 }
440 }
441 }
442
443 CLOG_INFO(&LOG, 1, "Total steps %zu: data_size_all=%zu", us_count, data_size_all);
444
445 if (us) {
446#ifdef WITH_GLOBAL_UNDO_KEEP_ONE
447 /* Hack, we need to keep at least one BKE_UNDOSYS_TYPE_MEMFILE. */
448 if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
449 us_exclude = us->prev;
450 while (us_exclude && us_exclude->type != BKE_UNDOSYS_TYPE_MEMFILE) {
451 us_exclude = us_exclude->prev;
452 }
453 /* Once this is outside the given number of 'steps', undoing onto this state
454 * may skip past many undo steps which is confusing, instead,
455 * disallow stepping onto this state entirely. */
456 if (us_exclude) {
457 us_exclude->skip = true;
458 }
459 }
460#endif
461 /* Free from first to last, free functions may update de-duplication info
462 * (see #MemFileUndoStep). */
463 undosys_stack_clear_all_first(ustack, us->prev, us_exclude);
464 }
465}
466
469/* -------------------------------------------------------------------- */
/* Begin a deferred undo push: when the type supports `step_encode_init`,
 * allocate a new step, run the init callback and stash it in
 * `ustack->step_init` for a later #BKE_undosys_step_push_with_type to finish.
 * Returns the new step, or null when the type has no init callback.
 * NOTE(review): garbled extraction — the first signature line (UndoStep *
 * BKE_undosys_step_push_init_with_type(UndoStack *ustack, ... per the symbol
 * index) and original lines 485/489-490 (presumably freeing a stale
 * `step_init` and clearing steps after the active one) are missing. */
474 bContext *C,
475 const char *name,
476 const UndoType *ut)
477{
478 UNDO_NESTED_ASSERT(false);
479 /* We could detect and clean this up (but it should never happen!). */
480 BLI_assert(ustack->step_init == nullptr);
481 if (ut->step_encode_init) {
482 undosys_stack_validate(ustack, false);
483
484 if (UndoStep *us = ustack->step_init) {
486 ustack->step_init = nullptr;
487 }
488 if (ustack->step_active) {
490 }
491
492 UndoStep *us = static_cast<UndoStep *>(MEM_callocN(ut->step_size, __func__));
493 if (name != nullptr) {
494 STRNCPY(us->name, name);
495 }
496 us->type = ut;
497 ustack->step_init = us;
498 CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
499 ut->step_encode_init(C, us);
500 undosys_stack_validate(ustack, false);
501 return us;
502 }
503
504 return nullptr;
505}
506
508{
509 UNDO_NESTED_ASSERT(false);
510 /* We could detect and clean this up (but it should never happen!). */
511 BLI_assert(ustack->step_init == nullptr);
513 if (ut == nullptr) {
514 return nullptr;
515 }
516 return BKE_undosys_step_push_init_with_type(ustack, C, name, ut);
517}
518
/* Push a new undo step of the given type: flushes pending library-override
 * operations, drops all redo steps, optionally inserts an internal memfile
 * step first (WITH_GLOBAL_UNDO_ENSURE_UPDATED), encodes the step, and when
 * the type requests it, follows up with a user-visible memfile step so redo
 * applies on top of it (#67256). Returns an #eUndoPushReturn bit-set.
 * NOTE(review): garbled extraction — missing lines: the signature start
 * (eUndoPushReturn BKE_undosys_step_push_with_type(UndoStack *ustack, ...),
 * 529 (presumably declaring `retval`, used below), 533 (presumably declaring
 * `report_flags`, likely as RNA_OVERRIDE_MATCH_RESULT_INIT) and 536
 * (presumably `retval |= UNDO_PUSH_RET_OVERRIDE_CHANGED;`) — confirm against
 * upstream. */
520 bContext *C,
521 const char *name,
522 const UndoType *ut)
523{
524 BLI_assert((ut->flags & UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE) == 0 || C != nullptr);
525
526 UNDO_NESTED_ASSERT(false);
527 undosys_stack_validate(ustack, false);
528 bool is_not_empty = ustack->step_active != nullptr;
530
531 /* Might not be final place for this to be called - probably only want to call it from some
532 * undo handlers, not all of them? */
534 BKE_lib_override_library_main_operations_create(G_MAIN, false, (int *)&report_flags);
535 if (report_flags & RNA_OVERRIDE_MATCH_RESULT_CREATED) {
537 }
538
539 /* Remove all undo-steps after (also when 'ustack->step_active == nullptr'). */
540 while (ustack->steps.last != ustack->step_active) {
541 UndoStep *us_iter = static_cast<UndoStep *>(ustack->steps.last);
542 undosys_step_free_and_unlink(ustack, us_iter);
543 undosys_stack_validate(ustack, is_not_empty);
544 }
545
546 if (ustack->step_active) {
547 BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
548 }
549
550#ifdef WITH_GLOBAL_UNDO_ENSURE_UPDATED
551 if (ut->step_foreach_ID_ref != nullptr) {
552 if (G_MAIN->is_memfile_undo_written == false) {
553 const char *name_internal = "MemFile Internal (pre)";
554 /* Don't let 'step_init' cause issues when adding memfile undo step. */
555 void *step_init = ustack->step_init;
556 ustack->step_init = nullptr;
557 const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
558 /* Restore 'step_init'. */
559 ustack->step_init = static_cast<UndoStep *>(step_init);
560 if (ok) {
561 UndoStep *us = static_cast<UndoStep *>(ustack->steps.last);
562 BLI_assert(STREQ(us->name, name_internal));
563 us->skip = true;
564# ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
565 ustack->step_active_memfile = us;
566# endif
567 }
568 }
569 }
570#endif
571
572 bool use_memfile_step = false;
573 {
574 UndoStep *us = ustack->step_init ?
575 ustack->step_init :
576 static_cast<UndoStep *>(MEM_callocN(ut->step_size, __func__));
577 ustack->step_init = nullptr;
578 if (us->name[0] == '\0') {
579 STRNCPY(us->name, name);
580 }
581 us->type = ut;
582 /* True by default, code needs to explicitly set it to false if necessary. */
583 us->use_old_bmain_data = true;
584 /* Initialized, not added yet. */
585
586 CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
587
588 if (!undosys_step_encode(C, G_MAIN, ustack, us)) {
589 MEM_freeN(us);
590 undosys_stack_validate(ustack, true);
591 return retval;
592 }
593 ustack->step_active = us;
594 BLI_addtail(&ustack->steps, us);
595 use_memfile_step = us->use_memfile_step;
596 }
597
598 if (use_memfile_step) {
599 /* Make this the user visible undo state, so redo always applies
600 * on top of the mem-file undo instead of skipping it. see: #67256. */
601 UndoStep *us_prev = ustack->step_active;
602 const char *name_internal = us_prev->name;
603 const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
604 if (ok) {
605 UndoStep *us = static_cast<UndoStep *>(ustack->steps.last);
606 BLI_assert(STREQ(us->name, name_internal));
607 us_prev->skip = true;
608#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
609 ustack->step_active_memfile = us;
610#endif
611 ustack->step_active = us;
612 }
613 }
614
615 if (ustack->group_level > 0) {
616 /* Temporarily set skip for the active step.
617 * This is an invalid state which must be corrected once the last group ends. */
618 ustack->step_active->skip = true;
619 }
620
621 undosys_stack_validate(ustack, true);
622 return (retval | UNDO_PUSH_RET_SUCCESS);
623}
624
626{
627 UNDO_NESTED_ASSERT(false);
628 const UndoType *ut = ustack->step_init ? ustack->step_init->type :
630 if (ut == nullptr) {
632 }
633 return BKE_undosys_step_push_with_type(ustack, C, name, ut);
634}
635
637{
638 if (us) {
639 const UndoType *ut = us->type;
640 while ((us = us->next)) {
641 if (us->type == ut) {
642 return us;
643 }
644 }
645 }
646 return us;
647}
648
650{
651 if (us) {
652 const UndoType *ut = us->type;
653 while ((us = us->prev)) {
654 if (us->type == ut) {
655 return us;
656 }
657 }
658 }
659 return us;
660}
661
663 const char *name,
664 const UndoType *ut)
665{
666 LISTBASE_FOREACH_BACKWARD (UndoStep *, us, &ustack->steps) {
667 if (us->type == ut) {
668 if (STREQ(name, us->name)) {
669 return us;
670 }
671 }
672 }
673 return nullptr;
674}
675
677{
678 return static_cast<UndoStep *>(BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name)));
679}
680
682{
683 LISTBASE_FOREACH_BACKWARD (UndoStep *, us, &ustack->steps) {
684 if (us->type == ut) {
685 return us;
686 }
687 }
688 return nullptr;
689}
690
692 const UndoStep *us_target,
693 const UndoStep *us_reference)
694{
695 if (us_reference == nullptr) {
696 us_reference = ustack->step_active;
697 }
698
699 BLI_assert(us_reference != nullptr);
700
701 /* Note that we use heuristics to make this lookup as fast as possible in most common cases,
702 * assuming that:
703 * - Most cases are just undo or redo of one step from active one.
704 * - Otherwise, it is typically faster to check future steps since active one is usually close
705 * to the end of the list, rather than its start. */
706 /* NOTE: in case target step is the active one, we assume we are in an undo case... */
707 if (ELEM(us_target, us_reference, us_reference->prev)) {
708 return STEP_UNDO;
709 }
710 if (us_target == us_reference->next) {
711 return STEP_REDO;
712 }
713
714 /* Search forward, and then backward. */
715 for (UndoStep *us_iter = us_reference->next; us_iter != nullptr; us_iter = us_iter->next) {
716 if (us_iter == us_target) {
717 return STEP_REDO;
718 }
719 }
720 for (UndoStep *us_iter = us_reference->prev; us_iter != nullptr; us_iter = us_iter->prev) {
721 if (us_iter == us_target) {
722 return STEP_UNDO;
723 }
724 }
725
727 "Target undo step not found, this should not happen and may indicate an undo "
728 "stack corruption");
729 return STEP_INVALID;
730}
731
736static UndoStep *undosys_step_iter_first(UndoStep *us_reference, const eUndoStepDir undo_dir)
737{
738 if (us_reference->type->flags & UNDOTYPE_FLAG_DECODE_ACTIVE_STEP) {
739 /* Reading this step means an undo action reads undo twice.
740 * This should be avoided where possible, however some undo systems require it.
741 *
742 * Redo skips the current state as this represents the currently loaded state. */
743 return (undo_dir == -1) ? us_reference : us_reference->next;
744 }
745
746 /* Typical case, skip reading the current undo step. */
747 return (undo_dir == -1) ? us_reference->prev : us_reference->next;
748}
749
/* Core undo/redo walk: determine the direction from `us_reference` to
 * `us_target`, optionally step past 'skip' steps, then decode each step in
 * order until the final target becomes active. Returns true on success.
 * NOTE(review): garbled extraction — missing lines: the signature start
 * (bool BKE_undosys_step_load_data_ex(UndoStack *ustack, ... per the symbol
 * index) and original line 834 (presumably `BLI_assert_msg(` opening the
 * unreachable-state assertion whose arguments follow below). */
751 bContext *C,
752 UndoStep *us_target,
753 UndoStep *us_reference,
754 const bool use_skip)
755{
756 UNDO_NESTED_ASSERT(false);
757 if (us_target == nullptr) {
758 CLOG_ERROR(&LOG, "called with a nullptr target step");
759 return false;
760 }
761 undosys_stack_validate(ustack, true);
762
763 if (us_reference == nullptr) {
764 us_reference = ustack->step_active;
765 }
766 if (us_reference == nullptr) {
767 CLOG_ERROR(&LOG, "could not find a valid initial active target step as reference");
768 return false;
769 }
770
771 /* This considers we are in undo case if both `us_target` and `us_reference` are the same. */
772 const eUndoStepDir undo_dir = BKE_undosys_step_calc_direction(ustack, us_target, us_reference);
773 BLI_assert(undo_dir != STEP_INVALID);
774
775 /* This will be the active step once the undo process is complete.
776 *
777 * In case we do skip 'skipped' steps, the final active step may be several steps backward from
778 * the one passed as parameter. */
779 UndoStep *us_target_active = us_target;
780 if (use_skip) {
781 while (us_target_active != nullptr && us_target_active->skip) {
782 us_target_active = (undo_dir == -1) ? us_target_active->prev : us_target_active->next;
783 }
784 if (us_target_active == nullptr) {
785 CLOG_INFO(&LOG,
786 2,
787 "undo/redo did not find a step after stepping over skip-steps "
788 "(undo limit exceeded)");
789 return false;
790 }
791 }
792
793 CLOG_INFO(&LOG,
794 1,
795 "addr=%p, name='%s', type='%s', undo_dir=%d",
796 us_target,
797 us_target->name,
798 us_target->type->name,
799 undo_dir);
800
801 /* Undo/Redo steps until we reach given target step (or beyond if it has to be skipped),
802 * from given reference step. */
803 bool is_processing_extra_skipped_steps = false;
804 for (UndoStep *us_iter = undosys_step_iter_first(us_reference, undo_dir); us_iter != nullptr;
805 us_iter = (undo_dir == -1) ? us_iter->prev : us_iter->next)
806 {
807 BLI_assert(us_iter != nullptr);
808
809 const bool is_final = (us_iter == us_target_active);
810
811 if (!is_final && is_processing_extra_skipped_steps) {
812 BLI_assert(us_iter->skip == true);
813 CLOG_INFO(&LOG,
814 2,
815 "undo/redo continue with skip addr=%p, name='%s', type='%s'",
816 us_iter,
817 us_iter->name,
818 us_iter->type->name);
819 }
820
821 undosys_step_decode(C, G_MAIN, ustack, us_iter, undo_dir, is_final);
822 ustack->step_active = us_iter;
823
824 if (us_iter == us_target) {
825 is_processing_extra_skipped_steps = true;
826 }
827
828 if (is_final) {
829 /* Undo/Redo process is finished and successful. */
830 return true;
831 }
832 }
833
835 false,
836 "This should never be reached, either undo stack is corrupted, or code above is buggy");
837 return false;
838}
839
841{
842 /* Note that here we do not skip 'skipped' steps by default. */
843 return BKE_undosys_step_load_data_ex(ustack, C, us_target, nullptr, false);
844}
845
846void BKE_undosys_step_load_from_index(UndoStack *ustack, bContext *C, const int index)
847{
848 UndoStep *us_target = static_cast<UndoStep *>(BLI_findlink(&ustack->steps, index));
849 BLI_assert(us_target->skip == false);
850 if (us_target == ustack->step_active) {
851 return;
852 }
853 BKE_undosys_step_load_data(ustack, C, us_target);
854}
855
857 bContext *C,
858 UndoStep *us_target,
859 bool use_skip)
860{
861 /* In case there is no active step, we consider we just load given step, so reference must be
862 * itself (due to weird 'load current active step in undo case' thing, see comments in
863 * #BKE_undosys_step_load_data_ex). */
864 UndoStep *us_reference = ustack->step_active != nullptr ? ustack->step_active : us_target;
865
866 BLI_assert(BKE_undosys_step_calc_direction(ustack, us_target, us_reference) == -1);
867
868 return BKE_undosys_step_load_data_ex(ustack, C, us_target, us_reference, use_skip);
869}
870
872{
873 return BKE_undosys_step_undo_with_data_ex(ustack, C, us_target, true);
874}
875
877{
878 if (ustack->step_active != nullptr) {
879 return BKE_undosys_step_undo_with_data(ustack, C, ustack->step_active->prev);
880 }
881 return false;
882}
883
885 bContext *C,
886 UndoStep *us_target,
887 bool use_skip)
888{
889 /* In case there is no active step, we consider we just load given step, so reference must be
890 * the previous one. */
891 UndoStep *us_reference = ustack->step_active != nullptr ? ustack->step_active : us_target->prev;
892
893 BLI_assert(BKE_undosys_step_calc_direction(ustack, us_target, us_reference) == 1);
894
895 return BKE_undosys_step_load_data_ex(ustack, C, us_target, us_reference, use_skip);
896}
897
899{
900 return BKE_undosys_step_redo_with_data_ex(ustack, C, us_target, true);
901}
902
904{
905 if (ustack->step_active != nullptr) {
906 return BKE_undosys_step_redo_with_data(ustack, C, ustack->step_active->next);
907 }
908 return false;
909}
910
912{
913 UndoType *ut = MEM_cnew<UndoType>(__func__);
914
915 undosys_fn(ut);
916
918
919 return ut;
920}
921
923{
924 while (UndoType *ut = static_cast<UndoType *>(BLI_pophead(&g_undo_types))) {
925 MEM_freeN(ut);
926 }
927}
928
931/* -------------------------------------------------------------------- */
951{
952 BLI_assert(ustack->group_level >= 0);
953 ustack->group_level += 1;
954}
955
957{
958 ustack->group_level -= 1;
959 BLI_assert(ustack->group_level >= 0);
960
961 if (ustack->group_level == 0) {
962 if (LIKELY(ustack->step_active != nullptr)) {
963 ustack->step_active->skip = false;
964 }
965 }
966}
967
970/* -------------------------------------------------------------------- */
976static void UNUSED_FUNCTION(BKE_undosys_foreach_ID_ref(UndoStack *ustack,
977 UndoTypeForEachIDRefFn foreach_ID_ref_fn,
978 void *user_data))
979{
980 LISTBASE_FOREACH (UndoStep *, us, &ustack->steps) {
981 const UndoType *ut = us->type;
982 if (ut->step_foreach_ID_ref != nullptr) {
983 ut->step_foreach_ID_ref(us, foreach_ID_ref_fn, user_data);
984 }
985 }
986}
987
990/* -------------------------------------------------------------------- */
995{
996 printf("Undo %d Steps (*: active, #=applied, M=memfile-active, S=skip)\n",
997 BLI_listbase_count(&ustack->steps));
998 int index = 0;
999 LISTBASE_FOREACH (UndoStep *, us, &ustack->steps) {
1000 printf("[%c%c%c%c] %3d {%p} type='%s', name='%s'\n",
1001 (us == ustack->step_active) ? '*' : ' ',
1002 us->is_applied ? '#' : ' ',
1003 (us == ustack->step_active_memfile) ? 'M' : ' ',
1004 us->skip ? 'S' : ' ',
1005 index,
1006 (void *)us,
1007 us->type->name,
1008 us->name);
1009 index++;
1010 }
1011}
1012
void BKE_memfile_undo_free(MemFileUndoData *mfu)
void CTX_data_main_set(bContext *C, Main *bmain)
void CTX_free(bContext *C)
bContext * CTX_create()
#define G_MAIN
ID * BKE_libblock_find_name_and_library_filepath(Main *bmain, short type, const char *name, const char *lib_filepath_abs)
Definition lib_id.cc:1715
void BKE_lib_override_library_main_operations_create(Main *bmain, bool force_auto, int *r_report_flags)
@ UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE
@ UNDOTYPE_FLAG_DECODE_ACTIVE_STEP
void(*)(void *user_data, UndoRefID *id_ref) UndoTypeForEachIDRefFn
eUndoStepDir
@ STEP_INVALID
@ STEP_UNDO
@ STEP_REDO
eUndoPushReturn
@ UNDO_PUSH_RET_SUCCESS
@ UNDO_PUSH_RET_OVERRIDE_CHANGED
@ UNDO_PUSH_RET_FAILURE
#define BLI_assert_unreachable()
Definition BLI_assert.h:97
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
BLI_INLINE bool BLI_listbase_is_empty(const struct ListBase *lb)
#define LISTBASE_FOREACH(type, var, list)
#define LISTBASE_FOREACH_BACKWARD(type, var, list)
BLI_INLINE void BLI_listbase_clear(struct ListBase *lb)
void * BLI_findlink(const struct ListBase *listbase, int number) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:110
void BLI_remlink(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:130
int BLI_findindex(const struct ListBase *listbase, const void *vlink) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
void * BLI_rfindstring(const struct ListBase *listbase, const char *id, int offset) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
void * BLI_pophead(ListBase *listbase) ATTR_NONNULL(1)
Definition listbase.cc:251
int BLI_listbase_count(const struct ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
#define STRNCPY(dst, src)
Definition BLI_string.h:593
#define UNUSED_FUNCTION(x)
#define ELEM(...)
#define STREQ(a, b)
#define LIKELY(x)
#define IFACE_(msgid)
#define CLOG_ERROR(clg_ref,...)
Definition CLG_log.h:182
#define CLOG_INFO(clg_ref, level,...)
Definition CLG_log.h:179
These structs are the foundation for all linked lists in the library system.
Read Guarded memory(de)allocation.
eRNAOverrideMatchResult
@ RNA_OVERRIDE_MATCH_RESULT_CREATED
@ RNA_OVERRIDE_MATCH_RESULT_INIT
#define printf
#define offsetof(t, d)
#define GS(x)
Definition iris.cc:202
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void *(* MEM_callocN)(size_t len, const char *str)
Definition mallocn.cc:42
return ret
static const int steps
CLG_LogType * type
Definition CLG_log.h:108
struct Library * lib
Definition DNA_ID.h:419
char name[66]
Definition DNA_ID.h:425
char filepath_abs[1024]
Definition DNA_ID.h:509
struct Library_Runtime runtime
Definition DNA_ID.h:535
void * last
void * first
char name[MAX_ID_NAME]
struct ID * ptr
char library_filepath_abs[FILE_MAX]
UndoStep * step_init
UndoStep * step_active_memfile
UndoStep * step_active
ListBase steps
size_t data_size
UndoStep * prev
bool use_old_bmain_data
UndoStep * next
const UndoType * type
bool use_memfile_step
char name[64]
void(* step_encode_init)(bContext *C, UndoStep *us)
void(* step_foreach_ID_ref)(UndoStep *us, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
const char * name
void(* step_free)(UndoStep *us)
void(* step_decode)(bContext *C, Main *bmain, UndoStep *us, eUndoStepDir dir, bool is_final)
bool(* step_encode)(bContext *C, Main *bmain, UndoStep *us)
static void undosys_id_ref_resolve(void *user_data, UndoRefID *id_ref)
static UndoStep * undosys_step_iter_first(UndoStep *us_reference, const eUndoStepDir undo_dir)
bool BKE_undosys_step_undo_with_data_ex(UndoStack *ustack, bContext *C, UndoStep *us_target, bool use_skip)
bool BKE_undosys_stack_has_undo(const UndoStack *ustack, const char *name)
const UndoType * BKE_UNDOSYS_TYPE_SCULPT
bool BKE_undosys_step_redo_with_data(UndoStack *ustack, bContext *C, UndoStep *us_target)
void BKE_undosys_stack_init_from_context(UndoStack *ustack, bContext *C)
UndoStep * BKE_undosys_step_find_by_name_with_type(UndoStack *ustack, const char *name, const UndoType *ut)
bool BKE_undosys_step_redo(UndoStack *ustack, bContext *C)
UndoStep * BKE_undosys_step_find_by_type(UndoStack *ustack, const UndoType *ut)
UndoStep * BKE_undosys_step_same_type_next(UndoStep *us)
static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty)
bool BKE_undosys_step_undo(UndoStack *ustack, bContext *C)
void BKE_undosys_stack_clear_active(UndoStack *ustack)
#define UNDO_NESTED_CHECK_END
static const UndoType * BKE_undosys_type_from_context(bContext *C)
static ListBase g_undo_types
bool BKE_undosys_step_undo_with_data(UndoStack *ustack, bContext *C, UndoStep *us_target)
static bool g_undo_callback_running
#define UNDO_NESTED_CHECK_BEGIN
eUndoPushReturn BKE_undosys_step_push(UndoStack *ustack, bContext *C, const char *name)
eUndoStepDir BKE_undosys_step_calc_direction(const UndoStack *ustack, const UndoStep *us_target, const UndoStep *us_reference)
static void undosys_id_ref_store(void *, UndoRefID *id_ref)
static void undosys_stack_clear_all_last(UndoStack *ustack, UndoStep *us)
const UndoType * BKE_UNDOSYS_TYPE_MEMFILE
UndoStep * BKE_undosys_step_same_type_prev(UndoStep *us)
const UndoType * BKE_UNDOSYS_TYPE_PARTICLE
static void undosys_stack_clear_all_first(UndoStack *ustack, UndoStep *us, UndoStep *us_exclude)
void BKE_undosys_type_free_all()
void BKE_undosys_stack_clear(UndoStack *ustack)
const UndoType * BKE_UNDOSYS_TYPE_TEXT
static bool undosys_stack_push_main(UndoStack *ustack, const char *name, Main *bmain)
void BKE_undosys_stack_limit_steps_and_memory(UndoStack *ustack, int steps, size_t memory_limit)
bool BKE_undosys_step_load_data_ex(UndoStack *ustack, bContext *C, UndoStep *us_target, UndoStep *us_reference, const bool use_skip)
UndoStep * BKE_undosys_step_find_by_name(UndoStack *ustack, const char *name)
static void undosys_step_free_and_unlink(UndoStack *ustack, UndoStep *us)
void BKE_undosys_step_load_from_index(UndoStack *ustack, bContext *C, const int index)
eUndoPushReturn BKE_undosys_step_push_with_type(UndoStack *ustack, bContext *C, const char *name, const UndoType *ut)
UndoStep * BKE_undosys_step_push_init_with_type(UndoStack *ustack, bContext *C, const char *name, const UndoType *ut)
void BKE_undosys_stack_destroy(UndoStack *ustack)
void BKE_undosys_stack_init_from_main(UndoStack *ustack, Main *bmain)
#define UNDO_NESTED_ASSERT(state)
void bke_undo_system_linker_workaround()
void BKE_undosys_stack_group_end(UndoStack *ustack)
static bool undosys_step_encode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us)
static CLG_LogRef LOG
UndoStep * BKE_undosys_stack_active_with_type(UndoStack *ustack, const UndoType *ut)
bool BKE_undosys_step_load_data(UndoStack *ustack, bContext *C, UndoStep *us_target)
UndoStack * BKE_undosys_stack_create()
const UndoType * BKE_UNDOSYS_TYPE_PAINTCURVE
bool BKE_undosys_step_redo_with_data_ex(UndoStack *ustack, bContext *C, UndoStep *us_target, bool use_skip)
UndoType * BKE_undosys_type_append(void(*undosys_fn)(UndoType *))
void BKE_undosys_print(UndoStack *ustack)
static void undosys_step_decode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us, const eUndoStepDir dir, bool is_final)
UndoStep * BKE_undosys_stack_init_or_active_with_type(UndoStack *ustack, const UndoType *ut)
UndoStep * BKE_undosys_step_push_init(UndoStack *ustack, bContext *C, const char *name)
void BKE_undosys_stack_group_begin(UndoStack *ustack)
const UndoType * BKE_UNDOSYS_TYPE_IMAGE
size_t memory_limit