Blender V4.5
undo_system.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include <cstdio>
12#include <cstring>
13
14#include "CLG_log.h"
15
16#include "BLI_listbase.h"
17#include "BLI_string.h"
18#include "BLI_sys_types.h"
19#include "BLI_utildefines.h"
20
21#include "BLT_translation.hh"
22
23#include "DNA_listBase.h"
25
26#include "BKE_context.hh"
27#include "BKE_global.hh"
28#include "BKE_lib_id.hh"
29#include "BKE_lib_override.hh"
30#include "BKE_library.hh"
31#include "BKE_main.hh"
32#include "BKE_undo_system.hh"
33
34#include "RNA_access.hh"
35
36#include "MEM_guardedalloc.h"
37
38/* Header to pull symbols from the file which otherwise might get stripped away. */
39#include "BKE_blender_undo.hh"
40
#define undo_stack _wm_undo_stack_disallow /* pass in as a variable always. */

/* Keep at least one memfile (global) undo step when trimming the stack,
 * see #BKE_undosys_stack_limit_steps_and_memory. */
#define WITH_GLOBAL_UNDO_KEEP_ONE

/* Ensure a memfile undo step has been written before pushing steps that
 * store ID references, so those references can be resolved later. */
#define WITH_GLOBAL_UNDO_ENSURE_UPDATED

/* Decode the nearest previous memfile step before non-memfile steps so
 * stored ID references resolve against the correct Main state (see #56163). */
#define WITH_GLOBAL_UNDO_CORRECT_ORDER
54
56static CLG_LogRef LOG = {"bke.undosys"};
57
58/* -------------------------------------------------------------------- */
61
68
69static ListBase g_undo_types = {nullptr, nullptr};
70
71/* An unused function with public linkage just to ensure symbols from the blender_undo.cc are not
72 * stripped. */
79
81{
82 LISTBASE_FOREACH (const UndoType *, ut, &g_undo_types) {
83 /* No poll means we don't check context. */
84 if (ut->poll && ut->poll(C)) {
85 return ut;
86 }
87 }
88 return nullptr;
89}
90
92
93/* -------------------------------------------------------------------- */
100
/* Assert that undo type callbacks (encode/decode/free) never nest. */
#define WITH_NESTED_UNDO_CHECK

#ifdef WITH_NESTED_UNDO_CHECK
/* Set while an undo type callback runs; checked to catch re-entrant calls. */
static bool g_undo_callback_running = false;
#  define UNDO_NESTED_ASSERT(state) BLI_assert(g_undo_callback_running == state)
#  define UNDO_NESTED_CHECK_BEGIN \
    { \
      UNDO_NESTED_ASSERT(false); \
      g_undo_callback_running = true; \
    } \
    ((void)0)
#  define UNDO_NESTED_CHECK_END \
    { \
      UNDO_NESTED_ASSERT(true); \
      g_undo_callback_running = false; \
    } \
    ((void)0)
#else
#  define UNDO_NESTED_ASSERT(state) ((void)0)
#  define UNDO_NESTED_CHECK_BEGIN ((void)0)
#  define UNDO_NESTED_CHECK_END ((void)0)
#endif
123
125
126/* -------------------------------------------------------------------- */
133
134static void undosys_id_ref_store(void * /*user_data*/, UndoRefID *id_ref)
135{
136 BLI_assert(id_ref->name[0] == '\0');
137 if (id_ref->ptr) {
138 STRNCPY(id_ref->name, id_ref->ptr->name);
139 if (id_ref->ptr->lib) {
140 STRNCPY(id_ref->library_filepath_abs, id_ref->ptr->lib->runtime->filepath_abs);
141 }
142 else {
143 id_ref->library_filepath_abs[0] = '\0';
144 }
145 /* Not needed, just prevents stale data access. */
146 id_ref->ptr = nullptr;
147 }
148}
149
150static void undosys_id_ref_resolve(void *user_data, UndoRefID *id_ref)
151{
152 /* NOTE: we could optimize this,
153 * for now it's not too bad since it only runs when we access undo! */
154 Main *bmain = static_cast<Main *>(user_data);
156 bmain,
157 GS(id_ref->name),
158 id_ref->name + 2,
159 (id_ref->library_filepath_abs[0] ? id_ref->library_filepath_abs : nullptr));
160}
161
162static bool undosys_step_encode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us)
163{
164 CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
166 bool ok = us->type->step_encode(C, bmain, us);
168 if (ok) {
169 if (us->type->step_foreach_ID_ref != nullptr) {
170 /* Don't use from context yet because sometimes context is fake and
171 * not all members are filled in. */
173 }
174
175#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
176 if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
177 ustack->step_active_memfile = us;
178 }
179#endif
180 }
181 if (ok == false) {
182 CLOG_INFO(&LOG, 2, "encode callback didn't create undo step");
183 }
184 return ok;
185}
186
188 Main *bmain,
189 UndoStack *ustack,
190 UndoStep *us,
191 const eUndoStepDir dir,
192 bool is_final)
193{
194 CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
195
196 if (us->type->step_foreach_ID_ref) {
197#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
198 if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
199 for (UndoStep *us_iter = us->prev; us_iter; us_iter = us_iter->prev) {
200 if (us_iter->type == BKE_UNDOSYS_TYPE_MEMFILE) {
201 if (us_iter == ustack->step_active_memfile) {
202 /* Common case, we're already using the last memfile state. */
203 }
204 else {
205 /* Load the previous memfile state so any ID's referenced in this
206 * undo step will be correctly resolved, see: #56163. */
207 undosys_step_decode(C, bmain, ustack, us_iter, dir, false);
208 /* May have been freed on memfile read. */
209 bmain = G_MAIN;
210 }
211 break;
212 }
213 }
214 }
215#endif
216 /* Don't use from context yet because sometimes context is fake and
217 * not all members are filled in. */
219 }
220
222 us->type->step_decode(C, bmain, us, dir, is_final);
224
225#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
226 if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
227 ustack->step_active_memfile = us;
228 }
229#endif
230}
231
233{
234 CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
236 us->type->step_free(us);
238
239 BLI_remlink(&ustack->steps, us);
240 MEM_freeN(us);
241
242#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
243 if (ustack->step_active_memfile == us) {
244 ustack->step_active_memfile = nullptr;
245 }
246#endif
247}
248
250
251/* -------------------------------------------------------------------- */
254
255#ifndef NDEBUG
256static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty)
257{
258 if (ustack->step_active != nullptr) {
260 BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
261 }
262 if (expect_non_empty) {
264 }
265}
266#else
267static void undosys_stack_validate(UndoStack * /*ustack*/, bool /*expect_non_empty*/) {}
268#endif
269
271{
272 UndoStack *ustack = MEM_callocN<UndoStack>(__func__);
273 return ustack;
274}
275
277{
279 MEM_freeN(ustack);
280}
281
283{
284 UNDO_NESTED_ASSERT(false);
285 CLOG_INFO(&LOG, 1, "steps=%d", BLI_listbase_count(&ustack->steps));
286 for (UndoStep *us = static_cast<UndoStep *>(ustack->steps.last), *us_prev; us; us = us_prev) {
287 us_prev = us->prev;
289 }
290 if (UndoStep *us = ustack->step_init) {
292 ustack->step_init = nullptr;
293 }
294 BLI_listbase_clear(&ustack->steps);
295 ustack->step_active = nullptr;
296}
297
299{
300 /* Remove active and all following undo-steps. */
301 UndoStep *us = ustack->step_active;
302
303 if (us) {
304 ustack->step_active = us->prev;
305 bool is_not_empty = ustack->step_active != nullptr;
306
307 while (ustack->steps.last != ustack->step_active) {
308 UndoStep *us_iter = static_cast<UndoStep *>(ustack->steps.last);
309 undosys_step_free_and_unlink(ustack, us_iter);
310 undosys_stack_validate(ustack, is_not_empty);
311 }
312 }
313}
314
315/* Caller is responsible for handling active. */
317{
318 if (us) {
319 bool is_not_empty = true;
320 UndoStep *us_iter;
321 do {
322 us_iter = static_cast<UndoStep *>(ustack->steps.last);
323 BLI_assert(us_iter != ustack->step_active);
324 undosys_step_free_and_unlink(ustack, us_iter);
325 undosys_stack_validate(ustack, is_not_empty);
326 } while (us != us_iter);
327 }
328}
329
330static void undosys_stack_clear_all_first(UndoStack *ustack, UndoStep *us, UndoStep *us_exclude)
331{
332 if (us && us == us_exclude) {
333 us = us->prev;
334 }
335
336 if (us) {
337 bool is_not_empty = true;
338 UndoStep *us_iter;
339 do {
340 us_iter = static_cast<UndoStep *>(ustack->steps.first);
341 if (us_iter == us_exclude) {
342 us_iter = us_iter->next;
343 }
344 BLI_assert(us_iter != ustack->step_active);
345 undosys_step_free_and_unlink(ustack, us_iter);
346 undosys_stack_validate(ustack, is_not_empty);
347 } while (us != us_iter);
348 }
349}
350
351static bool undosys_stack_push_main(UndoStack *ustack, const char *name, Main *bmain)
352{
353 UNDO_NESTED_ASSERT(false);
354 BLI_assert(ustack->step_init == nullptr);
355 CLOG_INFO(&LOG, 1, "'%s'", name);
356 bContext *C_temp = CTX_create();
357 CTX_data_main_set(C_temp, bmain);
359 ustack, C_temp, name, BKE_UNDOSYS_TYPE_MEMFILE);
360 CTX_free(C_temp);
361 return (ret & UNDO_PUSH_RET_SUCCESS);
362}
363
365{
366 UNDO_NESTED_ASSERT(false);
367 undosys_stack_push_main(ustack, IFACE_("Original"), bmain);
368}
369
371{
373 if (!ELEM(ut, nullptr, BKE_UNDOSYS_TYPE_MEMFILE)) {
374 BKE_undosys_step_push_with_type(ustack, C, IFACE_("Original Mode"), ut);
375 }
376}
377
378bool BKE_undosys_stack_has_undo(const UndoStack *ustack, const char *name)
379{
380 if (name) {
381 const UndoStep *us = static_cast<UndoStep *>(
382 BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name)));
383 return us && us->prev;
384 }
385
386 return !BLI_listbase_is_empty(&ustack->steps);
387}
388
390{
391 UndoStep *us = ustack->step_active;
392 while (us && (us->type != ut)) {
393 us = us->prev;
394 }
395 return us;
396}
397
399{
400 UNDO_NESTED_ASSERT(false);
401 CLOG_INFO(&LOG, 1, "type='%s'", ut->name);
402 if (ustack->step_init && (ustack->step_init->type == ut)) {
403 return ustack->step_init;
404 }
405 return BKE_undosys_stack_active_with_type(ustack, ut);
406}
407
409{
410 UNDO_NESTED_ASSERT(false);
411 if ((steps == -1) && (memory_limit == 0)) {
412 return;
413 }
414
415 CLOG_INFO(&LOG, 1, "steps=%d, memory_limit=%zu", steps, memory_limit);
416 UndoStep *us;
417 UndoStep *us_exclude = nullptr;
418 /* keep at least two (original + other) */
419 size_t data_size_all = 0;
420 size_t us_count = 0;
421 for (us = static_cast<UndoStep *>(ustack->steps.last); us && us->prev; us = us->prev) {
422 if (memory_limit) {
423 data_size_all += us->data_size;
424 if (data_size_all > memory_limit) {
425 CLOG_INFO(&LOG,
426 1,
427 "At step %zu: data_size_all=%zu >= memory_limit=%zu",
428 us_count,
429 data_size_all,
431 break;
432 }
433 }
434 if (steps != -1) {
435 if (us_count == steps) {
436 break;
437 }
438 if (us->skip == false) {
439 us_count += 1;
440 }
441 }
442 }
443
444 CLOG_INFO(&LOG, 1, "Total steps %zu: data_size_all=%zu", us_count, data_size_all);
445
446 if (us) {
447#ifdef WITH_GLOBAL_UNDO_KEEP_ONE
448 /* Hack, we need to keep at least one BKE_UNDOSYS_TYPE_MEMFILE. */
449 if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
450 us_exclude = us->prev;
451 while (us_exclude && us_exclude->type != BKE_UNDOSYS_TYPE_MEMFILE) {
452 us_exclude = us_exclude->prev;
453 }
454 /* Once this is outside the given number of 'steps', undoing onto this state
455 * may skip past many undo steps which is confusing, instead,
456 * disallow stepping onto this state entirely. */
457 if (us_exclude) {
458 us_exclude->skip = true;
459 }
460 }
461#endif
462 /* Free from first to last, free functions may update de-duplication info
463 * (see #MemFileUndoStep). */
464 undosys_stack_clear_all_first(ustack, us->prev, us_exclude);
465 }
466}
467
469
470/* -------------------------------------------------------------------- */
473
475 bContext *C,
476 const char *name,
477 const UndoType *ut)
478{
479 UNDO_NESTED_ASSERT(false);
480 if (ut->step_encode_init) {
481 undosys_stack_validate(ustack, false);
482
483 if (UndoStep *us = ustack->step_init) {
485 ustack->step_init = nullptr;
486 }
487 if (ustack->step_active) {
489 }
490
491 UndoStep *us = static_cast<UndoStep *>(MEM_callocN(ut->step_size, __func__));
492 if (name != nullptr) {
493 STRNCPY(us->name, name);
494 }
495 us->type = ut;
496 ustack->step_init = us;
497 CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
498 ut->step_encode_init(C, us);
499 undosys_stack_validate(ustack, false);
500 return us;
501 }
502
503 return nullptr;
504}
505
507{
508 UNDO_NESTED_ASSERT(false);
510 if (ut == nullptr) {
511 return nullptr;
512 }
513 return BKE_undosys_step_push_init_with_type(ustack, C, name, ut);
514}
515
517 bContext *C,
518 const char *name,
519 const UndoType *ut)
520{
521 BLI_assert((ut->flags & UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE) == 0 || C != nullptr);
522
523 UNDO_NESTED_ASSERT(false);
524 undosys_stack_validate(ustack, false);
525 bool is_not_empty = ustack->step_active != nullptr;
527
528 /* Might not be final place for this to be called - probably only want to call it from some
529 * undo handlers, not all of them? */
531 BKE_lib_override_library_main_operations_create(G_MAIN, false, (int *)&report_flags);
532 if (report_flags & RNA_OVERRIDE_MATCH_RESULT_CREATED) {
534 }
535
536 /* Remove all undo-steps after (also when 'ustack->step_active == nullptr'). */
537 while (ustack->steps.last != ustack->step_active) {
538 UndoStep *us_iter = static_cast<UndoStep *>(ustack->steps.last);
539 undosys_step_free_and_unlink(ustack, us_iter);
540 undosys_stack_validate(ustack, is_not_empty);
541 }
542
543 if (ustack->step_active) {
544 BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
545 }
546
547#ifdef WITH_GLOBAL_UNDO_ENSURE_UPDATED
548 if (ut->step_foreach_ID_ref != nullptr) {
549 if (G_MAIN->is_memfile_undo_written == false) {
550 const char *name_internal = "MemFile Internal (pre)";
551 /* Don't let 'step_init' cause issues when adding memfile undo step. */
552 void *step_init = ustack->step_init;
553 ustack->step_init = nullptr;
554 const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
555 /* Restore 'step_init'. */
556 ustack->step_init = static_cast<UndoStep *>(step_init);
557 if (ok) {
558 UndoStep *us = static_cast<UndoStep *>(ustack->steps.last);
559 BLI_assert(STREQ(us->name, name_internal));
560 us->skip = true;
561# ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
562 ustack->step_active_memfile = us;
563# endif
564 }
565 }
566 }
567#endif
568
569 bool use_memfile_step = false;
570 {
571 UndoStep *us = ustack->step_init ?
572 ustack->step_init :
573 static_cast<UndoStep *>(MEM_callocN(ut->step_size, __func__));
574 ustack->step_init = nullptr;
575 if (us->name[0] == '\0') {
576 STRNCPY(us->name, name);
577 }
578 us->type = ut;
579 /* True by default, code needs to explicitly set it to false if necessary. */
580 us->use_old_bmain_data = true;
581 /* Initialized, not added yet. */
582
583 CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
584
585 if (!undosys_step_encode(C, G_MAIN, ustack, us)) {
586 MEM_freeN(us);
587 undosys_stack_validate(ustack, true);
588 return retval;
589 }
590 ustack->step_active = us;
591 BLI_addtail(&ustack->steps, us);
592 use_memfile_step = us->use_memfile_step;
593 }
594
595 if (use_memfile_step) {
596 /* Make this the user visible undo state, so redo always applies
597 * on top of the mem-file undo instead of skipping it. see: #67256. */
598 UndoStep *us_prev = ustack->step_active;
599 const char *name_internal = us_prev->name;
600 const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
601 if (ok) {
602 UndoStep *us = static_cast<UndoStep *>(ustack->steps.last);
603 BLI_assert(STREQ(us->name, name_internal));
604 us_prev->skip = true;
605#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
606 ustack->step_active_memfile = us;
607#endif
608 ustack->step_active = us;
609 }
610 }
611
612 if (ustack->group_level > 0) {
613 /* Temporarily set skip for the active step.
614 * This is an invalid state which must be corrected once the last group ends. */
615 ustack->step_active->skip = true;
616 }
617
618 undosys_stack_validate(ustack, true);
619 return (retval | UNDO_PUSH_RET_SUCCESS);
620}
621
623{
624 UNDO_NESTED_ASSERT(false);
625 const UndoType *ut = ustack->step_init ? ustack->step_init->type :
627 if (ut == nullptr) {
629 }
630 return BKE_undosys_step_push_with_type(ustack, C, name, ut);
631}
632
634{
635 if (us) {
636 const UndoType *ut = us->type;
637 while ((us = us->next)) {
638 if (us->type == ut) {
639 return us;
640 }
641 }
642 }
643 return us;
644}
645
647{
648 if (us) {
649 const UndoType *ut = us->type;
650 while ((us = us->prev)) {
651 if (us->type == ut) {
652 return us;
653 }
654 }
655 }
656 return us;
657}
658
660 const char *name,
661 const UndoType *ut)
662{
663 LISTBASE_FOREACH_BACKWARD (UndoStep *, us, &ustack->steps) {
664 if (us->type == ut) {
665 if (STREQ(name, us->name)) {
666 return us;
667 }
668 }
669 }
670 return nullptr;
671}
672
674{
675 return static_cast<UndoStep *>(BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name)));
676}
677
679{
680 LISTBASE_FOREACH_BACKWARD (UndoStep *, us, &ustack->steps) {
681 if (us->type == ut) {
682 return us;
683 }
684 }
685 return nullptr;
686}
687
689 const UndoStep *us_target,
690 const UndoStep *us_reference)
691{
692 if (us_reference == nullptr) {
693 us_reference = ustack->step_active;
694 }
695
696 BLI_assert(us_reference != nullptr);
697
698 /* Note that we use heuristics to make this lookup as fast as possible in most common cases,
699 * assuming that:
700 * - Most cases are just undo or redo of one step from active one.
701 * - Otherwise, it is typically faster to check future steps since active one is usually close
702 * to the end of the list, rather than its start. */
703 /* NOTE: in case target step is the active one, we assume we are in an undo case... */
704 if (ELEM(us_target, us_reference, us_reference->prev)) {
705 return STEP_UNDO;
706 }
707 if (us_target == us_reference->next) {
708 return STEP_REDO;
709 }
710
711 /* Search forward, and then backward. */
712 for (UndoStep *us_iter = us_reference->next; us_iter != nullptr; us_iter = us_iter->next) {
713 if (us_iter == us_target) {
714 return STEP_REDO;
715 }
716 }
717 for (UndoStep *us_iter = us_reference->prev; us_iter != nullptr; us_iter = us_iter->prev) {
718 if (us_iter == us_target) {
719 return STEP_UNDO;
720 }
721 }
722
724 "Target undo step not found, this should not happen and may indicate an undo "
725 "stack corruption");
726 return STEP_INVALID;
727}
728
733static UndoStep *undosys_step_iter_first(UndoStep *us_reference, const eUndoStepDir undo_dir)
734{
735 if (us_reference->type->flags & UNDOTYPE_FLAG_DECODE_ACTIVE_STEP) {
736 /* Reading this step means an undo action reads undo twice.
737 * This should be avoided where possible, however some undo systems require it.
738 *
739 * Redo skips the current state as this represents the currently loaded state. */
740 return (undo_dir == -1) ? us_reference : us_reference->next;
741 }
742
743 /* Typical case, skip reading the current undo step. */
744 return (undo_dir == -1) ? us_reference->prev : us_reference->next;
745}
746
748 bContext *C,
749 UndoStep *us_target,
750 UndoStep *us_reference,
751 const bool use_skip)
752{
753 UNDO_NESTED_ASSERT(false);
754 if (us_target == nullptr) {
755 CLOG_ERROR(&LOG, "called with a nullptr target step");
756 return false;
757 }
758 undosys_stack_validate(ustack, true);
759
760 if (us_reference == nullptr) {
761 us_reference = ustack->step_active;
762 }
763 if (us_reference == nullptr) {
764 CLOG_ERROR(&LOG, "could not find a valid initial active target step as reference");
765 return false;
766 }
767
768 /* This considers we are in undo case if both `us_target` and `us_reference` are the same. */
769 const eUndoStepDir undo_dir = BKE_undosys_step_calc_direction(ustack, us_target, us_reference);
770 BLI_assert(undo_dir != STEP_INVALID);
771
772 /* This will be the active step once the undo process is complete.
773 *
774 * In case we do skip 'skipped' steps, the final active step may be several steps backward from
775 * the one passed as parameter. */
776 UndoStep *us_target_active = us_target;
777 if (use_skip) {
778 while (us_target_active != nullptr && us_target_active->skip) {
779 us_target_active = (undo_dir == -1) ? us_target_active->prev : us_target_active->next;
780 }
781 if (us_target_active == nullptr) {
782 CLOG_INFO(&LOG,
783 2,
784 "undo/redo did not find a step after stepping over skip-steps "
785 "(undo limit exceeded)");
786 return false;
787 }
788 }
789
790 CLOG_INFO(&LOG,
791 1,
792 "addr=%p, name='%s', type='%s', undo_dir=%d",
793 us_target,
794 us_target->name,
795 us_target->type->name,
796 undo_dir);
797
798 /* Undo/Redo steps until we reach given target step (or beyond if it has to be skipped),
799 * from given reference step. */
800 bool is_processing_extra_skipped_steps = false;
801 for (UndoStep *us_iter = undosys_step_iter_first(us_reference, undo_dir); us_iter != nullptr;
802 us_iter = (undo_dir == -1) ? us_iter->prev : us_iter->next)
803 {
804 BLI_assert(us_iter != nullptr);
805
806 const bool is_final = (us_iter == us_target_active);
807
808 if (!is_final && is_processing_extra_skipped_steps) {
809 BLI_assert(us_iter->skip == true);
810 CLOG_INFO(&LOG,
811 2,
812 "undo/redo continue with skip addr=%p, name='%s', type='%s'",
813 us_iter,
814 us_iter->name,
815 us_iter->type->name);
816 }
817
818 undosys_step_decode(C, G_MAIN, ustack, us_iter, undo_dir, is_final);
819 ustack->step_active = us_iter;
820
821 if (us_iter == us_target) {
822 is_processing_extra_skipped_steps = true;
823 }
824
825 if (is_final) {
826 /* Undo/Redo process is finished and successful. */
827 return true;
828 }
829 }
830
832 false,
833 "This should never be reached, either undo stack is corrupted, or code above is buggy");
834 return false;
835}
836
838{
839 /* Note that here we do not skip 'skipped' steps by default. */
840 return BKE_undosys_step_load_data_ex(ustack, C, us_target, nullptr, false);
841}
842
843void BKE_undosys_step_load_from_index(UndoStack *ustack, bContext *C, const int index)
844{
845 UndoStep *us_target = static_cast<UndoStep *>(BLI_findlink(&ustack->steps, index));
846 BLI_assert(us_target->skip == false);
847 if (us_target == ustack->step_active) {
848 return;
849 }
850 BKE_undosys_step_load_data(ustack, C, us_target);
851}
852
854 bContext *C,
855 UndoStep *us_target,
856 bool use_skip)
857{
858 /* In case there is no active step, we consider we just load given step, so reference must be
859 * itself (due to weird 'load current active step in undo case' thing, see comments in
860 * #BKE_undosys_step_load_data_ex). */
861 UndoStep *us_reference = ustack->step_active != nullptr ? ustack->step_active : us_target;
862
863 BLI_assert(BKE_undosys_step_calc_direction(ustack, us_target, us_reference) == -1);
864
865 return BKE_undosys_step_load_data_ex(ustack, C, us_target, us_reference, use_skip);
866}
867
869{
870 return BKE_undosys_step_undo_with_data_ex(ustack, C, us_target, true);
871}
872
874{
875 if (ustack->step_active != nullptr) {
876 return BKE_undosys_step_undo_with_data(ustack, C, ustack->step_active->prev);
877 }
878 return false;
879}
880
882 bContext *C,
883 UndoStep *us_target,
884 bool use_skip)
885{
886 /* In case there is no active step, we consider we just load given step, so reference must be
887 * the previous one. */
888 UndoStep *us_reference = ustack->step_active != nullptr ? ustack->step_active : us_target->prev;
889
890 BLI_assert(BKE_undosys_step_calc_direction(ustack, us_target, us_reference) == 1);
891
892 return BKE_undosys_step_load_data_ex(ustack, C, us_target, us_reference, use_skip);
893}
894
896{
897 return BKE_undosys_step_redo_with_data_ex(ustack, C, us_target, true);
898}
899
901{
902 if (ustack->step_active != nullptr) {
903 return BKE_undosys_step_redo_with_data(ustack, C, ustack->step_active->next);
904 }
905 return false;
906}
907
909{
910 UndoType *ut = MEM_callocN<UndoType>(__func__);
911
912 undosys_fn(ut);
913
915
916 return ut;
917}
918
920{
921 while (UndoType *ut = static_cast<UndoType *>(BLI_pophead(&g_undo_types))) {
922 MEM_freeN(ut);
923 }
924}
925
927
928/* -------------------------------------------------------------------- */
946
948{
949 BLI_assert(ustack->group_level >= 0);
950 ustack->group_level += 1;
951}
952
954{
955 ustack->group_level -= 1;
956 BLI_assert(ustack->group_level >= 0);
957
958 if (ustack->group_level == 0) {
959 if (LIKELY(ustack->step_active != nullptr)) {
960 ustack->step_active->skip = false;
961 }
962 }
963}
964
966
967/* -------------------------------------------------------------------- */
972
973static void UNUSED_FUNCTION(BKE_undosys_foreach_ID_ref(UndoStack *ustack,
974 UndoTypeForEachIDRefFn foreach_ID_ref_fn,
975 void *user_data))
976{
977 LISTBASE_FOREACH (UndoStep *, us, &ustack->steps) {
978 const UndoType *ut = us->type;
979 if (ut->step_foreach_ID_ref != nullptr) {
980 ut->step_foreach_ID_ref(us, foreach_ID_ref_fn, user_data);
981 }
982 }
983}
984
986
987/* -------------------------------------------------------------------- */
990
992{
993 printf("Undo %d Steps (*: active, #=applied, M=memfile-active, S=skip)\n",
994 BLI_listbase_count(&ustack->steps));
995 int index = 0;
996 LISTBASE_FOREACH (UndoStep *, us, &ustack->steps) {
997 printf("[%c%c%c%c] %3d {%p} type='%s', name='%s'\n",
998 (us == ustack->step_active) ? '*' : ' ',
999 us->is_applied ? '#' : ' ',
1000 (us == ustack->step_active_memfile) ? 'M' : ' ',
1001 us->skip ? 'S' : ' ',
1002 index,
1003 (void *)us,
1004 us->type->name,
1005 us->name);
1006 index++;
1007 }
1008}
1009
void BKE_memfile_undo_free(MemFileUndoData *mfu)
void CTX_data_main_set(bContext *C, Main *bmain)
void CTX_free(bContext *C)
bContext * CTX_create()
#define G_MAIN
ID * BKE_libblock_find_name_and_library_filepath(Main *bmain, short type, const char *name, const char *lib_filepath_abs)
Definition lib_id.cc:1749
void BKE_lib_override_library_main_operations_create(Main *bmain, bool force_auto, int *r_report_flags)
@ UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE
@ UNDOTYPE_FLAG_DECODE_ACTIVE_STEP
const UndoType * BKE_UNDOSYS_TYPE_SCULPT
void(*)(void *user_data, UndoRefID *id_ref) UndoTypeForEachIDRefFn
const UndoType * BKE_UNDOSYS_TYPE_MEMFILE
const UndoType * BKE_UNDOSYS_TYPE_PARTICLE
eUndoStepDir
@ STEP_INVALID
@ STEP_UNDO
@ STEP_REDO
const UndoType * BKE_UNDOSYS_TYPE_TEXT
eUndoPushReturn
@ UNDO_PUSH_RET_SUCCESS
@ UNDO_PUSH_RET_OVERRIDE_CHANGED
@ UNDO_PUSH_RET_FAILURE
const UndoType * BKE_UNDOSYS_TYPE_PAINTCURVE
const UndoType * BKE_UNDOSYS_TYPE_IMAGE
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:53
int BLI_findindex(const ListBase *listbase, const void *vlink) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:586
void * BLI_findlink(const ListBase *listbase, int number) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:534
#define LISTBASE_FOREACH(type, var, list)
BLI_INLINE void BLI_listbase_clear(ListBase *lb)
BLI_INLINE bool BLI_listbase_is_empty(const ListBase *lb)
#define LISTBASE_FOREACH_BACKWARD(type, var, list)
void BLI_addtail(ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:111
void BLI_remlink(ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:131
void * BLI_rfindstring(const ListBase *listbase, const char *id, int offset) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:626
int BLI_listbase_count(const ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:524
void * BLI_pophead(ListBase *listbase) ATTR_NONNULL(1)
Definition listbase.cc:252
char * STRNCPY(char(&dst)[N], const char *src)
Definition BLI_string.h:688
#define UNUSED_FUNCTION(x)
#define ELEM(...)
#define STREQ(a, b)
#define LIKELY(x)
#define IFACE_(msgid)
#define CLOG_ERROR(clg_ref,...)
Definition CLG_log.h:182
#define CLOG_INFO(clg_ref, level,...)
Definition CLG_log.h:179
These structs are the foundation for all linked lists in the library system.
Read Guarded memory(de)allocation.
eRNAOverrideMatchResult
@ RNA_OVERRIDE_MATCH_RESULT_CREATED
@ RNA_OVERRIDE_MATCH_RESULT_INIT
#define C
Definition RandGen.cpp:29
#define offsetof(t, d)
#define printf(...)
#define GS(a)
#define LOG(severity)
Definition log.h:32
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
return ret
static const int steps
struct Library * lib
Definition DNA_ID.h:410
char name[66]
Definition DNA_ID.h:415
LibraryRuntimeHandle * runtime
Definition DNA_ID.h:516
void * last
void * first
char name[MAX_ID_NAME]
struct ID * ptr
char library_filepath_abs[FILE_MAX]
UndoStep * step_init
UndoStep * step_active_memfile
UndoStep * step_active
ListBase steps
size_t data_size
UndoStep * prev
bool use_old_bmain_data
UndoStep * next
const UndoType * type
bool use_memfile_step
char name[64]
void(* step_encode_init)(bContext *C, UndoStep *us)
void(* step_foreach_ID_ref)(UndoStep *us, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
const char * name
void(* step_free)(UndoStep *us)
void(* step_decode)(bContext *C, Main *bmain, UndoStep *us, eUndoStepDir dir, bool is_final)
bool(* step_encode)(bContext *C, Main *bmain, UndoStep *us)
static void undosys_id_ref_resolve(void *user_data, UndoRefID *id_ref)
static UndoStep * undosys_step_iter_first(UndoStep *us_reference, const eUndoStepDir undo_dir)
bool BKE_undosys_step_undo_with_data_ex(UndoStack *ustack, bContext *C, UndoStep *us_target, bool use_skip)
bool BKE_undosys_stack_has_undo(const UndoStack *ustack, const char *name)
bool BKE_undosys_step_redo_with_data(UndoStack *ustack, bContext *C, UndoStep *us_target)
void BKE_undosys_stack_init_from_context(UndoStack *ustack, bContext *C)
UndoStep * BKE_undosys_step_find_by_name_with_type(UndoStack *ustack, const char *name, const UndoType *ut)
bool BKE_undosys_step_redo(UndoStack *ustack, bContext *C)
UndoStep * BKE_undosys_step_find_by_type(UndoStack *ustack, const UndoType *ut)
UndoStep * BKE_undosys_step_same_type_next(UndoStep *us)
static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty)
bool BKE_undosys_step_undo(UndoStack *ustack, bContext *C)
void BKE_undosys_stack_clear_active(UndoStack *ustack)
#define UNDO_NESTED_CHECK_END
static const UndoType * BKE_undosys_type_from_context(bContext *C)
static ListBase g_undo_types
bool BKE_undosys_step_undo_with_data(UndoStack *ustack, bContext *C, UndoStep *us_target)
static bool g_undo_callback_running
#define UNDO_NESTED_CHECK_BEGIN
eUndoPushReturn BKE_undosys_step_push(UndoStack *ustack, bContext *C, const char *name)
eUndoStepDir BKE_undosys_step_calc_direction(const UndoStack *ustack, const UndoStep *us_target, const UndoStep *us_reference)
static void undosys_id_ref_store(void *, UndoRefID *id_ref)
static void undosys_stack_clear_all_last(UndoStack *ustack, UndoStep *us)
UndoStep * BKE_undosys_step_same_type_prev(UndoStep *us)
static void undosys_stack_clear_all_first(UndoStack *ustack, UndoStep *us, UndoStep *us_exclude)
void BKE_undosys_type_free_all()
void BKE_undosys_stack_clear(UndoStack *ustack)
static bool undosys_stack_push_main(UndoStack *ustack, const char *name, Main *bmain)
void BKE_undosys_stack_limit_steps_and_memory(UndoStack *ustack, int steps, size_t memory_limit)
bool BKE_undosys_step_load_data_ex(UndoStack *ustack, bContext *C, UndoStep *us_target, UndoStep *us_reference, const bool use_skip)
UndoStep * BKE_undosys_step_find_by_name(UndoStack *ustack, const char *name)
static void undosys_step_free_and_unlink(UndoStack *ustack, UndoStep *us)
void BKE_undosys_step_load_from_index(UndoStack *ustack, bContext *C, const int index)
eUndoPushReturn BKE_undosys_step_push_with_type(UndoStack *ustack, bContext *C, const char *name, const UndoType *ut)
UndoStep * BKE_undosys_step_push_init_with_type(UndoStack *ustack, bContext *C, const char *name, const UndoType *ut)
void BKE_undosys_stack_destroy(UndoStack *ustack)
void BKE_undosys_stack_init_from_main(UndoStack *ustack, Main *bmain)
#define UNDO_NESTED_ASSERT(state)
void bke_undo_system_linker_workaround()
void BKE_undosys_stack_group_end(UndoStack *ustack)
static bool undosys_step_encode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us)
UndoStep * BKE_undosys_stack_active_with_type(UndoStack *ustack, const UndoType *ut)
bool BKE_undosys_step_load_data(UndoStack *ustack, bContext *C, UndoStep *us_target)
UndoStack * BKE_undosys_stack_create()
bool BKE_undosys_step_redo_with_data_ex(UndoStack *ustack, bContext *C, UndoStep *us_target, bool use_skip)
UndoType * BKE_undosys_type_append(void(*undosys_fn)(UndoType *))
void BKE_undosys_print(UndoStack *ustack)
static void undosys_step_decode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us, const eUndoStepDir dir, bool is_final)
UndoStep * BKE_undosys_stack_init_or_active_with_type(UndoStack *ustack, const UndoType *ut)
UndoStep * BKE_undosys_step_push_init(UndoStack *ustack, bContext *C, const char *name)
void BKE_undosys_stack_group_begin(UndoStack *ustack)
size_t memory_limit