/* Blender V5.0 — tracking_stabilize.cc (2D stabilization of movie-clip footage). */
1/* SPDX-FileCopyrightText: 2011 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include <climits>
12
13#include "DNA_movieclip_types.h"
14#include "DNA_scene_types.h"
15#include "RNA_prototypes.hh"
16
17#include "BLI_ghash.h"
18#include "BLI_listbase.h"
19#include "BLI_math_geom.h"
20#include "BLI_math_rotation.h"
21#include "BLI_math_vector.h"
22#include "BLI_sort_utils.h"
23#include "BLI_task.h"
24
25#include "BKE_fcurve.hh"
26#include "BKE_movieclip.h"
27#include "BKE_tracking.h"
28
30#include "IMB_imbuf.hh"
31#include "IMB_imbuf_types.hh"
32#include "IMB_interp.hh"
33#include "MEM_guardedalloc.h"
34
/* == Parameterization constants == */

/* When measuring the scale changes relative to the rotation pivot point, it
 * might happen accidentally that a probe point (tracking point), which doesn't
 * actually move on a circular path, gets very close to the pivot point, causing
 * the measured scale contribution to go toward infinity. We damp this undesired
 * effect by adding a bias (floor) to the measured distances, which will
 * dominate very small distances and thus cause the corresponding track's
 * contribution to diminish.
 * Measurements happen in normalized (0...1) coordinates within a frame.
 */
static const float SCALE_ERROR_LIMIT_BIAS = 0.01f;

/* When to consider a track as completely faded out.
 * This is used in conjunction with the "disabled" flag of the track
 * to determine start positions, end positions and gaps.
 */
static const float EPSILON_WEIGHT = 0.005f;
53
54/* == private working data == */
55
56/* Per track baseline for stabilization, defined at reference frame.
57 * A track's reference frame is chosen as close as possible to the (global)
58 * anchor_frame. Baseline holds the constant part of each track's contribution
59 * to the observed movement; it is calculated at initialization pass, using the
60 * measurement value at reference frame plus the average contribution to fill
61 * the gap between global anchor_frame and the reference frame for this track.
62 * This struct with private working data is associated to the local call context
63 * via `StabContext::private_track_data`
64 */
67
68 /* measured relative to translated pivot */
70
71 /* measured relative to translated pivot */
73
76};
77
78/* Tracks are reordered for initialization, starting as close as possible to
79 * anchor_frame
80 */
86
87/* Per frame private working data, for accessing possibly animated values. */
101
107
109 MovieTrackingTrack *track,
110 TrackStabilizationBase *private_data)
111{
112 BLI_ghash_insert(ctx->private_track_data, track, private_data);
113}
114
116{
117 if (val != nullptr) {
118 MEM_freeN(static_cast<TrackStabilizationBase *>(val));
119 }
120}
121
122/* == access animated values for given frame == */
123
124static FCurve *retrieve_stab_animation(MovieClip *clip, const char *data_path, int idx)
125{
126 return id_data_find_fcurve(&clip->id,
127 &clip->tracking.stabilization,
128 &RNA_MovieTrackingStabilization,
129 data_path,
130 idx,
131 nullptr);
132}
133
135{
136 return id_data_find_fcurve(&clip->id, track, &RNA_MovieTrackingTrack, "weight_stab", 0, nullptr);
137}
138
139static float fetch_from_fcurve(const FCurve *animationCurve,
140 int framenr,
141 StabContext *ctx,
142 float default_value)
143{
144 if (ctx && ctx->use_animation && animationCurve) {
145 int scene_framenr = BKE_movieclip_remap_clip_to_scene_frame(ctx->clip, framenr);
146 return evaluate_fcurve(animationCurve, scene_framenr);
147 }
148 return default_value;
149}
150
151static float get_animated_locinf(StabContext *ctx, int framenr)
152{
153 return fetch_from_fcurve(ctx->locinf, framenr, ctx, ctx->stab->locinf);
154}
155
156static float get_animated_rotinf(StabContext *ctx, int framenr)
157{
158 return fetch_from_fcurve(ctx->rotinf, framenr, ctx, ctx->stab->rotinf);
159}
160
161static float get_animated_scaleinf(StabContext *ctx, int framenr)
162{
163 return fetch_from_fcurve(ctx->scaleinf, framenr, ctx, ctx->stab->scaleinf);
164}
165
166static void get_animated_target_pos(StabContext *ctx, int framenr, float target_pos[2])
167{
168 target_pos[0] = fetch_from_fcurve(ctx->target_pos[0], framenr, ctx, ctx->stab->target_pos[0]);
169 target_pos[1] = fetch_from_fcurve(ctx->target_pos[1], framenr, ctx, ctx->stab->target_pos[1]);
170}
171
172static float get_animated_target_rot(StabContext *ctx, int framenr)
173{
174 return fetch_from_fcurve(ctx->target_rot, framenr, ctx, ctx->stab->target_rot);
175}
176
177static float get_animated_target_scale(StabContext *ctx, int framenr)
178{
179 return fetch_from_fcurve(ctx->target_scale, framenr, ctx, ctx->stab->scale);
180}
181
182static float get_animated_weight(StabContext *ctx, MovieTrackingTrack *track, int framenr)
183{
185 if (working_data && working_data->track_weight_curve) {
186 int scene_framenr = BKE_movieclip_remap_clip_to_scene_frame(ctx->clip, framenr);
187 return evaluate_fcurve(working_data->track_weight_curve, scene_framenr);
188 }
189 /* Use weight at global 'current frame' as fallback default. */
190 return track->weight_stab;
191}
192
193static void use_values_from_fcurves(StabContext *ctx, bool toggle)
194{
195 if (ctx != nullptr) {
196 ctx->use_animation = toggle;
197 }
198}
199
200/* Prepare per call private working area.
201 * Used for access to possibly animated values: retrieve available F-Curves.
202 */
204{
205 StabContext *ctx = MEM_callocN<StabContext>("2D stabilization animation runtime data");
206 ctx->clip = clip;
207 ctx->tracking = &clip->tracking;
208 ctx->stab = &clip->tracking.stabilization;
209 ctx->private_track_data = BLI_ghash_ptr_new("2D stabilization per track private working data");
210 ctx->locinf = retrieve_stab_animation(clip, "influence_location", 0);
211 ctx->rotinf = retrieve_stab_animation(clip, "influence_rotation", 0);
212 ctx->scaleinf = retrieve_stab_animation(clip, "influence_scale", 0);
213 ctx->target_pos[0] = retrieve_stab_animation(clip, "target_pos", 0);
214 ctx->target_pos[1] = retrieve_stab_animation(clip, "target_pos", 1);
215 ctx->target_rot = retrieve_stab_animation(clip, "target_rot", 0);
216 ctx->target_scale = retrieve_stab_animation(clip, "target_zoom", 0);
217 ctx->use_animation = true;
218 return ctx;
219}
220
230{
231 if (ctx != nullptr) {
233 MEM_freeN(ctx);
234 }
235}
236
238{
240 return (working_data != nullptr && working_data->is_init_for_stabilization);
241}
242
244{
245 return (track->flag & TRACK_USE_2D_STAB) && is_init_for_stabilization(ctx, track);
246}
247
249 MovieTrackingTrack *track,
250 MovieTrackingMarker *marker)
251{
252 return (marker->flag & MARKER_DISABLED) ||
253 (EPSILON_WEIGHT > get_animated_weight(ctx, track, marker->framenr));
254}
255
256static int search_closest_marker_index(MovieTrackingTrack *track, int ref_frame)
257{
258 const MovieTrackingMarker *marker = BKE_tracking_marker_get(track, ref_frame);
259 return marker - track->markers;
260}
261
263 StabContext *ctx, MovieTrackingTrack *track, int i, int ref_frame, int *next_higher)
264{
265 MovieTrackingMarker *markers = track->markers;
266 int end = track->markersnr;
267 BLI_assert(0 <= i && i < end);
268
269 while (i < end &&
270 (markers[i].framenr < ref_frame || is_effectively_disabled(ctx, track, &markers[i])))
271 {
272 i++;
273 }
274 if (i < end && markers[i].framenr < *next_higher) {
275 BLI_assert(markers[i].framenr >= ref_frame);
276 *next_higher = markers[i].framenr;
277 }
278}
279
281 StabContext *ctx, MovieTrackingTrack *track, int i, int ref_frame, int *next_lower)
282{
283 MovieTrackingMarker *markers = track->markers;
284 BLI_assert(0 <= i && i < track->markersnr);
285 while (i >= 0 &&
286 (markers[i].framenr > ref_frame || is_effectively_disabled(ctx, track, &markers[i])))
287 {
288 i--;
289 }
290 if (0 <= i && markers[i].framenr > *next_lower) {
291 BLI_assert(markers[i].framenr <= ref_frame);
292 *next_lower = markers[i].framenr;
293 }
294}
295
296/* Find closest frames with usable stabilization data.
297 * A frame counts as _usable_ when there is at least one track marked for
298 * translation stabilization, which has an enabled tracking marker at this very
299 * frame. We search both for the next lower and next higher position, to allow
300 * the caller to interpolate gaps and to extrapolate at the ends of the
301 * definition range. */
303 int framenr,
304 int *next_lower,
305 int *next_higher)
306{
307 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(ctx->tracking);
308
309 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
310 if (is_usable_for_stabilization(ctx, track)) {
311 int startpoint = search_closest_marker_index(track, framenr);
312 retrieve_next_higher_usable_frame(ctx, track, startpoint, framenr, next_higher);
313 retrieve_next_lower_usable_frame(ctx, track, startpoint, framenr, next_lower);
314 }
315 }
316}
317
318/* Find active (enabled) marker closest to the reference frame. */
320 MovieTrackingTrack *track,
321 int ref_frame)
322{
323 int next_lower = MINAFRAME;
324 int next_higher = MAXFRAME;
325 int i = search_closest_marker_index(track, ref_frame);
326 retrieve_next_higher_usable_frame(ctx, track, i, ref_frame, &next_higher);
327 retrieve_next_lower_usable_frame(ctx, track, i, ref_frame, &next_lower);
328
329 if ((next_higher - ref_frame) < (ref_frame - next_lower)) {
330 return BKE_tracking_marker_get_exact(track, next_higher);
331 }
332
333 return BKE_tracking_marker_get_exact(track, next_lower);
334}
335
336/* Retrieve tracking data, if available and applicable for this frame.
337 * The returned weight value signals the validity; data recorded for this
338 * tracking marker on the exact requested frame is output with the full weight
339 * of this track, while gaps in the data sequence cause the weight to go to zero.
340 */
342 MovieTrackingTrack *track,
343 int framenr,
344 float *r_weight)
345{
346 MovieTrackingMarker *marker = BKE_tracking_marker_get_exact(track, framenr);
347 if (marker != nullptr && !(marker->flag & MARKER_DISABLED)) {
348 *r_weight = get_animated_weight(ctx, track, framenr);
349 return marker;
350 }
351
352 /* No marker at this frame (=gap) or marker disabled. */
353 *r_weight = 0.0f;
354 return nullptr;
355}
356
357/* Define the reference point for rotation/scale measurement and compensation.
358 * The stabilizer works by assuming the image was distorted by a affine linear
359 * transform, i.e. it was rotated and stretched around this reference point
360 * (pivot point) and then shifted laterally. Any scale and orientation changes
361 * will be picked up relative to this point. And later the image will be
362 * stabilized by rotating around this point. The result can only be as
363 * accurate as this pivot point actually matches the real rotation center
364 * of the actual movements. Thus any scheme to define a pivot point is
365 * always guesswork.
366 *
367 * As a simple default, we use the weighted average of the location markers
368 * of the current frame as pivot point. TODO: It is planned to add further
369 * options, like e.g. anchoring the pivot point at the canvas. Moreover,
370 * it is planned to allow for a user controllable offset.
371 */
372static void setup_pivot(const float ref_pos[2], float r_pivot[2])
373{
374 zero_v2(r_pivot); /* TODO: add an animated offset position here. */
375 add_v2_v2(r_pivot, ref_pos);
376}
377
378/* Calculate the contribution of a single track at the time position (frame) of
379 * the given marker. Each track has a local reference frame, which is as close
380 * as possible to the global anchor_frame. Thus the translation contribution is
381 * comprised of the offset relative to the image position at that reference
382 * frame, plus a guess of the contribution for the time span between the
383 * anchor_frame and the local reference frame of this track. The constant part
384 * of this contribution is precomputed initially. At the anchor_frame, by
385 * definition the contribution of all tracks is zero, keeping the frame in place.
386 *
387 * track_ref is per track baseline contribution at reference frame; filled in at
388 * initialization
389 * marker is tracking data to use as contribution for current frame.
390 * result_offset is a total cumulated contribution of this track,
391 * relative to the stabilization anchor_frame,
392 * in normalized (0...1) coordinates.
393 */
395 MovieTrackingMarker *marker,
396 float result_offset[2])
397{
398 add_v2_v2v2(result_offset, track_ref->stabilization_offset_base, marker->pos);
399}
400
401/* Similar to the ::translation_contribution(), the rotation contribution is
402 * comprised of the contribution by this individual track, and the averaged
403 * contribution from anchor_frame to the ref point of this track.
404 * - Contribution is in terms of angles, -pi < angle < +pi, and all averaging
405 * happens in this domain.
406 * - Yet the actual measurement happens as vector between pivot and the current
407 * tracking point
408 * - Currently we use the center of frame as approximation for the rotation pivot
409 * point.
410 * - Moreover, the pivot point has to be compensated for the already determined
411 * shift offset, in order to get the pure rotation around the pivot.
412 * To turn this into a _contribution_, the likewise corrected angle at the
413 * reference frame has to be subtracted, to get only the pure angle difference
414 * this tracking point has captured.
415 * - To get from vectors to angles, we have to go through an arcus tangens,
416 * which involves the issue of the definition range: the resulting angles will
417 * flip by 360deg when the measured vector passes from the 2nd to the third
418 * quadrant, thus messing up the average calculation. Since _any_ tracking
419 * point might be used, these problems are quite common in practice.
420 * - Thus we perform the subtraction of the reference and the addition of the
421 * baseline contribution in polar coordinates as simple addition of angles;
422 * since these parts are fixed, we can bake them into a rotation matrix.
423 * With this approach, the border of the arcus tangens definition range will
424 * be reached only, when the _whole_ contribution approaches +- 180deg,
425 * meaning we've already tilted the frame upside down. This situation is way
426 * less common and can be tolerated.
427 * - As an additional feature, when activated, also changes in image scale
428 * relative to the rotation center can be picked up. To handle those values
429 * in the same framework, we average the scales as logarithms.
430 *
431 * aspect is a total aspect ratio of the undistorted image (includes fame and
432 * pixel aspect). The function returns a quality factor, which can be used
433 * to damp the contributions of points in close proximity to the pivot point,
434 * since such contributions might be dominated by rounding errors and thus
435 * poison the calculated average. When the quality factor goes towards zero,
436 * the weight of this contribution should be reduced accordingly.
437 */
439 MovieTrackingMarker *marker,
440 const float aspect,
441 const float pivot[2],
442 float *result_angle,
443 float *result_scale)
444{
445 float len, quality;
446 float pos[2];
447 sub_v2_v2v2(pos, marker->pos, pivot);
448
449 pos[0] *= aspect;
451
452 *result_angle = atan2f(pos[1], pos[0]);
453
454 len = len_v2(pos);
455
456 /* prevent points very close to the pivot point from poisoning the result */
459
460 *result_scale = len * track_ref->stabilization_scale_base;
461 BLI_assert(0.0 < *result_scale);
462
463 return quality;
464}
465
466/* Workaround to allow for rotation around an arbitrary pivot point.
467 * Currently, the public API functions do not support this flexibility.
468 * Rather, rotation will always be applied around a fixed origin.
469 * As a workaround, we shift the image after rotation to match the
470 * desired rotation center. And since this offset needs to be applied
471 * after the rotation and scaling, we can collapse it with the
472 * translation compensation, which is also a lateral shift (offset).
473 * The offset to apply is intended_pivot - rotated_pivot
474 */
475static void compensate_rotation_center(const int size,
476 float aspect,
477 const float angle,
478 const float scale,
479 const float pivot[2],
480 float result_translation[2])
481{
482 const float origin[2] = {0.5f * aspect * size, 0.5f * size};
483 float intended_pivot[2], rotated_pivot[2];
484 float rotation_mat[2][2];
485
486 copy_v2_v2(intended_pivot, pivot);
487 copy_v2_v2(rotated_pivot, pivot);
488 angle_to_mat2(rotation_mat, +angle);
489 sub_v2_v2(rotated_pivot, origin);
490 mul_m2_v2(rotation_mat, rotated_pivot);
491 mul_v2_fl(rotated_pivot, scale);
492 add_v2_v2(rotated_pivot, origin);
493 add_v2_v2(result_translation, intended_pivot);
494 sub_v2_v2(result_translation, rotated_pivot);
495}
496
497/* Weighted average of the per track cumulated contributions at given frame.
498 * Returns truth if all desired calculations could be done and all averages are
499 * available.
500 *
501 * NOTE: Even if the result is not `true`, the returned translation and angle
502 * are always sensible and as good as can be. Especially in the
503 * initialization phase we might not be able to get any average (yet) or
504 * get only a translation value. Since initialization visits tracks in a
505 * specific order, starting from anchor_frame, the result is logically
506 * correct non the less. But under normal operation conditions,
507 * a result of `false` should disable the stabilization function
508 */
510 int framenr,
511 float aspect,
512 float r_translation[2],
513 float r_pivot[2],
514 float *r_angle,
515 float *r_scale_step)
516{
517 bool ok;
518 float weight_sum;
519 MovieTracking *tracking = ctx->tracking;
520 MovieTrackingStabilization *stab = &tracking->stabilization;
521 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(ctx->tracking);
522 float ref_pos[2];
524
525 zero_v2(r_translation);
526 *r_scale_step = 0.0f; /* logarithm */
527 *r_angle = 0.0f;
528
529 zero_v2(ref_pos);
530
531 ok = false;
532 weight_sum = 0.0f;
533 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
534 if (!is_init_for_stabilization(ctx, track)) {
535 continue;
536 }
537 if (track->flag & TRACK_USE_2D_STAB) {
538 float weight = 0.0f;
539 MovieTrackingMarker *marker = get_tracking_data_point(ctx, track, framenr, &weight);
540 if (marker) {
542 track);
543 BLI_assert(stabilization_base != nullptr);
544 float offset[2];
545 weight_sum += weight;
546 translation_contribution(stabilization_base, marker, offset);
547 r_translation[0] += weight * offset[0];
548 r_translation[1] += weight * offset[1];
549 ref_pos[0] += weight * marker->pos[0];
550 ref_pos[1] += weight * marker->pos[1];
551 ok |= (weight_sum > EPSILON_WEIGHT);
552 }
553 }
554 }
555 if (!ok) {
556 return false;
557 }
558
559 ref_pos[0] /= weight_sum;
560 ref_pos[1] /= weight_sum;
561 r_translation[0] /= weight_sum;
562 r_translation[1] /= weight_sum;
563 setup_pivot(ref_pos, r_pivot);
564
565 if (!(stab->flag & TRACKING_STABILIZE_ROTATION)) {
566 return ok;
567 }
568
569 ok = false;
570 weight_sum = 0.0f;
571 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
572 if (!is_init_for_stabilization(ctx, track)) {
573 continue;
574 }
575 if (track->flag & TRACK_USE_2D_STAB_ROT) {
576 float weight = 0.0f;
577 MovieTrackingMarker *marker = get_tracking_data_point(ctx, track, framenr, &weight);
578 if (marker) {
580 track);
581 BLI_assert(stabilization_base != nullptr);
582 float rotation, scale, quality;
583 quality = rotation_contribution(
584 stabilization_base, marker, aspect, r_pivot, &rotation, &scale);
585 const float quality_weight = weight * quality;
586 weight_sum += quality_weight;
587 *r_angle += rotation * quality_weight;
588 if (stab->flag & TRACKING_STABILIZE_SCALE) {
589 *r_scale_step += logf(scale) * quality_weight;
590 }
591 else {
592 *r_scale_step = 0;
593 }
594 /* NOTE: Use original marker weight and not the scaled one with the proximity here to allow
595 * simple stabilization setups when there is a single track in a close proximity of the
596 * center. */
597 ok |= (weight > EPSILON_WEIGHT);
598 }
599 }
600 }
601 if (ok) {
602 *r_scale_step /= weight_sum;
603 *r_angle /= weight_sum;
604 }
605 else {
606 /* We reach this point because translation could be calculated,
607 * but rotation/scale found no data to work on.
608 */
609 *r_scale_step = 0.0f;
610 *r_angle = 0.0f;
611 }
612 return true;
613}
614
615/* Calculate weight center of location tracks for given frame.
616 * This function performs similar calculations as average_track_contributions(),
617 * but does not require the tracks to be initialized for stabilization. Moreover,
618 * when there is no usable tracking data for the given frame number, data from
619 * a neighboring frame is used. Thus this function can be used to calculate
620 * a starting point on initialization.
621 */
static void average_marker_positions(StabContext *ctx, int framenr, float r_ref_pos[2])
{
  /* Weighted average position of all location-stabilization tracks at
   * `framenr`, written to `r_ref_pos` in normalized (0...1) coordinates.
   * Unlike average_track_contributions(), tracks need not be initialized for
   * stabilization; when no track has usable data at this frame, the nearest
   * usable neighboring frame is used instead (recursively). */
  bool ok = false;
  float weight_sum;
  MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(ctx->tracking);

  zero_v2(r_ref_pos);
  weight_sum = 0.0f;
  LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
    if (track->flag & TRACK_USE_2D_STAB) {
      float weight = 0.0f;
      MovieTrackingMarker *marker = get_tracking_data_point(ctx, track, framenr, &weight);
      if (marker) {
        /* Accumulate weighted positions; `ok` only once the cumulated weight
         * is significant (see EPSILON_WEIGHT). */
        weight_sum += weight;
        r_ref_pos[0] += weight * marker->pos[0];
        r_ref_pos[1] += weight * marker->pos[1];
        ok |= (weight_sum > EPSILON_WEIGHT);
      }
    }
  }
  if (ok) {
    r_ref_pos[0] /= weight_sum;
    r_ref_pos[1] /= weight_sum;
  }
  else {
    /* No usable tracking data on any track on this frame.
     * Use data from neighboring frames to extrapolate...
     */
    int next_lower = MINAFRAME;
    int next_higher = MAXFRAME;
    /* Temporarily enable animated values so neighbor search sees the same
     * weights as regular evaluation; restored to false below. */
    use_values_from_fcurves(ctx, true);
    LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
      /* NOTE: we deliberately do not care if this track
       * is already initialized for stabilization. */
      if (track->flag & TRACK_USE_2D_STAB) {
        int startpoint = search_closest_marker_index(track, framenr);
        retrieve_next_higher_usable_frame(ctx, track, startpoint, framenr, &next_higher);
        retrieve_next_lower_usable_frame(ctx, track, startpoint, framenr, &next_lower);
      }
    }
    if (next_lower >= MINFRAME) {
      /* use next usable frame to the left.
       * Also default to this frame when we're in a gap */
      average_marker_positions(ctx, next_lower, r_ref_pos);
    }
    else if (next_higher < MAXFRAME) {
      /* Otherwise fall back to the next usable frame to the right. */
      average_marker_positions(ctx, next_higher, r_ref_pos);
    }
    use_values_from_fcurves(ctx, false);
  }
}
673
674/* Linear interpolation of data retrieved at two measurement points.
675 * This function is used to fill gaps in the middle of the covered area,
676 * at frames without any usable tracks for stabilization.
677 *
678 * framenr is a position to interpolate for.
679 * frame_a is a valid measurement point below framenr
680 * frame_b is a valid measurement point above framenr
681 * Returns truth if both measurements could actually be retrieved.
682 * Otherwise output parameters remain unaltered
683 */
685 int framenr,
686 int frame_a,
687 int frame_b,
688 const float aspect,
689 float r_translation[2],
690 float r_pivot[2],
691 float *r_angle,
692 float *r_scale_step)
693{
694 float t, s;
695 float trans_a[2], trans_b[2];
696 float angle_a, angle_b;
697 float scale_a, scale_b;
698 float pivot_a[2], pivot_b[2];
699 bool success = false;
700
701 BLI_assert(frame_a <= frame_b);
702 BLI_assert(frame_a <= framenr);
703 BLI_assert(framenr <= frame_b);
704
705 t = (float(framenr) - frame_a) / (frame_b - frame_a);
706 s = 1.0f - t;
707
709 ctx, frame_a, aspect, trans_a, pivot_a, &angle_a, &scale_a);
710 if (!success) {
711 return false;
712 }
714 ctx, frame_b, aspect, trans_b, pivot_b, &angle_b, &scale_b);
715 if (!success) {
716 return false;
717 }
718
719 interp_v2_v2v2(r_translation, trans_a, trans_b, t);
720 interp_v2_v2v2(r_pivot, pivot_a, pivot_b, t);
721 *r_scale_step = s * scale_a + t * scale_b;
722 *r_angle = s * angle_a + t * angle_b;
723 return true;
724}
725
726/* Reorder tracks starting with those providing a tracking data frame
727 * closest to the global anchor_frame. Tracks with a gap at anchor_frame or
728 * starting farer away from anchor_frame altogether will be visited later.
729 * This allows to build up baseline contributions incrementally.
730 *
731 * order is an array for sorting the tracks. Must be of suitable size to hold
732 * all tracks.
733 * Returns number of actually usable tracks, can be less than the overall number
734 * of tracks.
735 *
736 * NOTE: After returning, the order array holds entries up to the number of
737 * usable tracks, appropriately sorted starting with the closest tracks.
738 * Initialization includes disabled tracks, since they might be enabled
739 * through automation later.
740 */
742{
743 size_t tracknr = 0;
744 MovieTracking *tracking = ctx->tracking;
745 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(tracking);
746 int anchor_frame = tracking->stabilization.anchor_frame;
747
748 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
749 MovieTrackingMarker *marker;
750 order[tracknr].data = track;
751 marker = get_closest_marker(ctx, track, anchor_frame);
752 if (marker != nullptr && (track->flag & (TRACK_USE_2D_STAB | TRACK_USE_2D_STAB_ROT))) {
753 order[tracknr].sort_value = abs(marker->framenr - anchor_frame);
754 order[tracknr].reference_frame = marker->framenr;
755 tracknr++;
756 }
757 }
758 if (tracknr) {
759 qsort(order, tracknr, sizeof(TrackInitOrder), BLI_sortutil_cmp_int);
760 }
761 return tracknr;
762}
763
764/* Setup the constant part of this track's contribution to the determined frame
765 * movement. Tracks usually don't provide tracking data for every frame. Thus,
766 * for determining data at a given frame, we split up the contribution into a
767 * part covered by actual measurements on this track, and the initial gap
768 * between this track's reference frame and the global anchor_frame.
769 * The (missing) data for the gap can be substituted by the average offset
770 * observed by the other tracks covering the gap. This approximation doesn't
771 * introduce wrong data, but it records data with incorrect weight. A totally
772 * correct solution would require us to average the contribution per frame, and
773 * then integrate stepwise over all frames -- which of course would be way more
774 * expensive, especially for longer clips. To the contrary, our solution
775 * cumulates the total contribution per track and averages afterwards over all
776 * tracks; it can thus be calculated just based on the data of a single frame,
777 * plus the "baseline" for the reference frame, which is what we are computing
778 * here.
779 *
780 * Since we're averaging _contributions_, we have to calculate the _difference_
781 * of the measured position at current frame and the position at the reference
782 * frame. But the "reference" part of this difference is constant and can thus
783 * be packed together with the baseline contribution into a single precomputed
784 * vector per track.
785 *
786 * In case of the rotation contribution, the principle is the same, but we have
787 * to compensate for the already determined translation and measure the pure
788 * rotation, simply because this is how we model the offset: shift plus rotation
789 * around the shifted rotation center. To circumvent problems with the
790 * definition range of the arcus tangens function, we perform this baseline
791 * addition and reference angle subtraction in polar coordinates and bake this
792 * operation into a precomputed rotation matrix.
793 *
794 * track is a track to be initialized to initialize
795 * reference_frame is a local frame for this track, the closest pick to the
796 * global anchor_frame.
797 * aspect is a total aspect ratio of the undistorted image (includes fame and
798 * pixel aspect).
799 * target_pos is a possibly animated target position as set by the user for
800 * the reference_frame
801 * average_translation is a value observed by the _other_ tracks for the gap
802 * between reference_frame and anchor_frame. This
803 * average must not contain contributions of frames
804 * not yet initialized
805 * average_angle in a similar way, the rotation value observed by the
806 * _other_ tracks.
807 * average_scale_step is an image scale factor observed on average by the other
808 * tracks for this frame. This value is recorded and
809 * averaged as logarithm. The recorded scale changes
810 * are damped for very small contributions, to limit
811 * the effect of probe points approaching the pivot
812 * too closely.
813 *
814 * NOTE: when done, this track is marked as initialized
815 */
817 MovieTrackingTrack *track,
818 int reference_frame,
819 float aspect,
820 const float average_translation[2],
821 const float pivot[2],
822 const float average_angle,
823 const float average_scale_step)
824{
825 float pos[2], angle, len;
827 MovieTrackingMarker *marker = BKE_tracking_marker_get_exact(track, reference_frame);
828 /* Logic for initialization order ensures there *is* a marker on that
829 * very frame.
830 */
831 BLI_assert(marker != nullptr);
832 BLI_assert(local_data != nullptr);
833
834 /* Per track baseline value for translation. */
835 sub_v2_v2v2(local_data->stabilization_offset_base, average_translation, marker->pos);
836
837 /* Per track baseline value for rotation. */
838 sub_v2_v2v2(pos, marker->pos, pivot);
839
840 pos[0] *= aspect;
841 angle = average_angle - atan2f(pos[1], pos[0]);
843
844 /* Per track baseline value for zoom. */
846 local_data->stabilization_scale_base = expf(average_scale_step) / len;
847
848 local_data->is_init_for_stabilization = true;
849}
850
851static void init_all_tracks(StabContext *ctx, float aspect)
852{
853 size_t track_len = 0;
854 MovieClip *clip = ctx->clip;
855 MovieTracking *tracking = ctx->tracking;
856 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(tracking);
857 TrackInitOrder *order;
858
859 /* Attempt to start initialization at anchor_frame.
860 * By definition, offset contribution is zero there.
861 */
862 int reference_frame = tracking->stabilization.anchor_frame;
863 float average_angle = 0, average_scale_step = 0;
864 float average_translation[2], average_pos[2], pivot[2];
865 zero_v2(average_translation);
866 zero_v2(pivot);
867
868 /* Initialize private working data. */
869 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
871 if (!local_data) {
872 local_data = MEM_callocN<TrackStabilizationBase>("2D stabilization per track baseline data");
873 attach_stabilization_baseline_data(ctx, track, local_data);
874 }
875 BLI_assert(local_data != nullptr);
876 local_data->track_weight_curve = retrieve_track_weight_animation(clip, track);
877 local_data->is_init_for_stabilization = false;
878
879 track_len++;
880 }
881 if (!track_len) {
882 return;
883 }
884
885 order = MEM_calloc_arrayN<TrackInitOrder>(track_len, "stabilization track order");
886 if (!order) {
887 return;
888 }
889
890 track_len = establish_track_initialization_order(ctx, order);
891 if (track_len == 0) {
892 goto cleanup;
893 }
894
895 /* starting point for pivot, before having initialized any track */
896 average_marker_positions(ctx, reference_frame, average_pos);
897 setup_pivot(average_pos, pivot);
898
899 for (int i = 0; i < track_len; i++) {
900 MovieTrackingTrack *track = order[i].data;
901 if (reference_frame != order[i].reference_frame) {
902 reference_frame = order[i].reference_frame;
904 reference_frame,
905 aspect,
906 average_translation,
907 pivot,
908 &average_angle,
909 &average_scale_step);
910 }
912 track,
913 reference_frame,
914 aspect,
915 average_translation,
916 pivot,
917 average_angle,
918 average_scale_step);
919 }
920
921cleanup:
922 MEM_freeN(order);
923}
924
925/* Retrieve the measurement of frame movement by averaging contributions of
926 * active tracks.
927 *
928 * translation is a measurement in normalized 0..1 coordinates.
929 * angle is a measurement in radians -pi..+pi counter clockwise relative to
930 * translation compensated frame center
931 * scale_step is a measurement of image scale changes, in logarithmic scale
932 * (zero means scale == 1)
933 * Returns calculation enabled and all data retrieved as expected for this frame.
934 *
935 * NOTE: when returning `false`, output parameters are reset to neutral values.
936 */
938 int framenr,
939 float aspect,
940 float r_translation[2],
941 float r_pivot[2],
942 float *r_angle,
943 float *r_scale_step)
944{
945 bool success = false;
946
947 /* Early output if stabilization is disabled. */
948 if ((ctx->stab->flag & TRACKING_2D_STABILIZATION) == 0) {
949 zero_v2(r_translation);
950 *r_scale_step = 0.0f;
951 *r_angle = 0.0f;
952 return false;
953 }
954
956 ctx, framenr, aspect, r_translation, r_pivot, r_angle, r_scale_step);
957 if (!success) {
958 /* Try to hold extrapolated settings beyond the definition range
959 * and to interpolate in gaps without any usable tracking data
960 * to prevent sudden jump to image zero position.
961 */
962 int next_lower = MINAFRAME;
963 int next_higher = MAXFRAME;
964 use_values_from_fcurves(ctx, true);
965 find_next_working_frames(ctx, framenr, &next_lower, &next_higher);
966 if (next_lower >= MINFRAME && next_higher < MAXFRAME) {
968 framenr,
969 next_lower,
970 next_higher,
971 aspect,
972 r_translation,
973 r_pivot,
974 r_angle,
975 r_scale_step);
976 }
977 else if (next_higher < MAXFRAME) {
978 /* Before start of stabilized range: extrapolate start point
979 * settings.
980 */
982 ctx, next_higher, aspect, r_translation, r_pivot, r_angle, r_scale_step);
983 }
984 else if (next_lower >= MINFRAME) {
985 /* After end of stabilized range: extrapolate end point settings. */
987 ctx, next_lower, aspect, r_translation, r_pivot, r_angle, r_scale_step);
988 }
989 use_values_from_fcurves(ctx, false);
990 }
991 return success;
992}
993
994/* Calculate stabilization data (translation, scale and rotation) from given raw
995 * measurements. Result is in absolute image dimensions (expanded image, square
996 * pixels), includes automatic or manual scaling and compensates for a target
997 * frame position, if given.
998 *
999 * size is a size of the expanded image, the width in pixels is size * aspect.
1000 * aspect is a ratio (width / height) of the effective canvas (square pixels).
1001 * do_compensate denotes whether to actually output values necessary to
1002 * _compensate_ the determined frame movement.
1003 * Otherwise, the effective target movement is returned.
1004 */
1006 int framenr,
1007 int size,
1008 float aspect,
1009 bool do_compensate,
1010 float scale_step,
1011 float r_translation[2],
1012 float r_pivot[2],
1013 float *r_scale,
1014 float *r_angle)
1015{
1016 float target_pos[2], target_scale;
1017 float scaleinf = get_animated_scaleinf(ctx, framenr);
1018
1019 if (ctx->stab->flag & TRACKING_STABILIZE_SCALE) {
1020 *r_scale = expf(scale_step * scaleinf); /* Averaged in log scale */
1021 }
1022 else {
1023 *r_scale = 1.0f;
1024 }
1025
1026 mul_v2_fl(r_translation, get_animated_locinf(ctx, framenr));
1027 *r_angle *= get_animated_rotinf(ctx, framenr);
1028
1029 /* Compensate for a target frame position.
1030 * This allows to follow tracking / panning shots in a semi manual fashion,
1031 * when animating the settings for the target frame position.
1032 */
1033 get_animated_target_pos(ctx, framenr, target_pos);
1034 sub_v2_v2(r_translation, target_pos);
1035 *r_angle -= get_animated_target_rot(ctx, framenr);
1036 target_scale = get_animated_target_scale(ctx, framenr);
1037 if (target_scale != 0.0f) {
1038 *r_scale /= target_scale;
1039 /* target_scale is an expected/intended reference zoom value */
1040 }
1041
1042 /* Convert from relative to absolute coordinates, square pixels. */
1043 r_translation[0] *= float(size) * aspect;
1044 r_translation[1] *= float(size);
1045 r_pivot[0] *= float(size) * aspect;
1046 r_pivot[1] *= float(size);
1047
1048 /* Output measured data, or inverse of the measured values for
1049 * compensation?
1050 */
1051 if (do_compensate) {
1052 mul_v2_fl(r_translation, -1.0f);
1053 *r_angle *= -1.0f;
1054 if (*r_scale != 0.0f) {
1055 *r_scale = 1.0f / *r_scale;
1056 }
1057 }
1058}
1059
1060static void stabilization_data_to_mat4(float pixel_aspect,
1061 const float pivot[2],
1062 const float translation[2],
1063 float scale,
1064 float angle,
1065 float r_mat[4][4])
1066{
1067 float translation_mat[4][4], rotation_mat[4][4], scale_mat[4][4], pivot_mat[4][4],
1068 inv_pivot_mat[4][4], aspect_mat[4][4], inv_aspect_mat[4][4];
1069 const float scale_vector[3] = {scale, scale, 1.0f};
1070
1071 unit_m4(translation_mat);
1072 unit_m4(rotation_mat);
1073 unit_m4(scale_mat);
1074 unit_m4(aspect_mat);
1075 unit_m4(pivot_mat);
1076 unit_m4(inv_pivot_mat);
1077
1078 /* aspect ratio correction matrix */
1079 aspect_mat[0][0] /= pixel_aspect;
1080 invert_m4_m4(inv_aspect_mat, aspect_mat);
1081
1082 add_v2_v2(pivot_mat[3], pivot);
1083 sub_v2_v2(inv_pivot_mat[3], pivot);
1084
1085 size_to_mat4(scale_mat, scale_vector); /* scale matrix */
1086 add_v2_v2(translation_mat[3], translation); /* translation matrix */
1087 rotate_m4(rotation_mat, 'Z', angle); /* rotation matrix */
1088
1089 /* Compose transformation matrix. */
1090 mul_m4_series(r_mat,
1091 aspect_mat,
1092 translation_mat,
1093 pivot_mat,
1094 scale_mat,
1095 rotation_mat,
1096 inv_pivot_mat,
1097 inv_aspect_mat);
1098}
1099
1100/* Calculate scale factor necessary to eliminate black image areas
1101 * caused by the compensating movements of the stabilizer.
1102 * This function visits every frame where stabilization data is
1103 * available and determines the factor for this frame. The overall
1104 * largest factor found is returned as result.
1105 *
1106 * NOTE: all tracks need to be initialized before calling this function.
1107 */
1108static float calculate_autoscale_factor(StabContext *ctx, int size, float aspect)
1109{
1110 MovieTrackingStabilization *stab = ctx->stab;
1111 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(ctx->tracking);
1112 float pixel_aspect = ctx->tracking->camera.pixel_aspect;
1113 int height = size, width = aspect * size;
1114
1115 int sfra = INT_MAX, efra = INT_MIN;
1116 float scale = 1.0f, scale_step = 0.0f;
1117
1118 /* Calculate maximal frame range of tracks where stabilization is active. */
1119 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
1120 if ((track->flag & TRACK_USE_2D_STAB) ||
1121 ((stab->flag & TRACKING_STABILIZE_ROTATION) && (track->flag & TRACK_USE_2D_STAB_ROT)))
1122 {
1123 int first_frame = track->markers[0].framenr;
1124 int last_frame = track->markers[track->markersnr - 1].framenr;
1125 sfra = min_ii(sfra, first_frame);
1126 efra = max_ii(efra, last_frame);
1127 }
1128 }
1129
1130 use_values_from_fcurves(ctx, true);
1131 for (int cfra = sfra; cfra <= efra; cfra++) {
1132 float translation[2], pivot[2], angle, tmp_scale;
1133 float mat[4][4];
1134 const float points[4][2] = {
1135 {0.0f, 0.0f}, {0.0f, float(height)}, {float(width), float(height)}, {float(width), 0.0f}};
1136 const bool do_compensate = true;
1137 /* Calculate stabilization parameters for the current frame. */
1139 ctx, cfra, aspect, translation, pivot, &angle, &scale_step);
1141 cfra,
1142 size,
1143 aspect,
1144 do_compensate,
1145 scale_step,
1146 translation,
1147 pivot,
1148 &tmp_scale,
1149 &angle);
1150 /* Compose transformation matrix. */
1151 /* NOTE: Here we operate in NON-COMPENSATED coordinates, meaning we have
1152 * to construct transformation matrix using proper pivot point.
1153 * Compensation for that will happen later on.
1154 */
1155 stabilization_data_to_mat4(pixel_aspect, pivot, translation, tmp_scale, angle, mat);
1156 /* Investigate the transformed border lines for this frame;
1157 * find out, where it cuts the original frame.
1158 */
1159 for (int edge_index = 0; edge_index < 4; edge_index++) {
1160 /* Calculate coordinates of stabilized frame edge points.
1161 * Use matrix multiplication here so we operate in homogeneous
1162 * coordinates.
1163 */
1164 float stable_edge_p1[3], stable_edge_p2[3];
1165 copy_v2_v2(stable_edge_p1, points[edge_index]);
1166 copy_v2_v2(stable_edge_p2, points[(edge_index + 1) % 4]);
1167 stable_edge_p1[2] = stable_edge_p2[2] = 0.0f;
1168 mul_m4_v3(mat, stable_edge_p1);
1169 mul_m4_v3(mat, stable_edge_p2);
1170 /* Now we iterate over all original frame corners (we call them
1171 * 'point' here) to see if there's black area between stabilized
1172 * frame edge and original point.
1173 */
1174 for (int point_index = 0; point_index < 4; point_index++) {
1175 const float point[3] = {points[point_index][0], points[point_index][1], 0.0f};
1176 /* Calculate vector which goes from first edge point to
1177 * second one.
1178 */
1179 float stable_edge_vec[3];
1180 sub_v3_v3v3(stable_edge_vec, stable_edge_p2, stable_edge_p1);
1181 /* Calculate vector which connects current frame point to
1182 * first edge point.
1183 */
1184 float point_to_edge_start_vec[3];
1185 sub_v3_v3v3(point_to_edge_start_vec, point, stable_edge_p1);
1186 /* Use this two vectors to check whether frame point is inside
1187 * of the stabilized frame or not.
1188 * If the point is inside, there is no black area happening
1189 * and no scaling required for it.
1190 */
1191 if (cross_v2v2(stable_edge_vec, point_to_edge_start_vec) >= 0.0f) {
1192 /* We are scaling around motion-compensated pivot point. */
1193 float scale_pivot[2];
1194 add_v2_v2v2(scale_pivot, pivot, translation);
1195 /* Calculate line which goes via `point` and parallel to
1196 * the stabilized frame edge. This line is coming via
1197 * `point` and `point2` at the end.
1198 */
1199 float point2[2];
1200 add_v2_v2v2(point2, point, stable_edge_vec);
1201 /* Calculate actual distance between pivot point and
1202 * the stabilized frame edge. Then calculate distance
1203 * between pivot point and line which goes via actual
1204 * corner and is parallel to the edge.
1205 *
1206 * Dividing one by another will give us required scale
1207 * factor to get rid of black areas.
1208 */
1209 float real_dist = dist_to_line_v2(scale_pivot, stable_edge_p1, stable_edge_p2);
1210 float required_dist = dist_to_line_v2(scale_pivot, point, point2);
1211 const float S = required_dist / real_dist;
1212 scale = max_ff(scale, S);
1213 }
1214 }
1215 }
1216 }
1217 if (stab->maxscale > 0.0f) {
1218 scale = min_ff(scale, stab->maxscale);
1219 }
1220 use_values_from_fcurves(ctx, false);
1221
1222 return scale;
1223}
1224
1225/* Prepare working data and determine reference point for each track.
1226 *
1227 * NOTE: These calculations _could_ be cached and reused for all frames of the
1228 * same clip. However, since proper initialization depends on (weight)
1229 * animation and setup of tracks, ensuring consistency of cached init data
1230 * turns out to be tricky, hard to maintain and generally not worth the
1231 * effort. Thus we'll re-initialize on every frame.
1232 */
1233static StabContext *init_stabilizer(MovieClip *clip, int size, float aspect)
1234{
1236 BLI_assert(ctx != nullptr);
1237 init_all_tracks(ctx, aspect);
1238 if (ctx->stab->flag & TRACKING_AUTOSCALE) {
1239 ctx->stab->scale = 1.0;
1240 ctx->stab->scale = calculate_autoscale_factor(ctx, size, aspect);
1241 }
1242 /* By default, just use values for the global current frame. */
1243 use_values_from_fcurves(ctx, false);
1244 return ctx;
1245}
1246
1247/* === public interface functions === */
1248
1250 int framenr,
1251 int width,
1252 int height,
1253 float translation[2],
1254 float *scale,
1255 float *angle)
1256{
1257 StabContext *ctx = nullptr;
1258 MovieTracking *tracking = &clip->tracking;
1260 /* Might become a parameter of a stabilization compositor node. */
1261 bool do_compensate = true;
1262 float scale_step = 0.0f;
1263 float pixel_aspect = tracking->camera.pixel_aspect;
1264 float aspect = float(width) * pixel_aspect / height;
1265 int size = height;
1266 float pivot[2];
1267
1268 if (enabled) {
1269 ctx = init_stabilizer(clip, size, aspect);
1270 }
1271
1273 ctx, framenr, aspect, translation, pivot, angle, &scale_step))
1274 {
1276 ctx, framenr, size, aspect, do_compensate, scale_step, translation, pivot, scale, angle);
1277 compensate_rotation_center(size, aspect, *angle, *scale, pivot, translation);
1278 }
1279 else {
1280 zero_v2(translation);
1281 *scale = 1.0f;
1282 *angle = 0.0f;
1283 }
1285}
1286
1293
1294static void tracking_stabilize_frame_interpolation_cb(void *__restrict userdata,
1295 const int y,
1296 const TaskParallelTLS *__restrict /*tls*/)
1297{
1298 using namespace blender;
1299
1301 static_cast<TrackingStabilizeFrameInterpolationData *>(userdata);
1302 ImBuf *ibuf = data->ibuf;
1303 ImBuf *tmpibuf = data->tmpibuf;
1304 float (*mat)[4] = data->mat;
1305
1306 float vec[3] = {0.0f, float(y), 0.0f};
1307 float rvec[3];
1308
1309 if (ibuf->float_buffer.data) {
1310 /* Float image. */
1311 float4 *dst = reinterpret_cast<float4 *>(tmpibuf->float_buffer.data) + y * tmpibuf->x;
1312 if (data->tracking_filter == TRACKING_FILTER_BILINEAR) {
1313 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1314 vec[0] = float(x);
1315 mul_v3_m4v3(rvec, mat, vec);
1316 *dst = imbuf::interpolate_bilinear_border_fl(ibuf, rvec[0], rvec[1]);
1317 }
1318 }
1319 else if (data->tracking_filter == TRACKING_FILTER_BICUBIC) {
1320 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1321 vec[0] = float(x);
1322 mul_v3_m4v3(rvec, mat, vec);
1323 *dst = imbuf::interpolate_cubic_bspline_fl(ibuf, rvec[0], rvec[1]);
1324 }
1325 }
1326 else {
1327 /* Nearest or fall back to nearest. */
1328 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1329 vec[0] = float(x);
1330 mul_v3_m4v3(rvec, mat, vec);
1331 *dst = imbuf::interpolate_nearest_border_fl(ibuf, rvec[0], rvec[1]);
1332 }
1333 }
1334 }
1335 else if (ibuf->byte_buffer.data) {
1336 /* Byte image. */
1337 uchar4 *dst = reinterpret_cast<uchar4 *>(tmpibuf->byte_buffer.data) + y * tmpibuf->x;
1338 if (data->tracking_filter == TRACKING_FILTER_BILINEAR) {
1339 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1340 vec[0] = float(x);
1341 mul_v3_m4v3(rvec, mat, vec);
1342 *dst = imbuf::interpolate_bilinear_border_byte(ibuf, rvec[0], rvec[1]);
1343 }
1344 }
1345 else if (data->tracking_filter == TRACKING_FILTER_BICUBIC) {
1346 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1347 vec[0] = float(x);
1348 mul_v3_m4v3(rvec, mat, vec);
1349 *dst = imbuf::interpolate_cubic_bspline_byte(ibuf, rvec[0], rvec[1]);
1350 }
1351 }
1352 else {
1353 /* Nearest or fall back to nearest. */
1354 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1355 vec[0] = float(x);
1356 mul_v3_m4v3(rvec, mat, vec);
1357 *dst = imbuf::interpolate_nearest_border_byte(ibuf, rvec[0], rvec[1]);
1358 }
1359 }
1360 }
1361}
1362
1364 MovieClip *clip, int framenr, ImBuf *ibuf, float translation[2], float *scale, float *angle)
1365{
1366 float tloc[2], tscale, tangle;
1367 MovieTracking *tracking = &clip->tracking;
1368 MovieTrackingStabilization *stab = &tracking->stabilization;
1369 ImBuf *tmpibuf;
1370 int width = ibuf->x, height = ibuf->y;
1371 float pixel_aspect = tracking->camera.pixel_aspect;
1372 float mat[4][4];
1373 int ibuf_flags;
1374
1375 if (translation) {
1376 copy_v2_v2(tloc, translation);
1377 }
1378
1379 if (scale) {
1380 tscale = *scale;
1381 }
1382
1383 /* Perform early output if no stabilization is used. */
1384 if ((stab->flag & TRACKING_2D_STABILIZATION) == 0) {
1385 if (translation) {
1386 zero_v2(translation);
1387 }
1388
1389 if (scale) {
1390 *scale = 1.0f;
1391 }
1392
1393 if (angle) {
1394 *angle = 0.0f;
1395 }
1396
1397 return ibuf;
1398 }
1399
1400 /* Allocate frame for stabilization result, copy alpha mode and color-space. */
1401 ibuf_flags = 0;
1402 if (ibuf->byte_buffer.data) {
1403 ibuf_flags |= IB_byte_data;
1404 }
1405 if (ibuf->float_buffer.data) {
1406 ibuf_flags |= IB_float_data;
1407 }
1408
1409 tmpibuf = IMB_allocImBuf(ibuf->x, ibuf->y, ibuf->planes, ibuf_flags);
1410 IMB_colormanagegent_copy_settings(ibuf, tmpibuf);
1411
1412 /* Calculate stabilization matrix. */
1413 BKE_tracking_stabilization_data_get(clip, framenr, width, height, tloc, &tscale, &tangle);
1415 ibuf->x, ibuf->y, pixel_aspect, tloc, tscale, tangle, mat);
1416
1417 /* The following code visits each nominal target grid position
1418 * and picks interpolated data "backwards" from source.
1419 * thus we need the inverse of the transformation to apply. */
1420 invert_m4(mat);
1421
1423 data.ibuf = ibuf;
1424 data.tmpibuf = tmpibuf;
1425 data.mat = mat;
1426 data.tracking_filter = tracking->stabilization.filter;
1427
1428 TaskParallelSettings settings;
1430 settings.use_threading = (tmpibuf->y > 128);
1432 0, tmpibuf->y, &data, tracking_stabilize_frame_interpolation_cb, &settings);
1433
1434 if (tmpibuf->float_buffer.data) {
1435 tmpibuf->userflags |= IB_RECT_INVALID;
1436 }
1437
1438 if (translation) {
1439 copy_v2_v2(translation, tloc);
1440 }
1441
1442 if (scale) {
1443 *scale = tscale;
1444 }
1445
1446 if (angle) {
1447 *angle = tangle;
1448 }
1449
1450 return tmpibuf;
1451}
1452
1454 int buffer_height,
1455 float pixel_aspect,
1456 float translation[2],
1457 float scale,
1458 float angle,
1459 float r_mat[4][4])
1460{
1461 /* Since we cannot receive the real pivot point coordinates (API limitation),
1462 * we perform the rotation/scale around the center of frame.
1463 * Then we correct by an additional shift, which was calculated in
1464 * compensate_rotation_center() and "sneaked in" as additional offset
1465 * in the translation parameter. This works, since translation needs to be
1466 * applied after rotation/scale anyway. Thus effectively the image gets
1467 * rotated around the desired pivot point
1468 */
1469 /* TODO(sergey): pivot shouldn't be calculated here, rather received
1470 * as a parameter.
1471 */
1472 float pivot[2];
1473 pivot[0] = 0.5f * pixel_aspect * buffer_width;
1474 pivot[1] = 0.5f * buffer_height;
1475 /* Compose transformation matrix. */
1476 stabilization_data_to_mat4(pixel_aspect, pivot, translation, scale, angle, r_mat);
1477}
float evaluate_fcurve(const FCurve *fcu, float evaltime)
FCurve * id_data_find_fcurve(ID *id, void *data, StructRNA *type, const char *prop_name, int index, bool *r_driven)
float BKE_movieclip_remap_clip_to_scene_frame(const struct MovieClip *clip, float framenr)
struct MovieTrackingObject * BKE_tracking_object_get_camera(const struct MovieTracking *tracking)
struct MovieTrackingMarker * BKE_tracking_marker_get(struct MovieTrackingTrack *track, int framenr)
Definition tracking.cc:1358
struct MovieTrackingMarker * BKE_tracking_marker_get_exact(struct MovieTrackingTrack *track, int framenr)
Definition tracking.cc:1391
#define BLI_assert(a)
Definition BLI_assert.h:46
GHash * BLI_ghash_ptr_new(const char *info) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
void * BLI_ghash_lookup(const GHash *gh, const void *key) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.cc:731
void BLI_ghash_insert(GHash *gh, void *key, void *val)
Definition BLI_ghash.cc:707
void BLI_ghash_free(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition BLI_ghash.cc:860
#define LISTBASE_FOREACH(type, var, list)
MINLINE float max_ff(float a, float b)
MINLINE int min_ii(int a, int b)
MINLINE float min_ff(float a, float b)
MINLINE int max_ii(int a, int b)
float dist_to_line_v2(const float p[2], const float l1[2], const float l2[2])
Definition math_geom.cc:286
void size_to_mat4(float R[4][4], const float size[3])
void mul_m4_v3(const float M[4][4], float r[3])
#define mul_m4_series(...)
void mul_v3_m4v3(float r[3], const float mat[4][4], const float vec[3])
bool invert_m4_m4(float inverse[4][4], const float mat[4][4])
void rotate_m4(float mat[4][4], char axis, float angle)
bool invert_m4(float mat[4][4])
void mul_m2_v2(const float mat[2][2], float vec[2])
void unit_m4(float m[4][4])
void angle_to_mat2(float R[2][2], float angle)
MINLINE float len_v2(const float v[2]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v2_v2(float r[2], const float a[2])
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void mul_v2_fl(float r[2], float f)
void interp_v2_v2v2(float r[2], const float a[2], const float b[2], float t)
MINLINE void copy_v2_v2(float r[2], const float a[2])
MINLINE void add_v2_v2(float r[2], const float a[2])
MINLINE float cross_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT
MINLINE void add_v2_v2v2(float r[2], const float a[2], const float b[2])
MINLINE void sub_v2_v2v2(float r[2], const float a[2], const float b[2])
MINLINE void zero_v2(float r[2])
int BLI_sortutil_cmp_int(const void *a_, const void *b_)
Definition sort_utils.cc:53
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
Definition task_range.cc:99
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:221
#define MINFRAME
#define MINAFRAME
#define MAXFRAME
@ TRACK_USE_2D_STAB
@ TRACK_USE_2D_STAB_ROT
@ TRACKING_AUTOSCALE
@ TRACKING_STABILIZE_SCALE
@ TRACKING_STABILIZE_ROTATION
@ TRACKING_2D_STABILIZATION
@ TRACKING_FILTER_BICUBIC
@ TRACKING_FILTER_BILINEAR
@ MARKER_DISABLED
static double angle(const Eigen::Vector3d &v1, const Eigen::Vector3d &v2)
Definition IK_Math.h:117
void IMB_colormanagegent_copy_settings(ImBuf *ibuf_src, ImBuf *ibuf_dst)
ImBuf * IMB_allocImBuf(unsigned int x, unsigned int y, unsigned char planes, unsigned int flags)
@ IB_RECT_INVALID
@ IB_float_data
@ IB_byte_data
Read Guarded memory(de)allocation.
BMesh const char void * data
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
nullptr float
#define logf(x)
#define expf(x)
uint pos
#define abs
bool enabled
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
float4 interpolate_nearest_border_fl(const ImBuf *in, float u, float v)
Definition IMB_interp.hh:27
uchar4 interpolate_nearest_border_byte(const ImBuf *in, float u, float v)
Definition IMB_interp.hh:23
uchar4 interpolate_cubic_bspline_byte(const ImBuf *in, float u, float v)
float4 interpolate_cubic_bspline_fl(const ImBuf *in, float u, float v)
uchar4 interpolate_bilinear_border_byte(const ImBuf *in, float u, float v)
Definition IMB_interp.hh:74
float4 interpolate_bilinear_border_fl(const ImBuf *in, float u, float v)
Definition IMB_interp.hh:78
#define atan2f
ImBufFloatBuffer float_buffer
ImBufByteBuffer byte_buffer
unsigned char planes
struct MovieTracking tracking
MovieTrackingMarker * markers
MovieTrackingStabilization stabilization
MovieTrackingCamera camera
MovieTrackingStabilization * stab
FCurve * target_pos[2]
GHash * private_track_data
MovieTracking * tracking
MovieTrackingTrack * data
float stabilization_rotation_base[2][2]
i
Definition text_draw.cc:230
void BKE_tracking_stabilization_data_to_mat4(int buffer_width, int buffer_height, float pixel_aspect, float translation[2], float scale, float angle, float r_mat[4][4])
static void retrieve_next_lower_usable_frame(StabContext *ctx, MovieTrackingTrack *track, int i, int ref_frame, int *next_lower)
static void average_marker_positions(StabContext *ctx, int framenr, float r_ref_pos[2])
static void discard_stabilization_baseline_data(void *val)
static int establish_track_initialization_order(StabContext *ctx, TrackInitOrder *order)
static MovieTrackingMarker * get_closest_marker(StabContext *ctx, MovieTrackingTrack *track, int ref_frame)
static float get_animated_target_scale(StabContext *ctx, int framenr)
void BKE_tracking_stabilization_data_get(MovieClip *clip, int framenr, int width, int height, float translation[2], float *scale, float *angle)
static float get_animated_target_rot(StabContext *ctx, int framenr)
static float get_animated_rotinf(StabContext *ctx, int framenr)
static bool average_track_contributions(StabContext *ctx, int framenr, float aspect, float r_translation[2], float r_pivot[2], float *r_angle, float *r_scale_step)
static float fetch_from_fcurve(const FCurve *animationCurve, int framenr, StabContext *ctx, float default_value)
static void stabilization_calculate_data(StabContext *ctx, int framenr, int size, float aspect, bool do_compensate, float scale_step, float r_translation[2], float r_pivot[2], float *r_scale, float *r_angle)
static FCurve * retrieve_track_weight_animation(MovieClip *clip, MovieTrackingTrack *track)
static bool stabilization_determine_offset_for_frame(StabContext *ctx, int framenr, float aspect, float r_translation[2], float r_pivot[2], float *r_angle, float *r_scale_step)
static bool is_init_for_stabilization(StabContext *ctx, MovieTrackingTrack *track)
static bool is_usable_for_stabilization(StabContext *ctx, MovieTrackingTrack *track)
static bool is_effectively_disabled(StabContext *ctx, MovieTrackingTrack *track, MovieTrackingMarker *marker)
static void use_values_from_fcurves(StabContext *ctx, bool toggle)
static bool interpolate_averaged_track_contributions(StabContext *ctx, int framenr, int frame_a, int frame_b, const float aspect, float r_translation[2], float r_pivot[2], float *r_angle, float *r_scale_step)
static void get_animated_target_pos(StabContext *ctx, int framenr, float target_pos[2])
static float get_animated_locinf(StabContext *ctx, int framenr)
static void init_all_tracks(StabContext *ctx, float aspect)
static float get_animated_scaleinf(StabContext *ctx, int framenr)
static float get_animated_weight(StabContext *ctx, MovieTrackingTrack *track, int framenr)
static float SCALE_ERROR_LIMIT_BIAS
static int search_closest_marker_index(MovieTrackingTrack *track, int ref_frame)
static void find_next_working_frames(StabContext *ctx, int framenr, int *next_lower, int *next_higher)
static void attach_stabilization_baseline_data(StabContext *ctx, MovieTrackingTrack *track, TrackStabilizationBase *private_data)
static StabContext * init_stabilization_working_context(MovieClip *clip)
static void setup_pivot(const float ref_pos[2], float r_pivot[2])
static TrackStabilizationBase * access_stabilization_baseline_data(StabContext *ctx, MovieTrackingTrack *track)
static float calculate_autoscale_factor(StabContext *ctx, int size, float aspect)
static float EPSILON_WEIGHT
static void stabilization_data_to_mat4(float pixel_aspect, const float pivot[2], const float translation[2], float scale, float angle, float r_mat[4][4])
ImBuf * BKE_tracking_stabilize_frame(MovieClip *clip, int framenr, ImBuf *ibuf, float translation[2], float *scale, float *angle)
static void retrieve_next_higher_usable_frame(StabContext *ctx, MovieTrackingTrack *track, int i, int ref_frame, int *next_higher)
static StabContext * init_stabilizer(MovieClip *clip, int size, float aspect)
static MovieTrackingMarker * get_tracking_data_point(StabContext *ctx, MovieTrackingTrack *track, int framenr, float *r_weight)
static float rotation_contribution(TrackStabilizationBase *track_ref, MovieTrackingMarker *marker, const float aspect, const float pivot[2], float *result_angle, float *result_scale)
static void compensate_rotation_center(const int size, float aspect, const float angle, const float scale, const float pivot[2], float result_translation[2])
static FCurve * retrieve_stab_animation(MovieClip *clip, const char *data_path, int idx)
static void discard_stabilization_working_context(StabContext *ctx)
static void init_track_for_stabilization(StabContext *ctx, MovieTrackingTrack *track, int reference_frame, float aspect, const float average_translation[2], const float pivot[2], const float average_angle, const float average_scale_step)
static void tracking_stabilize_frame_interpolation_cb(void *__restrict userdata, const int y, const TaskParallelTLS *__restrict)
static void translation_contribution(TrackStabilizationBase *track_ref, MovieTrackingMarker *marker, float result_offset[2])
uint len