Blender V4.3
tracking_stabilize.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2011 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
11#include <climits>
12
13#include "DNA_movieclip_types.h"
14#include "DNA_scene_types.h"
15#include "RNA_prototypes.hh"
16
17#include "BLI_ghash.h"
18#include "BLI_listbase.h"
19#include "BLI_math_geom.h"
20#include "BLI_math_rotation.h"
21#include "BLI_math_vector.h"
22#include "BLI_sort_utils.h"
23#include "BLI_task.h"
24#include "BLI_utildefines.h"
25
26#include "BKE_fcurve.hh"
27#include "BKE_movieclip.h"
28#include "BKE_tracking.h"
29
31#include "IMB_imbuf.hh"
32#include "IMB_imbuf_types.hh"
33#include "IMB_interp.hh"
34#include "MEM_guardedalloc.h"
35
36/* == Parameterization constants == */
37
38/* When measuring the scale changes relative to the rotation pivot point, it
39 * might happen accidentally that a probe point (tracking point), which doesn't
40 * actually move on a circular path, gets very close to the pivot point, causing
41 * the measured scale contribution to go toward infinity. We damp this undesired
42 * effect by adding a bias (floor) to the measured distances, which will
43 * dominate very small distances and thus cause the corresponding track's
44 * contribution to diminish.
45 * Measurements happen in normalized (0...1) coordinates within a frame.
46 */
static const float SCALE_ERROR_LIMIT_BIAS = 0.01f; /* const: tuning constant, never written at runtime */
48
49/* When to consider a track as completely faded out.
50 * This is used in conjunction with the "disabled" flag of the track
51 * to determine start positions, end positions and gaps
52 */
static const float EPSILON_WEIGHT = 0.005f; /* const: tuning constant, never written at runtime */
54
55/* == private working data == */
56
57/* Per track baseline for stabilization, defined at reference frame.
58 * A track's reference frame is chosen as close as possible to the (global)
59 * anchor_frame. Baseline holds the constant part of each track's contribution
60 * to the observed movement; it is calculated at initialization pass, using the
61 * measurement value at reference frame plus the average contribution to fill
62 * the gap between global anchor_frame and the reference frame for this track.
63 * This struct with private working data is associated to the local call context
64 * via `StabContext::private_track_data`
65 */
68
69 /* measured relative to translated pivot */
71
72 /* measured relative to translated pivot */
74
77};
78
79/* Tracks are reordered for initialization, starting as close as possible to
80 * anchor_frame
81 */
87
88/* Per frame private working data, for accessing possibly animated values. */
102
108
110 MovieTrackingTrack *track,
111 TrackStabilizationBase *private_data)
112{
113 BLI_ghash_insert(ctx->private_track_data, track, private_data);
114}
115
117{
118 if (val != nullptr) {
119 MEM_freeN(val);
120 }
121}
122
123/* == access animated values for given frame == */
124
125static FCurve *retrieve_stab_animation(MovieClip *clip, const char *data_path, int idx)
126{
127 return id_data_find_fcurve(&clip->id,
128 &clip->tracking.stabilization,
129 &RNA_MovieTrackingStabilization,
130 data_path,
131 idx,
132 nullptr);
133}
134
136{
137 return id_data_find_fcurve(&clip->id, track, &RNA_MovieTrackingTrack, "weight_stab", 0, nullptr);
138}
139
140static float fetch_from_fcurve(const FCurve *animationCurve,
141 int framenr,
142 StabContext *ctx,
143 float default_value)
144{
145 if (ctx && ctx->use_animation && animationCurve) {
146 int scene_framenr = BKE_movieclip_remap_clip_to_scene_frame(ctx->clip, framenr);
147 return evaluate_fcurve(animationCurve, scene_framenr);
148 }
149 return default_value;
150}
151
152static float get_animated_locinf(StabContext *ctx, int framenr)
153{
154 return fetch_from_fcurve(ctx->locinf, framenr, ctx, ctx->stab->locinf);
155}
156
157static float get_animated_rotinf(StabContext *ctx, int framenr)
158{
159 return fetch_from_fcurve(ctx->rotinf, framenr, ctx, ctx->stab->rotinf);
160}
161
162static float get_animated_scaleinf(StabContext *ctx, int framenr)
163{
164 return fetch_from_fcurve(ctx->scaleinf, framenr, ctx, ctx->stab->scaleinf);
165}
166
167static void get_animated_target_pos(StabContext *ctx, int framenr, float target_pos[2])
168{
169 target_pos[0] = fetch_from_fcurve(ctx->target_pos[0], framenr, ctx, ctx->stab->target_pos[0]);
170 target_pos[1] = fetch_from_fcurve(ctx->target_pos[1], framenr, ctx, ctx->stab->target_pos[1]);
171}
172
173static float get_animated_target_rot(StabContext *ctx, int framenr)
174{
175 return fetch_from_fcurve(ctx->target_rot, framenr, ctx, ctx->stab->target_rot);
176}
177
178static float get_animated_target_scale(StabContext *ctx, int framenr)
179{
180 return fetch_from_fcurve(ctx->target_scale, framenr, ctx, ctx->stab->scale);
181}
182
183static float get_animated_weight(StabContext *ctx, MovieTrackingTrack *track, int framenr)
184{
186 if (working_data && working_data->track_weight_curve) {
187 int scene_framenr = BKE_movieclip_remap_clip_to_scene_frame(ctx->clip, framenr);
188 return evaluate_fcurve(working_data->track_weight_curve, scene_framenr);
189 }
190 /* Use weight at global 'current frame' as fallback default. */
191 return track->weight_stab;
192}
193
194static void use_values_from_fcurves(StabContext *ctx, bool toggle)
195{
196 if (ctx != nullptr) {
197 ctx->use_animation = toggle;
198 }
199}
200
201/* Prepare per call private working area.
202 * Used for access to possibly animated values: retrieve available F-Curves.
203 */
205{
206 StabContext *ctx = MEM_cnew<StabContext>("2D stabilization animation runtime data");
207 ctx->clip = clip;
208 ctx->tracking = &clip->tracking;
209 ctx->stab = &clip->tracking.stabilization;
210 ctx->private_track_data = BLI_ghash_ptr_new("2D stabilization per track private working data");
211 ctx->locinf = retrieve_stab_animation(clip, "influence_location", 0);
212 ctx->rotinf = retrieve_stab_animation(clip, "influence_rotation", 0);
213 ctx->scaleinf = retrieve_stab_animation(clip, "influence_scale", 0);
214 ctx->target_pos[0] = retrieve_stab_animation(clip, "target_pos", 0);
215 ctx->target_pos[1] = retrieve_stab_animation(clip, "target_pos", 1);
216 ctx->target_rot = retrieve_stab_animation(clip, "target_rot", 0);
217 ctx->target_scale = retrieve_stab_animation(clip, "target_zoom", 0);
218 ctx->use_animation = true;
219 return ctx;
220}
221
231{
232 if (ctx != nullptr) {
234 MEM_freeN(ctx);
235 }
236}
237
239{
241 return (working_data != nullptr && working_data->is_init_for_stabilization);
242}
243
245{
246 return (track->flag & TRACK_USE_2D_STAB) && is_init_for_stabilization(ctx, track);
247}
248
250 MovieTrackingTrack *track,
251 MovieTrackingMarker *marker)
252{
253 return (marker->flag & MARKER_DISABLED) ||
254 (EPSILON_WEIGHT > get_animated_weight(ctx, track, marker->framenr));
255}
256
257static int search_closest_marker_index(MovieTrackingTrack *track, int ref_frame)
258{
259 const MovieTrackingMarker *marker = BKE_tracking_marker_get(track, ref_frame);
260 return marker - track->markers;
261}
262
264 StabContext *ctx, MovieTrackingTrack *track, int i, int ref_frame, int *next_higher)
265{
267 int end = track->markersnr;
268 BLI_assert(0 <= i && i < end);
269
270 while (i < end &&
271 (markers[i].framenr < ref_frame || is_effectively_disabled(ctx, track, &markers[i])))
272 {
273 i++;
274 }
275 if (i < end && markers[i].framenr < *next_higher) {
276 BLI_assert(markers[i].framenr >= ref_frame);
277 *next_higher = markers[i].framenr;
278 }
279}
280
282 StabContext *ctx, MovieTrackingTrack *track, int i, int ref_frame, int *next_lower)
283{
285 BLI_assert(0 <= i && i < track->markersnr);
286 while (i >= 0 &&
287 (markers[i].framenr > ref_frame || is_effectively_disabled(ctx, track, &markers[i])))
288 {
289 i--;
290 }
291 if (0 <= i && markers[i].framenr > *next_lower) {
292 BLI_assert(markers[i].framenr <= ref_frame);
293 *next_lower = markers[i].framenr;
294 }
295}
296
297/* Find closest frames with usable stabilization data.
298 * A frame counts as _usable_ when there is at least one track marked for
299 * translation stabilization, which has an enabled tracking marker at this very
300 * frame. We search both for the next lower and next higher position, to allow
301 * the caller to interpolate gaps and to extrapolate at the ends of the
302 * definition range. */
304 int framenr,
305 int *next_lower,
306 int *next_higher)
307{
308 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(ctx->tracking);
309
310 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
311 if (is_usable_for_stabilization(ctx, track)) {
312 int startpoint = search_closest_marker_index(track, framenr);
313 retrieve_next_higher_usable_frame(ctx, track, startpoint, framenr, next_higher);
314 retrieve_next_lower_usable_frame(ctx, track, startpoint, framenr, next_lower);
315 }
316 }
317}
318
319/* Find active (enabled) marker closest to the reference frame. */
321 MovieTrackingTrack *track,
322 int ref_frame)
323{
324 int next_lower = MINAFRAME;
325 int next_higher = MAXFRAME;
326 int i = search_closest_marker_index(track, ref_frame);
327 retrieve_next_higher_usable_frame(ctx, track, i, ref_frame, &next_higher);
328 retrieve_next_lower_usable_frame(ctx, track, i, ref_frame, &next_lower);
329
330 if ((next_higher - ref_frame) < (ref_frame - next_lower)) {
331 return BKE_tracking_marker_get_exact(track, next_higher);
332 }
333
334 return BKE_tracking_marker_get_exact(track, next_lower);
335}
336
337/* Retrieve tracking data, if available and applicable for this frame.
338 * The returned weight value signals the validity; data recorded for this
339 * tracking marker on the exact requested frame is output with the full weight
340 * of this track, while gaps in the data sequence cause the weight to go to zero.
341 */
343 MovieTrackingTrack *track,
344 int framenr,
345 float *r_weight)
346{
347 MovieTrackingMarker *marker = BKE_tracking_marker_get_exact(track, framenr);
348 if (marker != nullptr && !(marker->flag & MARKER_DISABLED)) {
349 *r_weight = get_animated_weight(ctx, track, framenr);
350 return marker;
351 }
352
353 /* No marker at this frame (=gap) or marker disabled. */
354 *r_weight = 0.0f;
355 return nullptr;
356}
357
358/* Define the reference point for rotation/scale measurement and compensation.
359 * The stabilizer works by assuming the image was distorted by an affine linear
360 * transform, i.e. it was rotated and stretched around this reference point
361 * (pivot point) and then shifted laterally. Any scale and orientation changes
362 * will be picked up relative to this point. And later the image will be
363 * stabilized by rotating around this point. The result can only be as
364 * accurate as this pivot point actually matches the real rotation center
365 * of the actual movements. Thus any scheme to define a pivot point is
366 * always guesswork.
367 *
368 * As a simple default, we use the weighted average of the location markers
369 * of the current frame as pivot point. TODO: It is planned to add further
370 * options, like e.g. anchoring the pivot point at the canvas. Moreover,
371 * it is planned to allow for a user controllable offset.
372 */
static void setup_pivot(const float ref_pos[2], float r_pivot[2])
{
  /* TODO: add an animated offset position here. */
  r_pivot[0] = ref_pos[0];
  r_pivot[1] = ref_pos[1];
}
378
379/* Calculate the contribution of a single track at the time position (frame) of
380 * the given marker. Each track has a local reference frame, which is as close
381 * as possible to the global anchor_frame. Thus the translation contribution is
382 * comprised of the offset relative to the image position at that reference
383 * frame, plus a guess of the contribution for the time span between the
384 * anchor_frame and the local reference frame of this track. The constant part
385 * of this contribution is precomputed initially. At the anchor_frame, by
386 * definition the contribution of all tracks is zero, keeping the frame in place.
387 *
388 * track_ref is per track baseline contribution at reference frame; filled in at
389 * initialization
390 * marker is tracking data to use as contribution for current frame.
391 * result_offset is a total cumulated contribution of this track,
392 * relative to the stabilization anchor_frame,
393 * in normalized (0...1) coordinates.
394 */
396 MovieTrackingMarker *marker,
397 float result_offset[2])
398{
399 add_v2_v2v2(result_offset, track_ref->stabilization_offset_base, marker->pos);
400}
401
402/* Similar to the ::translation_contribution(), the rotation contribution is
403 * comprised of the contribution by this individual track, and the averaged
404 * contribution from anchor_frame to the ref point of this track.
405 * - Contribution is in terms of angles, -pi < angle < +pi, and all averaging
406 * happens in this domain.
407 * - Yet the actual measurement happens as vector between pivot and the current
408 * tracking point
409 * - Currently we use the center of frame as approximation for the rotation pivot
410 * point.
411 * - Moreover, the pivot point has to be compensated for the already determined
412 * shift offset, in order to get the pure rotation around the pivot.
413 * To turn this into a _contribution_, the likewise corrected angle at the
414 * reference frame has to be subtracted, to get only the pure angle difference
415 * this tracking point has captured.
416 * - To get from vectors to angles, we have to go through an arcus tangens,
417 * which involves the issue of the definition range: the resulting angles will
418 * flip by 360deg when the measured vector passes from the 2nd to the third
419 * quadrant, thus messing up the average calculation. Since _any_ tracking
420 * point might be used, these problems are quite common in practice.
421 * - Thus we perform the subtraction of the reference and the addition of the
422 * baseline contribution in polar coordinates as simple addition of angles;
423 * since these parts are fixed, we can bake them into a rotation matrix.
424 * With this approach, the border of the arcus tangens definition range will
425 * be reached only, when the _whole_ contribution approaches +- 180deg,
426 * meaning we've already tilted the frame upside down. This situation is way
427 * less common and can be tolerated.
428 * - As an additional feature, when activated, also changes in image scale
429 * relative to the rotation center can be picked up. To handle those values
430 * in the same framework, we average the scales as logarithms.
431 *
432 * aspect is a total aspect ratio of the undistorted image (includes frame and
433 * pixel aspect). The function returns a quality factor, which can be used
434 * to damp the contributions of points in close proximity to the pivot point,
435 * since such contributions might be dominated by rounding errors and thus
436 * poison the calculated average. When the quality factor goes towards zero,
437 * the weight of this contribution should be reduced accordingly.
438 */
440 MovieTrackingMarker *marker,
441 const float aspect,
442 const float pivot[2],
443 float *result_angle,
444 float *result_scale)
445{
446 float len, quality;
447 float pos[2];
448 sub_v2_v2v2(pos, marker->pos, pivot);
449
450 pos[0] *= aspect;
452
453 *result_angle = atan2f(pos[1], pos[0]);
454
455 len = len_v2(pos);
456
457 /* prevent points very close to the pivot point from poisoning the result */
460
461 *result_scale = len * track_ref->stabilization_scale_base;
462 BLI_assert(0.0 < *result_scale);
463
464 return quality;
465}
466
467/* Workaround to allow for rotation around an arbitrary pivot point.
468 * Currently, the public API functions do not support this flexibility.
469 * Rather, rotation will always be applied around a fixed origin.
470 * As a workaround, we shift the image after rotation to match the
471 * desired rotation center. And since this offset needs to be applied
472 * after the rotation and scaling, we can collapse it with the
473 * translation compensation, which is also a lateral shift (offset).
474 * The offset to apply is intended_pivot - rotated_pivot
475 */
static void compensate_rotation_center(const int size,
                                       float aspect,
                                       const float angle,
                                       const float scale,
                                       const float pivot[2],
                                       float result_translation[2])
{
  /* The fixed rotation origin used by the transform API:
   * the (aspect-corrected) frame center. */
  const float origin[2] = {0.5f * aspect * size, 0.5f * size};
  float intended_pivot[2], rotated_pivot[2];
  float rotation_mat[2][2];

  copy_v2_v2(intended_pivot, pivot);
  copy_v2_v2(rotated_pivot, pivot);
  angle_to_mat2(rotation_mat, +angle);
  /* Mirror what the transform will do to the pivot point:
   * rotate and scale it around `origin`. */
  sub_v2_v2(rotated_pivot, origin);
  mul_m2_v2(rotation_mat, rotated_pivot);
  mul_v2_fl(rotated_pivot, scale);
  add_v2_v2(rotated_pivot, origin);
  /* Accumulate the lateral shift `intended_pivot - rotated_pivot`, which
   * moves the transformed image back so the rotation appears to happen
   * around the intended pivot point. */
  add_v2_v2(result_translation, intended_pivot);
  sub_v2_v2(result_translation, rotated_pivot);
}
497
498/* Weighted average of the per track cumulated contributions at given frame.
499 * Returns truth if all desired calculations could be done and all averages are
500 * available.
501 *
502 * NOTE: Even if the result is not `true`, the returned translation and angle
503 * are always sensible and as good as can be. Especially in the
504 * initialization phase we might not be able to get any average (yet) or
505 * get only a translation value. Since initialization visits tracks in a
506 * specific order, starting from anchor_frame, the result is logically
507 * correct nonetheless. But under normal operating conditions,
508 * a result of `false` should disable the stabilization function
509 */
511 int framenr,
512 float aspect,
513 float r_translation[2],
514 float r_pivot[2],
515 float *r_angle,
516 float *r_scale_step)
517{
518 bool ok;
519 float weight_sum;
520 MovieTracking *tracking = ctx->tracking;
521 MovieTrackingStabilization *stab = &tracking->stabilization;
522 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(ctx->tracking);
523 float ref_pos[2];
525
526 zero_v2(r_translation);
527 *r_scale_step = 0.0f; /* logarithm */
528 *r_angle = 0.0f;
529
530 zero_v2(ref_pos);
531
532 ok = false;
533 weight_sum = 0.0f;
534 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
535 if (!is_init_for_stabilization(ctx, track)) {
536 continue;
537 }
538 if (track->flag & TRACK_USE_2D_STAB) {
539 float weight = 0.0f;
540 MovieTrackingMarker *marker = get_tracking_data_point(ctx, track, framenr, &weight);
541 if (marker) {
543 track);
544 BLI_assert(stabilization_base != nullptr);
545 float offset[2];
546 weight_sum += weight;
547 translation_contribution(stabilization_base, marker, offset);
548 r_translation[0] += weight * offset[0];
549 r_translation[1] += weight * offset[1];
550 ref_pos[0] += weight * marker->pos[0];
551 ref_pos[1] += weight * marker->pos[1];
552 ok |= (weight_sum > EPSILON_WEIGHT);
553 }
554 }
555 }
556 if (!ok) {
557 return false;
558 }
559
560 ref_pos[0] /= weight_sum;
561 ref_pos[1] /= weight_sum;
562 r_translation[0] /= weight_sum;
563 r_translation[1] /= weight_sum;
564 setup_pivot(ref_pos, r_pivot);
565
566 if (!(stab->flag & TRACKING_STABILIZE_ROTATION)) {
567 return ok;
568 }
569
570 ok = false;
571 weight_sum = 0.0f;
572 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
573 if (!is_init_for_stabilization(ctx, track)) {
574 continue;
575 }
576 if (track->flag & TRACK_USE_2D_STAB_ROT) {
577 float weight = 0.0f;
578 MovieTrackingMarker *marker = get_tracking_data_point(ctx, track, framenr, &weight);
579 if (marker) {
581 track);
582 BLI_assert(stabilization_base != nullptr);
583 float rotation, scale, quality;
584 quality = rotation_contribution(
585 stabilization_base, marker, aspect, r_pivot, &rotation, &scale);
586 const float quality_weight = weight * quality;
587 weight_sum += quality_weight;
588 *r_angle += rotation * quality_weight;
589 if (stab->flag & TRACKING_STABILIZE_SCALE) {
590 *r_scale_step += logf(scale) * quality_weight;
591 }
592 else {
593 *r_scale_step = 0;
594 }
595 /* NOTE: Use original marker weight and not the scaled one with the proximity here to allow
596 * simple stabilization setups when there is a single track in a close proximity of the
597 * center. */
598 ok |= (weight > EPSILON_WEIGHT);
599 }
600 }
601 }
602 if (ok) {
603 *r_scale_step /= weight_sum;
604 *r_angle /= weight_sum;
605 }
606 else {
607 /* We reach this point because translation could be calculated,
608 * but rotation/scale found no data to work on.
609 */
610 *r_scale_step = 0.0f;
611 *r_angle = 0.0f;
612 }
613 return true;
614}
615
616/* Calculate weight center of location tracks for given frame.
617 * This function performs similar calculations as average_track_contributions(),
618 * but does not require the tracks to be initialized for stabilization. Moreover,
619 * when there is no usable tracking data for the given frame number, data from
620 * a neighboring frame is used. Thus this function can be used to calculate
621 * a starting point on initialization.
622 */
623static void average_marker_positions(StabContext *ctx, int framenr, float r_ref_pos[2])
624{
625 bool ok = false;
626 float weight_sum;
627 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(ctx->tracking);
628
629 zero_v2(r_ref_pos);
630 weight_sum = 0.0f;
631 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
632 if (track->flag & TRACK_USE_2D_STAB) {
633 float weight = 0.0f;
634 MovieTrackingMarker *marker = get_tracking_data_point(ctx, track, framenr, &weight);
635 if (marker) {
636 weight_sum += weight;
637 r_ref_pos[0] += weight * marker->pos[0];
638 r_ref_pos[1] += weight * marker->pos[1];
639 ok |= (weight_sum > EPSILON_WEIGHT);
640 }
641 }
642 }
643 if (ok) {
644 r_ref_pos[0] /= weight_sum;
645 r_ref_pos[1] /= weight_sum;
646 }
647 else {
648 /* No usable tracking data on any track on this frame.
649 * Use data from neighboring frames to extrapolate...
650 */
651 int next_lower = MINAFRAME;
652 int next_higher = MAXFRAME;
653 use_values_from_fcurves(ctx, true);
654 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
655 /* NOTE: we deliberately do not care if this track
656 * is already initialized for stabilization. */
657 if (track->flag & TRACK_USE_2D_STAB) {
658 int startpoint = search_closest_marker_index(track, framenr);
659 retrieve_next_higher_usable_frame(ctx, track, startpoint, framenr, &next_higher);
660 retrieve_next_lower_usable_frame(ctx, track, startpoint, framenr, &next_lower);
661 }
662 }
663 if (next_lower >= MINFRAME) {
664 /* use next usable frame to the left.
665 * Also default to this frame when we're in a gap */
666 average_marker_positions(ctx, next_lower, r_ref_pos);
667 }
668 else if (next_higher < MAXFRAME) {
669 average_marker_positions(ctx, next_higher, r_ref_pos);
670 }
671 use_values_from_fcurves(ctx, false);
672 }
673}
674
675/* Linear interpolation of data retrieved at two measurement points.
676 * This function is used to fill gaps in the middle of the covered area,
677 * at frames without any usable tracks for stabilization.
678 *
679 * framenr is a position to interpolate for.
680 * frame_a is a valid measurement point below framenr
681 * frame_b is a valid measurement point above framenr
682 * Returns truth if both measurements could actually be retrieved.
683 * Otherwise output parameters remain unaltered
684 */
686 int framenr,
687 int frame_a,
688 int frame_b,
689 const float aspect,
690 float r_translation[2],
691 float r_pivot[2],
692 float *r_angle,
693 float *r_scale_step)
694{
695 float t, s;
696 float trans_a[2], trans_b[2];
697 float angle_a, angle_b;
698 float scale_a, scale_b;
699 float pivot_a[2], pivot_b[2];
700 bool success = false;
701
702 BLI_assert(frame_a <= frame_b);
703 BLI_assert(frame_a <= framenr);
704 BLI_assert(framenr <= frame_b);
705
706 t = (float(framenr) - frame_a) / (frame_b - frame_a);
707 s = 1.0f - t;
708
710 ctx, frame_a, aspect, trans_a, pivot_a, &angle_a, &scale_a);
711 if (!success) {
712 return false;
713 }
715 ctx, frame_b, aspect, trans_b, pivot_b, &angle_b, &scale_b);
716 if (!success) {
717 return false;
718 }
719
720 interp_v2_v2v2(r_translation, trans_a, trans_b, t);
721 interp_v2_v2v2(r_pivot, pivot_a, pivot_b, t);
722 *r_scale_step = s * scale_a + t * scale_b;
723 *r_angle = s * angle_a + t * angle_b;
724 return true;
725}
726
727/* Reorder tracks starting with those providing a tracking data frame
728 * closest to the global anchor_frame. Tracks with a gap at anchor_frame or
729 * starting farther away from anchor_frame altogether will be visited later.
730 * This allows to build up baseline contributions incrementally.
731 *
732 * order is an array for sorting the tracks. Must be of suitable size to hold
733 * all tracks.
734 * Returns number of actually usable tracks, can be less than the overall number
735 * of tracks.
736 *
737 * NOTE: After returning, the order array holds entries up to the number of
738 * usable tracks, appropriately sorted starting with the closest tracks.
739 * Initialization includes disabled tracks, since they might be enabled
740 * through automation later.
741 */
743{
744 size_t tracknr = 0;
745 MovieTracking *tracking = ctx->tracking;
746 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(tracking);
747 int anchor_frame = tracking->stabilization.anchor_frame;
748
749 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
750 MovieTrackingMarker *marker;
751 order[tracknr].data = track;
752 marker = get_closest_marker(ctx, track, anchor_frame);
753 if (marker != nullptr && (track->flag & (TRACK_USE_2D_STAB | TRACK_USE_2D_STAB_ROT))) {
754 order[tracknr].sort_value = abs(marker->framenr - anchor_frame);
755 order[tracknr].reference_frame = marker->framenr;
756 tracknr++;
757 }
758 }
759 if (tracknr) {
760 qsort(order, tracknr, sizeof(TrackInitOrder), BLI_sortutil_cmp_int);
761 }
762 return tracknr;
763}
764
765/* Setup the constant part of this track's contribution to the determined frame
766 * movement. Tracks usually don't provide tracking data for every frame. Thus,
767 * for determining data at a given frame, we split up the contribution into a
768 * part covered by actual measurements on this track, and the initial gap
769 * between this track's reference frame and the global anchor_frame.
770 * The (missing) data for the gap can be substituted by the average offset
771 * observed by the other tracks covering the gap. This approximation doesn't
772 * introduce wrong data, but it records data with incorrect weight. A totally
773 * correct solution would require us to average the contribution per frame, and
774 * then integrate stepwise over all frames -- which of course would be way more
775 * expensive, especially for longer clips. To the contrary, our solution
776 * cumulates the total contribution per track and averages afterwards over all
777 * tracks; it can thus be calculated just based on the data of a single frame,
778 * plus the "baseline" for the reference frame, which is what we are computing
779 * here.
780 *
781 * Since we're averaging _contributions_, we have to calculate the _difference_
782 * of the measured position at current frame and the position at the reference
783 * frame. But the "reference" part of this difference is constant and can thus
784 * be packed together with the baseline contribution into a single precomputed
785 * vector per track.
786 *
787 * In case of the rotation contribution, the principle is the same, but we have
788 * to compensate for the already determined translation and measure the pure
789 * rotation, simply because this is how we model the offset: shift plus rotation
790 * around the shifted rotation center. To circumvent problems with the
791 * definition range of the arcus tangens function, we perform this baseline
792 * addition and reference angle subtraction in polar coordinates and bake this
793 * operation into a precomputed rotation matrix.
794 *
795 * track is the track to be initialized
796 * reference_frame is a local frame for this track, the closest pick to the
797 * global anchor_frame.
798 * aspect is a total aspect ratio of the undistorted image (includes frame and
799 * pixel aspect).
800 * target_pos is a possibly animated target position as set by the user for
801 * the reference_frame
802 * average_translation is a value observed by the _other_ tracks for the gap
803 * between reference_frame and anchor_frame. This
804 * average must not contain contributions of frames
805 * not yet initialized
806 * average_angle in a similar way, the rotation value observed by the
807 * _other_ tracks.
808 * average_scale_step is an image scale factor observed on average by the other
809 * tracks for this frame. This value is recorded and
810 * averaged as logarithm. The recorded scale changes
811 * are damped for very small contributions, to limit
812 * the effect of probe points approaching the pivot
813 * too closely.
814 *
815 * NOTE: when done, this track is marked as initialized
816 */
818 MovieTrackingTrack *track,
819 int reference_frame,
820 float aspect,
821 const float average_translation[2],
822 const float pivot[2],
823 const float average_angle,
824 const float average_scale_step)
825{
826 float pos[2], angle, len;
828 MovieTrackingMarker *marker = BKE_tracking_marker_get_exact(track, reference_frame);
829 /* Logic for initialization order ensures there *is* a marker on that
830 * very frame.
831 */
832 BLI_assert(marker != nullptr);
833 BLI_assert(local_data != nullptr);
834
835 /* Per track baseline value for translation. */
836 sub_v2_v2v2(local_data->stabilization_offset_base, average_translation, marker->pos);
837
838 /* Per track baseline value for rotation. */
839 sub_v2_v2v2(pos, marker->pos, pivot);
840
841 pos[0] *= aspect;
842 angle = average_angle - atan2f(pos[1], pos[0]);
843 angle_to_mat2(local_data->stabilization_rotation_base, angle);
844
845 /* Per track baseline value for zoom. */
847 local_data->stabilization_scale_base = expf(average_scale_step) / len;
848
849 local_data->is_init_for_stabilization = true;
850}
851
852static void init_all_tracks(StabContext *ctx, float aspect)
853{
854 size_t track_len = 0;
855 MovieClip *clip = ctx->clip;
856 MovieTracking *tracking = ctx->tracking;
857 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(tracking);
858 TrackInitOrder *order;
859
860 /* Attempt to start initialization at anchor_frame.
861 * By definition, offset contribution is zero there.
862 */
863 int reference_frame = tracking->stabilization.anchor_frame;
864 float average_angle = 0, average_scale_step = 0;
865 float average_translation[2], average_pos[2], pivot[2];
866 zero_v2(average_translation);
867 zero_v2(pivot);
868
869 /* Initialize private working data. */
870 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
872 if (!local_data) {
873 local_data = MEM_cnew<TrackStabilizationBase>("2D stabilization per track baseline data");
874 attach_stabilization_baseline_data(ctx, track, local_data);
875 }
876 BLI_assert(local_data != nullptr);
877 local_data->track_weight_curve = retrieve_track_weight_animation(clip, track);
878 local_data->is_init_for_stabilization = false;
879
880 track_len++;
881 }
882 if (!track_len) {
883 return;
884 }
885
886 order = MEM_cnew_array<TrackInitOrder>(track_len, "stabilization track order");
887 if (!order) {
888 return;
889 }
890
891 track_len = establish_track_initialization_order(ctx, order);
892 if (track_len == 0) {
893 goto cleanup;
894 }
895
896 /* starting point for pivot, before having initialized any track */
897 average_marker_positions(ctx, reference_frame, average_pos);
898 setup_pivot(average_pos, pivot);
899
900 for (int i = 0; i < track_len; i++) {
901 MovieTrackingTrack *track = order[i].data;
902 if (reference_frame != order[i].reference_frame) {
903 reference_frame = order[i].reference_frame;
905 reference_frame,
906 aspect,
907 average_translation,
908 pivot,
909 &average_angle,
910 &average_scale_step);
911 }
913 track,
914 reference_frame,
915 aspect,
916 average_translation,
917 pivot,
918 average_angle,
919 average_scale_step);
920 }
921
922cleanup:
923 MEM_freeN(order);
924}
925
926/* Retrieve the measurement of frame movement by averaging contributions of
927 * active tracks.
928 *
929 * translation is a measurement in normalized 0..1 coordinates.
930 * angle is a measurement in radians -pi..+pi counter clockwise relative to
931 * translation compensated frame center
932 * scale_step is a measurement of image scale changes, in logarithmic scale
933 * (zero means scale == 1)
934 * Returns calculation enabled and all data retrieved as expected for this frame.
935 *
936 * NOTE: when returning `false`, output parameters are reset to neutral values.
937 */
939 int framenr,
940 float aspect,
941 float r_translation[2],
942 float r_pivot[2],
943 float *r_angle,
944 float *r_scale_step)
945{
946 bool success = false;
947
948 /* Early output if stabilization is disabled. */
949 if ((ctx->stab->flag & TRACKING_2D_STABILIZATION) == 0) {
950 zero_v2(r_translation);
951 *r_scale_step = 0.0f;
952 *r_angle = 0.0f;
953 return false;
954 }
955
957 ctx, framenr, aspect, r_translation, r_pivot, r_angle, r_scale_step);
958 if (!success) {
959 /* Try to hold extrapolated settings beyond the definition range
960 * and to interpolate in gaps without any usable tracking data
961 * to prevent sudden jump to image zero position.
962 */
963 int next_lower = MINAFRAME;
964 int next_higher = MAXFRAME;
965 use_values_from_fcurves(ctx, true);
966 find_next_working_frames(ctx, framenr, &next_lower, &next_higher);
967 if (next_lower >= MINFRAME && next_higher < MAXFRAME) {
969 framenr,
970 next_lower,
971 next_higher,
972 aspect,
973 r_translation,
974 r_pivot,
975 r_angle,
976 r_scale_step);
977 }
978 else if (next_higher < MAXFRAME) {
979 /* Before start of stabilized range: extrapolate start point
980 * settings.
981 */
983 ctx, next_higher, aspect, r_translation, r_pivot, r_angle, r_scale_step);
984 }
985 else if (next_lower >= MINFRAME) {
986 /* After end of stabilized range: extrapolate end point settings. */
988 ctx, next_lower, aspect, r_translation, r_pivot, r_angle, r_scale_step);
989 }
990 use_values_from_fcurves(ctx, false);
991 }
992 return success;
993}
994
995/* Calculate stabilization data (translation, scale and rotation) from given raw
996 * measurements. Result is in absolute image dimensions (expanded image, square
997 * pixels), includes automatic or manual scaling and compensates for a target
998 * frame position, if given.
999 *
1000 * size is a size of the expanded image, the width in pixels is size * aspect.
1001 * aspect is a ratio (width / height) of the effective canvas (square pixels).
1002 * do_compensate denotes whether to actually output values necessary to
1003 * _compensate_ the determined frame movement.
1004 * Otherwise, the effective target movement is returned.
1005 */
1007 int framenr,
1008 int size,
1009 float aspect,
1010 bool do_compensate,
1011 float scale_step,
1012 float r_translation[2],
1013 float r_pivot[2],
1014 float *r_scale,
1015 float *r_angle)
1016{
1017 float target_pos[2], target_scale;
1018 float scaleinf = get_animated_scaleinf(ctx, framenr);
1019
1020 if (ctx->stab->flag & TRACKING_STABILIZE_SCALE) {
1021 *r_scale = expf(scale_step * scaleinf); /* Averaged in log scale */
1022 }
1023 else {
1024 *r_scale = 1.0f;
1025 }
1026
1027 mul_v2_fl(r_translation, get_animated_locinf(ctx, framenr));
1028 *r_angle *= get_animated_rotinf(ctx, framenr);
1029
1030 /* Compensate for a target frame position.
1031 * This allows to follow tracking / panning shots in a semi manual fashion,
1032 * when animating the settings for the target frame position.
1033 */
1034 get_animated_target_pos(ctx, framenr, target_pos);
1035 sub_v2_v2(r_translation, target_pos);
1036 *r_angle -= get_animated_target_rot(ctx, framenr);
1037 target_scale = get_animated_target_scale(ctx, framenr);
1038 if (target_scale != 0.0f) {
1039 *r_scale /= target_scale;
1040 /* target_scale is an expected/intended reference zoom value */
1041 }
1042
1043 /* Convert from relative to absolute coordinates, square pixels. */
1044 r_translation[0] *= float(size) * aspect;
1045 r_translation[1] *= float(size);
1046 r_pivot[0] *= float(size) * aspect;
1047 r_pivot[1] *= float(size);
1048
1049 /* Output measured data, or inverse of the measured values for
1050 * compensation?
1051 */
1052 if (do_compensate) {
1053 mul_v2_fl(r_translation, -1.0f);
1054 *r_angle *= -1.0f;
1055 if (*r_scale != 0.0f) {
1056 *r_scale = 1.0f / *r_scale;
1057 }
1058 }
1059}
1060
1061static void stabilization_data_to_mat4(float pixel_aspect,
1062 const float pivot[2],
1063 const float translation[2],
1064 float scale,
1065 float angle,
1066 float r_mat[4][4])
1067{
1068 float translation_mat[4][4], rotation_mat[4][4], scale_mat[4][4], pivot_mat[4][4],
1069 inv_pivot_mat[4][4], aspect_mat[4][4], inv_aspect_mat[4][4];
1070 const float scale_vector[3] = {scale, scale, 1.0f};
1071
1072 unit_m4(translation_mat);
1073 unit_m4(rotation_mat);
1074 unit_m4(scale_mat);
1075 unit_m4(aspect_mat);
1076 unit_m4(pivot_mat);
1077 unit_m4(inv_pivot_mat);
1078
1079 /* aspect ratio correction matrix */
1080 aspect_mat[0][0] /= pixel_aspect;
1081 invert_m4_m4(inv_aspect_mat, aspect_mat);
1082
1083 add_v2_v2(pivot_mat[3], pivot);
1084 sub_v2_v2(inv_pivot_mat[3], pivot);
1085
1086 size_to_mat4(scale_mat, scale_vector); /* scale matrix */
1087 add_v2_v2(translation_mat[3], translation); /* translation matrix */
1088 rotate_m4(rotation_mat, 'Z', angle); /* rotation matrix */
1089
1090 /* Compose transformation matrix. */
1091 mul_m4_series(r_mat,
1092 aspect_mat,
1093 translation_mat,
1094 pivot_mat,
1095 scale_mat,
1096 rotation_mat,
1097 inv_pivot_mat,
1098 inv_aspect_mat);
1099}
1100
1101/* Calculate scale factor necessary to eliminate black image areas
1102 * caused by the compensating movements of the stabilizer.
1103 * This function visits every frame where stabilization data is
1104 * available and determines the factor for this frame. The overall
1105 * largest factor found is returned as result.
1106 *
1107 * NOTE: all tracks need to be initialized before calling this function.
1108 */
1109static float calculate_autoscale_factor(StabContext *ctx, int size, float aspect)
1110{
1111 MovieTrackingStabilization *stab = ctx->stab;
1112 MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(ctx->tracking);
1113 float pixel_aspect = ctx->tracking->camera.pixel_aspect;
1114 int height = size, width = aspect * size;
1115
1116 int sfra = INT_MAX, efra = INT_MIN;
1117 float scale = 1.0f, scale_step = 0.0f;
1118
1119 /* Calculate maximal frame range of tracks where stabilization is active. */
1120 LISTBASE_FOREACH (MovieTrackingTrack *, track, &tracking_camera_object->tracks) {
1121 if ((track->flag & TRACK_USE_2D_STAB) ||
1122 ((stab->flag & TRACKING_STABILIZE_ROTATION) && (track->flag & TRACK_USE_2D_STAB_ROT)))
1123 {
1124 int first_frame = track->markers[0].framenr;
1125 int last_frame = track->markers[track->markersnr - 1].framenr;
1126 sfra = min_ii(sfra, first_frame);
1127 efra = max_ii(efra, last_frame);
1128 }
1129 }
1130
1131 use_values_from_fcurves(ctx, true);
1132 for (int cfra = sfra; cfra <= efra; cfra++) {
1133 float translation[2], pivot[2], angle, tmp_scale;
1134 float mat[4][4];
1135 const float points[4][2] = {
1136 {0.0f, 0.0f}, {0.0f, float(height)}, {float(width), float(height)}, {float(width), 0.0f}};
1137 const bool do_compensate = true;
1138 /* Calculate stabilization parameters for the current frame. */
1140 ctx, cfra, aspect, translation, pivot, &angle, &scale_step);
1142 cfra,
1143 size,
1144 aspect,
1145 do_compensate,
1146 scale_step,
1147 translation,
1148 pivot,
1149 &tmp_scale,
1150 &angle);
1151 /* Compose transformation matrix. */
1152 /* NOTE: Here we operate in NON-COMPENSATED coordinates, meaning we have
1153 * to construct transformation matrix using proper pivot point.
1154 * Compensation for that will happen later on.
1155 */
1156 stabilization_data_to_mat4(pixel_aspect, pivot, translation, tmp_scale, angle, mat);
1157 /* Investigate the transformed border lines for this frame;
1158 * find out, where it cuts the original frame.
1159 */
1160 for (int edge_index = 0; edge_index < 4; edge_index++) {
1161 /* Calculate coordinates of stabilized frame edge points.
1162 * Use matrix multiplication here so we operate in homogeneous
1163 * coordinates.
1164 */
1165 float stable_edge_p1[3], stable_edge_p2[3];
1166 copy_v2_v2(stable_edge_p1, points[edge_index]);
1167 copy_v2_v2(stable_edge_p2, points[(edge_index + 1) % 4]);
1168 stable_edge_p1[2] = stable_edge_p2[2] = 0.0f;
1169 mul_m4_v3(mat, stable_edge_p1);
1170 mul_m4_v3(mat, stable_edge_p2);
1171 /* Now we iterate over all original frame corners (we call them
1172 * 'point' here) to see if there's black area between stabilized
1173 * frame edge and original point.
1174 */
1175 for (int point_index = 0; point_index < 4; point_index++) {
1176 const float point[3] = {points[point_index][0], points[point_index][1], 0.0f};
1177 /* Calculate vector which goes from first edge point to
1178 * second one.
1179 */
1180 float stable_edge_vec[3];
1181 sub_v3_v3v3(stable_edge_vec, stable_edge_p2, stable_edge_p1);
1182 /* Calculate vector which connects current frame point to
1183 * first edge point.
1184 */
1185 float point_to_edge_start_vec[3];
1186 sub_v3_v3v3(point_to_edge_start_vec, point, stable_edge_p1);
1187 /* Use this two vectors to check whether frame point is inside
1188 * of the stabilized frame or not.
1189 * If the point is inside, there is no black area happening
1190 * and no scaling required for it.
1191 */
1192 if (cross_v2v2(stable_edge_vec, point_to_edge_start_vec) >= 0.0f) {
1193 /* We are scaling around motion-compensated pivot point. */
1194 float scale_pivot[2];
1195 add_v2_v2v2(scale_pivot, pivot, translation);
1196 /* Calculate line which goes via `point` and parallel to
1197 * the stabilized frame edge. This line is coming via
1198 * `point` and `point2` at the end.
1199 */
1200 float point2[2];
1201 add_v2_v2v2(point2, point, stable_edge_vec);
1202 /* Calculate actual distance between pivot point and
1203 * the stabilized frame edge. Then calculate distance
1204 * between pivot point and line which goes via actual
1205 * corner and is parallel to the edge.
1206 *
1207 * Dividing one by another will give us required scale
1208 * factor to get rid of black areas.
1209 */
1210 float real_dist = dist_to_line_v2(scale_pivot, stable_edge_p1, stable_edge_p2);
1211 float required_dist = dist_to_line_v2(scale_pivot, point, point2);
1212 const float S = required_dist / real_dist;
1213 scale = max_ff(scale, S);
1214 }
1215 }
1216 }
1217 }
1218 if (stab->maxscale > 0.0f) {
1219 scale = min_ff(scale, stab->maxscale);
1220 }
1221 use_values_from_fcurves(ctx, false);
1222
1223 return scale;
1224}
1225
1226/* Prepare working data and determine reference point for each track.
1227 *
1228 * NOTE: These calculations _could_ be cached and reused for all frames of the
1229 * same clip. However, since proper initialization depends on (weight)
1230 * animation and setup of tracks, ensuring consistency of cached init data
1231 * turns out to be tricky, hard to maintain and generally not worth the
1232 * effort. Thus we'll re-initialize on every frame.
1233 */
1234static StabContext *init_stabilizer(MovieClip *clip, int size, float aspect)
1235{
1237 BLI_assert(ctx != nullptr);
1238 init_all_tracks(ctx, aspect);
1239 if (ctx->stab->flag & TRACKING_AUTOSCALE) {
1240 ctx->stab->scale = 1.0;
1241 ctx->stab->scale = calculate_autoscale_factor(ctx, size, aspect);
1242 }
1243 /* By default, just use values for the global current frame. */
1244 use_values_from_fcurves(ctx, false);
1245 return ctx;
1246}
1247
1248/* === public interface functions === */
1249
1251 int framenr,
1252 int width,
1253 int height,
1254 float translation[2],
1255 float *scale,
1256 float *angle)
1257{
1258 StabContext *ctx = nullptr;
1259 MovieTracking *tracking = &clip->tracking;
1260 bool enabled = (tracking->stabilization.flag & TRACKING_2D_STABILIZATION);
1261 /* Might become a parameter of a stabilization compositor node. */
1262 bool do_compensate = true;
1263 float scale_step = 0.0f;
1264 float pixel_aspect = tracking->camera.pixel_aspect;
1265 float aspect = float(width) * pixel_aspect / height;
1266 int size = height;
1267 float pivot[2];
1268
1269 if (enabled) {
1270 ctx = init_stabilizer(clip, size, aspect);
1271 }
1272
1274 ctx, framenr, aspect, translation, pivot, angle, &scale_step))
1275 {
1277 ctx, framenr, size, aspect, do_compensate, scale_step, translation, pivot, scale, angle);
1278 compensate_rotation_center(size, aspect, *angle, *scale, pivot, translation);
1279 }
1280 else {
1281 zero_v2(translation);
1282 *scale = 1.0f;
1283 *angle = 0.0f;
1284 }
1286}
1287
1294
1295static void tracking_stabilize_frame_interpolation_cb(void *__restrict userdata,
1296 const int y,
1297 const TaskParallelTLS *__restrict /*tls*/)
1298{
1299 using namespace blender;
1300
1302 static_cast<TrackingStabilizeFrameInterpolationData *>(userdata);
1303 ImBuf *ibuf = data->ibuf;
1304 ImBuf *tmpibuf = data->tmpibuf;
1305 float(*mat)[4] = data->mat;
1306
1307 float vec[3] = {0.0f, float(y), 0.0f};
1308 float rvec[3];
1309
1310 if (ibuf->float_buffer.data) {
1311 /* Float image. */
1312 float4 *dst = reinterpret_cast<float4 *>(tmpibuf->float_buffer.data) + y * tmpibuf->x;
1313 if (data->tracking_filter == TRACKING_FILTER_BILINEAR) {
1314 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1315 vec[0] = float(x);
1316 mul_v3_m4v3(rvec, mat, vec);
1317 *dst = imbuf::interpolate_bilinear_border_fl(ibuf, rvec[0], rvec[1]);
1318 }
1319 }
1320 else if (data->tracking_filter == TRACKING_FILTER_BICUBIC) {
1321 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1322 vec[0] = float(x);
1323 mul_v3_m4v3(rvec, mat, vec);
1324 *dst = imbuf::interpolate_cubic_bspline_fl(ibuf, rvec[0], rvec[1]);
1325 }
1326 }
1327 else {
1328 /* Nearest or fallback to nearest. */
1329 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1330 vec[0] = float(x);
1331 mul_v3_m4v3(rvec, mat, vec);
1332 *dst = imbuf::interpolate_nearest_border_fl(ibuf, rvec[0], rvec[1]);
1333 }
1334 }
1335 }
1336 else if (ibuf->byte_buffer.data) {
1337 /* Byte image. */
1338 uchar4 *dst = reinterpret_cast<uchar4 *>(tmpibuf->byte_buffer.data) + y * tmpibuf->x;
1339 if (data->tracking_filter == TRACKING_FILTER_BILINEAR) {
1340 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1341 vec[0] = float(x);
1342 mul_v3_m4v3(rvec, mat, vec);
1343 *dst = imbuf::interpolate_bilinear_border_byte(ibuf, rvec[0], rvec[1]);
1344 }
1345 }
1346 else if (data->tracking_filter == TRACKING_FILTER_BICUBIC) {
1347 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1348 vec[0] = float(x);
1349 mul_v3_m4v3(rvec, mat, vec);
1350 *dst = imbuf::interpolate_cubic_bspline_byte(ibuf, rvec[0], rvec[1]);
1351 }
1352 }
1353 else {
1354 /* Nearest or fallback to nearest. */
1355 for (int x = 0; x < tmpibuf->x; x++, dst++) {
1356 vec[0] = float(x);
1357 mul_v3_m4v3(rvec, mat, vec);
1358 *dst = imbuf::interpolate_nearest_border_byte(ibuf, rvec[0], rvec[1]);
1359 }
1360 }
1361 }
1362}
1363
1365 MovieClip *clip, int framenr, ImBuf *ibuf, float translation[2], float *scale, float *angle)
1366{
1367 float tloc[2], tscale, tangle;
1368 MovieTracking *tracking = &clip->tracking;
1369 MovieTrackingStabilization *stab = &tracking->stabilization;
1370 ImBuf *tmpibuf;
1371 int width = ibuf->x, height = ibuf->y;
1372 float pixel_aspect = tracking->camera.pixel_aspect;
1373 float mat[4][4];
1374 int ibuf_flags;
1375
1376 if (translation) {
1377 copy_v2_v2(tloc, translation);
1378 }
1379
1380 if (scale) {
1381 tscale = *scale;
1382 }
1383
1384 /* Perform early output if no stabilization is used. */
1385 if ((stab->flag & TRACKING_2D_STABILIZATION) == 0) {
1386 if (translation) {
1387 zero_v2(translation);
1388 }
1389
1390 if (scale) {
1391 *scale = 1.0f;
1392 }
1393
1394 if (angle) {
1395 *angle = 0.0f;
1396 }
1397
1398 return ibuf;
1399 }
1400
1401 /* Allocate frame for stabilization result, copy alpha mode and color-space. */
1402 ibuf_flags = 0;
1403 if (ibuf->byte_buffer.data) {
1404 ibuf_flags |= IB_rect;
1405 }
1406 if (ibuf->float_buffer.data) {
1407 ibuf_flags |= IB_rectfloat;
1408 }
1409
1410 tmpibuf = IMB_allocImBuf(ibuf->x, ibuf->y, ibuf->planes, ibuf_flags);
1411 IMB_colormanagegent_copy_settings(ibuf, tmpibuf);
1412
1413 /* Calculate stabilization matrix. */
1414 BKE_tracking_stabilization_data_get(clip, framenr, width, height, tloc, &tscale, &tangle);
1416 ibuf->x, ibuf->y, pixel_aspect, tloc, tscale, tangle, mat);
1417
1418 /* The following code visits each nominal target grid position
1419 * and picks interpolated data "backwards" from source.
1420 * thus we need the inverse of the transformation to apply. */
1421 invert_m4(mat);
1422
1424 data.ibuf = ibuf;
1425 data.tmpibuf = tmpibuf;
1426 data.mat = mat;
1427 data.tracking_filter = tracking->stabilization.filter;
1428
1429 TaskParallelSettings settings;
1431 settings.use_threading = (tmpibuf->y > 128);
1433 0, tmpibuf->y, &data, tracking_stabilize_frame_interpolation_cb, &settings);
1434
1435 if (tmpibuf->float_buffer.data) {
1436 tmpibuf->userflags |= IB_RECT_INVALID;
1437 }
1438
1439 if (translation) {
1440 copy_v2_v2(translation, tloc);
1441 }
1442
1443 if (scale) {
1444 *scale = tscale;
1445 }
1446
1447 if (angle) {
1448 *angle = tangle;
1449 }
1450
1451 return tmpibuf;
1452}
1453
1455 int buffer_height,
1456 float pixel_aspect,
1457 float translation[2],
1458 float scale,
1459 float angle,
1460 float r_mat[4][4])
1461{
1462 /* Since we cannot receive the real pivot point coordinates (API limitation),
1463 * we perform the rotation/scale around the center of frame.
1464 * Then we correct by an additional shift, which was calculated in
1465 * compensate_rotation_center() and "sneaked in" as additional offset
1466 * in the translation parameter. This works, since translation needs to be
1467 * applied after rotation/scale anyway. Thus effectively the image gets
1468 * rotated around the desired pivot point
1469 */
1470 /* TODO(sergey): pivot shouldn't be calculated here, rather received
1471 * as a parameter.
1472 */
1473 float pivot[2];
1474 pivot[0] = 0.5f * pixel_aspect * buffer_width;
1475 pivot[1] = 0.5f * buffer_height;
1476 /* Compose transformation matrix. */
1477 stabilization_data_to_mat4(pixel_aspect, pivot, translation, scale, angle, r_mat);
1478}
float evaluate_fcurve(const FCurve *fcu, float evaltime)
FCurve * id_data_find_fcurve(ID *id, void *data, StructRNA *type, const char *prop_name, int index, bool *r_driven)
float BKE_movieclip_remap_clip_to_scene_frame(const struct MovieClip *clip, float framenr)
struct MovieTrackingObject * BKE_tracking_object_get_camera(const struct MovieTracking *tracking)
struct MovieTrackingMarker * BKE_tracking_marker_get(struct MovieTrackingTrack *track, int framenr)
Definition tracking.cc:1358
struct MovieTrackingMarker * BKE_tracking_marker_get_exact(struct MovieTrackingTrack *track, int framenr)
Definition tracking.cc:1391
#define BLI_assert(a)
Definition BLI_assert.h:50
GHash * BLI_ghash_ptr_new(const char *info) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
void * BLI_ghash_lookup(const GHash *gh, const void *key) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.c:731
void BLI_ghash_insert(GHash *gh, void *key, void *val)
Definition BLI_ghash.c:707
void BLI_ghash_free(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition BLI_ghash.c:860
#define LISTBASE_FOREACH(type, var, list)
MINLINE float max_ff(float a, float b)
MINLINE int min_ii(int a, int b)
MINLINE float min_ff(float a, float b)
MINLINE int max_ii(int a, int b)
float dist_to_line_v2(const float p[2], const float l1[2], const float l2[2])
Definition math_geom.cc:284
void unit_m4(float m[4][4])
Definition rct.c:1127
void size_to_mat4(float R[4][4], const float size[3])
void mul_m4_v3(const float M[4][4], float r[3])
#define mul_m4_series(...)
void mul_v3_m4v3(float r[3], const float mat[4][4], const float vec[3])
bool invert_m4_m4(float inverse[4][4], const float mat[4][4])
void rotate_m4(float mat[4][4], char axis, float angle)
bool invert_m4(float mat[4][4])
void mul_m2_v2(const float mat[2][2], float vec[2])
void angle_to_mat2(float R[2][2], float angle)
MINLINE float len_v2(const float v[2]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v2_v2(float r[2], const float a[2])
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void mul_v2_fl(float r[2], float f)
void interp_v2_v2v2(float r[2], const float a[2], const float b[2], float t)
Definition math_vector.c:21
MINLINE void copy_v2_v2(float r[2], const float a[2])
MINLINE void add_v2_v2(float r[2], const float a[2])
MINLINE float cross_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT
MINLINE void add_v2_v2v2(float r[2], const float a[2], const float b[2])
MINLINE void sub_v2_v2v2(float r[2], const float a[2], const float b[2])
MINLINE void zero_v2(float r[2])
int BLI_sortutil_cmp_int(const void *a_, const void *b_)
Definition sort_utils.c:53
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
Definition task_range.cc:99
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:230
#define MINFRAME
#define MINAFRAME
#define MAXFRAME
@ TRACKING_FILTER_BICUBIC
@ TRACKING_FILTER_BILINEAR
@ TRACKING_AUTOSCALE
@ TRACKING_STABILIZE_SCALE
@ TRACKING_STABILIZE_ROTATION
@ TRACKING_2D_STABILIZATION
@ TRACK_USE_2D_STAB
@ TRACK_USE_2D_STAB_ROT
@ MARKER_DISABLED
static double angle(const Eigen::Vector3d &v1, const Eigen::Vector3d &v2)
Definition IK_Math.h:125
void IMB_colormanagegent_copy_settings(ImBuf *ibuf_src, ImBuf *ibuf_dst)
Contains defines and structs used throughout the imbuf module.
@ IB_rectfloat
@ IB_rect
@ IB_RECT_INVALID
Read Guarded memory(de)allocation.
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
#define logf(x)
#define expf(x)
#define atan2f(x, y)
int len
draw_view in_light_buf[] float
struct ImBuf * IMB_allocImBuf(unsigned int, unsigned int, unsigned char, unsigned int)
const vector< Marker > & markers
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
ImBufFloatBuffer float_buffer
ImBufByteBuffer byte_buffer
unsigned char planes
MovieTrackingMarker * markers
MovieTrackingCamera camera
MovieTrackingStabilization * stab
FCurve * target_pos[2]
GHash * private_track_data
MovieTracking * tracking
MovieTrackingTrack * data
float stabilization_rotation_base[2][2]
void BKE_tracking_stabilization_data_to_mat4(int buffer_width, int buffer_height, float pixel_aspect, float translation[2], float scale, float angle, float r_mat[4][4])
static void retrieve_next_lower_usable_frame(StabContext *ctx, MovieTrackingTrack *track, int i, int ref_frame, int *next_lower)
static void average_marker_positions(StabContext *ctx, int framenr, float r_ref_pos[2])
static void discard_stabilization_baseline_data(void *val)
static int establish_track_initialization_order(StabContext *ctx, TrackInitOrder *order)
static MovieTrackingMarker * get_closest_marker(StabContext *ctx, MovieTrackingTrack *track, int ref_frame)
static float get_animated_target_scale(StabContext *ctx, int framenr)
void BKE_tracking_stabilization_data_get(MovieClip *clip, int framenr, int width, int height, float translation[2], float *scale, float *angle)
static float get_animated_target_rot(StabContext *ctx, int framenr)
static float get_animated_rotinf(StabContext *ctx, int framenr)
static bool average_track_contributions(StabContext *ctx, int framenr, float aspect, float r_translation[2], float r_pivot[2], float *r_angle, float *r_scale_step)
static float fetch_from_fcurve(const FCurve *animationCurve, int framenr, StabContext *ctx, float default_value)
static void stabilization_calculate_data(StabContext *ctx, int framenr, int size, float aspect, bool do_compensate, float scale_step, float r_translation[2], float r_pivot[2], float *r_scale, float *r_angle)
static FCurve * retrieve_track_weight_animation(MovieClip *clip, MovieTrackingTrack *track)
static bool stabilization_determine_offset_for_frame(StabContext *ctx, int framenr, float aspect, float r_translation[2], float r_pivot[2], float *r_angle, float *r_scale_step)
static bool is_init_for_stabilization(StabContext *ctx, MovieTrackingTrack *track)
static bool is_usable_for_stabilization(StabContext *ctx, MovieTrackingTrack *track)
static bool is_effectively_disabled(StabContext *ctx, MovieTrackingTrack *track, MovieTrackingMarker *marker)
static void use_values_from_fcurves(StabContext *ctx, bool toggle)
static bool interpolate_averaged_track_contributions(StabContext *ctx, int framenr, int frame_a, int frame_b, const float aspect, float r_translation[2], float r_pivot[2], float *r_angle, float *r_scale_step)
static void get_animated_target_pos(StabContext *ctx, int framenr, float target_pos[2])
static float get_animated_locinf(StabContext *ctx, int framenr)
static void init_all_tracks(StabContext *ctx, float aspect)
static float get_animated_scaleinf(StabContext *ctx, int framenr)
static float get_animated_weight(StabContext *ctx, MovieTrackingTrack *track, int framenr)
static float SCALE_ERROR_LIMIT_BIAS
static int search_closest_marker_index(MovieTrackingTrack *track, int ref_frame)
static void find_next_working_frames(StabContext *ctx, int framenr, int *next_lower, int *next_higher)
static void attach_stabilization_baseline_data(StabContext *ctx, MovieTrackingTrack *track, TrackStabilizationBase *private_data)
static StabContext * init_stabilization_working_context(MovieClip *clip)
static void setup_pivot(const float ref_pos[2], float r_pivot[2])
static TrackStabilizationBase * access_stabilization_baseline_data(StabContext *ctx, MovieTrackingTrack *track)
static float calculate_autoscale_factor(StabContext *ctx, int size, float aspect)
static float EPSILON_WEIGHT
static void stabilization_data_to_mat4(float pixel_aspect, const float pivot[2], const float translation[2], float scale, float angle, float r_mat[4][4])
ImBuf * BKE_tracking_stabilize_frame(MovieClip *clip, int framenr, ImBuf *ibuf, float translation[2], float *scale, float *angle)
static void retrieve_next_higher_usable_frame(StabContext *ctx, MovieTrackingTrack *track, int i, int ref_frame, int *next_higher)
static StabContext * init_stabilizer(MovieClip *clip, int size, float aspect)
static MovieTrackingMarker * get_tracking_data_point(StabContext *ctx, MovieTrackingTrack *track, int framenr, float *r_weight)
static float rotation_contribution(TrackStabilizationBase *track_ref, MovieTrackingMarker *marker, const float aspect, const float pivot[2], float *result_angle, float *result_scale)
static void compensate_rotation_center(const int size, float aspect, const float angle, const float scale, const float pivot[2], float result_translation[2])
static FCurve * retrieve_stab_animation(MovieClip *clip, const char *data_path, int idx)
static void discard_stabilization_working_context(StabContext *ctx)
static void init_track_for_stabilization(StabContext *ctx, MovieTrackingTrack *track, int reference_frame, float aspect, const float average_translation[2], const float pivot[2], const float average_angle, const float average_scale_step)
static void tracking_stabilize_frame_interpolation_cb(void *__restrict userdata, const int y, const TaskParallelTLS *__restrict)
static void translation_contribution(TrackStabilizationBase *track_ref, MovieTrackingMarker *marker, float result_offset[2])
ccl_device_inline int abs(int x)
Definition util/math.h:120