Blender V4.3
COM_PlaneDistortCommonOperation.cc
Go to the documentation of this file.
/* SPDX-FileCopyrightText: 2013 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */
4
6
7#include "BLI_jitter_2d.h"
8#include "BLI_math_geom.h"
9#include "BLI_math_matrix.h"
10
11#include "BKE_tracking.h"
12
13namespace blender::compositor {
14
/* NOTE(review): the extraction dropped the constructor's name line that
 * belongs directly above this initializer list — confirm against the
 * original file. Defaults: a single motion-blur sample (i.e. motion blur
 * disabled) and a shutter time of 0.5. */
16 : motion_blur_samples_(1), motion_blur_shutter_(0.5f)
17{
18}
19
20void PlaneDistortBaseOperation::calculate_corners(const float corners[4][2],
21 bool normalized,
22 int sample)
23{
25 MotionSample *sample_data = &samples_[sample];
26 if (normalized) {
27 for (int i = 0; i < 4; i++) {
28 sample_data->frame_space_corners[i][0] = corners[i][0] * this->get_width();
29 sample_data->frame_space_corners[i][1] = corners[i][1] * this->get_height();
30 }
31 }
32 else {
33 for (int i = 0; i < 4; i++) {
34 sample_data->frame_space_corners[i][0] = corners[i][0];
35 sample_data->frame_space_corners[i][1] = corners[i][1];
36 }
37 }
38}
39
40/* ******** PlaneDistort WarpImage ******** */
41
42BLI_INLINE void warp_coord(float x, float y, float matrix[3][3], float uv[2], float deriv[2][2])
43{
44 float vec[3] = {x, y, 1.0f};
45 mul_m3_v3(matrix, vec);
46 uv[0] = vec[0] / vec[2];
47 uv[1] = vec[1] / vec[2];
48
49 /* Offset so that pixel center corresponds to a (0.5, 0.5), which helps keeping transformed
50 * image sharp. */
51 uv[0] += 0.5f;
52 uv[1] += 0.5f;
53
54 deriv[0][0] = (matrix[0][0] - matrix[0][2] * uv[0]) / vec[2];
55 deriv[1][0] = (matrix[0][1] - matrix[0][2] * uv[1]) / vec[2];
56 deriv[0][1] = (matrix[1][0] - matrix[1][2] * uv[0]) / vec[2];
57 deriv[1][1] = (matrix[1][1] - matrix[1][2] * uv[1]) / vec[2];
58}
59
65
67 bool normalized,
68 int sample)
69{
71
72 const NodeOperation *image = get_input_operation(0);
73 const int width = image->get_width();
74 const int height = image->get_height();
75
76 MotionSample *sample_data = &samples_[sample];
77
78 /* If the image which is to be warped empty assume unit transform and don't attempt to calculate
79 * actual homography (otherwise homography solver will attempt to deal with singularity). */
80 if (width == 0 || height == 0) {
81 unit_m3(sample_data->perspective_matrix);
82 return;
83 }
84
85 float frame_corners[4][2] = {
86 {0.0f, 0.0f}, {float(width), 0.0f}, {float(width), float(height)}, {0.0f, float(height)}};
88 sample_data->frame_space_corners, frame_corners, sample_data->perspective_matrix);
89}
90
92 const rcti &area,
94{
95 const MemoryBuffer *input_img = inputs[0];
96 float uv[2];
97 float deriv[2][2];
98 BuffersIterator<float> it = output->iterate_with({}, area);
99 if (motion_blur_samples_ == 1) {
100 for (; !it.is_end(); ++it) {
101 warp_coord(it.x, it.y, samples_[0].perspective_matrix, uv, deriv);
102 input_img->read_elem_filtered(uv[0], uv[1], deriv[0], deriv[1], true, it.out);
103 }
104 }
105 else {
106 for (; !it.is_end(); ++it) {
107 zero_v4(it.out);
108 for (const int sample : IndexRange(motion_blur_samples_)) {
109 float color[4];
110 warp_coord(it.x, it.y, samples_[sample].perspective_matrix, uv, deriv);
111 input_img->read_elem_filtered(uv[0], uv[1], deriv[0], deriv[1], true, color);
112 add_v4_v4(it.out, color);
113 }
114 mul_v4_fl(it.out, 1.0f / float(motion_blur_samples_));
115 }
116 }
117}
118
120 const rcti &output_area,
121 rcti &r_input_area)
122{
123 if (input_idx != 0) {
124 r_input_area = output_area;
125 return;
126 }
127
128 /* TODO: figure out the area needed for warping and EWA filtering. */
129 r_input_area = get_input_operation(0)->get_canvas();
130
131/* Old implementation but resulting coordinates are way out of input operation bounds and in some
132 * cases the area result may incorrectly cause cropping. */
133#if 0
134 float min[2], max[2];
135 INIT_MINMAX2(min, max);
136 for (int sample = 0; sample < motion_blur_samples_; sample++) {
137 float UVs[4][2];
138 float deriv[2][2];
139 MotionSample *sample_data = &samples_[sample];
140 /* TODO(sergey): figure out proper way to do this. */
141 warp_coord(output_area.xmin - 2,
142 output_area.ymin - 2,
143 sample_data->perspective_matrix,
144 UVs[0],
145 deriv);
146 warp_coord(output_area.xmax + 2,
147 output_area.ymin - 2,
148 sample_data->perspective_matrix,
149 UVs[1],
150 deriv);
151 warp_coord(output_area.xmax + 2,
152 output_area.ymax + 2,
153 sample_data->perspective_matrix,
154 UVs[2],
155 deriv);
156 warp_coord(output_area.xmin - 2,
157 output_area.ymax + 2,
158 sample_data->perspective_matrix,
159 UVs[3],
160 deriv);
161 for (int i = 0; i < 4; i++) {
162 minmax_v2v2_v2(min, max, UVs[i]);
163 }
164 }
165
166 r_input_area.xmin = min[0] - 1;
167 r_input_area.ymin = min[1] - 1;
168 r_input_area.xmax = max[0] + 1;
169 r_input_area.ymax = max[1] + 1;
170#endif
171}
172
173/* ******** PlaneDistort Mask ******** */
174
179
181 const rcti &area,
182 Span<MemoryBuffer *> /*inputs*/)
183{
184 for (BuffersIterator<float> it = output->iterate_with({}, area); !it.is_end(); ++it) {
185 float accumulated_mask = 0.0f;
186 const float2 point = float2(it.x, it.y) + 0.5f;
187 for (const int motion_sample : IndexRange(motion_blur_samples_)) {
188 MotionSample &sample = samples_[motion_sample];
189 const bool is_inside_plane = isect_point_tri_v2(point,
190 sample.frame_space_corners[0],
191 sample.frame_space_corners[1],
192 sample.frame_space_corners[2]) ||
193 isect_point_tri_v2(point,
194 sample.frame_space_corners[0],
195 sample.frame_space_corners[2],
196 sample.frame_space_corners[3]);
197 accumulated_mask += is_inside_plane ? 1.0f : 0.0f;
198 }
199 *it.out = accumulated_mask / motion_blur_samples_;
200 }
201}
202
203} // namespace blender::compositor
void BKE_tracking_homography_between_two_quads(float reference_corners[4][2], float corners[4][2], float H[3][3])
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_INLINE
int isect_point_tri_v2(const float pt[2], const float v1[2], const float v2[2], const float v3[2])
void mul_m3_v3(const float M[3][3], float r[3])
void unit_m3(float m[3][3])
MINLINE void mul_v4_fl(float r[4], float f)
MINLINE void add_v4_v4(float r[4], const float a[4])
void minmax_v2v2_v2(float min[2], float max[2], const float vec[2])
MINLINE void zero_v4(float r[4])
#define INIT_MINMAX2(min, max)
SIMD_FORCE_INLINE btVector3 normalized() const
Return a normalized version of this vector.
a MemoryBuffer contains access to the data
void read_elem_filtered(float x, float y, float dx[2], float dy[2], bool extend_boundary, float *out) const
BuffersIterator< float > iterate_with(Span< MemoryBuffer * > inputs)
NodeOperation contains calculation logic.
void add_output_socket(DataType datatype)
NodeOperation * get_input_operation(int index)
void add_input_socket(DataType datatype, ResizeMode resize_mode=ResizeMode::Center)
virtual void calculate_corners(const float corners[4][2], bool normalized, int sample)
void update_memory_buffer_partial(MemoryBuffer *output, const rcti &area, Span< MemoryBuffer * > inputs) override
void update_memory_buffer_partial(MemoryBuffer *output, const rcti &area, Span< MemoryBuffer * > inputs) override
void calculate_corners(const float corners[4][2], bool normalized, int sample) override
void get_area_of_interest(int input_idx, const rcti &output_area, rcti &r_input_area) override
Get input operation area being read by this operation on rendering given output area.
draw_view in_light_buf[] float
BLI_INLINE void warp_coord(float x, float y, float matrix[3][3], float uv[2], float deriv[2][2])
typename BuffersIteratorBuilder< T >::Iterator BuffersIterator
VecBase< float, 2 > float2
#define min(a, b)
Definition sort.c:32
int ymin
int ymax
int xmin
int xmax