Blender V5.0
node_composite_map_uv.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2006 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include "MEM_guardedalloc.h"
10
11#include "BLI_math_vector.hh"
13
14#include "GPU_shader.hh"
15#include "GPU_texture.hh"
16
17#include "DNA_node_types.h"
18
19#include "RNA_enum_types.hh"
20
21#include "BKE_node.hh"
22
24#include "COM_domain.hh"
25#include "COM_node_operation.hh"
26#include "COM_utilities.hh"
27
29
31
/* Declares the sockets of the Map UV node: an input image, UV coordinates to
 * sample it with (Z carries an alpha channel), and a "Sampling" panel holding
 * the interpolation and per-axis extension mode menus.
 * NOTE(review): doc-gen dump -- the function signature (orig line 32) and a few
 * builder-chain lines (orig 40, 48, 55-57, 61-62, 66-67: realization mode,
 * description, and menu item definitions) are collapsed and missing here;
 * consult the repository source before editing. */
33{
34 b.use_custom_socket_order();
35 b.allow_any_socket_order();
36
37 b.add_input<decl::Color>("Image")
38 .default_value({1.0f, 1.0f, 1.0f, 1.0f})
39 .hide_value()
41 .structure_type(StructureType::Dynamic);
42 b.add_output<decl::Color>("Image").structure_type(StructureType::Dynamic).align_with_previous();
43
44 b.add_input<decl::Vector>("UV")
45 .default_value({1.0f, 0.0f, 0.0f})
46 .min(0.0f)
47 .max(1.0f)
49 "The UV coordinates at which to sample the texture. The Z component is assumed to "
50 "contain an alpha channel")
51 .structure_type(StructureType::Dynamic);
52
/* Sampling options are grouped in a closed-by-default panel. */
53 PanelDeclarationBuilder &sampling_panel = b.add_panel("Sampling").default_closed(true);
54 sampling_panel.add_input<decl::Menu>("Interpolation")
58 .description("Interpolation method");
59 sampling_panel.add_input<decl::Menu>("Extension X")
60 .default_value(CMP_NODE_EXTENSION_MODE_CLIP)
63 .description("The extension mode applied to the X axis");
64 sampling_panel.add_input<decl::Menu>("Extension Y")
65 .default_value(CMP_NODE_EXTENSION_MODE_CLIP)
68 .description("The extension mode applied to the Y axis");
69}
70
/* Node init callback: attaches freshly allocated storage to the node.
 * NOTE(review): the line defining `data` (orig line 73, presumably a
 * MEM_callocN of the node's storage struct) is missing from this dump --
 * verify against the repository source. */
71static void node_composit_init_map_uv(bNodeTree * /*ntree*/, bNode *node)
72{
74 node->storage = data;
75}
76
77using namespace blender::compositor;
78
80 public:
82
83 void execute() override
84 {
85 const Result &input = this->get_input("Image");
86 if (input.is_single_value()) {
87 Result &output = this->get_result("Image");
88 output.share_data(input);
89 return;
90 }
91
92 const Result &input_uv = this->get_input("UV");
93 if (input_uv.is_single_value()) {
94 this->execute_single();
95 return;
96 }
97
98 if (this->context().use_gpu()) {
99 this->execute_gpu();
100 }
101 else {
102 this->execute_cpu();
103 }
104 }
105
/* GPU path: configures the input image's sampler state according to the chosen
 * interpolation and extension modes, then runs the map-UV compute shader.
 * NOTE(review): doc-gen dump -- the signature (orig line 106, presumably
 * `void execute_gpu()`), the ELEM argument list (orig 119), the extend-mode
 * arguments (orig 124, 126), the dispatch call (orig 138, presumably
 * compute_dispatch_threads_at_least), and GPU_shader_unbind (orig 143) are
 * missing here; consult the repository source. */
107 {
108 const Interpolation interpolation = this->get_interpolation();
109 gpu::Shader *shader = context().get_shader(this->get_shader_name(interpolation));
110 GPU_shader_bind(shader);
111
112 const Result &input_image = get_input("Image");
/* Anisotropic sampling needs mipmaps and the anisotropic filter enabled. */
113 if (interpolation == Interpolation::Anisotropic) {
114 GPU_texture_anisotropic_filter(input_image, true);
115 GPU_texture_mipmap_mode(input_image, true, true);
116 }
117 else {
118 const bool use_bilinear = ELEM(
120 GPU_texture_filter_mode(input_image, use_bilinear);
121 }
122
123 GPU_texture_extend_mode_x(input_image,
125 GPU_texture_extend_mode_y(input_image,
127
128 input_image.bind_as_texture(shader, "input_tx");
129
130 const Result &input_uv = get_input("UV");
131 input_uv.bind_as_texture(shader, "uv_tx");
132
133 const Domain domain = compute_domain();
134 Result &output_image = get_result("Image");
135 output_image.allocate_texture(domain);
136 output_image.bind_as_image(shader, "output_img");
137
139
140 input_image.unbind_as_texture();
141 input_uv.unbind_as_texture();
142 output_image.unbind_as_image();
144 }
145
/* Returns the name of the compute shader variant matching the interpolation
 * mode; the plain "compositor_map_uv" shader is the fallback.
 * NOTE(review): the switch `case` labels (orig lines 149, 151, 153-154) are
 * collapsed in this dump -- presumably Anisotropic, Bicubic, and
 * Nearest/Bilinear respectively; verify against the repository source. */
146 char const *get_shader_name(const Interpolation &interpolation)
147 {
148 switch (interpolation) {
150 return "compositor_map_uv_anisotropic";
152 return "compositor_map_uv_bicubic";
155 return "compositor_map_uv";
156 }
157
/* Unreachable in practice; keeps the compiler satisfied about all paths. */
158 return "compositor_map_uv";
159 }
160
/* CPU path dispatcher: anisotropic sampling uses a dedicated implementation,
 * all other interpolation modes share a common per-pixel sampling loop.
 * NOTE(review): the signature (orig line 161, presumably `void execute_cpu()`)
 * and the anisotropic branch body (orig line 165, presumably a call to the
 * anisotropic CPU implementation) are missing from this dump. */
162 {
163 const Interpolation interpolation = this->get_interpolation();
164 if (interpolation == Interpolation::Anisotropic) {
166 }
167 else {
168 this->execute_cpu_interpolation(interpolation);
169 }
170 }
171
/* Single-value UV path: samples the image once at the constant UV coordinates
 * and writes a single-value result, pre-multiplied by the UV's alpha channel.
 * NOTE(review): the signature (orig line 172, presumably
 * `void execute_single()`) is missing from this dump. */
173 {
174 const Interpolation interpolation = this->get_interpolation();
175 const ExtensionMode extension_mode_x = this->get_extension_mode_x();
176 const ExtensionMode extension_mode_y = this->get_extension_mode_y();
177 const Result &input_uv = get_input("UV");
178 const Result &input_image = get_input("Image");
179
/* Only the XY components are coordinates; Z is the alpha channel (see below). */
180 float2 uv_coordinates = input_uv.get_single_value<float3>().xy();
181 float4 sampled_color = sample_pixel(this->context(),
182 input_image,
183 interpolation,
184 extension_mode_x,
185 extension_mode_y,
186 uv_coordinates);
187
188 /* The UV input is assumed to contain an alpha channel as its third channel, since the
189 * UV coordinates might be defined in only a subset area of the UV texture as mentioned.
190 * In that case, the alpha is typically opaque at the subset area and transparent
191 * everywhere else, and alpha pre-multiplication is then performed. This format of having
192 * an alpha channel in the UV coordinates is the format used by UV passes in render
193 * engines, hence the mentioned logic. */
194 float alpha = input_uv.get_single_value<float3>().z;
195
196 float4 result = sampled_color * alpha;
197
198 Result &output = get_result("Image");
199 output.allocate_single_value();
200 output.set_single_value(result);
201 }
202
203 void execute_cpu_interpolation(const Interpolation &interpolation)
204 {
205 const ExtensionMode extension_mode_x = this->get_extension_mode_x();
206 const ExtensionMode extension_mode_y = this->get_extension_mode_y();
207 const Result &input_image = get_input("Image");
208 const Result &input_uv = get_input("UV");
209
210 const Domain domain = compute_domain();
211 Result &output_image = get_result("Image");
212 output_image.allocate_texture(domain);
213
214 parallel_for(domain.size, [&](const int2 texel) {
215 float2 uv_coordinates = input_uv.load_pixel<float3>(texel).xy();
216 float4 sampled_color = input_image.sample(
217 uv_coordinates, interpolation, extension_mode_x, extension_mode_y);
218 /* The UV input is assumed to contain an alpha channel as its third channel, since the
219 * UV coordinates might be defined in only a subset area of the UV texture as mentioned.
220 * In that case, the alpha is typically opaque at the subset area and transparent
221 * everywhere else, and alpha pre-multiplication is then performed. This format of having
222 * an alpha channel in the UV coordinates is the format used by UV passes in render
223 * engines, hence the mentioned logic. */
224 float alpha = input_uv.load_pixel<float3>(texel).z;
225
226 float4 result = sampled_color * alpha;
227
228 output_image.store_pixel(texel, result);
229 });
230 }
231
/* CPU path for anisotropic (EWA) sampling. Processes the output in 2x2 pixel
 * blocks so the UV partial derivatives needed by the EWA filter can be
 * approximated by finite differences within each block.
 * NOTE(review): the signature (orig line 232, presumably the anisotropic CPU
 * implementation called from execute_cpu) is missing from this dump. */
233 {
234 const Result &input_image = get_input("Image");
235 const Result &input_uv = get_input("UV");
236
237 const Domain domain = compute_domain();
238 Result &output_image = get_result("Image");
239 output_image.allocate_texture(domain);
240
241 /* In order to perform EWA sampling, we need to compute the partial derivative of the UV
242 * coordinates along the x and y directions using a finite difference approximation. But in
243 * order to avoid loading multiple neighboring UV coordinates for each pixel, we operate on
244 * the image in 2x2 blocks of pixels, where the derivatives are computed horizontally and
245 * vertically across the 2x2 block such that odd texels use a forward finite difference
246 * equation while even invocations use a backward finite difference equation. */
247 const int2 size = domain.size;
248 const int2 uv_size = input_uv.domain().size;
249 parallel_for(math::divide_ceil(size, int2(2)), [&](const int2 base_texel) {
250 const int x = base_texel.x * 2;
251 const int y = base_texel.y * 2;
252
253 const int2 lower_left_texel = int2(x, y);
254 const int2 lower_right_texel = int2(x + 1, y);
255 const int2 upper_left_texel = int2(x, y + 1);
256 const int2 upper_right_texel = int2(x + 1, y + 1);
257
/* The right/upper neighbors may fall outside the image for odd sizes, hence
 * the extended (clamped) loads. */
258 const float2 lower_left_uv = input_uv.load_pixel<float3>(lower_left_texel).xy();
259 const float2 lower_right_uv = input_uv.load_pixel_extended<float3>(lower_right_texel).xy();
260 const float2 upper_left_uv = input_uv.load_pixel_extended<float3>(upper_left_texel).xy();
261 const float2 upper_right_uv = input_uv.load_pixel_extended<float3>(upper_right_texel).xy();
262
263 /* Compute the partial derivatives using finite difference. Divide by the input size since
264 * sample_ewa_zero assumes derivatives with respect to texel coordinates. */
265 const float2 lower_x_gradient = (lower_right_uv - lower_left_uv) / uv_size.x;
266 const float2 left_y_gradient = (upper_left_uv - lower_left_uv) / uv_size.y;
267 const float2 right_y_gradient = (upper_right_uv - lower_right_uv) / uv_size.y;
268 const float2 upper_x_gradient = (upper_right_uv - upper_left_uv) / uv_size.x;
269
270 /* Computes one of the 2x2 pixels given its texel location, coordinates, and gradients. */
271 auto compute_pixel = [&](const int2 &texel,
272 const float2 &coordinates,
273 const float2 &x_gradient,
274 const float2 &y_gradient) {
275 /* Sample the input using the UV coordinates passing in the computed gradients in order
276 * to utilize the anisotropic filtering capabilities of the sampler. */
277 float4 sampled_color = input_image.sample_ewa_zero(coordinates, x_gradient, y_gradient);
278
279 /* The UV input is assumed to contain an alpha channel as its third channel, since the
280 * UV coordinates might be defined in only a subset area of the UV texture as mentioned.
281 * In that case, the alpha is typically opaque at the subset area and transparent
282 * everywhere else, and alpha pre-multiplication is then performed. This format of having
283 * an alpha channel in the UV coordinates is the format used by UV passes in render
284 * engines, hence the mentioned logic. */
285 float alpha = input_uv.load_pixel<float3>(texel).z;
286
287 float4 result = sampled_color * alpha;
288
289 output_image.store_pixel(texel, result);
290 };
291
292 /* Compute each of the pixels in the 2x2 block, making sure to exempt out of bounds right
293 * and upper pixels. */
294 compute_pixel(lower_left_texel, lower_left_uv, lower_x_gradient, left_y_gradient);
295 if (lower_right_texel.x != size.x) {
296 compute_pixel(lower_right_texel, lower_right_uv, lower_x_gradient, right_y_gradient);
297 }
298 if (upper_left_texel.y != size.y) {
299 compute_pixel(upper_left_texel, upper_left_uv, upper_x_gradient, left_y_gradient);
300 }
301 if (upper_right_texel.x != size.x && upper_right_texel.y != size.y) {
302 compute_pixel(upper_right_texel, upper_right_uv, upper_x_gradient, right_y_gradient);
303 }
304 });
305 }
306
/* Maps the "Interpolation" menu socket (defaulting to bilinear when unlinked)
 * from the DNA enum to the compositor's Interpolation enum.
 * NOTE(review): the signature (orig line 307) and the switch case labels plus
 * fallback return (orig lines 314-321, 324) are missing from this dump --
 * consult the repository source before editing. */
308 {
309 const Result &input = this->get_input("Interpolation");
310 const MenuValue default_menu_value = MenuValue(CMP_NODE_INTERPOLATION_BILINEAR);
311 const MenuValue menu_value = input.get_single_value_default(default_menu_value);
312 const CMPNodeInterpolation interpolation = static_cast<CMPNodeInterpolation>(menu_value.value);
313 switch (interpolation) {
322 }
323
325 }
326
/* Maps the "Extension X" menu socket (defaulting to Clip when unlinked) from
 * the DNA enum to the compositor's ExtensionMode enum.
 * NOTE(review): the signature (orig line 327) and most switch case labels
 * (orig lines 334, 336-339) are missing from this dump. */
328 {
329 const Result &input = this->get_input("Extension X");
330 const MenuValue default_menu_value = MenuValue(CMP_NODE_EXTENSION_MODE_CLIP);
331 const MenuValue menu_value = input.get_single_value_default(default_menu_value);
332 const CMPExtensionMode extension_x = static_cast<CMPExtensionMode>(menu_value.value);
333 switch (extension_x) {
335 return ExtensionMode::Clip;
340 }
341
/* Fallback for out-of-range menu values. */
342 return ExtensionMode::Clip;
343 }
344
/* Maps the "Extension Y" menu socket (defaulting to Clip when unlinked) from
 * the DNA enum to the compositor's ExtensionMode enum.
 * NOTE(review): the signature (orig line 345) and most switch case labels
 * (orig lines 352, 354-357) are missing from this dump. */
346 {
347 const Result &input = this->get_input("Extension Y");
348 const MenuValue default_menu_value = MenuValue(CMP_NODE_EXTENSION_MODE_CLIP);
349 const MenuValue menu_value = input.get_single_value_default(default_menu_value);
350 const CMPExtensionMode extension_y = static_cast<CMPExtensionMode>(menu_value.value);
351 switch (extension_y) {
353 return ExtensionMode::Clip;
358 }
359
/* Fallback for out-of-range menu values. */
360 return ExtensionMode::Clip;
361 }
362};
363
/* Factory registered on the node type; returns the operation that evaluates
 * this node (signature per the cross-reference:
 * `static NodeOperation *get_compositor_operation(Context &context, DNode node)`;
 * the signature line itself, orig 364, is missing from this dump). */
365{
366 return new MapUVOperation(context, node);
367}
368
369} // namespace blender::nodes::node_composite_map_uv_cc
370
/* Registers the Map UV compositor node type with its UI metadata, declaration,
 * operation factory, and init callbacks.
 * NOTE(review): doc-gen dump -- the signature (orig lines 371/373), the
 * nclass assignment (orig 382), the storage registration and final
 * node_register_type call (orig 386-387, 389) are missing here. */
372{
374
375 static blender::bke::bNodeType ntype;
376
377 cmp_node_type_base(&ntype, "CompositorNodeMapUV", CMP_NODE_MAP_UV);
378 ntype.ui_name = "Map UV";
379 ntype.ui_description =
380 "Map a texture using UV coordinates, to apply a texture to objects in compositing";
381 ntype.enum_name_legacy = "MAP_UV";
383 ntype.declare = file_ns::cmp_node_map_uv_declare;
384 ntype.get_compositor_operation = file_ns::get_compositor_operation;
385 ntype.initfunc = file_ns::node_composit_init_map_uv;
388
390}
#define NODE_CLASS_DISTORT
Definition BKE_node.hh:455
#define CMP_NODE_MAP_UV
#define ELEM(...)
CMPNodeInterpolation
@ CMP_NODE_INTERPOLATION_NEAREST
@ CMP_NODE_INTERPOLATION_BILINEAR
@ CMP_NODE_INTERPOLATION_BICUBIC
@ CMP_NODE_INTERPOLATION_ANISOTROPIC
CMPExtensionMode
@ CMP_NODE_EXTENSION_MODE_EXTEND
@ CMP_NODE_EXTENSION_MODE_CLIP
@ CMP_NODE_EXTENSION_MODE_REPEAT
void GPU_shader_bind(blender::gpu::Shader *shader, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
void GPU_shader_unbind()
void GPU_texture_extend_mode_y(blender::gpu::Texture *texture, GPUSamplerExtendMode extend_mode)
void GPU_texture_extend_mode_x(blender::gpu::Texture *texture, GPUSamplerExtendMode extend_mode)
void GPU_texture_anisotropic_filter(blender::gpu::Texture *texture, bool use_aniso)
void GPU_texture_mipmap_mode(blender::gpu::Texture *texture, bool use_mipmap, bool use_filter)
void GPU_texture_filter_mode(blender::gpu::Texture *texture, bool use_filter)
Read Guarded memory(de)allocation.
#define NOD_REGISTER_NODE(REGISTER_FUNC)
BMesh const char void * data
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
SIMD_FORCE_INLINE const btScalar & z() const
Return the z value.
Definition btQuadWord.h:117
gpu::Shader * get_shader(const char *info_name, ResultPrecision precision)
NodeOperation(Context &context, DNode node)
Result & get_result(StringRef identifier)
Definition operation.cc:39
Result & get_input(StringRef identifier) const
Definition operation.cc:138
virtual Domain compute_domain()
Definition operation.cc:56
void share_data(const Result &source)
Definition result.cc:523
void store_pixel(const int2 &texel, const T &pixel_value)
void allocate_texture(const Domain domain, const bool from_pool=true, const std::optional< ResultStorageType > storage_type=std::nullopt)
Definition result.cc:389
void unbind_as_texture() const
Definition result.cc:511
void bind_as_texture(gpu::Shader *shader, const char *texture_name) const
Definition result.cc:487
T load_pixel_extended(const int2 &texel) const
const Domain & domain() const
T load_pixel(const int2 &texel) const
void unbind_as_image() const
Definition result.cc:517
float4 sample_ewa_zero(const float2 &coordinates, const float2 &x_gradient, const float2 &y_gradient) const
void bind_as_image(gpu::Shader *shader, const char *image_name, bool read=false) const
Definition result.cc:498
bool is_single_value() const
Definition result.cc:758
const T & get_single_value() const
DeclType::Builder & add_input(StringRef name, StringRef identifier="")
const CompositorInputRealizationMode & compositor_realization_mode() const
void execute_cpu_interpolation(const Interpolation &interpolation)
char const * get_shader_name(const Interpolation &interpolation)
#define input
#define output
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void node_register_type(bNodeType &ntype)
Definition node.cc:2416
void node_type_storage(bNodeType &ntype, std::optional< StringRefNull > storagename, void(*freefunc)(bNode *node), void(*copyfunc)(bNodeTree *dest_ntree, bNode *dest_node, const bNode *src_node))
Definition node.cc:5414
float4 sample_pixel(Context &context, const Result &input, const Interpolation &interpolation, const ExtensionMode &extension_mode_x, const ExtensionMode &extension_mode_y, const float2 coordinates)
void compute_dispatch_threads_at_least(gpu::Shader *shader, int2 threads_range, int2 local_size=int2(16))
Definition utilities.cc:196
GPUSamplerExtendMode map_extension_mode_to_extend_mode(const ExtensionMode &mode)
Definition domain.cc:70
void parallel_for(const int2 range, const Function &function)
VecBase< T, Size > divide_ceil(const VecBase< T, Size > &a, const VecBase< T, Size > &b)
static void node_composit_init_map_uv(bNodeTree *, bNode *node)
static void cmp_node_map_uv_declare(NodeDeclarationBuilder &b)
static NodeOperation * get_compositor_operation(Context &context, DNode node)
VecBase< float, 4 > float4
VecBase< int32_t, 2 > int2
VecBase< float, 2 > float2
VecBase< float, 3 > float3
static void register_node_type_cmp_mapuv()
void cmp_node_type_base(blender::bke::bNodeType *ntype, std::string idname, const std::optional< int16_t > legacy_type)
void node_free_standard_storage(bNode *node)
Definition node_util.cc:42
void node_copy_standard_storage(bNodeTree *, bNode *dest_node, const bNode *src_node)
Definition node_util.cc:54
const EnumPropertyItem rna_enum_node_compositor_extension_items[]
const EnumPropertyItem rna_enum_node_compositor_interpolation_items[]
#define min(a, b)
Definition sort.cc:36
void * storage
Defines a node type.
Definition BKE_node.hh:238
std::string ui_description
Definition BKE_node.hh:244
NodeGetCompositorOperationFunction get_compositor_operation
Definition BKE_node.hh:348
void(* initfunc)(bNodeTree *ntree, bNode *node)
Definition BKE_node.hh:289
const char * enum_name_legacy
Definition BKE_node.hh:247
NodeDeclareFunction declare
Definition BKE_node.hh:362
int xy[2]
Definition wm_draw.cc:178