Blender V5.0
eevee_film.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2021 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
14
15#include "BLI_hash.h"
16#include "BLI_rect.h"
17#include "BLI_set.hh"
18
19#include "BKE_compositor.hh"
20#include "BKE_scene.hh"
21
22#include "GPU_framebuffer.hh"
23#include "GPU_texture.hh"
24
25#include "DRW_render.hh"
26#include "RE_pipeline.h"
27
28#include "draw_view_data.hh"
29
30#include "eevee_film.hh"
31#include "eevee_instance.hh"
32
33namespace blender::eevee {
34
35/* -------------------------------------------------------------------- */
38
/* Build the list of AOVs (Arbitrary Output Variables) the film must accumulate, and fill
 * `aovs_info` (display slot, per-type counts, per-slot hashes). Viewport: only the displayed
 * AOV plus any AOVs the viewport compositor reads. Final render: every view-layer AOV.
 * NOTE(review): this is a scraped listing; original lines 51 and 91 are missing below. */
39void Film::init_aovs(const Set<std::string> &passes_used_by_viewport_compositor)
40{
41 Vector<ViewLayerAOV *> aovs;
42
 /* Reset AOV bookkeeping before repopulating. */
43 aovs_info.display_id = -1;
44 aovs_info.display_is_value = false;
45 aovs_info.value_len = aovs_info.color_len = 0;
46
47 if (inst_.is_viewport()) {
48 /* Viewport case. */
49 if (inst_.v3d->shading.render_pass == EEVEE_RENDER_PASS_AOV) {
50 /* AOV display, request only a single AOV. */
 /* NOTE(review): listing line 51 missing here — presumably the `BLI_findstring()` lookup
  * assigning `aov` by name; confirm against the full source. */
52 &inst_.view_layer->aovs, inst_.v3d->shading.aov_name, offsetof(ViewLayerAOV, name));
53
54 /* AOV found in view layer. */
55 if (aov) {
56 aovs.append(aov);
57 aovs_info.display_id = 0;
58 aovs_info.display_is_value = (aov->type == AOV_TYPE_VALUE);
59 }
60 }
61
62 if (inst_.is_viewport_compositor_enabled) {
63 LISTBASE_FOREACH (ViewLayerAOV *, aov, &inst_.view_layer->aovs) {
64 /* Already added as a display pass. No need to add again. */
65 if (!aovs.is_empty() && aovs.last() == aov) {
66 continue;
67 }
68
69 if (passes_used_by_viewport_compositor.contains(aov->name)) {
70 aovs.append(aov);
71 }
72 }
73 }
74 }
75 else {
76 /* Render case. */
77 LISTBASE_FOREACH (ViewLayerAOV *, aov, &inst_.view_layer->aovs) {
78 aovs.append(aov);
79 }
80 }
81
 /* Hard limit imposed by the fixed-size hash arrays in the film data. */
82 if (aovs.size() > AOV_MAX) {
83 inst_.info_append_i18n("Error: Too many AOVs")
84 return;
85 }
86
87 for (ViewLayerAOV *aov : aovs) {
88 bool is_value = (aov->type == AOV_TYPE_VALUE);
89 int &index = is_value ? aovs_info.value_len : aovs_info.color_len;
90 uint &hash = is_value ? aovs_info.hash_value[index].x : aovs_info.hash_color[index].x;
 /* NOTE(review): listing line 91 missing — presumably assigns `hash` from the AOV name
  * (e.g. `BLI_hash_string(aov->name)`); verify against the full source. */
92 index++;
93 }
94
95 if (!aovs.is_empty()) {
96 enabled_categories_ |= PASS_CATEGORY_AOV;
97 }
98}
99
/* Read an AOV accumulation texture back to CPU memory as floats. Returns nullptr when the
 * AOV has no texture. Caller owns the returned buffer.
 * NOTE(review): the signature line (listing line 100) was dropped by the scrape — presumably
 * `float *Film::read_aov(ViewLayerAOV *aov)`; confirm against the full source. */
101{
102 gpu::Texture *pass_tx = this->get_aov_texture(aov);
103
104 if (pass_tx == nullptr) {
105 return nullptr;
106 }
107
 /* NOTE(review): listing line 108 missing — likely a GPU barrier/finish before read-back. */
109
110 return (float *)GPU_texture_read(pass_tx, GPU_DATA_FLOAT, 0);
111}
112
/* Return a single-layer texture view of the accumulation array layer holding the given AOV,
 * or nullptr when the AOV is not present in `aovs_info`. The layer is located by matching the
 * AOV's hash against the stored per-slot hashes.
 * NOTE(review): signature line (listing line 113) dropped — presumably
 * `gpu::Texture *Film::get_aov_texture(ViewLayerAOV *aov)`. */
114{
115 bool is_value = (aov->type == AOV_TYPE_VALUE);
116 Texture &accum_tx = is_value ? value_accum_tx_ : color_accum_tx_;
117
118 Span<uint4> aovs_hash(is_value ? aovs_info.hash_value : aovs_info.hash_color,
119 is_value ? aovs_info.value_len : aovs_info.color_len);
120 /* Find AOV index. */
 /* NOTE(review): listing line 121 missing — presumably declares `hash` computed from the
  * AOV name, matching the hashes stored in init_aovs(); confirm against the full source. */
122 int aov_index = -1;
123 int i = 0;
124 for (uint4 candidate_hash : aovs_hash) {
125 if (candidate_hash.x == hash) {
126 aov_index = i;
127 break;
128 }
129 i++;
130 }
131
132 if (aov_index == -1) {
133 return nullptr;
134 }
135
136 accum_tx.ensure_layer_views();
137
 /* AOV layers are stored after the regular passes; offset by the AOV base id. */
138 int index = aov_index + (is_value ? data_.aov_value_id : data_.aov_color_id);
139 return accum_tx.layer_view(index);
140}
141
143
144/* -------------------------------------------------------------------- */
147
/* Derive the mist pass mapping (exponent, scale, bias) from world mist settings, falling back
 * to the camera clip range when no world is present. The shader evaluates
 * `pow(depth * mist_scale + mist_bias, ...)` style remapping from these values. */
148void Film::sync_mist()
149{
150 const CameraData &cam = inst_.camera.data_get();
151 const ::World *world = inst_.scene->world;
152 float mist_start = world ? world->miststa : cam.clip_near;
153 float mist_distance = world ? world->mistdist : fabsf(cam.clip_far - cam.clip_near);
154 int mist_type = world ? world->mistype : int(WO_MIST_LINEAR);
155
156 switch (mist_type) {
 /* NOTE(review): the case label (listing line 157) is missing — given the 2.0 exponent this
  * is presumably `case WO_MIST_QUADRATIC:`; confirm against the full source. */
158 data_.mist_exponent = 2.0f;
159 break;
160 case WO_MIST_LINEAR:
161 data_.mist_exponent = 1.0f;
162 break;
 /* NOTE(review): case label (listing line 163) missing — presumably the inverse-quadratic
  * mist type given the 0.5 exponent; confirm against the full source. */
164 data_.mist_exponent = 0.5f;
165 break;
166 }
167
 /* Normalize depth into [0..1] over the mist range: d * scale + bias. */
168 data_.mist_scale = 1.0 / mist_distance;
169 data_.mist_bias = -mist_start / mist_distance;
170}
171
173
174/* -------------------------------------------------------------------- */
177
178inline bool operator==(const FilmData &a, const FilmData &b)
179{
180 return (a.extent == b.extent) && (a.offset == b.offset) &&
181 (a.render_extent == b.render_extent) && (a.overscan == b.overscan) &&
182 (a.filter_radius == b.filter_radius) && (a.scaling_factor == b.scaling_factor) &&
183 (a.background_opacity == b.background_opacity);
184}
185
186inline bool operator!=(const FilmData &a, const FilmData &b)
187{
188 return !(a == b);
189}
190
192
193/* -------------------------------------------------------------------- */
196
/* Translate the legacy `view_layer->passflag` bits into the EEVEE pass bitfield.
 * NOTE(review): scraped listing — the signature (listing line 197), the `result` declaration,
 * and the cryptomatte/trailing section (listing lines 222, 226-234) are missing. */
198{
 /* NOTE(review): listing line 199 missing — presumably declares `result` initialized with
  * EEVEE_RENDER_PASS_COMBINED per the comment below. */
200
201 /* We enforce the use of combined pass to be compliant with Cycles and EEVEE-Legacy (#122188). */
203
 /* Map one legacy SCE_PASS_* flag to its EEVEE_RENDER_PASS_* equivalent. */
204#define ENABLE_FROM_LEGACY(name_legacy, name_eevee) \
205 SET_FLAG_FROM_TEST(result, \
206 (view_layer->passflag & SCE_PASS_##name_legacy) != 0, \
207 EEVEE_RENDER_PASS_##name_eevee);
208
209 ENABLE_FROM_LEGACY(DEPTH, DEPTH)
210 ENABLE_FROM_LEGACY(MIST, MIST)
211 ENABLE_FROM_LEGACY(NORMAL, NORMAL)
212 ENABLE_FROM_LEGACY(POSITION, POSITION)
213 ENABLE_FROM_LEGACY(SHADOW, SHADOW)
214 ENABLE_FROM_LEGACY(AO, AO)
215 ENABLE_FROM_LEGACY(EMIT, EMIT)
216 ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT)
217 ENABLE_FROM_LEGACY(DIFFUSE_COLOR, DIFFUSE_COLOR)
218 ENABLE_FROM_LEGACY(GLOSSY_COLOR, SPECULAR_COLOR)
219 ENABLE_FROM_LEGACY(DIFFUSE_DIRECT, DIFFUSE_LIGHT)
220 ENABLE_FROM_LEGACY(GLOSSY_DIRECT, SPECULAR_LIGHT)
 /* NOTE(review): ENVIRONMENT is mapped twice (also at listing line 216). Redundant but
  * harmless as written; possibly a scrape artifact replacing another entry — verify. */
221 ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT)
223
224#undef ENABLE_FROM_LEGACY
225
 /* NOTE(review): listing lines 226-234 missing — likely the cryptomatte/transparent pass
  * handling before returning; confirm against the full source. */
235
236 return result;
237}
238
239/* Get all pass types used by the viewport compositor from the set of all needed passes. */
/* NOTE(review): the function-name line (listing line 240) was dropped by the scrape —
 * presumably a static helper returning eViewLayerEEVEEPassType. */
241 const Set<std::string> &viewport_compositor_needed_passes, const ViewLayer *view_layer)
242{
243 const eViewLayerEEVEEPassType scene_enabled_passes = enabled_passes(view_layer);
244
245 /* Go over all possible pass types, check if their possible pass names exist in the viewport
246 * compositor needed passes, and if true, mark them as needed. */
247 eViewLayerEEVEEPassType viewport_compositor_enabled_passes = eViewLayerEEVEEPassType(0);
248 for (const int i : IndexRange(EEVEE_RENDER_PASS_MAX_BIT + 1)) {
249 /* Mask by the scene enabled passes, because some pass types like EEVEE_RENDER_PASS_UNUSED_8
250 * have no corresponding pass names, so they will assert later. */
251 eViewLayerEEVEEPassType pass_type = eViewLayerEEVEEPassType(scene_enabled_passes & (1 << i));
252 if (pass_type == 0) {
253 continue;
254 }
255
 /* A pass type can expose several render-pass names (e.g. cryptomatte layers). */
256 for (const std::string &pass_name : Film::pass_to_render_pass_names(pass_type, view_layer)) {
257 if (viewport_compositor_needed_passes.contains(pass_name)) {
258 viewport_compositor_enabled_passes |= pass_type;
259 }
260 }
261 }
262
263 return viewport_compositor_enabled_passes;
264}
265
/* Per-redraw (viewport) / per-render initialization of the film: decide which passes are
 * enabled, compute film/render extents (borders, overscan, scaling), assign per-pass layer
 * indices, and (re)allocate the accumulation textures.
 * `extent` is the full display size; `output_rect` is the (possibly empty) render border.
 * NOTE(review): scraped listing with several dropped lines — flagged inline below. */
266void Film::init(const int2 &extent, const rcti *output_rect)
267{
268 using namespace math;
269
270 Sampling &sampling = inst_.sampling;
271 Scene &scene = *inst_.scene;
272
273 if (inst_.is_viewport()) {
274 /* Update detection of viewport setting. */
275 const View3DShading &shading = inst_.v3d->shading;
276 int update = 0;
 /* NOTE(review): listing line 277 missing — presumably another assign_if_different()
  * on a shading setting (e.g. render_pass); confirm against the full source. */
278 update += assign_if_different(ui_aov_name_, std::string(shading.aov_name));
279 if (update) {
280 inst_.sampling.reset();
281 }
282 }
283
 /* NOTE(review): listing line 284 missing. */
285
286 /* Compute the passes needed by the viewport compositor. */
287 Set<std::string> passes_used_by_viewport_compositor;
288 if (inst_.is_viewport_compositor_enabled) {
289 passes_used_by_viewport_compositor = bke::compositor::get_used_passes(scene, inst_.view_layer);
290 viewport_compositor_enabled_passes_ = get_viewport_compositor_enabled_passes(
291 passes_used_by_viewport_compositor, inst_.view_layer);
292 }
293
294 enabled_categories_ = PassCategory(0);
295 init_aovs(passes_used_by_viewport_compositor);
296
297 {
298 /* Enable passes that need to be rendered. */
299 if (inst_.is_viewport()) {
300 /* Viewport Case. */
 /* NOTE(review): listing line 301 missing — start of the `enabled_passes` expression
  * (likely converting the shading render_pass); confirm against the full source. */
302 inst_.v3d->shading.render_pass) |
303 viewport_compositor_enabled_passes_;
304
305 if (inst_.overlays_enabled() || inst_.gpencil_engine_enabled()) {
306 /* Overlays and Grease Pencil needs the depth for correct compositing.
307 * Using the render pass ensure we store the center depth. */
 /* NOTE(review): listing line 308 missing — presumably ORs EEVEE_RENDER_PASS_DEPTH
  * into enabled_passes. */
309 }
310
 /* Any change in the enabled pass set restarts accumulation. */
311 if (assign_if_different(enabled_passes_, enabled_passes)) {
312 inst_.sampling.reset();
313 }
314 }
315 else {
316 /* Render Case. */
317 enabled_passes_ = enabled_passes(inst_.view_layer);
318 }
319
320 /* Filter obsolete passes. */
 /* NOTE(review): listing line 321 missing. */
322
323 if (scene.r.mode & R_MBLUR) {
324 /* Disable motion vector pass if motion blur is enabled. */
325 enabled_passes_ &= ~EEVEE_RENDER_PASS_VECTOR;
326 }
327 }
328 {
 /* Viewport preview pixel-size acts as a render-resolution divider. */
329 data_.scaling_factor = 1;
330 if (inst_.is_viewport()) {
331 data_.scaling_factor = BKE_render_preview_pixel_size(&inst_.scene->r);
332 }
333 /* Sharpen the LODs (1.5x) to avoid TAA filtering causing over-blur (see #122941). */
334 data_.texture_lod_bias = 1.0f / (data_.scaling_factor * 1.5f);
335 }
336 {
337 rcti fallback_rect;
338 if (BLI_rcti_is_empty(output_rect)) {
339 BLI_rcti_init(&fallback_rect, 0, extent[0], 0, extent[1]);
340 output_rect = &fallback_rect;
341 }
342
343 display_extent = extent;
344
345 data_.extent = int2(BLI_rcti_size_x(output_rect), BLI_rcti_size_y(output_rect));
346 data_.offset = int2(output_rect->xmin, output_rect->ymin);
347 data_.extent_inv = 1.0f / float2(data_.extent);
348 data_.render_extent = divide_ceil(data_.extent, int2(data_.scaling_factor));
349 data_.overscan = overscan_pixels_get(inst_.camera.overscan(), data_.render_extent);
350 data_.render_extent += data_.overscan * 2;
351
 /* Clamp to GPU limits; fall back to a tiny dummy extent when out of range. */
352 is_valid_render_extent_ = data_.render_extent.x <= GPU_max_texture_size() &&
353 data_.render_extent.y <= GPU_max_texture_size();
354 if (!is_valid_render_extent_) {
355 inst_.info_append_i18n(
356 "Required render size ({}px) is larger than reported texture size limit ({}px).",
357 max_ii(data_.render_extent.x, data_.render_extent.y),
 /* NOTE(review): listing line 358 missing — presumably `GPU_max_texture_size());`. */
359
360 data_.extent = int2(4, 4);
361 data_.render_extent = int2(4, 4);
362 data_.extent_inv = 1.0f / float2(data_.extent);
363 data_.offset = int2(0, 0);
364 data_.overscan = 0;
365 }
366
367 data_.filter_radius = clamp_f(scene.r.gauss, 0.0f, 100.0f);
368 if (sampling.sample_count() == 1) {
369 /* Disable filtering if sample count is 1. */
370 data_.filter_radius = 0.0f;
371 }
372 if (data_.scaling_factor > 1) {
373 /* Fixes issue when using scaling factor and no filtering.
374 * Without this, the filter becomes a dirac and samples gets only the fallback weight.
375 * This results in a box blur instead of no filtering. */
376 data_.filter_radius = math::max(data_.filter_radius, 0.0001f);
377 }
378
379 data_.cryptomatte_samples_len = inst_.view_layer->cryptomatte_levels;
380
381 data_.background_opacity = (scene.r.alphamode == R_ALPHAPREMUL) ? 0.0f : 1.0f;
382 if (inst_.is_viewport() && false /* TODO(fclem): StudioLight */) {
383 data_.background_opacity = inst_.v3d->shading.studiolight_background;
384 }
385
 /* NOTE(review): listing lines 386-399 missing — presumably the `data_passes` /
  * `color_passes_1..3` bitmask declarations used below; confirm against the full source. */
400
401 data_.exposure_scale = pow2f(scene.view_settings.exposure);
402 if (enabled_passes_ & data_passes) {
403 enabled_categories_ |= PASS_CATEGORY_DATA;
404 }
405 if (enabled_passes_ & color_passes_1) {
406 enabled_categories_ |= PASS_CATEGORY_COLOR_1;
407 }
408 if (enabled_passes_ & color_passes_2) {
409 enabled_categories_ |= PASS_CATEGORY_COLOR_2;
410 }
411 if (enabled_passes_ & color_passes_3) {
412 enabled_categories_ |= PASS_CATEGORY_COLOR_3;
413 }
414 }
415 {
416 /* Set pass offsets. */
417
418 data_.display_id = aovs_info.display_id;
419 data_.display_storage_type = aovs_info.display_is_value ? PASS_STORAGE_VALUE :
 /* NOTE(review): listing line 420 missing — presumably `PASS_STORAGE_COLOR;`. */
421
422 /* Combined is in a separate buffer. */
423 data_.combined_id = (enabled_passes_ & EEVEE_RENDER_PASS_COMBINED) ? 0 : -1;
424 /* Depth is in a separate buffer. */
425 data_.depth_id = (enabled_passes_ & EEVEE_RENDER_PASS_DEPTH) ? 0 : -1;
426
427 data_.color_len = 0;
428 data_.value_len = 0;
429
 /* Allocate the next array layer for an enabled pass, or -1 when disabled. Also records
  * which pass the viewport is displaying. */
430 auto pass_index_get = [&](eViewLayerEEVEEPassType pass_type) {
431 ePassStorageType storage_type = pass_storage_type(pass_type);
432 int index = (enabled_passes_ & pass_type) ?
433 (storage_type == PASS_STORAGE_VALUE ? data_.value_len : data_.color_len)++ :
434 -1;
435 if (inst_.is_viewport() && inst_.v3d->shading.render_pass == pass_type) {
436 data_.display_id = index;
437 data_.display_storage_type = storage_type;
438 }
439 return index;
440 };
441
442 data_.mist_id = pass_index_get(EEVEE_RENDER_PASS_MIST);
443 data_.normal_id = pass_index_get(EEVEE_RENDER_PASS_NORMAL);
444 data_.position_id = pass_index_get(EEVEE_RENDER_PASS_POSITION);
445 data_.vector_id = pass_index_get(EEVEE_RENDER_PASS_VECTOR);
446 data_.diffuse_light_id = pass_index_get(EEVEE_RENDER_PASS_DIFFUSE_LIGHT);
447 data_.diffuse_color_id = pass_index_get(EEVEE_RENDER_PASS_DIFFUSE_COLOR);
448 data_.specular_light_id = pass_index_get(EEVEE_RENDER_PASS_SPECULAR_LIGHT);
449 data_.specular_color_id = pass_index_get(EEVEE_RENDER_PASS_SPECULAR_COLOR);
450 data_.volume_light_id = pass_index_get(EEVEE_RENDER_PASS_VOLUME_LIGHT);
451 data_.emission_id = pass_index_get(EEVEE_RENDER_PASS_EMIT);
452 data_.environment_id = pass_index_get(EEVEE_RENDER_PASS_ENVIRONMENT);
453 data_.shadow_id = pass_index_get(EEVEE_RENDER_PASS_SHADOW);
454 data_.ambient_occlusion_id = pass_index_get(EEVEE_RENDER_PASS_AO);
455 data_.transparent_id = pass_index_get(EEVEE_RENDER_PASS_TRANSPARENT);
456
 /* AOV layers live after the regular passes in the same accumulation arrays. */
457 data_.aov_color_id = data_.color_len;
458 data_.aov_value_id = data_.value_len;
459
460 data_.aov_color_len = aovs_info.color_len;
461 data_.aov_value_len = aovs_info.value_len;
462
463 data_.color_len += data_.aov_color_len;
464 data_.value_len += data_.aov_value_len;
465
466 int cryptomatte_id = 0;
 /* Each enabled cryptomatte pass takes ceil(samples/2) layers (2 samples per texel). */
467 auto cryptomatte_index_get = [&](eViewLayerEEVEEPassType pass_type) {
468 int index = -1;
469 if (enabled_passes_ & pass_type) {
470 index = cryptomatte_id;
471 cryptomatte_id += divide_ceil_u(data_.cryptomatte_samples_len, 2u);
472
473 if (inst_.is_viewport() && inst_.v3d->shading.render_pass == pass_type) {
474 data_.display_id = index;
475 data_.display_storage_type = PASS_STORAGE_CRYPTOMATTE;
476 }
477 }
478 return index;
479 };
480 data_.cryptomatte_object_id = cryptomatte_index_get(EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT);
481 data_.cryptomatte_asset_id = cryptomatte_index_get(EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET);
482 data_.cryptomatte_material_id = cryptomatte_index_get(EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL);
483
484 if ((enabled_passes_ &
 /* NOTE(review): listing lines 485-486 missing — presumably the three cryptomatte pass
  * bits being tested; confirm against the full source. */
487 {
488 enabled_categories_ |= PASS_CATEGORY_CRYPTOMATTE;
489 }
490 }
491 {
 /* Weight needs full resolution only for panoramic cameras or scaled rendering. */
492 int2 weight_extent = (inst_.camera.is_panoramic() || (data_.scaling_factor > 1)) ?
493 data_.extent :
494 int2(1);
495
496 gpu::TextureFormat color_format = gpu::TextureFormat::SFLOAT_16_16_16_16;
497 gpu::TextureFormat float_format = gpu::TextureFormat::SFLOAT_16;
498 gpu::TextureFormat weight_format = gpu::TextureFormat::SFLOAT_32;
499 gpu::TextureFormat depth_format = gpu::TextureFormat::SFLOAT_32;
500 gpu::TextureFormat cryptomatte_format = gpu::TextureFormat::SFLOAT_32_32_32_32;
501
 /* `ensure_*` returns non-zero when the texture was (re)created, forcing a history reset. */
502 int reset = 0;
503 reset += depth_tx_.ensure_2d(depth_format, data_.extent);
504 reset += combined_tx_.current().ensure_2d(color_format, data_.extent);
505 reset += combined_tx_.next().ensure_2d(color_format, data_.extent);
506 /* Two layers, one for nearest sample weight and one for weight accumulation. */
507 reset += weight_tx_.current().ensure_2d_array(weight_format, weight_extent, 2);
508 reset += weight_tx_.next().ensure_2d_array(weight_format, weight_extent, 2);
509 reset += color_accum_tx_.ensure_2d_array(color_format,
510 (data_.color_len > 0) ? data_.extent : int2(1),
511 (data_.color_len > 0) ? data_.color_len : 1);
512 reset += value_accum_tx_.ensure_2d_array(float_format,
513 (data_.value_len > 0) ? data_.extent : int2(1),
514 (data_.value_len > 0) ? data_.value_len : 1);
515 /* Divided by two as two cryptomatte samples fit in pixel (RG, BA). */
516 int cryptomatte_array_len = cryptomatte_layer_len_get() *
517 divide_ceil_u(data_.cryptomatte_samples_len, 2u);
518 reset += cryptomatte_tx_.ensure_2d_array(cryptomatte_format,
519 (cryptomatte_array_len > 0) ? data_.extent : int2(1),
520 (cryptomatte_array_len > 0) ? cryptomatte_array_len :
521 1);
522
523 if (reset > 0) {
524 data_.use_history = 0;
525 use_reprojection_ = false;
526
527 /* Avoid NaN in uninitialized texture memory making history blending dangerous. */
528 color_accum_tx_.clear(float4(0.0f));
529 value_accum_tx_.clear(float4(0.0f));
530 combined_tx_.current().clear(float4(0.0f));
531 weight_tx_.current().clear(float4(0.0f));
532 depth_tx_.clear(float4(0.0f));
533 cryptomatte_tx_.clear(float4(0.0f));
534 }
535 }
536}
537
/* Build the film draw passes: accumulation (compute or fragment), the viewport copy pass, and
 * the cryptomatte sample-sorting post pass.
 * NOTE(review): signature line (listing line 538) dropped — presumably `void Film::sync()`. */
539{
540 /* We use a fragment shader for viewport because we need to output the depth.
541 *
542 * Compute shader is also used to work around Metal/Intel iGPU issues concerning
543 * read write support for array textures. In this case the copy_ps_ is used to
544 * copy the right color/value to the framebuffer. */
545 use_compute_ = !inst_.is_viewport() ||
 /* NOTE(review): listing line 546 missing — the remainder of this condition (likely the
  * Metal/iGPU workaround check described above); confirm against the full source. */
547
548 eShaderType shader = use_compute_ ? FILM_COMP : FILM_FRAG;
549
550 /* TODO(fclem): Shader variation for panoramic & scaled resolution. */
551
552 gpu::Shader *sh = inst_.shaders.static_shader_get(shader);
553 accumulate_ps_.init();
554 init_pass(accumulate_ps_, sh);
555 /* Sync with rendering passes. */
 /* NOTE(review): listing line 556 missing — likely a barrier call. */
557 if (use_compute_) {
558 accumulate_ps_.dispatch(int3(math::divide_ceil(data_.extent, int2(FILM_GROUP_SIZE)), 1));
559 }
560 else {
 /* Full-screen triangle. */
561 accumulate_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3);
562 }
563
564 copy_ps_.init();
565 if (use_compute_ && inst_.is_viewport()) {
566 init_pass(copy_ps_, inst_.shaders.static_shader_get(FILM_COPY));
567 copy_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3);
568 }
569
570 const int cryptomatte_layer_count = cryptomatte_layer_len_get();
571 const bool is_cryptomatte_pass_enabled = cryptomatte_layer_count > 0;
572 const bool do_cryptomatte_sorting = !inst_.is_viewport() || inst_.is_viewport_compositor_enabled;
573 cryptomatte_post_ps_.init();
574 if (is_cryptomatte_pass_enabled && do_cryptomatte_sorting) {
575 cryptomatte_post_ps_.state_set(DRW_STATE_NO_DRAW);
576 cryptomatte_post_ps_.shader_set(inst_.shaders.static_shader_get(FILM_CRYPTOMATTE_POST));
577 cryptomatte_post_ps_.bind_image("cryptomatte_img", &cryptomatte_tx_);
578 cryptomatte_post_ps_.bind_resources(inst_.uniform_data);
579 cryptomatte_post_ps_.push_constant("cryptomatte_layer_len", cryptomatte_layer_count);
580 cryptomatte_post_ps_.push_constant("cryptomatte_samples_per_layer",
581 inst_.view_layer->cryptomatte_levels);
582 int2 dispatch_size = math::divide_ceil(int2(cryptomatte_tx_.size()), int2(FILM_GROUP_SIZE));
583 cryptomatte_post_ps_.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS);
584 cryptomatte_post_ps_.dispatch(int3(UNPACK2(dispatch_size), 1));
585 }
586}
587
/* Configure a film pass: set specialization constants, the shader, and bind all input render
 * buffers plus the film's own accumulation textures. Shared by accumulate and copy passes. */
588void Film::init_pass(PassSimple &pass, gpu::Shader *sh)
589{
 /* NOTE(review): listing line 590 missing — likely declares the `filter` sampler state used
  * when binding `in_combined_tx` below; confirm against the full source. */
591 RenderBuffers &rbuffers = inst_.render_buffers;
592 VelocityModule &velocity = inst_.velocity;
593
 /* Specialization constants must be set before shader_set(). */
594 pass.specialize_constant(sh, "enabled_categories", uint(enabled_categories_));
595 pass.specialize_constant(sh, "samples_len", &data_.samples_len);
596 pass.specialize_constant(sh, "use_reprojection", &use_reprojection_);
597 pass.specialize_constant(sh, "scaling_factor", data_.scaling_factor);
598 pass.specialize_constant(sh, "combined_id", &data_.combined_id);
599 pass.specialize_constant(sh, "display_id", &data_.display_id);
600 pass.specialize_constant(sh, "normal_id", &data_.normal_id);
 /* NOTE(review): listing line 601 missing — likely one more specialize_constant(). */
602 pass.shader_set(sh);
603 /* For viewport, only previous motion is supported.
604 * Still bind previous step to avoid undefined behavior. */
605 eVelocityStep step_next = inst_.is_viewport() ? STEP_PREVIOUS : STEP_NEXT;
606
607 pass.bind_resources(inst_.uniform_data);
608 pass.bind_ubo("camera_prev", &(*velocity.camera_steps[STEP_PREVIOUS]));
609 pass.bind_ubo("camera_curr", &(*velocity.camera_steps[STEP_CURRENT]));
610 pass.bind_ubo("camera_next", &(*velocity.camera_steps[step_next]));
611 pass.bind_texture("depth_tx", &rbuffers.depth_tx);
612 pass.bind_texture("combined_tx", &combined_final_tx_);
613 pass.bind_texture("vector_tx", &rbuffers.vector_tx);
614 pass.bind_texture("rp_color_tx", &rbuffers.rp_color_tx);
615 pass.bind_texture("rp_value_tx", &rbuffers.rp_value_tx);
616 pass.bind_texture("cryptomatte_tx", &rbuffers.cryptomatte_tx);
617 /* NOTE(@fclem): 16 is the max number of sampled texture in many implementations.
618 * If we need more, we need to pack more of the similar passes in the same textures as arrays or
619 * use image binding instead. */
620 pass.bind_image("in_weight_img", &weight_tx_.current());
621 pass.bind_image("out_weight_img", &weight_tx_.next());
622 pass.bind_texture("in_combined_tx", &combined_tx_.current(), filter);
623 pass.bind_image("out_combined_img", &combined_tx_.next());
624 pass.bind_image("depth_img", &depth_tx_);
625 pass.bind_image("color_accum_img", &color_accum_tx_);
626 pass.bind_image("value_accum_img", &value_accum_tx_);
627 pass.bind_image("cryptomatte_img", &cryptomatte_tx_);
 /* NOTE(review): `uniform_data` is already bound at listing line 607 — this second bind looks
  * redundant; verify against the full source before removing. */
628 pass.bind_resources(inst_.uniform_data);
629}
630
/* Finalize film sync: decide reprojection, push AOV info, sync mist, and pre-warm shader
 * specializations so the first sample does not stall on compilation.
 * NOTE(review): signature line (listing line 631) dropped — presumably `void Film::end_sync()`. */
632{
633 use_reprojection_ = inst_.sampling.interactive_mode();
634
635 /* Just bypass the reprojection and reset the accumulation. */
636 if (inst_.is_viewport() && !use_reprojection_ && inst_.sampling.is_reset()) {
637 use_reprojection_ = false;
638 data_.use_history = false;
639 }
640
641 aovs_info.push_update();
642
643 sync_mist();
644
645 /* Update sample table length for specialization warm up.
646 * Otherwise, we will warm a specialization that is not actually used.
647 * We still need to update it once per sample afterward. */
648 update_sample_table();
649
650 inst_.manager->warm_shader_specialization(accumulate_ps_);
651 inst_.manager->warm_shader_specialization(copy_ps_);
652 inst_.manager->warm_shader_specialization(cryptomatte_post_ps_);
653}
654
/* Return the sub-pixel jitter (in render-pixel units) for the current sample, used for
 * anti-aliasing filter importance sampling. Also distributes samples across the film pixels
 * covered by one render pixel when a scaling factor is used.
 * NOTE(review): signature line (listing line 655) dropped — presumably
 * `float2 Film::pixel_jitter_get() const`. */
656{
657 float2 jitter = inst_.sampling.rng_2d_get(SAMPLING_FILTER_U);
658
659 if (!use_box_filter && data_.filter_radius < M_SQRT1_2 && !inst_.camera.is_panoramic()) {
660 /* For filter size less than a pixel, change sampling strategy and use a uniform disk
661 * distribution covering the filter shape. This avoids putting samples in areas without any
662 * weights. */
663 /* TODO(fclem): Importance sampling could be a better option here. */
664 /* NOTE: We bias the disk to encompass most of the energy of the filter to avoid energy issues
665 * with motion blur at low sample. */
666 const float bias = 0.5f;
667 jitter = Sampling::sample_disk(jitter) * bias * data_.filter_radius;
668 }
669 else {
670 /* Jitter the size of a whole pixel. [-0.5..0.5] */
671 jitter -= 0.5f;
672 }
673
674 if (data_.scaling_factor > 1) {
675 /* In this case, the jitter sequence is the same for the number of film pixel a render pixel
676 * covers. This allows to add a manual offset to the different film pixels to ensure they get
677 * appropriate coverage instead of waiting that random sampling covers all the area. This
678 * ensures a much faster convergence. */
679 const int scale = data_.scaling_factor;
680 const int render_pixel_per_final_pixel = square_i(scale);
681 /* TODO(fclem): Random in Z-order curve. */
682 /* Works great for the scaling factor we have. */
683 int prime = (render_pixel_per_final_pixel / 2) - 1;
684 /* For now just randomize in scan-lines using a prime number. */
685 uint64_t index = (inst_.sampling.sample_index() * prime) % render_pixel_per_final_pixel;
686 int2 pixel_co = int2(index % scale, index / scale);
687 /* The jitter is applied on render target pixels. Make it proportional to film pixel. */
688 jitter /= float(scale);
689 /* Offset from the render pixel center to the center of film pixel. */
690 jitter += ((float2(pixel_co) + 0.5f) / scale) - 0.5f;
691 }
692 return jitter;
693}
694
/* Return the passes the renderer must produce this sample. During viewport reprojection the
 * vector pass is forced on (for motion vectors) even though it is not accumulated.
 * NOTE(review): signature line (listing line 695) dropped — presumably
 * `eViewLayerEEVEEPassType Film::enabled_passes_get() const`. */
696{
697 if (inst_.is_viewport() && use_reprojection_) {
698 /* Enable motion vector rendering but not the accumulation buffer. */
699 return enabled_passes_ | EEVEE_RENDER_PASS_VECTOR;
700 }
701 return enabled_passes_;
702}
703
/* Count enabled cryptomatte layers (object / asset / material): a layer is enabled when its
 * pass id was assigned (!= -1) in Film::init().
 * NOTE(review): signature line (listing line 704) dropped — presumably
 * `int Film::cryptomatte_layer_len_get() const`. */
705{
706 int result = 0;
707 result += data_.cryptomatte_object_id == -1 ? 0 : 1;
708 result += data_.cryptomatte_asset_id == -1 ? 0 : 1;
709 result += data_.cryptomatte_material_id == -1 ? 0 : 1;
710 return result;
711}
712
/* Precompute the table of neighboring render-target samples (texel offset + filter weight)
 * that contribute to each film pixel for the current sub-pixel offset. Four strategies:
 * scaled rendering, no filtering, small radius (exhaustive), large radius (random spiral). */
713void Film::update_sample_table()
714{
715 /* Offset in render target pixels. */
 /* NOTE(review): listing line 716 missing — presumably assigns `data_.subpixel_offset`
  * (e.g. from pixel_jitter_get()); confirm against the full source. */
717
718 int filter_radius_ceil = ceilf(data_.filter_radius);
719 float filter_radius_sqr = square_f(data_.filter_radius);
720
721 /* Reset */
722 for (FilmSample &sample : data_.samples) {
723 sample.texel = int2(0, 0);
724 sample.weight = 0.0f;
725 }
726
727 data_.samples_len = 0;
728 if (data_.scaling_factor > 1) {
729 /* For this case there might be no valid samples for some pixels.
730 * Still visit all four neighbors to have the best weight available.
731 * Note that weight is computed on the GPU as it is different for each sample. */
732 /* TODO(fclem): Make it work for filters larger than then scaling_factor. */
733 for (int y = 0; y <= 1; y++) {
734 for (int x = 0; x <= 1; x++) {
735 FilmSample &sample = data_.samples[data_.samples_len];
736 sample.texel = int2(x, y);
737 sample.weight = -1.0f; /* Computed on GPU. */
738 data_.samples_len++;
739 }
740 }
741 data_.samples_weight_total = -1.0f; /* Computed on GPU. */
742 }
743 else if (use_box_filter || data_.filter_radius < 0.01f) {
744 /* Disable gather filtering. */
745 data_.samples[0].texel = int2(0, 0);
746 data_.samples[0].weight = 1.0f;
747 data_.samples_weight_total = 1.0f;
748 data_.samples_len = 1;
749 }
750 /* NOTE: Threshold determined by hand until we don't hit the assert below. */
751 else if (data_.filter_radius < 2.20f) {
752 /* Small filter Size. */
753 int closest_index = 0;
754 float closest_distance = FLT_MAX;
755 data_.samples_weight_total = 0.0f;
756 /* TODO(fclem): For optimization, could try Z-tile ordering. */
757 for (int y = -filter_radius_ceil; y <= filter_radius_ceil; y++) {
758 for (int x = -filter_radius_ceil; x <= filter_radius_ceil; x++) {
759 float2 pixel_offset = float2(x, y) - data_.subpixel_offset;
760 float distance_sqr = math::length_squared(pixel_offset);
761 if (distance_sqr < filter_radius_sqr) {
762 if (data_.samples_len >= FILM_PRECOMP_SAMPLE_MAX) {
763 BLI_assert_msg(0, "Precomputed sample table is too small.");
764 break;
765 }
766 FilmSample &sample = data_.samples[data_.samples_len];
767 sample.texel = int2(x, y);
768 sample.weight = film_filter_weight(data_.filter_radius, distance_sqr);
769 data_.samples_weight_total += sample.weight;
770
771 if (distance_sqr < closest_distance) {
772 closest_distance = distance_sqr;
773 closest_index = data_.samples_len;
774 }
775 data_.samples_len++;
776 }
777 }
778 }
779 /* Put the closest one in first position. */
780 if (closest_index != 0) {
781 std::swap(data_.samples[closest_index], data_.samples[0]);
782 }
783 /* Avoid querying a different shader specialization for this case.
784 * This can happen with the default settings. */
785 if (data_.samples_len <= 9) {
786 data_.samples_len = 9;
787 }
788 }
789 else {
790 /* Large Filter Size. */
791 MutableSpan<FilmSample> sample_table(data_.samples, FILM_PRECOMP_SAMPLE_MAX);
792 /* To avoid hitting driver TDR and slowing rendering too much we use random sampling. */
793 /* TODO(fclem): This case needs more work. We could distribute the samples better to avoid
794 * loading the same pixel twice. */
795 data_.samples_len = sample_table.size();
796 data_.samples_weight_total = 0.0f;
797
798 int i = 0;
799 for (FilmSample &sample : sample_table) {
800 /* TODO(fclem): Own RNG. */
801 float2 random_2d = inst_.sampling.rng_2d_get(SAMPLING_SSS_U);
802 /* This randomization makes sure we converge to the right result but also makes nearest
803 * neighbor filtering not converging rapidly. */
804 random_2d.x = (random_2d.x + i) / float(FILM_PRECOMP_SAMPLE_MAX);
805
806 float2 pixel_offset = math::floor(Sampling::sample_spiral(random_2d) * data_.filter_radius);
807 sample.texel = int2(pixel_offset);
808
809 float distance_sqr = math::length_squared(pixel_offset - data_.subpixel_offset);
810 sample.weight = film_filter_weight(data_.filter_radius, distance_sqr);
811 data_.samples_weight_total += sample.weight;
812 i++;
813 }
814 }
815
816 /* Round to specific amount of sample to avoid variation in sample count to cause stutter on
817 * startup because of shader specialization. */
818 if (data_.samples_len == 1) {
819 data_.samples_len = 1;
820 }
821 else if (data_.samples_len <= 4) {
822 data_.samples_len = 4;
823 }
824 else if (data_.samples_len <= 9) {
825 data_.samples_len = 9;
826 }
827 else if (data_.samples_len <= 16) {
828 data_.samples_len = 16;
829 }
830 else {
 /* NOTE(review): listing line 831 missing — presumably clamps samples_len to
  * FILM_PRECOMP_SAMPLE_MAX; confirm against the full source. */
832 }
833}
834
/* Accumulate one rendered sample (`combined_final_tx`) into the film buffers, then swap the
 * double-buffered combined/weight textures. In viewport mode also sets up the default
 * framebuffer region (clearing it when render borders are active). */
835void Film::accumulate(View &view, gpu::Texture *combined_final_tx)
836{
837 if (inst_.is_viewport()) {
838 DefaultFramebufferList *dfbl = inst_.draw_ctx->viewport_framebuffer_list_get();
839 DefaultTextureList *dtxl = inst_.draw_ctx->viewport_texture_list_get();
 /* NOTE(review): listing line 840 missing — likely binds `dfbl->default_fb` before the
  * clear below; confirm against the full source. */
841 /* Clear when using render borders. */
842 if (data_.extent != int2(GPU_texture_width(dtxl->color), GPU_texture_height(dtxl->color))) {
843 float4 clear_color = {0.0f, 0.0f, 0.0f, 0.0f};
844 GPU_framebuffer_clear_color(dfbl->default_fb, clear_color);
845 }
846 GPU_framebuffer_viewport_set(dfbl->default_fb, UNPACK2(data_.offset), UNPACK2(data_.extent));
847 }
848
 /* Sample table depends on the per-sample sub-pixel offset. */
849 update_sample_table();
850
851 combined_final_tx_ = combined_final_tx;
852
853 data_.display_only = false;
854 inst_.uniform_data.push_update();
855
856 inst_.manager->submit(accumulate_ps_, view);
857 inst_.manager->submit(copy_ps_, view);
858
859 combined_tx_.swap();
860 weight_tx_.swap();
861
862 /* Use history after first sample. */
863 if (data_.use_history == 0) {
864 data_.use_history = 1;
865 }
866}
867
/* Redisplay already-accumulated film content to the viewport without adding a new sample
 * (display-only mode). Viewport only.
 * NOTE(review): signature line (listing line 868) dropped — presumably `void Film::display()`. */
869{
870 BLI_assert(inst_.is_viewport());
871
872 /* Acquire dummy render buffers for correct binding. They will not be used. */
873 inst_.render_buffers.acquire(int2(1));
874
875 DefaultFramebufferList *dfbl = inst_.draw_ctx->viewport_framebuffer_list_get();
 /* NOTE(review): listing line 876 missing — likely binds `dfbl->default_fb`. */
877 GPU_framebuffer_viewport_set(dfbl->default_fb, UNPACK2(data_.offset), UNPACK2(data_.extent));
878
879 combined_final_tx_ = inst_.render_buffers.combined_tx;
880
881 data_.display_only = true;
882 inst_.uniform_data.push_update();
883
 /* NOTE(review): listing line 884 missing — presumably declares `drw_view` used below. */
885
886 DRW_manager_get()->submit(accumulate_ps_, drw_view);
887
888 inst_.render_buffers.release();
889
890 /* IMPORTANT: Do not swap! No accumulation has happened. */
891}
892
/* Submit the cryptomatte post-process pass that sorts samples by coverage.
 * NOTE(review): signature line (listing line 893) dropped — presumably
 * `void Film::cryptomatte_sort()`. */
894{
895 DRW_manager_get()->submit(cryptomatte_post_ps_);
896}
897
/* Read a render pass back to CPU memory. float3 passes are stored as RGBA on GPU and are
 * compacted in place to tightly-packed XYZ triplets before returning. Caller owns the buffer. */
898float *Film::read_pass(eViewLayerEEVEEPassType pass_type, int layer_offset)
899{
900 gpu::Texture *pass_tx = this->get_pass_texture(pass_type, layer_offset);
901
 /* NOTE(review): listing line 902 missing — likely a null check or a barrier before the
  * read-back; confirm against the full source. */
903
904 float *result = (float *)GPU_texture_read(pass_tx, GPU_DATA_FLOAT, 0);
905
906 if (pass_is_float3(pass_type)) {
907 /* Convert result in place as we cannot do this conversion on GPU. */
 /* Forward iteration is safe: the packed write index never passes the strided read index. */
908 for (const int px : IndexRange(GPU_texture_width(pass_tx) * GPU_texture_height(pass_tx))) {
909 float3 tmp = *(reinterpret_cast<float3 *>(result + px * 4));
910 *(reinterpret_cast<float3 *>(result) + px) = tmp;
911 }
912 }
913
914 return result;
915}
916
/* Return a single-layer view of the texture holding the given pass (combined, depth,
 * cryptomatte, or the value/color accumulation arrays), or nullptr when the pass is disabled.
 * NOTE(review): signature line (listing line 917) dropped — presumably
 * `gpu::Texture *Film::get_pass_texture(eViewLayerEEVEEPassType pass_type, int layer_offset)`. */
918{
919 ePassStorageType storage_type = pass_storage_type(pass_type);
920 const bool is_value = storage_type == PASS_STORAGE_VALUE;
921 const bool is_cryptomatte = storage_type == PASS_STORAGE_CRYPTOMATTE;
922
 /* Combined and depth live in dedicated textures; everything else is an array layer. */
923 Texture &accum_tx = (pass_type == EEVEE_RENDER_PASS_COMBINED) ?
924 combined_tx_.current() :
925 (pass_type == EEVEE_RENDER_PASS_DEPTH) ?
926 depth_tx_ :
927 (is_cryptomatte ? cryptomatte_tx_ :
928 (is_value ? value_accum_tx_ : color_accum_tx_));
929
930 int index = pass_id_get(pass_type);
931 if (index == -1) {
932 return nullptr;
933 }
934
935 accum_tx.ensure_layer_views();
936 return accum_tx.layer_view(index + layer_offset);
937}
938
939/* Gets the appropriate shader to write the given pass type. This is because passes of different
940 * types are stored in different textures types and formats. */
/* NOTE(review): scraped listing — the signature (line 941), the case labels/returns of both
 * switches (lines 944-947, 953-958) and the final return (line 961) are missing; this block
 * cannot be reviewed beyond its structure. Confirm against the full source. */
942{
 /* First dispatch on specific pass types (special-cased storage). */
943 switch (pass_type) {
948 default:
949 break;
950 }
951
 /* Then fall back to dispatch by storage type (value/color/cryptomatte). */
952 switch (Film::pass_storage_type(pass_type)) {
959 }
960
962}
963
964/* Gets the appropriate shader to write the given AOV pass. */
/* NOTE(review): scraped listing — the signature (line 965) and the return statements for both
 * cases (lines 969, 971) plus the fallback return (line 974) are missing; presumably value vs.
 * color write-pass shader variants. Confirm against the full source. */
966{
967 switch (aov->type) {
968 case AOV_TYPE_VALUE:
970 case AOV_TYPE_COLOR:
972 }
973
975}
976
/* Copy every pass (standard and AOV) used by the viewport compositor into viewport pass
 * textures of full display extent, zero-padding outside the render border region.
 * NOTE(review): signature line (listing line 977) dropped — presumably
 * `void Film::write_viewport_compositor_passes()`. */
978{
 /* Compositor needs sorted cryptomatte samples. */
979 this->cryptomatte_sort();
980
981 /* Write standard passes. */
982 for (const int i : IndexRange(EEVEE_RENDER_PASS_MAX_BIT + 1)) {
 /* NOTE(review): listing line 983 missing — presumably the start of the `pass_type`
  * declaration masking bit `i`; confirm against the full source. */
984 viewport_compositor_enabled_passes_ & (1 << i));
985 if (pass_type == 0) {
986 continue;
987 }
988
989 /* The compositor will use the viewport color texture as the combined pass because the viewport
990 * texture will include Grease Pencil, so no need to write the combined pass from the engine
991 * side. */
992 if (pass_type == EEVEE_RENDER_PASS_COMBINED) {
993 continue;
994 }
995
996 Vector<std::string> pass_names = Film::pass_to_render_pass_names(pass_type, inst_.view_layer);
997 for (const int64_t pass_offset : IndexRange(pass_names.size())) {
998 gpu::Texture *pass_texture = this->get_pass_texture(pass_type, pass_offset);
999 if (!pass_texture) {
1000 continue;
1001 }
1002
1003 /* Allocate passes that spans the entire display extent, even when border rendering, then
1004 * copy the border region while zeroing the rest. That's because the compositor doesn't have
1005 * a distinction between display and data windows at the moment, so it expects passes to have
1006 * the extent of the viewport. Furthermore, we still do not support passes from Cycles and
1007 * external engines, so the viewport size assumption holds at the compositor side to support
1008 * all cases for now. */
1009 const char *pass_name = pass_names[pass_offset].c_str();
1010 draw::TextureFromPool &output_pass_texture = DRW_viewport_pass_texture_get(pass_name);
1011 output_pass_texture.acquire(this->display_extent, GPU_texture_format(pass_texture));
1012
 /* One transient compute pass per written texture. */
1013 PassSimple write_pass_ps = {"Film.WriteViewportCompositorPass"};
1014 const eShaderType write_shader_type = get_write_pass_shader_type(pass_type);
1015 write_pass_ps.shader_set(inst_.shaders.static_shader_get(write_shader_type));
1016 write_pass_ps.push_constant("offset", data_.offset);
1017 write_pass_ps.bind_texture("input_tx", pass_texture);
1018 write_pass_ps.bind_image("output_img", output_pass_texture);
1019 write_pass_ps.barrier(GPU_BARRIER_TEXTURE_FETCH);
1020 write_pass_ps.dispatch(math::divide_ceil(this->display_extent, int2(FILM_GROUP_SIZE)));
1021 inst_.manager->submit(write_pass_ps);
1022 }
1023 }
1024
1025 /* Write AOV passes. */
1026 LISTBASE_FOREACH (ViewLayerAOV *, aov, &inst_.view_layer->aovs) {
 /* Skip AOVs with name conflicts; they have no valid output. */
1027 if ((aov->flag & AOV_CONFLICT) != 0) {
1028 continue;
1029 }
1030 gpu::Texture *pass_texture = this->get_aov_texture(aov);
1031 if (!pass_texture) {
1032 continue;
1033 }
1034
1035 /* See above comment regarding the allocation extent. */
1036 draw::TextureFromPool &output_pass_texture = DRW_viewport_pass_texture_get(aov->name);
1037 output_pass_texture.acquire(this->display_extent, GPU_texture_format(pass_texture));
1038
1039 PassSimple write_pass_ps = {"Film.WriteViewportCompositorPass"};
1040 const eShaderType write_shader_type = get_aov_write_pass_shader_type(aov);
1041 write_pass_ps.shader_set(inst_.shaders.static_shader_get(write_shader_type));
1042 write_pass_ps.push_constant("offset", data_.offset);
1043 write_pass_ps.bind_texture("input_tx", pass_texture);
1044 write_pass_ps.bind_image("output_img", output_pass_texture);
1045 write_pass_ps.barrier(GPU_BARRIER_TEXTURE_FETCH);
1046 write_pass_ps.dispatch(math::divide_ceil(this->display_extent, int2(FILM_GROUP_SIZE)));
1047 inst_.manager->submit(write_pass_ps);
1048 }
1049}
1050
1052
1053} // namespace blender::eevee
int BKE_render_preview_pixel_size(const RenderData *r)
Definition scene.cc:2930
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:53
BLI_INLINE unsigned int BLI_hash_string(const char *str)
Definition BLI_hash.h:67
#define LISTBASE_FOREACH(type, var, list)
void * BLI_findstring(const ListBase *listbase, const char *id, int offset) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:608
MINLINE uint divide_ceil_u(uint a, uint b)
MINLINE float pow2f(float x)
MINLINE float clamp_f(float value, float min, float max)
MINLINE int square_i(int a)
MINLINE int max_ii(int a, int b)
MINLINE float square_f(float a)
#define M_SQRT1_2
BLI_INLINE int BLI_rcti_size_y(const struct rcti *rct)
Definition BLI_rect.h:198
void BLI_rcti_init(struct rcti *rect, int xmin, int xmax, int ymin, int ymax)
Definition rct.cc:414
BLI_INLINE int BLI_rcti_size_x(const struct rcti *rct)
Definition BLI_rect.h:194
bool BLI_rcti_is_empty(const struct rcti *rect)
unsigned int uint
#define UNPACK2(a)
#define SET_FLAG_FROM_TEST(value, test, flag)
@ VIEW_LAYER_CRYPTOMATTE_MATERIAL
@ VIEW_LAYER_CRYPTOMATTE_ASSET
@ VIEW_LAYER_CRYPTOMATTE_OBJECT
#define EEVEE_RENDER_PASS_MAX_BIT
eViewLayerEEVEEPassType
@ EEVEE_RENDER_PASS_UNUSED_8
@ EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL
@ EEVEE_RENDER_PASS_AO
@ EEVEE_RENDER_PASS_NORMAL
@ EEVEE_RENDER_PASS_UNUSED_14
@ EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT
@ EEVEE_RENDER_PASS_DIFFUSE_LIGHT
@ EEVEE_RENDER_PASS_VOLUME_LIGHT
@ EEVEE_RENDER_PASS_AOV
@ EEVEE_RENDER_PASS_DIFFUSE_COLOR
@ EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET
@ EEVEE_RENDER_PASS_ENVIRONMENT
@ EEVEE_RENDER_PASS_COMBINED
@ EEVEE_RENDER_PASS_SPECULAR_LIGHT
@ EEVEE_RENDER_PASS_VECTOR
@ EEVEE_RENDER_PASS_SPECULAR_COLOR
@ EEVEE_RENDER_PASS_EMIT
@ EEVEE_RENDER_PASS_DEPTH
@ EEVEE_RENDER_PASS_MIST
@ EEVEE_RENDER_PASS_TRANSPARENT
@ EEVEE_RENDER_PASS_SHADOW
@ EEVEE_RENDER_PASS_POSITION
@ AOV_TYPE_COLOR
@ AOV_TYPE_VALUE
@ AOV_CONFLICT
struct ViewLayerAOV ViewLayerAOV
@ R_MBLUR
@ R_ALPHAPREMUL
@ WO_MIST_QUADRATIC
@ WO_MIST_INVERSE_QUADRATIC
@ WO_MIST_LINEAR
static AppView * view
int GPU_max_texture_size()
void GPU_framebuffer_viewport_set(blender::gpu::FrameBuffer *fb, int x, int y, int width, int height)
void GPU_framebuffer_clear_color(blender::gpu::FrameBuffer *fb, const float clear_col[4])
void GPU_framebuffer_bind(blender::gpu::FrameBuffer *fb)
@ GPU_DEVICE_INTEL
bool GPU_type_matches(GPUDeviceType device, GPUOSType os, GPUDriverType driver)
@ GPU_DRIVER_ANY
@ GPU_OS_MAC
@ GPU_PRIM_TRIS
@ GPU_BARRIER_TEXTURE_FETCH
Definition GPU_state.hh:37
@ GPU_BARRIER_SHADER_IMAGE_ACCESS
Definition GPU_state.hh:35
@ GPU_BARRIER_TEXTURE_UPDATE
Definition GPU_state.hh:39
void GPU_memory_barrier(GPUBarrier barrier)
Definition gpu_state.cc:326
int GPU_texture_height(const blender::gpu::Texture *texture)
blender::gpu::TextureFormat GPU_texture_format(const blender::gpu::Texture *texture)
int GPU_texture_width(const blender::gpu::Texture *texture)
@ GPU_DATA_FLOAT
void * GPU_texture_read(blender::gpu::Texture *texture, eGPUDataFormat data_format, int mip_level)
@ GPU_SAMPLER_FILTERING_LINEAR
long long int int64_t
unsigned long long int uint64_t
void reset()
clear internal cached data and reset random seed
bool contains(const Key &key) const
Definition BLI_set.hh:310
int64_t size() const
void append(const T &value)
const T & last(const int64_t n=0) const
bool is_empty() const
bool contains(const Key &key) const
Definition BLI_set.hh:310
int64_t size() const
void submit(PassSimple &pass, View &view)
void acquire(int2 extent, blender::gpu::TextureFormat format, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL)
bool ensure_layer_views(bool cube_as_array=false)
gpu::Texture * layer_view(int layer)
static View & default_get()
Definition draw_view.cc:317
void bind_resources(U &resources)
Definition draw_pass.hh:449
void shader_set(gpu::Shader *shader)
void bind_texture(const char *name, gpu::Texture *texture, GPUSamplerState state=sampler_auto)
void specialize_constant(gpu::Shader *shader, const char *name, const float &data)
void bind_image(const char *name, gpu::Texture *image)
void dispatch(int group_len)
void state_set(DRWState state, int clip_plane_count=0)
void barrier(GPUBarrier type)
void bind_ubo(const char *name, gpu::UniformBuf *buffer)
void push_constant(const char *name, const float &data)
const CameraData & data_get() const
float2 pixel_jitter_get() const
int pass_id_get(eViewLayerEEVEEPassType pass_type) const
static bool pass_is_float3(eViewLayerEEVEEPassType pass_type)
gpu::Texture * get_pass_texture(eViewLayerEEVEEPassType pass_type, int layer_offset)
struct blender::eevee::Film::DepthState depth
static const Vector< std::string > pass_to_render_pass_names(eViewLayerEEVEEPassType pass_type, const ViewLayer *view_layer)
int cryptomatte_layer_len_get() const
static ePassStorageType pass_storage_type(eViewLayerEEVEEPassType pass_type)
float * read_pass(eViewLayerEEVEEPassType pass_type, int layer_offset)
void write_viewport_compositor_passes()
static int overscan_pixels_get(float overscan, int2 extent)
void accumulate(View &view, gpu::Texture *combined_final_tx)
void init(const int2 &full_extent, const rcti *output_rect)
float * read_aov(ViewLayerAOV *aov)
StorageBuffer< AOVsInfoData > aovs_info
Definition eevee_film.hh:59
eViewLayerEEVEEPassType enabled_passes_get() const
static constexpr bool use_box_filter
Definition eevee_film.hh:61
gpu::Texture * get_aov_texture(ViewLayerAOV *aov)
UniformDataModule uniform_data
static float2 sample_disk(const float2 &rand)
uint64_t sample_count() const
static float2 sample_spiral(const float2 &rand)
virtual void init(const shader::ShaderCreateInfo &info, bool is_batch_compilation)=0
nullptr float
#define offsetof(t, d)
blender::draw::TextureFromPool & DRW_viewport_pass_texture_get(const char *pass_name)
blender::draw::Manager * DRW_manager_get()
@ DRW_STATE_NO_DRAW
Definition draw_state.hh:27
@ DRW_STATE_DEPTH_GREATER_EQUAL
Definition draw_state.hh:41
@ DRW_STATE_WRITE_DEPTH
Definition draw_state.hh:29
@ DRW_STATE_WRITE_COLOR
Definition draw_state.hh:30
@ DRW_STATE_DEPTH_ALWAYS
Definition draw_state.hh:36
#define FILM_GROUP_SIZE
#define ENABLE_FROM_LEGACY(name_legacy, name_eevee)
#define FILM_PRECOMP_SAMPLE_MAX
#define AOV_MAX
#define filter
Set< std::string > get_used_passes(const Scene &scene, const ViewLayer *view_layer)
detail::Pass< command::DrawCommandBuf > PassSimple
@ FILM_PASS_CONVERT_CRYPTOMATTE
static eShaderType get_write_pass_shader_type(eViewLayerEEVEEPassType pass_type)
static float film_filter_weight(float filter_radius, float sample_distance_sqr)
bool operator!=(const CameraData &a, const CameraData &b)
static eShaderType get_aov_write_pass_shader_type(const ViewLayerAOV *aov)
static eViewLayerEEVEEPassType enabled_passes(const ViewLayer *view_layer)
bool operator==(const CameraData &a, const CameraData &b)
static eViewLayerEEVEEPassType get_viewport_compositor_enabled_passes(const Set< std::string > &viewport_compositor_needed_passes, const ViewLayer *view_layer)
T length_squared(const VecBase< T, Size > &a)
MatBase< T, NumCol, NumRow > scale(const MatBase< T, NumCol, NumRow > &mat, const VectorT &scale)
T floor(const T &a)
VecBase< T, Size > divide_ceil(const VecBase< T, Size > &a, const VecBase< T, Size > &b)
T max(const T &a, const T &b)
VecBase< uint32_t, 4 > uint4
bool assign_if_different(T &old_value, T new_value)
VecBase< float, 4 > float4
VecBase< int32_t, 2 > int2
VecBase< float, 2 > float2
VecBase< int32_t, 3 > int3
VecBase< float, 3 > float3
static void update(bNodeTree *ntree)
#define hash
Definition noise_c.cc:154
const char * name
#define fabsf
#define ceilf
#define FLT_MAX
Definition stdcycles.h:14
blender::gpu::FrameBuffer * default_fb
blender::gpu::Texture * color
ColorManagedViewSettings view_settings
struct RenderData r
struct World * world
struct ViewLayerEEVEE eevee
short cryptomatte_flag
float miststa
FilmSample samples[FILM_PRECOMP_SAMPLE_MAX]
float x
int ymin
int xmin
i
Definition text_draw.cc:230