Blender V4.3
eevee_view.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2021 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
18#include "BKE_global.hh"
19#include "DRW_render.hh"
20
21#include "eevee_instance.hh"
22
23#include "eevee_view.hh"
24
25namespace blender::eevee {
26
27/* -------------------------------------------------------------------- */
32
/* NOTE(review): this listing comes from a generated-docs capture; the signature line is
 * missing here. From the members touched (`extent_`, `is_enabled_`, `main_view_`) this is
 * presumably `void ShadingView::sync()` — confirm against the original eevee_view.cc. */
/* Compute this view's render-target extent, decide whether the view is enabled, and sync
 * the main view matrices from the camera data. */
34{
35 int2 render_extent = inst_.film.render_extent_get();
36
/* Panoramic rendering is currently disabled: the condition is hard-coded to `false`. */
37 if (false /* inst_.camera.is_panoramic() */) {
/* Use a 64-bit product to avoid overflow on very large render extents. */
38 int64_t render_pixel_count = render_extent.x * int64_t(render_extent.y);
39 /* Divide pixel count between the 6 views. Rendering to a square target. */
40 extent_[0] = extent_[1] = ceilf(sqrtf(1 + (render_pixel_count / 6)));
41 /* TODO(@fclem): Clip unused views here. */
42 is_enabled_ = true;
43 }
44 else {
45 extent_ = render_extent;
46 /* Only enable -Z view. */
47 is_enabled_ = (StringRefNull(name_) == "negZ_view");
48 }
49
/* Disabled views skip the matrix sync entirely. */
50 if (!is_enabled_) {
51 return;
52 }
53
54 /* Create views. */
55 const CameraData &cam = inst_.camera.data_get();
56
57 float4x4 viewmat, winmat;
58 if (false /* inst_.camera.is_panoramic() */) {
59 /* TODO(@fclem) Over-scans. */
60 /* For now a mandatory 5% over-scan for DoF. */
61 float side = cam.clip_near * 1.05f;
62 float near = cam.clip_near;
63 float far = cam.clip_far;
64 winmat = math::projection::perspective(-side, side, -side, side, near, far);
/* Apply the cube-face orientation on top of the camera view matrix. */
65 viewmat = face_matrix_ * cam.viewmat;
66 }
67 else {
68 viewmat = cam.viewmat;
69 winmat = cam.winmat;
70 }
71
72 main_view_.sync(viewmat, winmat);
73}
74
/* NOTE(review): generated-docs capture — the signature line (presumably
 * `void ShadingView::render()`) is missing, and several wrapped lines were dropped
 * (doc lines 83, 94, 96, 100-101, 104-108, 125, 164): the color attachments of the
 * `ensure()` calls, the trailing `gbuf.acquire()` arguments, and a few statements.
 * Confirm against the original file before relying on this listing. */
76{
77 if (!is_enabled_) {
78 return;
79 }
80
/* Refresh the jittered / render view matrices before any pass samples them. */
81 update_view();
82
84
85 /* Needs to be before planar_probes because it needs correct crypto-matte & render-pass buffers
86 * to reuse the same deferred shaders. */
87 RenderBuffers &rbufs = inst_.render_buffers;
88 rbufs.acquire(extent_);
89
90 /* Needs to be before anything else because it query its own gbuffer. */
91 inst_.planar_probes.set_view(render_view_, extent_);
92
/* NOTE(review): color-attachment arguments of these two `ensure()` calls are missing. */
93 combined_fb_.ensure(GPU_ATTACHMENT_TEXTURE(rbufs.depth_tx),
95 prepass_fb_.ensure(GPU_ATTACHMENT_TEXTURE(rbufs.depth_tx),
97
98 GBuffer &gbuf = inst_.gbuffer;
/* NOTE(review): trailing `acquire()` arguments (layer counts) are missing. */
99 gbuf.acquire(extent_,
102
103 gbuffer_fb_.ensure(GPU_ATTACHMENT_TEXTURE(rbufs.depth_tx),
109
110 /* If camera has any motion, compute motion vector in the film pass. Otherwise, we avoid float
111 * precision issue by setting the motion of all static geometry to 0. */
112 float4 clear_velocity = float4(inst_.velocity.camera_has_motion() ? VELOCITY_INVALID : 0.0f);
113
114 GPU_framebuffer_bind(prepass_fb_);
115 GPU_framebuffer_clear_color(prepass_fb_, clear_velocity);
116 /* Alpha stores transmittance. So start at 1. */
117 float4 clear_color = {0.0f, 0.0f, 0.0f, 1.0f};
118 GPU_framebuffer_bind(combined_fb_);
119 GPU_framebuffer_clear_color_depth(combined_fb_, clear_color, 1.0f);
120 inst_.pipelines.background.clear(render_view_);
121
122 /* TODO(fclem): Move it after the first prepass (and hiz update) once pipeline is stabilized. */
123 inst_.lights.set_view(render_view_, extent_);
124
126
127 inst_.volume.draw_prepass(main_view_);
128
129 /* TODO(Miguel Pozo): Deferred and forward prepass should happen before the GBuffer pass. */
130 inst_.pipelines.deferred.render(main_view_,
131 render_view_,
132 prepass_fb_,
133 combined_fb_,
134 gbuffer_fb_,
135 extent_,
136 rt_buffer_opaque_,
137 rt_buffer_refract_);
138
139 inst_.pipelines.background.render(render_view_);
140
/* The G-buffer is no longer needed once the deferred pipeline has consumed it. */
141 inst_.gbuffer.release();
142
143 inst_.volume.draw_compute(main_view_, extent_);
144
145 inst_.ambient_occlusion.render_pass(render_view_);
146
147 inst_.pipelines.forward.render(render_view_, prepass_fb_, combined_fb_, extent_);
148
149 render_transparent_pass(rbufs);
150
/* Debug overlays and probe preview drawing into the combined framebuffer. */
151 inst_.lights.debug_draw(render_view_, combined_fb_);
152 inst_.hiz_buffer.debug_draw(render_view_, combined_fb_);
153 inst_.shadows.debug_draw(render_view_, combined_fb_);
154 inst_.volume_probes.viewport_draw(render_view_, combined_fb_);
155 inst_.sphere_probes.viewport_draw(render_view_, combined_fb_);
156 inst_.planar_probes.viewport_draw(render_view_, combined_fb_);
157
/* Apply post effects (motion blur / DoF) then accumulate the result into the film. */
158 GPUTexture *combined_final_tx = render_postfx(rbufs.combined_tx);
159 inst_.film.accumulate(jitter_view_, combined_final_tx);
160
161 rbufs.release();
162 postfx_tx_.release();
163
165}
166
/* Render the transparent-only pass into its own framebuffer. Only runs when the
 * transparent render-pass is requested (`transparent_id != -1`). */
/* NOTE(review): generated-docs capture — the attachment arguments of
 * `transparent_fb_.ensure(` (doc lines 171-172) are missing; confirm against the
 * original file. */
167void ShadingView::render_transparent_pass(RenderBuffers &rbufs)
168{
169 if (rbufs.data.transparent_id != -1) {
170 transparent_fb_.ensure(
173 /* Alpha stores transmittance. So start at 1. */
174 float4 clear_color = {0.0f, 0.0f, 0.0f, 1.0f};
175 GPU_framebuffer_bind(transparent_fb_);
176 GPU_framebuffer_clear_color(transparent_fb_, clear_color);
/* Re-run the forward pipeline, targeting the transparent framebuffer instead of the
 * combined one. */
177 inst_.pipelines.forward.render(render_view_, prepass_fb_, transparent_fb_, rbufs.extent_get());
178 }
179}
180
/* Apply the screen-space post effects (motion blur, depth of field) to `input_tx` and
 * return the texture holding the final result. Returns `input_tx` unchanged when no
 * post effect is enabled. */
181GPUTexture *ShadingView::render_postfx(GPUTexture *input_tx)
182{
183 if (!inst_.depth_of_field.postfx_enabled() && !inst_.motion_blur.postfx_enabled()) {
184 return input_tx;
185 }
186 postfx_tx_.acquire(extent_, GPU_RGBA16F);
187
188 /* Fix a sync bug on AMD + Mesa when volume + motion blur create artifacts
189 * except if there is a clear event between them. */
/* NOTE(review): the tail of this condition (doc lines 191-192) is missing from this
 * capture — presumably a `GPU_type_matches_ex(GPU_DEVICE_ATI, GPU_OS_UNIX,
 * GPU_DRIVER_OFFICIAL, ...)` check given the referenced enums; confirm against the
 * original file. */
190 if (inst_.volume.enabled() && inst_.motion_blur.postfx_enabled() &&
193 {
194 postfx_tx_.clear(float4(0.0f));
195 }
196
197 GPUTexture *output_tx = postfx_tx_;
198
199 /* Swapping is done internally. Actual output is set to the next input. */
200 inst_.motion_blur.render(render_view_, &input_tx, &output_tx);
201 inst_.depth_of_field.render(render_view_, &input_tx, &output_tx, dof_buffer_);
202
/* After the internal ping-pong swaps, `input_tx` points at the last written result. */
203 return input_tx;
204}
205
206void ShadingView::update_view()
207{
208 const Film &film = inst_.film;
209
210 float4x4 viewmat = main_view_.viewmat();
211 float4x4 winmat = main_view_.winmat();
212
213 if (film.scaling_factor_get() > 1) {
214 /* This whole section ensures that the render target pixel grid will match the film pixel pixel
215 * grid. Otherwise the weight computation inside the film accumulation will be wrong. */
216
217 float left, right, bottom, top, near, far;
218 projmat_dimensions(winmat.ptr(), &left, &right, &bottom, &top, &near, &far);
219 const float2 bottom_left_with_overscan = float2(left, bottom);
220 const float2 top_right_with_overscan = float2(right, top);
221 const float2 render_size_with_overscan = top_right_with_overscan - bottom_left_with_overscan;
222
223 float2 bottom_left = bottom_left_with_overscan;
224 float2 top_right = top_right_with_overscan;
225 float2 render_size = render_size_with_overscan;
226
227 float overscan = inst_.camera.overscan();
228 if (overscan > 0.0f) {
229 /* Size of overscan on the screen. */
230 const float max_size_with_overscan = math::reduce_max(render_size);
231 const float max_size_original = max_size_with_overscan / (1.0f + 2.0f * overscan);
232 const float overscan_size = (max_size_with_overscan - max_size_original) / 2.0f;
233 /* Undo overscan to get the initial dimension of the screen. */
234 bottom_left = bottom_left_with_overscan + overscan_size;
235 top_right = top_right_with_overscan - overscan_size;
236 /* Render target size on the screen (without overscan). */
237 render_size = top_right - bottom_left;
238 }
239
240 /* Final pixel size on the screen. */
241 const float2 pixel_size = render_size / float2(film.film_extent_get());
242
243 /* Render extent in final film pixel unit. */
244 const int2 render_extent = film.render_extent_get() * film.scaling_factor_get();
245 const int overscan_pixels = film.render_overscan_get() * film.scaling_factor_get();
246
247 const float2 render_bottom_left = bottom_left - pixel_size * float(overscan_pixels);
248 const float2 render_top_right = render_bottom_left + pixel_size * float2(render_extent);
249
250 if (main_view_.is_persp()) {
251 winmat = math::projection::perspective(render_bottom_left.x,
252 render_top_right.x,
253 render_bottom_left.y,
254 render_top_right.y,
255 near,
256 far);
257 }
258 else {
259 winmat = math::projection::orthographic(render_bottom_left.x,
260 render_top_right.x,
261 render_bottom_left.y,
262 render_top_right.y,
263 near,
264 far);
265 }
266 }
267
268 /* Anti-Aliasing / Super-Sampling jitter. */
269 float2 jitter = inst_.film.pixel_jitter_get() / float2(extent_);
270 /* Transform to NDC space. */
271 jitter *= 2.0f;
272
273 window_translate_m4(winmat.ptr(), winmat.ptr(), UNPACK2(jitter));
274 jitter_view_.sync(viewmat, winmat);
275
276 /* FIXME(fclem): The offset may be noticeably large and the culling might make object pop
277 * out of the blurring radius. To fix this, use custom enlarged culling matrix. */
278 inst_.depth_of_field.jitter_apply(winmat, viewmat);
279 render_view_.sync(viewmat, winmat);
280}
281
284/* -------------------------------------------------------------------- */
/* NOTE(review): generated-docs capture — the signature line is missing; from the
 * sphere-probe world-update logic this is presumably `void CaptureView::render_world()`.
 * The matching `GPU_debug_group_end()` (doc line 318) is also missing. Confirm against
 * the original file. */
/* Capture the world background into the sphere-probe cubemap and remap it into the
 * octahedral probe atlas. */
289{
/* Nothing to do if no world update is pending. */
290 const auto update_info = inst_.sphere_probes.world_update_info_pop();
291 if (!update_info.has_value()) {
292 return;
293 }
294
295 View view = {"Capture.View"};
296 GPU_debug_group_begin("World.Capture");
297
298 if (update_info->do_render) {
/* Render once per cubemap face with a 90 degree frustum (symmetric bounds equal to the
 * near clip distance). */
299 for (int face : IndexRange(6)) {
300 float4x4 view_m4 = cubeface_mat(face);
301 float4x4 win_m4 = math::projection::perspective(-update_info->clipping_distances.x,
302 update_info->clipping_distances.x,
303 -update_info->clipping_distances.x,
304 update_info->clipping_distances.x,
305 update_info->clipping_distances.x,
306 update_info->clipping_distances.y);
307 view.sync(view_m4, win_m4);
308
/* No depth attachment: the world pass only writes color. */
309 combined_fb_.ensure(GPU_ATTACHMENT_NONE,
310 GPU_ATTACHMENT_TEXTURE_CUBEFACE(inst_.sphere_probes.cubemap_tx_, face));
311 GPU_framebuffer_bind(combined_fb_);
312 inst_.pipelines.world.render(view);
313 }
314
/* Convert the cubemap result into the octahedral projection used by the atlas. */
315 inst_.sphere_probes.remap_to_octahedral_projection(update_info->atlas_coord, true);
316 }
317
319}
320
/* NOTE(review): generated-docs capture — the signature line (presumably
 * `void CaptureView::render_probes()`) is missing, along with several wrapped lines
 * (doc lines 330, 335-338, 341-342, 355, 358-363, 372, 378): the uniform-data pushes
 * after toggling `is_sphere_probe`, and the framebuffer / G-buffer `ensure()` /
 * `acquire()` argument lists. Confirm against the original file. */
/* Capture the scene into each pending sphere probe's cubemap and remap the result into
 * the octahedral probe atlas. */
322{
323 Framebuffer prepass_fb;
324 View view = {"Capture.View"};
/* Drain the queue of pending sphere-probe updates. */
325 while (const auto update_info = inst_.sphere_probes.probe_update_info_pop()) {
326 GPU_debug_group_begin("Probe.Capture");
327
/* Flag the pipelines as rendering a sphere probe for the duration of the captures. */
328 if (!inst_.pipelines.data.is_sphere_probe) {
329 inst_.pipelines.data.is_sphere_probe = true;
331 }
332
333 int2 extent = int2(update_info->cube_target_extent);
334 inst_.render_buffers.acquire(extent);
335
339
340 inst_.gbuffer.acquire(extent,
343
/* Render once per cubemap face, centered on the probe position, with a 90 degree
 * frustum (symmetric bounds equal to the near clip distance). */
344 for (int face : IndexRange(6)) {
345 float4x4 view_m4 = cubeface_mat(face);
346 view_m4 = math::translate(view_m4, -update_info->probe_pos);
347 float4x4 win_m4 = math::projection::perspective(-update_info->clipping_distances.x,
348 update_info->clipping_distances.x,
349 -update_info->clipping_distances.x,
350 update_info->clipping_distances.x,
351 update_info->clipping_distances.x,
352 update_info->clipping_distances.y);
353 view.sync(view_m4, win_m4);
354
356 GPU_ATTACHMENT_TEXTURE_CUBEFACE(inst_.sphere_probes.cubemap_tx_, face));
357
359 GPU_ATTACHMENT_TEXTURE_CUBEFACE(inst_.sphere_probes.cubemap_tx_, face),
364
365 GPU_framebuffer_bind(combined_fb_);
/* Alpha stores transmittance, so clear to 1 (same convention as the shading views). */
366 GPU_framebuffer_clear_color_depth(combined_fb_, float4(0.0f, 0.0f, 0.0f, 1.0f), 1.0f);
367 inst_.pipelines.probe.render(view, prepass_fb, combined_fb_, gbuffer_fb_, extent);
368 }
369
370 inst_.render_buffers.release();
371 inst_.gbuffer.release();
/* Convert the captured cubemap into the octahedral projection used by the atlas. */
373 inst_.sphere_probes.remap_to_octahedral_projection(update_info->atlas_coord, false);
374 }
375
/* Restore the pipeline state flag once all probes are processed. */
376 if (inst_.pipelines.data.is_sphere_probe) {
377 inst_.pipelines.data.is_sphere_probe = false;
379 }
380}
381
384/* -------------------------------------------------------------------- */
/* NOTE(review): generated-docs capture — the signature line (presumably
 * `void LookdevView::render()`), the start of the projection-matrix construction
 * (doc line 397) and the closing `GPU_debug_group_end()` (doc line 405) are missing.
 * Confirm against the original file. */
/* Draw the lookdev reference spheres and their on-screen display. */
389{
390 if (!inst_.lookdev.enabled_) {
391 return;
392 }
393 GPU_debug_group_begin("Lookdev");
394
/* Fit the projection tightly around the lookdev sphere radius, using the camera's near
 * clip distance. NOTE(review): the projection call on the missing line 397 cannot be
 * identified from this capture. */
395 const float radius = inst_.lookdev.sphere_radius_;
396 const float clip = inst_.camera.data_get().clip_near;
398 -radius, radius, -radius, radius, clip);
399 const float4x4 &view_m4 = inst_.camera.data_get().viewmat;
400 view_.sync(view_m4, win_m4);
401
402 inst_.lookdev.draw(view_);
403 inst_.lookdev.display();
404
406}
407
410} // namespace blender::eevee
void window_translate_m4(float winmat[4][4], float perspmat[4][4], float x, float y)
void projmat_dimensions(const float winmat[4][4], float *r_left, float *r_right, float *r_bottom, float *r_top, float *r_near, float *r_far)
#define UNPACK2(a)
void GPU_debug_group_end()
Definition gpu_debug.cc:33
void GPU_debug_group_begin(const char *name)
Definition gpu_debug.cc:22
#define GPU_ATTACHMENT_TEXTURE(_texture)
#define GPU_ATTACHMENT_TEXTURE_CUBEFACE(_texture, _face)
#define GPU_ATTACHMENT_NONE
void GPU_framebuffer_clear_color_depth(GPUFrameBuffer *fb, const float clear_col[4], float clear_depth)
void GPU_framebuffer_bind(GPUFrameBuffer *framebuffer)
void GPU_framebuffer_clear_color(GPUFrameBuffer *fb, const float clear_col[4])
#define GPU_ATTACHMENT_TEXTURE_LAYER(_texture, _layer)
@ GPU_DRIVER_OFFICIAL
bool GPU_type_matches_ex(eGPUDeviceType device, eGPUOSType os, eGPUDriverType driver, eGPUBackendType backend)
@ GPU_OS_UNIX
@ GPU_DEVICE_ATI
Definition film.h:30
void ensure(GPUAttachment depth=GPU_ATTACHMENT_NONE, GPUAttachment color1=GPU_ATTACHMENT_NONE, GPUAttachment color2=GPU_ATTACHMENT_NONE, GPUAttachment color3=GPU_ATTACHMENT_NONE, GPUAttachment color4=GPU_ATTACHMENT_NONE, GPUAttachment color5=GPU_ATTACHMENT_NONE, GPUAttachment color6=GPU_ATTACHMENT_NONE, GPUAttachment color7=GPU_ATTACHMENT_NONE, GPUAttachment color8=GPU_ATTACHMENT_NONE)
void acquire(int2 extent, eGPUTextureFormat format, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL)
void clear(float4 values)
GPUTexture * layer_view(int layer)
bool is_persp(int view_id=0) const
Definition draw_view.hh:90
const float4x4 & winmat(int view_id=0) const
Definition draw_view.hh:145
const float4x4 & viewmat(int view_id=0) const
Definition draw_view.hh:133
void sync(const float4x4 &view_mat, const float4x4 &win_mat, int view_id=0)
Definition draw_view.cc:20
const CameraData & data_get() const
void render(View &main_view, View &render_view, Framebuffer &prepass_fb, Framebuffer &combined_fb, Framebuffer &gbuffer_fb, int2 extent, RayTraceBuffer &rt_buffer_opaque_layer, RayTraceBuffer &rt_buffer_refract_layer)
void render(View &view, Framebuffer &prepass_fb, Framebuffer &combined_fb, Framebuffer &gbuffer_fb, int2 extent)
void render(View &view, GPUTexture **input_tx, GPUTexture **output_tx, DepthOfFieldBuffer &dof_buffer)
void jitter_apply(float4x4 &winmat, float4x4 &viewmat)
float2 pixel_jitter_get() const
void accumulate(View &view, GPUTexture *combined_final_tx)
int2 render_extent_get() const
void render(View &view, Framebuffer &prepass_fb, Framebuffer &combined_fb, int2 extent)
void debug_draw(View &view, GPUFrameBuffer *view_fb)
void set_source(GPUTexture **texture, int layer=-1)
VolumeProbeModule volume_probes
SphereProbeModule sphere_probes
AmbientOcclusion ambient_occlusion
PlanarProbeModule planar_probes
UniformDataModule uniform_data
MotionBlurModule motion_blur
void set_view(View &view, const int2 extent)
void debug_draw(View &view, GPUFrameBuffer *view_fb)
void render(View &view, GPUTexture **input_tx, GPUTexture **output_tx)
void set_view(const draw::View &main_view, int2 main_view_extent)
void viewport_draw(View &view, GPUFrameBuffer *view_fb)
void debug_draw(View &view, GPUFrameBuffer *view_fb)
void viewport_draw(View &view, GPUFrameBuffer *view_fb)
void draw_compute(View &main_view, int2 extent)
void draw_prepass(View &main_view)
void viewport_draw(View &view, GPUFrameBuffer *view_fb)
#define ceilf(x)
#define sqrtf(x)
void DRW_stats_group_start(const char *name)
void DRW_stats_group_end()
draw_view in_light_buf[] float
#define VELOCITY_INVALID
uint top
static int left
float4x4 cubeface_mat(int face)
MatBase< T, 4, 4 > orthographic(T left, T right, T bottom, T top, T near_clip, T far_clip)
Create an orthographic projection matrix using OpenGL coordinate convention: Maps each axis range to ...
MatBase< T, 4, 4 > orthographic_infinite(T left, T right, T bottom, T top)
Create an orthographic projection matrix using OpenGL coordinate convention: Maps each axis range to ...
MatBase< T, 4, 4 > perspective(T left, T right, T bottom, T top, T near_clip, T far_clip)
Create a perspective projection matrix using OpenGL coordinate convention: Maps each axis range to [-...
T reduce_max(const VecBase< T, Size > &a)
MatBase< T, NumCol, NumRow > translate(const MatBase< T, NumCol, NumRow > &mat, const VectorT &translation)
MatBase< float, 4, 4 > float4x4
VecBase< float, 4 > float4
VecBase< int32_t, 2 > int2
VecBase< float, 2 > float2
__int64 int64_t
Definition stdint.h:89
void acquire(int2 extent, int data_count, int normal_count)
float x
float y