Blender V5.0
path_trace_work.h
/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
 *
 * SPDX-License-Identifier: Apache-2.0 */

#pragma once

#include "film/pass_accessor.h"
#include "scene/pass.h"
#include "session/buffers.h"
#include "util/unique_ptr.h"

CCL_NAMESPACE_BEGIN

class BufferParams;
class Device;
class DeviceScene;
class Film;
class PathTraceDisplay;
class RenderBuffers;

class PathTraceWork {
 public:
  struct RenderStatistics {
    float occupancy = 1.0f;
  };

  /* Create a path trace work which best fits the device.
   *
   * The cancel request flag is used for a cheap check whether cancel is to be performed as soon
   * as possible. This could be, for example, a request to cancel rendering on camera navigation
   * in the viewport. */
  static unique_ptr<PathTraceWork> create(Device *device,
                                          Film *film,
                                          DeviceScene *device_scene,
                                          const bool *cancel_requested_flag);

  virtual ~PathTraceWork();

  /* Access the render buffers.
   *
   * Only supposed to be used by the PathTrace to update buffer allocation and slicing to
   * correspond to the big tile size and relative device performance. */
  RenderBuffers *get_render_buffers();

  /* Set effective parameters of the big tile and the work itself. */
  void set_effective_buffer_params(const BufferParams &effective_full_params,
                                   const BufferParams &effective_big_tile_params,
                                   const BufferParams &effective_buffer_params);

  /* Check whether the big tile is being worked on by multiple path trace works. */
  bool has_multiple_works() const;

  /* Allocate working memory for execution. Must be called before init_execution(). */
  virtual void alloc_work_memory() {}

  /* Initialize execution of kernels.
   * Will ensure that all device queues are initialized for execution.
   *
   * This method is to be called after any change in the scene. It is not needed to call it
   * prior to every call of `render_samples()`. */
  virtual void init_execution() = 0;

  /* Render the given number of samples as a synchronous blocking call.
   * The samples are added to the render buffer associated with this work. */
  virtual void render_samples(RenderStatistics &statistics,
                              const int start_sample,
                              const int samples_num,
                              const int sample_offset) = 0;

  /* Copy the render result from this work to the corresponding place of the GPU display.
   *
   * The `pass_mode` indicates whether to access the denoised or noisy version of the display
   * pass. The noisy pass mode will be passed here when it is known that the buffer does not
   * have denoised passes yet (because the denoiser did not run). If the denoised pass is
   * requested and the denoiser is not used, then this function will fall back to the noisy pass
   * instead. */
  virtual void copy_to_display(PathTraceDisplay *display,
                               PassMode pass_mode,
                               const int num_samples) = 0;

  virtual void destroy_gpu_resources(PathTraceDisplay *display) = 0;

  /* Copy data from/to the given render buffers.
   * Will copy pixels from a corresponding place (from the multi-device point of view) of the
   * render buffers, and copy the work's render buffers to the corresponding place of the
   * destination. */

  /* Notes:
   * - Copies the work's render buffer from the device.
   * - Copies into the CPU-side buffer of the given buffers.
   * - Does not copy the buffer to its device. */
  void copy_to_render_buffers(RenderBuffers *render_buffers);

  /* Notes:
   * - Does not copy the given render buffers from the device.
   * - Copies the work's render buffer to its device. */
  void copy_from_render_buffers(const RenderBuffers *render_buffers);

  /* Special version of `copy_from_render_buffers()` which only copies denoised passes from the
   * given render buffers, leaving the rest of the passes intact.
   *
   * The same notes about device copying apply to this call as well. */
  void copy_from_denoised_render_buffers(const RenderBuffers *render_buffers);

  /* Copy render buffers to/from the device using an appropriate device queue when needed, so
   * that things are executed in order with `render_samples()`. */
  virtual bool copy_render_buffers_from_device() = 0;
  virtual bool copy_render_buffers_to_device() = 0;

  /* Zero the render buffers, using an appropriate device queue when needed so that things are
   * executed in order with `render_samples()`. */
  virtual bool zero_render_buffers() = 0;

  /* Access pixels rendered by this work and copy them to the corresponding location in the
   * destination.
   *
   * NOTE: Does not perform a copy of buffers from the device. Use
   * `copy_render_tile_from_device()` to update host-side data. */
  bool get_render_tile_pixels(const PassAccessor &pass_accessor,
                              const PassAccessor::Destination &destination);

  /* Set pass data for baking. */
  bool set_render_tile_pixels(PassAccessor &pass_accessor, const PassAccessor::Source &source);

  /* Perform a convergence test on the render buffer, and filter the convergence mask.
   * Returns the number of active pixels (the ones which did not converge yet). */
  virtual int adaptive_sampling_converge_filter_count_active(const float threshold,
                                                             bool reset) = 0;

  /* Denoise Volume Scattering Probability Guiding buffers. */
  virtual void denoise_volume_guiding_buffers() = 0;

  /* Run cryptomatte pass post-processing kernels. */
  virtual void cryptomatte_postproces() = 0;

  /* Cheap-ish check whether cancel of rendering is requested and rendering is to be stopped as
   * soon as possible, without waiting for any samples to be finished. */
  bool is_cancel_requested() const
  {
    /* NOTE: Rely on the fact that on an x86 CPU reading a scalar can happen without an atomic,
     * even in a threaded environment. */
    return *cancel_requested_flag_;
  }

  /* Access the device which is used to path trace this work. */
  Device *get_device() const
  {
    return device_;
  }

#if defined(WITH_PATH_GUIDING)
  /* Initializes the per-thread guiding kernel data. */
  virtual void guiding_init_kernel_globals(void * /*unused*/,
                                           void * /*unused*/,
                                           const bool /*unused*/)
  {
  }
#endif

 protected:
  PathTraceWork(Device *device,
                Film *film,
                DeviceScene *device_scene,
                const bool *cancel_requested_flag);

  PassAccessor::PassAccessInfo get_display_pass_access_info(PassMode pass_mode) const;

  /* Get a destination whose offset and stride are configured so that writing to it will write
   * to the proper location of the GPU display texture, taking the current tile and device slice
   * into account. */
  PassAccessor::Destination get_display_destination_template(const PathTraceDisplay *display,
                                                             const PassMode mode) const;

  /* Device which will be used for path tracing.
   * Note that it is an actual render device (and never a multi-device). */
  Device *device_ = nullptr;

  /* Film is used to access display pass configuration for GPU display update.
   * Note that only fields which are not a part of kernel data can be accessed via the Film. */
  Film *film_ = nullptr;

  /* Device-side scene storage that may be used for integrator logic. */
  DeviceScene *device_scene_ = nullptr;

  /* Render buffers where sampling is being accumulated into, allocated for a fraction of the
   * big tile which is being rendered by this work.
   * It also defines a possible subset of a big tile in the case of multi-device rendering. */
  unique_ptr<RenderBuffers> buffers_;

  /* Effective parameters of the full, big tile, and current work render buffer.
   * The latter might be different from `buffers_->params` when there is a resolution divider
   * involved. */
  BufferParams effective_full_params_;
  BufferParams effective_big_tile_params_;
  BufferParams effective_buffer_params_;

  const bool *cancel_requested_flag_ = nullptr;
};

CCL_NAMESPACE_END
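For orientation, the intended call order of this API can be illustrated with a short sketch. This is a hypothetical driver loop, not code from PathTrace itself: the `device`, `film` and `device_scene` pointers, the sample count, and the batch size are all assumed to be set up by the caller.

/* Hypothetical driver; assumes `device`, `film` and `device_scene` exist. */
bool cancel_requested = false;
unique_ptr<PathTraceWork> work = PathTraceWork::create(
    device, film, device_scene, &cancel_requested);

work->alloc_work_memory(); /* Must precede init_execution(). */
work->init_execution();    /* Required again after any scene change. */

PathTraceWork::RenderStatistics statistics;
const int num_samples = 128;      /* Illustrative values. */
const int samples_per_batch = 16;

for (int sample = 0; sample < num_samples; sample += samples_per_batch) {
  if (work->is_cancel_requested()) {
    break; /* Cheap scalar flag check, no atomics involved. */
  }
  /* Synchronous blocking call; accumulates into the work's render buffers. */
  work->render_samples(statistics, sample, samples_per_batch, 0);
}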
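The adaptive sampling hook combines the convergence test, the mask filter, and the active-pixel count in a single call, which lets a scheduler stop early. A hedged sketch of how the return value could be used between batches; the threshold value is illustrative, not the one Cycles derives from scene settings:

/* Hypothetical early-out between batches; `work` is the PathTraceWork above. */
const float threshold = 0.01f;
const int num_active_pixels = work->adaptive_sampling_converge_filter_count_active(threshold,
                                                                                   false);
if (num_active_pixels == 0) {
  /* Every pixel converged below the threshold; no more samples are needed. */
}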
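For viewport updates, the pass-mode fallback documented on `copy_to_display()` means a caller can always request the denoised pass once a denoiser may have run. A minimal sketch, assuming `display` is a valid PathTraceDisplay and `PassMode` comes from scene/pass.h:

/* Falls back to the noisy pass when no denoised data exists yet. */
work->copy_to_display(display, PassMode::DENOISED, num_samples);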
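The asymmetry spelled out in the notes on `copy_to_render_buffers()` and `copy_from_render_buffers()` is easy to get wrong, so here is a hedged sketch of a host-side big-tile round trip. `works` and `big_tile_buffers` are assumed to be owned by the caller; this is an illustration of the copy directions, not the actual PathTrace denoising path.

/* Gather: each work downloads its buffer from its device and writes its
 * slice into the host-side big tile. No upload happens here. */
for (const unique_ptr<PathTraceWork> &work : works) {
  work->copy_to_render_buffers(big_tile_buffers);
}

/* ... process `big_tile_buffers` on the host, e.g. denoise ... */

/* Scatter: each work reads only the denoised passes of its slice and
 * uploads its own buffer back to its device. */
for (const unique_ptr<PathTraceWork> &work : works) {
  work->copy_from_denoised_render_buffers(big_tile_buffers);
}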