/* Blender V4.3 — path_trace_work.h
 * (Header text recovered from the generated "documentation of this file" page.) */
1/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
2 *
3 * SPDX-License-Identifier: Apache-2.0 */
4
5#pragma once
6
8#include "scene/pass.h"
9#include "session/buffers.h"
10#include "util/types.h"
11#include "util/unique_ptr.h"
12
14
15class BufferParams;
16class Device;
17class DeviceScene;
18class Film;
20class RenderBuffers;
21
23 public:
25 float occupancy = 1.0f;
26 };
27
28 /* Create path trace work which fits best the device.
29 *
30 * The cancel request flag is used for a cheap check whether cancel is to be performed as soon as
31 * possible. This could be, for example, request to cancel rendering on camera navigation in
32 * viewport. */
33 static unique_ptr<PathTraceWork> create(Device *device,
34 Film *film,
35 DeviceScene *device_scene,
36 bool *cancel_requested_flag);
37
38 virtual ~PathTraceWork();
39
40 /* Access the render buffers.
41 *
42 * Is only supposed to be used by the PathTrace to update buffer allocation and slicing to
43 * correspond to the big tile size and relative device performance. */
45
46 /* Set effective parameters of the big tile and the work itself. */
47 void set_effective_buffer_params(const BufferParams &effective_full_params,
48 const BufferParams &effective_big_tile_params,
49 const BufferParams &effective_buffer_params);
50
51 /* Check whether the big tile is being worked on by multiple path trace works. */
52 bool has_multiple_works() const;
53
54 /* Allocate working memory for execution. Must be called before init_execution(). */
55 virtual void alloc_work_memory(){};
56
57 /* Initialize execution of kernels.
58 * Will ensure that all device queues are initialized for execution.
59 *
60 * This method is to be called after any change in the scene. It is not needed to call it prior
61 * to an every call of the `render_samples()`. */
62 virtual void init_execution() = 0;
63
64 /* Render given number of samples as a synchronous blocking call.
65 * The samples are added to the render buffer associated with this work. */
66 virtual void render_samples(RenderStatistics &statistics,
67 int start_sample,
68 int samples_num,
69 int sample_offset) = 0;
70
71 /* Copy render result from this work to the corresponding place of the GPU display.
72 *
73 * The `pass_mode` indicates whether to access denoised or noisy version of the display pass. The
74 * noisy pass mode will be passed here when it is known that the buffer does not have denoised
75 * passes yet (because denoiser did not run). If the denoised pass is requested and denoiser is
76 * not used then this function will fall-back to the noisy pass instead. */
77 virtual void copy_to_display(PathTraceDisplay *display, PassMode pass_mode, int num_samples) = 0;
78
79 virtual void destroy_gpu_resources(PathTraceDisplay *display) = 0;
80
81 /* Copy data from/to given render buffers.
82 * Will copy pixels from a corresponding place (from multi-device point of view) of the render
83 * buffers, and copy work's render buffers to the corresponding place of the destination. */
84
85 /* Notes:
86 * - Copies work's render buffer from the device.
87 * - Copies CPU-side buffer of the given buffer
88 * - Does not copy the buffer to its device. */
89 void copy_to_render_buffers(RenderBuffers *render_buffers);
90
91 /* Notes:
92 * - Does not copy given render buffers from the device.
93 * - Copies work's render buffer to its device. */
94 void copy_from_render_buffers(const RenderBuffers *render_buffers);
95
96 /* Special version of the `copy_from_render_buffers()` which only copies denoised passes from the
97 * given render buffers, leaving rest of the passes.
98 *
99 * Same notes about device copying applies to this call as well. */
100 void copy_from_denoised_render_buffers(const RenderBuffers *render_buffers);
101
102 /* Copy render buffers to/from device using an appropriate device queue when needed so that
103 * things are executed in order with the `render_samples()`. */
106
107 /* Zero render buffers to/from device using an appropriate device queue when needed so that
108 * things are executed in order with the `render_samples()`. */
109 virtual bool zero_render_buffers() = 0;
110
111 /* Access pixels rendered by this work and copy them to the corresponding location in the
112 * destination.
113 *
114 * NOTE: Does not perform copy of buffers from the device. Use `copy_render_tile_from_device()`
115 * to update host-side data. */
116 bool get_render_tile_pixels(const PassAccessor &pass_accessor,
117 const PassAccessor::Destination &destination);
118
119 /* Set pass data for baking. */
120 bool set_render_tile_pixels(PassAccessor &pass_accessor, const PassAccessor::Source &source);
121
122 /* Perform convergence test on the render buffer, and filter the convergence mask.
123 * Returns number of active pixels (the ones which did not converge yet). */
124 virtual int adaptive_sampling_converge_filter_count_active(float threshold, bool reset) = 0;
125
126 /* Run cryptomatte pass post-processing kernels. */
127 virtual void cryptomatte_postproces() = 0;
128
129 /* Cheap-ish request to see whether rendering is requested and is to be stopped as soon as
130 * possible, without waiting for any samples to be finished. */
131 inline bool is_cancel_requested() const
132 {
133 /* NOTE: Rely on the fact that on x86 CPU reading scalar can happen without atomic even in
134 * threaded environment. */
136 }
137
138 /* Access to the device which is used to path trace this work on. */
140 {
141 return device_;
142 }
143
144#ifdef WITH_PATH_GUIDING
145 /* Initializes the per-thread guiding kernel data. */
146 virtual void guiding_init_kernel_globals(void *, void *, const bool) {}
147#endif
148
149 protected:
150 PathTraceWork(Device *device,
151 Film *film,
152 DeviceScene *device_scene,
153 bool *cancel_requested_flag);
154
156
157 /* Get destination which offset and stride are configured so that writing to it will write to a
158 * proper location of GPU display texture, taking current tile and device slice into account. */
160 const PathTraceDisplay *display) const;
161
162 /* Device which will be used for path tracing.
163 * Note that it is an actual render device (and never is a multi-device). */
165
166 /* Film is used to access display pass configuration for GPU display update.
167 * Note that only fields which are not a part of kernel data can be accessed via the Film. */
169
170 /* Device side scene storage, that may be used for integrator logic. */
172
173 /* Render buffers where sampling is being accumulated into, allocated for a fraction of the big
174 * tile which is being rendered by this work.
175 * It also defines possible subset of a big tile in the case of multi-device rendering. */
176 unique_ptr<RenderBuffers> buffers_;
177
178 /* Effective parameters of the full, big tile, and current work render buffer.
179 * The latter might be different from `buffers_->params` when there is a resolution divider
180 * involved. */
184
185 bool *cancel_requested_flag_ = nullptr;
186};
/* NOTE(review): everything below is member-index residue from the generated
 * documentation page this header was extracted from. It duplicates the
 * declarations above (plus one unrelated `Film::reset` entry) and is retained,
 * commented out, for reference only:
 *
 *   void reset() — clear internal cached data and reset random seed (film.h:30)
 *   virtual bool zero_render_buffers()=0
 *   virtual void copy_to_display(PathTraceDisplay *display, PassMode pass_mode, int num_samples)=0
 *   virtual void cryptomatte_postproces()=0
 *   virtual int adaptive_sampling_converge_filter_count_active(float threshold, bool reset)=0
 *   void copy_from_denoised_render_buffers(const RenderBuffers *render_buffers)
 *   RenderBuffers *get_render_buffers()
 *   unique_ptr<RenderBuffers> buffers_
 *   BufferParams effective_full_params_
 *   virtual bool copy_render_buffers_from_device()=0
 *   PassAccessor::PassAccessInfo get_display_pass_access_info(PassMode pass_mode) const
 *   BufferParams effective_big_tile_params_
 *   bool *cancel_requested_flag_
 *   bool get_render_tile_pixels(const PassAccessor &pass_accessor, const PassAccessor::Destination &destination)
 *   PassAccessor::Destination get_display_destination_template(const PathTraceDisplay *display) const
 *   virtual bool copy_render_buffers_to_device()=0
 *   bool has_multiple_works() const
 *   void copy_to_render_buffers(RenderBuffers *render_buffers)
 *   void set_effective_buffer_params(const BufferParams &effective_full_params, const BufferParams &effective_big_tile_params, const BufferParams &effective_buffer_params)
 *   virtual void destroy_gpu_resources(PathTraceDisplay *display)=0
 *   void copy_from_render_buffers(const RenderBuffers *render_buffers)
 *   BufferParams effective_buffer_params_
 *   DeviceScene *device_scene_
 *   virtual void render_samples(RenderStatistics &statistics, int start_sample, int samples_num, int sample_offset)=0
 *   virtual void init_execution()=0
 *   static unique_ptr<PathTraceWork> create(Device *device, Film *film, DeviceScene *device_scene, bool *cancel_requested_flag)
 *   Device *get_device() const
 *   bool is_cancel_requested() const
 *   PathTraceWork(Device *device, Film *film, DeviceScene *device_scene, bool *cancel_requested_flag)
 *   virtual ~PathTraceWork()
 *   bool set_render_tile_pixels(PassAccessor &pass_accessor, const PassAccessor::Source &source)
 *   virtual void alloc_work_memory()
 *   #define CCL_NAMESPACE_END
 *   PassMode (pass.h:20)
 */