/* CPU implementation of the path tracing work, derived from PathTraceWork
 * (whose constructor takes the same arguments:
 *  PathTraceWork(Device *device, Film *film, DeviceScene *device_scene,
 *                const bool *cancel_requested_flag)). */
class PathTraceWorkCPU : public PathTraceWork {
 public:
  PathTraceWorkCPU(Device *device,
                   Film *film,
                   DeviceScene *device_scene,
                   const bool *cancel_requested_flag);

  void init_execution() override;

  void render_samples(RenderStatistics &statistics,
                      const int start_sample,
                      const int samples_num,
                      const int sample_offset) override;

  void copy_to_display(PathTraceDisplay *display,
                       PassMode pass_mode,
                       const int num_samples) override;
  void destroy_gpu_resources(PathTraceDisplay *display) override;

  bool copy_render_buffers_from_device() override;
  bool copy_render_buffers_to_device() override;
  bool zero_render_buffers() override;

  int adaptive_sampling_converge_filter_count_active(const float threshold,
                                                     bool reset) override;

  void cryptomatte_postprocess() override;

  void denoise_volume_guiding_buffers() override;

  /* Clear internal cached data and reset the random seed. */
  void reset();

#if defined(WITH_PATH_GUIDING)
  void guiding_init_kernel_globals(void *guiding_field,
                                   void *sample_data_storage,
                                   const bool train) override;
#endif

 protected:
  /* Core path tracing routine: renders the given work tile for the given
   * number of samples using the provided per-thread kernel globals. */
  void render_samples_full_pipeline(ThreadKernelGlobalsCPU *kernel_globals,
                                    const KernelWorkTile &work_tile,
                                    const int samples_num);

  /* CPU kernel entry points. */
  const CPUKernels &kernels_;

  /* Copies of the kernel globals, one per worker thread, so that multiple
   * threads can render concurrently. */
  vector<ThreadKernelGlobalsCPU> kernel_thread_globals_;
};
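For orientation, the sketch below shows the order in which a caller might drive the interface declared above: initialize, render batches of samples, filter adaptive-sampling convergence, and update the display. It is a minimal illustration, not code from the Cycles sources; the include path, the batch size, the adaptive-sampling threshold, and the helper function name are assumptions.

/* Minimal sketch of a hypothetical caller of PathTraceWorkCPU.
 * Assumes the Cycles source layout and build environment. */
#include <algorithm>

#include "integrator/path_trace_work_cpu.h" /* Assumed include path. */

CCL_NAMESPACE_BEGIN

static void render_with_work(PathTraceWorkCPU *work,
                             PathTraceDisplay *display,
                             const int total_samples)
{
  /* Prepare per-thread kernel globals before any rendering. */
  work->init_execution();

  /* Start from a cleared render buffer. */
  work->zero_render_buffers();

  RenderStatistics statistics;
  const int samples_per_batch = 8; /* Illustrative batch size. */

  for (int start_sample = 0; start_sample < total_samples;
       start_sample += samples_per_batch) {
    const int samples_num = std::min(samples_per_batch, total_samples - start_sample);

    /* Render a batch of samples; sample_offset is 0 for a non-tiled render. */
    work->render_samples(statistics, start_sample, samples_num, 0);

    /* Filter adaptive-sampling convergence and stop early once no pixels
     * remain active. The threshold value is illustrative. */
    const int num_active = work->adaptive_sampling_converge_filter_count_active(0.01f, false);
    if (num_active == 0) {
      break;
    }

    /* Update the interactive display with the current render buffer state. */
    work->copy_to_display(display, PassMode::NOISY, start_sample + samples_num);
  }

  work->destroy_gpu_resources(display);
}

CCL_NAMESPACE_END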
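The protected render_samples_full_pipeline() is the per-tile entry point, and kernel_thread_globals_ supplies one set of kernel globals per worker thread. A rough, hypothetical body for render_samples() that ties these together is sketched below; the effective_buffer_params_ member, the KernelWorkTile field names, and the use of parallel_for with TBB's current_thread_index() are assumptions based on common Cycles conventions, not a verbatim copy of the implementation.

/* Hypothetical illustration of how render_samples() could dispatch work tiles
 * to render_samples_full_pipeline() using the per-thread kernel globals. */
void PathTraceWorkCPU::render_samples(RenderStatistics &statistics,
                                      const int start_sample,
                                      const int samples_num,
                                      const int sample_offset)
{
  const int width = effective_buffer_params_.width;  /* Assumed base-class member. */
  const int height = effective_buffer_params_.height;

  parallel_for(0, width * height, [&](const int work_index) {
    const int y = work_index / width;
    const int x = work_index - y * width;

    /* Describe a single-pixel tile covering the requested sample range.
     * Field names are assumed for illustration. */
    KernelWorkTile work_tile;
    work_tile.x = x;
    work_tile.y = y;
    work_tile.w = 1;
    work_tile.h = 1;
    work_tile.start_sample = start_sample;
    work_tile.num_samples = samples_num;

    /* Each worker thread renders with its own copy of the kernel globals. */
    const int thread_index = tbb::this_task_arena::current_thread_index();
    ThreadKernelGlobalsCPU *kernel_globals = &kernel_thread_globals_[thread_index];
    render_samples_full_pipeline(kernel_globals, work_tile, samples_num);
  });

  /* Statistics gathering and sample_offset handling are omitted in this sketch. */
  (void)statistics;
  (void)sample_offset;
}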