The constructor forwards the pass parameters to the base PassAccessor and stores
the device queue used for kernel dispatch:

PassAccessorGPU::PassAccessorGPU(DeviceQueue *queue,
                                 const PassAccessInfo &pass_access_info,
                                 float exposure,
                                 int num_samples)
    : PassAccessor(pass_access_info, exposure, num_samples), queue_(queue)
{
}
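The queue is held as a raw pointer, so it must outlive the accessor. A usage
sketch; `gpu_device`, gpu_queue_create() and the parameter values are
assumptions for illustration, not taken from this file:

/* Assumed setup: `gpu_device` is a GPU-capable Device. */
unique_ptr<DeviceQueue> queue = gpu_device->gpu_queue_create();

PassAccessor::PassAccessInfo access_info; /* Pass type/offset filled in by the caller. */
PassAccessorGPU pass_accessor(queue.get(), access_info, 1.0f /* exposure */, 64 /* num_samples */);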
run_film_convert_kernels() enqueues the film-convert kernel that turns pass data
from the render buffers into the destination pixel format. Only fragments of the
body survive; elided kernel arguments are marked with /* ... */, and the cut-off
fallback operand of the stride computation is reconstructed as the buffer width:

void PassAccessorGPU::run_film_convert_kernels(DeviceKernel kernel,
                                               const RenderBuffers *render_buffers,
                                               const BufferParams &buffer_params,
                                               const Destination &destination) const
{
  /* One work item per pixel (reconstructed; exact field names may differ). */
  const int work_size = buffer_params.width * buffer_params.height;

  /* Use the destination stride when given, otherwise fall back to the width. */
  const int destination_stride = destination.stride != 0 ? destination.stride :
                                                           buffer_params.width;

  if (destination.d_pixels) {
    DCHECK_EQ(destination.stride, 0) << "Custom stride for float destination is not implemented.";

    DeviceKernelArguments args(/* ... */, &destination.d_pixels, /* ... */,
                               &destination.pixel_offset, /* ... */);
    queue_->enqueue(kernel, work_size, args);
  }
  if (destination.d_pixels_half_rgba) {
    DeviceKernelArguments args(/* ... */, &destination.d_pixels_half_rgba, /* ... */);
    /* ... enqueue the half-float RGBA variant of the kernel ... */
  }

  /* Wait for the conversion to finish before the host reads the result. */
  queue_->synchronize();
}
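The pattern above (optional float and half-float destinations, each dispatched
to its own kernel, followed by a blocking synchronize) can be shown
self-contained; MockQueue, Dest and the kernel enum below are illustrative
stand-ins, not Cycles API:

#include <cstdio>
#include <vector>

enum Kernel { KERNEL_CONVERT_FLOAT, KERNEL_CONVERT_HALF_RGBA };

struct Dest {
  float *pixels = nullptr;          /* Optional float destination. */
  void *pixels_half_rgba = nullptr; /* Optional half-RGBA destination. */
  int stride = 0;                   /* 0 means "use the buffer width". */
};

struct MockQueue {
  void enqueue(Kernel kernel, int work_size)
  {
    std::printf("enqueue kernel %d over %d pixels\n", int(kernel), work_size);
  }
  void synchronize()
  {
    std::printf("synchronize\n");
  }
};

void run_convert(MockQueue &queue, const Dest &dest, int width, int height)
{
  const int work_size = width * height;
  /* Same fallback rule as destination_stride above. */
  const int stride = dest.stride != 0 ? dest.stride : width;
  (void)stride;

  /* Both destinations may be requested in the same call. */
  if (dest.pixels) {
    queue.enqueue(KERNEL_CONVERT_FLOAT, work_size);
  }
  if (dest.pixels_half_rgba) {
    queue.enqueue(KERNEL_CONVERT_HALF_RGBA, work_size);
  }

  /* Block so the host can safely read the pixels afterwards. */
  queue.synchronize();
}

int main()
{
  MockQueue queue;
  std::vector<float> pixels(16 * 16 * 4);
  Dest dest;
  dest.pixels = pixels.data();
  run_convert(queue, dest, 16, 16);
  return 0;
}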
The per-pass accessors all share this shape, so they are generated by a macro
that token-pastes the pass name into the method name and the kernel enum value.
The individual DEFINE_PASS_ACCESSOR(...) invocations between the definition and
the #undef are not shown here:

#define DEFINE_PASS_ACCESSOR(pass, kernel_pass) \
  void PassAccessorGPU::get_pass_##pass(const RenderBuffers *render_buffers, \
                                        const BufferParams &buffer_params, \
                                        const Destination &destination) const \
  { \
    run_film_convert_kernels( \
        DEVICE_KERNEL_FILM_CONVERT_##kernel_pass, render_buffers, buffer_params, destination); \
  }

/* DEFINE_PASS_ACCESSOR(...) invocations for the individual passes. */

#undef DEFINE_PASS_ACCESSOR
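The ## operator is plain preprocessor token pasting; this self-contained sketch
shows the expansion with mock names (Accessor and the two-entry kernel enum are
illustrative):

#include <cstdio>

enum DeviceKernel {
  DEVICE_KERNEL_FILM_CONVERT_DEPTH,
  DEVICE_KERNEL_FILM_CONVERT_MIST,
};

struct Accessor {
  void run(DeviceKernel kernel) const
  {
    std::printf("kernel %d\n", int(kernel));
  }

/* get_pass_##pass expands to get_pass_depth / get_pass_mist, and
 * DEVICE_KERNEL_FILM_CONVERT_##kernel_pass to the matching enum value. */
#define DEFINE_PASS_ACCESSOR(pass, kernel_pass) \
  void get_pass_##pass() const \
  { \
    run(DEVICE_KERNEL_FILM_CONVERT_##kernel_pass); \
  }

  DEFINE_PASS_ACCESSOR(depth, DEPTH)
  DEFINE_PASS_ACCESSOR(mist, MIST)

#undef DEFINE_PASS_ACCESSOR
};

int main()
{
  Accessor accessor;
  accessor.get_pass_depth(); /* Prints "kernel 0". */
  accessor.get_pass_mist();  /* Prints "kernel 1". */
  return 0;
}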
Referenced declarations:

/* DeviceQueue (abstract dispatch queue): */
virtual void init_execution() = 0;
virtual bool synchronize() = 0;
virtual bool enqueue(DeviceKernel kernel, const int work_size, DeviceKernelArguments const &args) = 0;

/* PassAccessorGPU / PassAccessor: */
PassAccessorGPU(DeviceQueue *queue, const PassAccessInfo &pass_access_info, float exposure, int num_samples);
void run_film_convert_kernels(DeviceKernel kernel, const RenderBuffers *render_buffers, const BufferParams &buffer_params, const Destination &destination) const;
virtual void init_kernel_film_convert(KernelFilmConvert *kfilm_convert, const BufferParams &buffer_params, const Destination &destination) const;

/* Render buffer storage: */
device_vector<float> buffer;
device_ptr device_pointer;
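init_execution(), synchronize() and enqueue() are pure virtual, so each device
backend supplies its own queue. A minimal sketch of such an implementation;
LoggingQueue and the stand-in types are hypothetical and print instead of
launching GPU work:

#include <cstdio>

enum DeviceKernel { DEVICE_KERNEL_FILM_CONVERT_FLOAT };
struct DeviceKernelArguments {};

class DeviceQueue {
 public:
  virtual ~DeviceQueue() = default;
  virtual void init_execution() = 0;
  virtual bool synchronize() = 0;
  virtual bool enqueue(DeviceKernel kernel, const int work_size, DeviceKernelArguments const &args) = 0;
};

/* Hypothetical backend queue. */
class LoggingQueue : public DeviceQueue {
 public:
  void init_execution() override
  {
    std::printf("init execution\n");
  }

  bool enqueue(DeviceKernel kernel, const int work_size, DeviceKernelArguments const & /*args*/) override
  {
    std::printf("launch kernel %d, work size %d\n", int(kernel), work_size);
    return true;
  }

  bool synchronize() override
  {
    std::printf("wait for queued work\n");
    return true;
  }
};

int main()
{
  LoggingQueue queue;
  queue.init_execution();
  queue.enqueue(DEVICE_KERNEL_FILM_CONVERT_FLOAT, 256 * 256, DeviceKernelArguments{});
  return queue.synchronize() ? 0 : 1;
}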