Blender V5.0
eevee_lightprobe_volume.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
6
7#include "BKE_global.hh"
8#include "BKE_lightprobe.h"
9
10#include "GPU_capabilities.hh"
11
12#include "GPU_debug.hh"
13
14#include "eevee_debug_shared.hh"
15#include "eevee_instance.hh"
17
18#include <cstdio>
19
20namespace blender::eevee {
21
22/* -------------------------------------------------------------------- */
25
/* NOTE(review): the signature line was lost in extraction; presumably
 * `void VolumeProbeModule::init()` — verify against upstream.
 * Purpose (from the visible body): (re)allocate the irradiance atlas 3D texture sized from the
 * scene's `gi_irradiance_pool_size` setting, shrinking the pool and reshaping the atlas until
 * allocation succeeds within device limits, then rebuild the free-brick pool on full update. */
27{
28 display_grids_enabled_ = inst_.draw_overlays;
29
30 /* This might become an option in the future. */
31 bool use_l2_band = false;
32 int sh_coef_len = use_l2_band ? 9 : 4;
33 BLI_assert(gpu::TextureFormat::VOLUME_PROBE_FORMAT == gpu::TextureFormat::SFLOAT_16_16_16_16);
34 int texel_byte_size = 8; /* Assumes gpu::TextureFormat::SFLOAT_16_16_16_16. */
35 uint atlas_col_count = 0;
36 uint atlas_row_count = 0;
37
/* Reallocate only when the user-requested pool size changed or the texture was never created. */
38 if (assign_if_different(irradiance_pool_size_,
39 uint(inst_.scene->eevee.gi_irradiance_pool_size)) ||
40 !irradiance_atlas_tx_.is_valid())
41 {
42 irradiance_atlas_tx_.free();
43 /* Find highest pool size within device limits. */
/* Halve the pool size each iteration (down to 16 MB) until a texture allocation succeeds. */
44 for (uint irradiance_pool_size = irradiance_pool_size_;
45 irradiance_pool_size >= 16 && !irradiance_atlas_tx_.is_valid();
46 irradiance_pool_size >>= 1)
47 {
48 int atlas_byte_size = 1024 * 1024 * irradiance_pool_size;
49 /* Reshape texture to improve grid occupancy within device limits. */
50 constexpr uint atlas_col_count_min = 16;
51 constexpr uint atlas_col_count_max = 16384;
52 for (uint atlas_col_count_try = atlas_col_count_min;
53 atlas_col_count_try <= atlas_col_count_max && !irradiance_atlas_tx_.is_valid();
54 atlas_col_count_try <<= 1)
55 {
/* Atlas layout: X = bricks per row, Y = rows, Z = SH coefficient layers + validity layers. */
56 int3 atlas_extent(IRRADIANCE_GRID_BRICK_SIZE);
57 atlas_extent.z *= sh_coef_len;
58 /* Add space for validity bits. */
59 atlas_extent.z += IRRADIANCE_GRID_BRICK_SIZE / 4;
60 atlas_extent.x *= atlas_col_count_try;
61
62 /* Determine the row count depending on the scene settings. */
63 int row_byte_size = math::reduce_mul(atlas_extent) * texel_byte_size;
64 atlas_row_count = divide_ceil_u(atlas_byte_size, row_byte_size);
65 atlas_extent.y *= atlas_row_count;
66
/* NOTE(review): extraction dropped lines 67-69 here — presumably the `usage` flag
 * declaration (and possibly a row-count clamp) used by `ensure_3d` below; verify upstream. */
70 irradiance_atlas_tx_.ensure_3d(
71 gpu::TextureFormat::VOLUME_PROBE_FORMAT, atlas_extent, usage);
72 if (irradiance_atlas_tx_.is_valid()) {
73 do_full_update_ = true;
74 irradiance_pool_size_alloc_ = irradiance_pool_size;
75 atlas_col_count = atlas_col_count_try;
76 }
77 }
78 }
79 }
/* Warn the user if the atlas had to be shrunk below the requested size. */
80 if (irradiance_pool_size_alloc_ != irradiance_pool_size_) {
81 inst_.info_append_i18n(
82 "Warning: Could not allocate light probes volume pool of {} MB, using {} MB instead.",
83 irradiance_pool_size_,
84 irradiance_pool_size_alloc_);
85 }
86
87 if (do_full_update_) {
88 do_full_update_ = false;
89 do_update_world_ = true;
90
91 /* Delete all references to existing bricks. */
92 for (VolumeProbe &grid : inst_.light_probes.volume_map_.values()) {
93 grid.bricks.clear();
94 }
95 brick_pool_.clear();
96 /* Fill with all the available bricks. */
97 for (auto i : IndexRange(atlas_row_count * atlas_col_count)) {
98 if (i == 0) {
99 /* Reserve one brick for the world. */
100 world_brick_index_ = 0;
101 }
102 else {
103 IrradianceBrick brick;
/* NOTE(review): line 105 dropped in extraction — the right-hand side of this multiply
 * (presumably the brick size in texels) is missing; verify upstream. */
104 brick.atlas_coord = uint2(i % atlas_col_count, i / atlas_col_count) *
106 brick_pool_.append(irradiance_brick_pack(brick));
107 }
108 }
109
110 if (irradiance_atlas_tx_.is_valid()) {
111 /* Clear the pool to avoid any interpolation to undefined values. */
112 irradiance_atlas_tx_.clear(float4(0.0f));
113 }
114 }
115
116 if (irradiance_atlas_tx_.is_valid() == false) {
117 inst_.info_append_i18n("Irradiance Atlas texture could not be created");
118 }
119}
120
/* NOTE(review): signature line dropped in extraction; presumably
 * `void VolumeProbeModule::sync()` — verify against upstream.
 * Forwards the sync step to the baking sub-module, but only while a bake is running. */
122{
123 if (inst_.is_baking()) {
124 bake.sync();
125 }
126}
127
/* NOTE(review): signature line dropped in extraction; from the body this takes an
 * `int brick_len` and returns a `Vector<IrradianceBrickPacked>` — verify upstream.
 * Pops `brick_len` bricks from the tail of the free pool; returns an empty vector when the
 * pool cannot satisfy the request (all-or-nothing allocation). */
129{
130 if (brick_pool_.size() < brick_len) {
131 /* Fail allocation. Not enough brick in the atlas. */
132 return {};
133 }
134 Vector<IrradianceBrickPacked> allocated(brick_len);
135 /* Copy bricks to return vector. */
136 allocated.as_mutable_span().copy_from(brick_pool_.as_span().take_back(brick_len));
137 /* Remove bricks from the pool. */
138 brick_pool_.resize(brick_pool_.size() - brick_len);
139
140 return allocated;
141}
142
/* NOTE(review): signature line dropped in extraction; from the body this takes a mutable
 * vector of packed bricks (`bricks`) — verify upstream.
 * Returns the given bricks to the free pool and clears the caller's ownership of them. */
144{
145 brick_pool_.extend(bricks.as_span());
146 bricks.clear();
147}
148
/* NOTE(review): signature line dropped in extraction; presumably
 * `void VolumeProbeModule::set_view(View &view)` or similar — verify against upstream.
 * Purpose (from the visible body): gather all baked volume probes, allocate atlas bricks for
 * them, fill the grid/brick info UBOs (sorted smallest-volume-first, world grid last), then
 * dispatch upload passes that splat world and per-grid irradiance data into the atlas. */
150{
151 Vector<VolumeProbe *> grid_loaded;
152
153 bool any_update = false;
154 /* First allocate the needed bricks and populate the brick buffer. */
155 bricks_infos_buf_.clear();
156 for (VolumeProbe &grid : inst_.light_probes.volume_map_.values()) {
157 LightProbeGridCacheFrame *cache = grid.cache ? grid.cache->grid_static_cache : nullptr;
158 if (cache == nullptr) {
159 continue;
160 }
161
/* Skip grids that carry neither in-progress baking data nor finished irradiance data. */
162 if (cache->baking.L0 == nullptr && cache->irradiance.L0 == nullptr) {
163 /* No data. */
164 continue;
165 }
166
167 int3 grid_size = int3(cache->size);
168 if (grid_size.x <= 0 || grid_size.y <= 0 || grid_size.z <= 0) {
169 inst_.info_append_i18n("Error: Malformed irradiance grid data");
170 continue;
171 }
172
173 /* TODO frustum cull and only load visible grids. */
174
175 /* Note that we reserve 1 slot for the world irradiance. */
176 if (grid_loaded.size() >= IRRADIANCE_GRID_MAX - 1) {
177 inst_.info_append_i18n("Error: Too many irradiance grids in the scene");
178 /* TODO frustum cull and only load visible grids. */
179 // inst_.info_append_i18n("Error: Too many grid visible");
180 continue;
181 }
182
/* +2 : one sample of padding on each side, used for blending with neighboring probes. */
183 int3 grid_size_with_padding = grid_size + 2;
184 if (grid.bricks.is_empty()) {
/* NOTE(review): line 186 dropped in extraction — the divisor argument of `divide_ceil`
 * (presumably the brick size minus shared border) is missing; verify upstream. */
185 int3 grid_size_in_bricks = math::divide_ceil(grid_size_with_padding,
187 int brick_len = grid_size_in_bricks.x * grid_size_in_bricks.y * grid_size_in_bricks.z;
188 grid.bricks = bricks_alloc(brick_len);
189
190 if (grid.bricks.is_empty()) {
191 inst_.info_append_i18n("Error: Irradiance grid allocation failed");
192 continue;
193 }
194 grid.do_update = true;
195 }
196
197 if (do_update_world_) {
198 /* Update grid composition if world changed. */
199 grid.do_update = true;
200 }
201
202 any_update = any_update || grid.do_update;
203
203 /* Remember where this grid's bricks start inside the flat brick info buffer. */
204 grid.brick_offset = bricks_infos_buf_.size();
205 bricks_infos_buf_.extend(grid.bricks);
206
/* NOTE(review): lines 208 and 210 dropped in extraction — parts of the grid-to-world
 * matrix product (scale factor wrapping and trailing term) are missing; verify upstream. */
207 float4x4 grid_to_world = grid.object_to_world * math::from_location<float4x4>(float3(-1.0f)) *
209 float3(2.0f / float3(grid_size_with_padding - 1))) *
211
212 grid.world_to_grid_transposed = float3x4(math::transpose(math::invert(grid_to_world)));
213 grid.grid_size_padded = grid_size_with_padding;
214 grid_loaded.append(&grid);
215 }
216
217 /* TODO: This is greedy update detection. We should check if a change can influence each grid
218 * before tagging update. But this is a bit too complex and update is quite cheap. So we update
219 * everything if there is any update on any grid. */
220 if (any_update) {
221 for (VolumeProbe *grid : grid_loaded) {
222 grid->do_update = true;
223 }
224 }
225
226 /* Then create brick & grid infos UBOs content. */
227 int world_grid_index = 0;
228 {
229 /* Stable sorting of grids. */
230 std::sort(
231 grid_loaded.begin(), grid_loaded.end(), [](const VolumeProbe *a, const VolumeProbe *b) {
232 float volume_a = math::determinant(float3x3(a->object_to_world));
233 float volume_b = math::determinant(float3x3(b->object_to_world));
234 if (volume_a != volume_b) {
235 /* Smallest first. */
236 return volume_a < volume_b;
237 }
238 /* Volumes are identical. Any arbitrary criteria can be used to sort them.
239 * Use position to avoid unstable result caused by depsgraph non deterministic eval
240 * order. This could also become a priority parameter. */
/* NOTE(review): line 241 dropped in extraction — the declaration of `_a`
 * (presumably `float3 _a = a->object_to_world.location();`) is missing; verify upstream. */
242 float3 _b = b->object_to_world.location();
243 if (_a.x != _b.x) {
244 return _a.x < _b.x;
245 }
246 if (_a.y != _b.y) {
247 return _a.y < _b.y;
248 }
249 if (_a.z != _b.z) {
250 return _a.z < _b.z;
251 }
252 /* Fallback to memory address, since there's no good alternative. */
253 return a < b;
254 });
255
256 /* Insert grids in UBO in sorted order. */
257 int grids_len = 0;
258 for (VolumeProbe *grid : grid_loaded) {
259 grid->grid_index = grids_len;
260 grids_infos_buf_[grids_len++] = *grid;
261 }
262
263 /* Insert world grid last. */
264 world_grid_index = grids_len++;
265
/* World grid: a single 1³ brick with no biases, acting as the fallback irradiance. */
266 VolumeProbeData grid;
/* NOTE(review): line 267 dropped in extraction — presumably the world grid's
 * `world_to_grid_transposed` initialization; verify upstream. */
268 grid.grid_size_padded = int3(1);
269 grid.brick_offset = bricks_infos_buf_.size();
270 grid.normal_bias = 0.0f;
271 grid.view_bias = 0.0f;
272 grid.facing_bias = 0.0f;
273 grids_infos_buf_[world_grid_index] = grid;
274
275 bricks_infos_buf_.append(world_brick_index_);
276
277 if (grids_len < IRRADIANCE_GRID_MAX) {
278 /* Tag last grid as invalid to stop the iteration. */
279 grids_infos_buf_[grids_len].grid_size_padded = int3(-1);
280 }
281
282 bricks_infos_buf_.push_update();
283 grids_infos_buf_.push_update();
284 }
285
286 /* Upload data for world. */
287 if (do_update_world_) {
288 grid_upload_ps_.init();
289 grid_upload_ps_.shader_set(inst_.shaders.static_shader_get(LIGHTPROBE_IRRADIANCE_WORLD));
290 grid_upload_ps_.bind_resources(inst_.uniform_data);
291 grid_upload_ps_.bind_ssbo("harmonic_buf", &inst_.sphere_probes.spherical_harmonics_buf());
292 grid_upload_ps_.bind_ubo("grids_infos_buf", &grids_infos_buf_);
293 grid_upload_ps_.bind_ssbo("bricks_infos_buf", &bricks_infos_buf_);
294 grid_upload_ps_.push_constant("grid_index", world_grid_index);
295 grid_upload_ps_.bind_image("irradiance_atlas_img", &irradiance_atlas_tx_);
296 /* Sync with extraction. */
297 grid_upload_ps_.barrier(GPU_BARRIER_SHADER_STORAGE);
298 /* Only upload one brick. */
299 grid_upload_ps_.dispatch(int3(1));
300 /* Sync with next load. */
301 grid_upload_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH);
302
303 inst_.manager->submit(grid_upload_ps_);
304 }
305
306 /* Upload data for each grid that need to be inserted in the atlas.
307 * Upload by order of dependency. */
308 /* Start at world index to not load any other grid (+1 because we decrement at loop start). */
309 int grid_start_index = grid_loaded.size() + 1;
/* Iterate largest-volume grids first (reverse of the sorted order) so smaller grids can
 * blend over the bigger ones already present in the atlas. */
310 for (auto it = grid_loaded.rbegin(); it != grid_loaded.rend(); ++it) {
311 grid_start_index--;
312
313 VolumeProbe *grid = *it;
314 if (!grid->do_update) {
315 continue;
316 }
317
318 grid->do_update = false;
319
320 LightProbeGridCacheFrame *cache = grid->cache->grid_static_cache;
321
322 /* Staging textures are recreated for each light grid to avoid increasing VRAM usage. */
323 draw::Texture irradiance_a_tx = {"irradiance_a_tx"};
324 draw::Texture irradiance_b_tx = {"irradiance_b_tx"};
325 draw::Texture irradiance_c_tx = {"irradiance_c_tx"};
326 draw::Texture irradiance_d_tx = {"irradiance_d_tx"};
327 draw::Texture validity_tx = {"validity_tx"};
328
/* NOTE(review): line 329 dropped in extraction — presumably the `usage` flag declaration
 * used by every `ensure_3d` call below; verify upstream. */
330 int3 grid_size = int3(cache->size);
/* Baking data takes priority over finished irradiance data (4-component float layout). */
331 if (cache->baking.L0) {
332 irradiance_a_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16_16,
333 grid_size,
334 usage,
335 (const float *)cache->baking.L0);
336 irradiance_b_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16_16,
337 grid_size,
338 usage,
339 (const float *)cache->baking.L1_a);
340 irradiance_c_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16_16,
341 grid_size,
342 usage,
343 (const float *)cache->baking.L1_b);
344 irradiance_d_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16_16,
345 grid_size,
346 usage,
347 (const float *)cache->baking.L1_c);
348 validity_tx.ensure_3d(
349 gpu::TextureFormat::SFLOAT_16, grid_size, usage, cache->baking.validity);
350 if (cache->baking.validity == nullptr) {
351 /* Avoid displaying garbage data. */
352 validity_tx.clear(float4(0.0));
353 }
354 }
355 else if (cache->irradiance.L0) {
356 irradiance_a_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16,
357 grid_size,
358 usage,
359 (const float *)cache->irradiance.L0);
360 irradiance_b_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16,
361 grid_size,
362 usage,
363 (const float *)cache->irradiance.L1_a);
364 irradiance_c_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16,
365 grid_size,
366 usage,
367 (const float *)cache->irradiance.L1_b);
368 irradiance_d_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16,
369 grid_size,
370 usage,
371 (const float *)cache->irradiance.L1_c);
372 validity_tx.ensure_3d(gpu::TextureFormat::UNORM_8, grid_size, usage);
373 if (cache->connectivity.validity) {
374 /* TODO(fclem): Make texture creation API work with different data types. */
/* NOTE(review): line 376 dropped in extraction — one argument of
 * `GPU_texture_update_sub` (presumably the mip/data-format argument) is missing. */
375 GPU_texture_update_sub(validity_tx,
377 cache->connectivity.validity,
378 0,
379 0,
380 0,
381 UNPACK3(grid_size));
382 }
383 else {
384 /* Avoid displaying garbage data. */
385 validity_tx.clear(float4(0.0));
386 }
387 }
388 else {
389 continue;
390 }
391
392 if (irradiance_a_tx.is_valid() == false) {
393 inst_.info_append_i18n("Error: Could not allocate irradiance staging texture");
394 /* Avoid undefined behavior with uninitialized values. Still load a clear texture. */
395 const float4 zero(0.0f);
396 irradiance_a_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16, int3(1), usage, zero);
397 irradiance_b_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16, int3(1), usage, zero);
398 irradiance_c_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16, int3(1), usage, zero);
399 irradiance_d_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16, int3(1), usage, zero);
400 validity_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16, int3(1), usage, zero);
401 }
402
403 bool visibility_available = cache->visibility.L0 != nullptr;
404 bool is_baking = cache->irradiance.L0 == nullptr;
405
406 draw::Texture visibility_a_tx = {"visibility_a_tx"};
407 draw::Texture visibility_b_tx = {"visibility_b_tx"};
408 draw::Texture visibility_c_tx = {"visibility_c_tx"};
409 draw::Texture visibility_d_tx = {"visibility_d_tx"};
410 if (visibility_available) {
411 visibility_a_tx.ensure_3d(
412 gpu::TextureFormat::SFLOAT_16, grid_size, usage, (const float *)cache->visibility.L0);
413 visibility_b_tx.ensure_3d(
414 gpu::TextureFormat::SFLOAT_16, grid_size, usage, (const float *)cache->visibility.L1_a);
415 visibility_c_tx.ensure_3d(
416 gpu::TextureFormat::SFLOAT_16, grid_size, usage, (const float *)cache->visibility.L1_b);
417 visibility_d_tx.ensure_3d(
418 gpu::TextureFormat::SFLOAT_16, grid_size, usage, (const float *)cache->visibility.L1_c);
419
/* Broadcast the single visibility channel to all four components via swizzling. */
420 GPU_texture_swizzle_set(visibility_a_tx, "111r");
421 GPU_texture_swizzle_set(visibility_b_tx, "111r");
422 GPU_texture_swizzle_set(visibility_c_tx, "111r");
423 GPU_texture_swizzle_set(visibility_d_tx, "111r");
424 }
425 else if (!is_baking) {
426 /* Missing visibility. Load default visibility L0 = 1, L1 = (0, 0, 0). */
427 GPU_texture_swizzle_set(irradiance_a_tx, "rgb1");
428 GPU_texture_swizzle_set(irradiance_b_tx, "rgb0");
429 GPU_texture_swizzle_set(irradiance_c_tx, "rgb0");
430 GPU_texture_swizzle_set(irradiance_d_tx, "rgb0");
431 }
432
433 grid_upload_ps_.init();
434 grid_upload_ps_.shader_set(inst_.shaders.static_shader_get(LIGHTPROBE_IRRADIANCE_LOAD));
435
436 grid_upload_ps_.bind_resources(inst_.uniform_data);
437 grid_upload_ps_.push_constant("validity_threshold", grid->validity_threshold);
438 grid_upload_ps_.push_constant("dilation_threshold", grid->dilation_threshold);
439 grid_upload_ps_.push_constant("dilation_radius", grid->dilation_radius);
440 grid_upload_ps_.push_constant("grid_index", grid->grid_index);
441 grid_upload_ps_.push_constant("grid_start_index", grid_start_index);
442 grid_upload_ps_.push_constant("grid_local_to_world", grid->object_to_world);
443 grid_upload_ps_.push_constant("grid_intensity_factor", grid->intensity);
444 grid_upload_ps_.bind_ubo("grids_infos_buf", &grids_infos_buf_);
445 grid_upload_ps_.bind_ssbo("bricks_infos_buf", &bricks_infos_buf_);
446 grid_upload_ps_.bind_texture("irradiance_a_tx", &irradiance_a_tx);
447 grid_upload_ps_.bind_texture("irradiance_b_tx", &irradiance_b_tx);
448 grid_upload_ps_.bind_texture("irradiance_c_tx", &irradiance_c_tx);
449 grid_upload_ps_.bind_texture("irradiance_d_tx", &irradiance_d_tx);
450 grid_upload_ps_.bind_texture("validity_tx", &validity_tx);
451 grid_upload_ps_.bind_image("irradiance_atlas_img", &irradiance_atlas_tx_);
452 /* NOTE: We are read and writing the same texture that we are sampling from. If that causes an
453 * issue, we should revert to manual trilinear interpolation. */
454 grid_upload_ps_.bind_texture("irradiance_atlas_tx", &irradiance_atlas_tx_);
455 /* If visibility is invalid, either it is still baking and visibility is stored with
456 * irradiance, or it is missing and we sample a completely uniform visibility. */
457 bool use_vis = visibility_available;
458 grid_upload_ps_.bind_texture("visibility_a_tx", use_vis ? &visibility_a_tx : &irradiance_a_tx);
459 grid_upload_ps_.bind_texture("visibility_b_tx", use_vis ? &visibility_b_tx : &irradiance_b_tx);
460 grid_upload_ps_.bind_texture("visibility_c_tx", use_vis ? &visibility_c_tx : &irradiance_c_tx);
461 grid_upload_ps_.bind_texture("visibility_d_tx", use_vis ? &visibility_d_tx : &irradiance_d_tx);
462
463 /* Runtime grid is padded for blending with surrounding probes. */
464 int3 grid_size_with_padding = grid_size + 2;
465 /* Note that we take into account the padding border of each brick. */
/* NOTE(review): line 467 dropped in extraction — the divisor argument of `divide_ceil`
 * is missing (same dropped expression as the allocation above); verify upstream. */
466 int3 grid_size_in_bricks = math::divide_ceil(grid_size_with_padding,
468 grid_upload_ps_.dispatch(grid_size_in_bricks);
469 /* Sync with next load. */
470 grid_upload_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH);
471
472 inst_.manager->submit(grid_upload_ps_);
473
/* Free staging textures immediately; `validity_tx` and visibility textures are freed by
 * their destructors at end of scope. */
474 irradiance_a_tx.free();
475 irradiance_b_tx.free();
476 irradiance_c_tx.free();
477 irradiance_d_tx.free();
478 }
479
480 do_update_world_ = false;
481}
482
/* NOTE(review): signature line dropped in extraction; presumably
 * `void VolumeProbeModule::viewport_draw(View &view, gpu::FrameBuffer *view_fb)` — verify.
 * Draws debug and display overlays for the volume probes, skipped while baking. */
484{
485 if (!inst_.is_baking()) {
486 debug_pass_draw(view, view_fb);
487 display_pass_draw(view, view_fb);
488 }
489}
490
/* Draw the active irradiance-cache debug visualization (surfels, validity, or virtual
 * offsets) for every grid that has static cache data. Returns early when the current
 * debug mode is not one of the irradiance-cache modes. */
491void VolumeProbeModule::debug_pass_draw(View &view, gpu::FrameBuffer *view_fb)
492{
/* Report the active debug mode in the viewport info text.
 * NOTE(review): the `case` labels of this switch (doxygen lines 494, 497, 500, 503, 506,
 * 509) were dropped in extraction — each `info_append` belongs to one of the
 * DEBUG_IRRADIANCE_CACHE_* cases matched in the switch below; verify upstream. */
493 switch (inst_.debug_mode) {
495 inst_.info_append("Debug Mode: Surfels Normal");
496 break;
498 inst_.info_append("Debug Mode: Surfels Cluster");
499 break;
501 inst_.info_append("Debug Mode: Surfels Irradiance");
502 break;
504 inst_.info_append("Debug Mode: Surfels Visibility");
505 break;
507 inst_.info_append("Debug Mode: Irradiance Validity");
508 break;
510 inst_.info_append("Debug Mode: Virtual Offset");
511 break;
512 default:
513 /* Nothing to display. */
514 return;
515 }
516
517 for (const VolumeProbe &grid : inst_.light_probes.volume_map_.values()) {
518 if (grid.cache == nullptr) {
519 continue;
520 }
521
522 LightProbeGridCacheFrame *cache = grid.cache->grid_static_cache;
523
524 if (cache == nullptr) {
525 continue;
526 }
527
528 switch (inst_.debug_mode) {
529 case eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS_NORMAL:
530 case eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS_CLUSTER:
531 case eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS_VISIBILITY:
532 case eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS_IRRADIANCE: {
533 if (cache->surfels == nullptr || cache->surfels_len == 0) {
534 continue;
535 }
/* Surfel radius scales with the largest axis of the grid's object transform. */
536 float max_axis_len = math::reduce_max(math::to_scale(grid.object_to_world));
537 debug_ps_.init();
538 debug_ps_.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
539 DRW_STATE_CLIP_CONTROL_UNIT_RANGE | inst_.film.depth.test_state);
540 debug_ps_.framebuffer_set(&view_fb);
541 debug_ps_.shader_set(inst_.shaders.static_shader_get(DEBUG_SURFELS));
542 debug_ps_.push_constant("debug_surfel_radius", 0.5f * max_axis_len / grid.surfel_density);
543 debug_ps_.push_constant("debug_mode", int(inst_.debug_mode));
544
545 debug_surfels_buf_.resize(cache->surfels_len);
546 /* TODO(fclem): Cleanup: Could have a function in draw::StorageArrayBuffer that takes an
547 * input data. */
548 Span<Surfel> grid_surfels(static_cast<Surfel *>(cache->surfels), cache->surfels_len);
549 MutableSpan<Surfel>(debug_surfels_buf_.data(), cache->surfels_len).copy_from(grid_surfels);
550 debug_surfels_buf_.push_update();
551
552 debug_ps_.bind_ssbo("surfels_buf", debug_surfels_buf_);
/* One 4-vertex triangle strip (a quad) per surfel. */
553 debug_ps_.draw_procedural(GPU_PRIM_TRI_STRIP, cache->surfels_len, 4);
554
555 inst_.manager->submit(debug_ps_, view);
556 break;
557 }
558
559 case eDebugMode::DEBUG_IRRADIANCE_CACHE_VALIDITY:
560 case eDebugMode::DEBUG_IRRADIANCE_CACHE_VIRTUAL_OFFSET: {
561 int3 grid_size = int3(cache->size);
562 debug_ps_.init();
563 debug_ps_.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
564 DRW_STATE_CLIP_CONTROL_UNIT_RANGE | inst_.film.depth.test_state);
565 debug_ps_.framebuffer_set(&view_fb);
566 debug_ps_.shader_set(inst_.shaders.static_shader_get(DEBUG_IRRADIANCE_GRID));
567 debug_ps_.push_constant("debug_mode", int(inst_.debug_mode));
568 debug_ps_.push_constant("grid_mat", grid.object_to_world);
569
/* NOTE(review): line 570 dropped in extraction — presumably the `usage` flag
 * declaration used by `ensure_3d` below; verify upstream. */
571 Texture debug_data_tx = {"debug_data_tx"};
572
573 if (inst_.debug_mode == eDebugMode::DEBUG_IRRADIANCE_CACHE_VALIDITY) {
574 const float *data;
575 if (cache->baking.validity) {
576 data = cache->baking.validity;
577 debug_data_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16, grid_size, usage, data);
578 }
579 else if (cache->connectivity.validity) {
580 debug_data_tx.ensure_3d(gpu::TextureFormat::UNORM_8, grid_size, usage);
581 /* TODO(fclem): Make texture creation API work with different data types. */
/* NOTE(review): line 583 dropped in extraction — one argument of
 * `GPU_texture_update_sub` is missing; verify upstream. */
582 GPU_texture_update_sub(debug_data_tx,
584 cache->connectivity.validity,
585 0,
586 0,
587 0,
588 UNPACK3(grid_size));
589 }
590 else {
591 continue;
592 }
593 debug_ps_.push_constant("debug_value", grid.validity_threshold);
594 debug_ps_.bind_texture("debug_data_tx", debug_data_tx);
/* One point per grid sample. */
595 debug_ps_.draw_procedural(GPU_PRIM_POINTS, 1, grid_size.x * grid_size.y * grid_size.z);
596 }
597 else {
598 if (cache->baking.virtual_offset) {
599 const float *data = (const float *)cache->baking.virtual_offset;
600 debug_data_tx.ensure_3d(
601 gpu::TextureFormat::SFLOAT_16_16_16_16, grid_size, usage, data);
602 }
603 else {
604 continue;
605 }
606 debug_ps_.bind_texture("debug_data_tx", debug_data_tx);
/* One line (2 vertices) per grid sample showing its virtual offset. */
607 debug_ps_.draw_procedural(
608 GPU_PRIM_LINES, 1, grid_size.x * grid_size.y * grid_size.z * 2);
609 }
610
611 inst_.manager->submit(debug_ps_, view);
612 break;
613 }
614
615 default:
616 break;
617 }
618 }
619}
620
/* Draw the viewport "display probes" overlay: for each grid with display enabled, stage its
 * cached irradiance/validity data into temporary 3D textures and draw one sphere per sample. */
621void VolumeProbeModule::display_pass_draw(View &view, gpu::FrameBuffer *view_fb)
622{
623 if (!display_grids_enabled_) {
624 return;
625 }
626
627 for (const VolumeProbe &grid : inst_.light_probes.volume_map_.values()) {
628 if (!grid.viewport_display || grid.viewport_display_size == 0.0f || !grid.cache ||
629 !grid.cache->grid_static_cache)
630 {
631 continue;
632 }
633
634 LightProbeGridCacheFrame *cache = grid.cache->grid_static_cache;
635
636 /* Display texture. Updated for each individual light grid to avoid increasing VRAM usage. */
637 draw::Texture irradiance_a_tx = {"irradiance_a_tx"};
638 draw::Texture irradiance_b_tx = {"irradiance_b_tx"};
639 draw::Texture irradiance_c_tx = {"irradiance_c_tx"};
640 draw::Texture irradiance_d_tx = {"irradiance_d_tx"};
641 draw::Texture validity_tx = {"validity_tx"};
642
/* NOTE(review): line 643 dropped in extraction — presumably the `usage` flag declaration
 * used by every `ensure_3d` call below; verify upstream. */
644 int3 grid_size = int3(cache->size);
/* Prefer in-progress baking data (4-component) over finished irradiance (3-component). */
645 if (cache->baking.L0) {
646 irradiance_a_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16_16,
647 grid_size,
648 usage,
649 (const float *)cache->baking.L0);
650 irradiance_b_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16_16,
651 grid_size,
652 usage,
653 (const float *)cache->baking.L1_a);
654 irradiance_c_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16_16,
655 grid_size,
656 usage,
657 (const float *)cache->baking.L1_b);
658 irradiance_d_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16_16,
659 grid_size,
660 usage,
661 (const float *)cache->baking.L1_c);
662 validity_tx.ensure_3d(
663 gpu::TextureFormat::SFLOAT_16, grid_size, usage, (const float *)cache->baking.validity);
664 if (cache->baking.validity == nullptr) {
665 /* Avoid displaying garbage data. */
666 validity_tx.clear(float4(0.0));
667 }
668 }
669 else if (cache->irradiance.L0) {
670 irradiance_a_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16,
671 grid_size,
672 usage,
673 (const float *)cache->irradiance.L0);
674 irradiance_b_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16,
675 grid_size,
676 usage,
677 (const float *)cache->irradiance.L1_a);
678 irradiance_c_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16,
679 grid_size,
680 usage,
681 (const float *)cache->irradiance.L1_b);
682 irradiance_d_tx.ensure_3d(gpu::TextureFormat::SFLOAT_16_16_16,
683 grid_size,
684 usage,
685 (const float *)cache->irradiance.L1_c);
686 validity_tx.ensure_3d(gpu::TextureFormat::UNORM_8, grid_size, usage);
687 if (cache->connectivity.validity) {
688 /* TODO(fclem): Make texture creation API work with different data types. */
/* NOTE(review): line 690 dropped in extraction — one argument of
 * `GPU_texture_update_sub` is missing; verify upstream. */
689 GPU_texture_update_sub(validity_tx,
691 cache->connectivity.validity,
692 0,
693 0,
694 0,
695 UNPACK3(grid_size));
696 }
697 else {
698 /* Avoid displaying garbage data. */
699 validity_tx.clear(float4(0.0));
700 }
701 }
702 else {
703 continue;
704 }
705
706 display_grids_ps_.init();
/* NOTE(review): line 709 dropped in extraction — the trailing state flag(s) of this
 * `state_set` expression are missing (expression continues past the `|`); verify. */
707 display_grids_ps_.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
708 DRW_STATE_CLIP_CONTROL_UNIT_RANGE | inst_.film.depth.test_state |
710 display_grids_ps_.framebuffer_set(&view_fb);
711 display_grids_ps_.shader_set(inst_.shaders.static_shader_get(DISPLAY_PROBE_VOLUME));
712
713 display_grids_ps_.push_constant("sphere_radius", grid.viewport_display_size);
714 display_grids_ps_.push_constant("grid_resolution", grid_size);
715 display_grids_ps_.push_constant("grid_to_world", grid.object_to_world);
716 display_grids_ps_.push_constant("world_to_grid", grid.world_to_object);
717 /* TODO(fclem): Make it an option when display options are moved to probe DNA. */
718 display_grids_ps_.push_constant("display_validity", false);
719
720 display_grids_ps_.bind_texture("irradiance_a_tx", &irradiance_a_tx);
721 display_grids_ps_.bind_texture("irradiance_b_tx", &irradiance_b_tx);
722 display_grids_ps_.bind_texture("irradiance_c_tx", &irradiance_c_tx);
723 display_grids_ps_.bind_texture("irradiance_d_tx", &irradiance_d_tx);
724 display_grids_ps_.bind_texture("validity_tx", &validity_tx);
725
/* Two triangles (a quad) per sample, drawn as a flat triangle list. */
726 int sample_count = int(BKE_lightprobe_grid_cache_frame_sample_count(cache));
727 int triangle_count = sample_count * 2;
728 display_grids_ps_.draw_procedural(GPU_PRIM_TRIS, 1, triangle_count * 3);
729
730 inst_.manager->submit(display_grids_ps_, view);
731
/* Free staging textures right away; `validity_tx` is freed by its destructor. */
732 irradiance_a_tx.free();
733 irradiance_b_tx.free();
734 irradiance_c_tx.free();
735 irradiance_d_tx.free();
736 }
737}
738
740
741/* -------------------------------------------------------------------- */
744
/* Initialize baking parameters from the probe object's LightProbe data-block.
 * Surfel density is normalized by the object's largest scale axis so density is expressed
 * in world-space units regardless of the probe's scale. */
745void IrradianceBake::init(const Object &probe_object)
746{
747 float max_axis_len = math::reduce_max(math::to_scale(probe_object.object_to_world()));
748
749 const ::LightProbe &lightprobe = DRW_object_get_data_for_drawing<::LightProbe>(probe_object);
750 surfel_density_ = lightprobe.grid_surfel_density / max_axis_len;
751 min_distance_to_surface_ = lightprobe.grid_surface_bias;
752 max_virtual_offset_ = lightprobe.grid_escape_bias;
753 clip_distance_ = lightprobe.clipend;
754 capture_world_ = (lightprobe.grid_flag & LIGHTPROBE_GRID_CAPTURE_WORLD);
755 capture_indirect_ = (lightprobe.grid_flag & LIGHTPROBE_GRID_CAPTURE_INDIRECT);
756 capture_emission_ = (lightprobe.grid_flag & LIGHTPROBE_GRID_CAPTURE_EMISSION);
757
758 /* Initialize views data, since they're used by other modules. */
/* NOTE(review): line 759 dropped in extraction — the statement implementing the comment
 * above (presumably a views-sync call) is missing; verify upstream. */
760}
761
/* NOTE(review): signature line dropped in extraction; presumably `void IrradianceBake::sync()`
 * — verify against upstream.
 * Builds all the compute pass objects used during irradiance baking: surfel direct lighting,
 * surfel clustering, surfel ray-list construction (prepare / prefix-sum / flatten / sort /
 * build), surfel light propagation, irradiance capture, and virtual-offset computation.
 * NOTE(review): several doxygen lines are dropped inside this body (776-778, 788, 790, 803,
 * 815, 830, 844, 858, 873, 893, 906) — from the surrounding visible calls these are most
 * likely `pass.barrier(...)` synchronization statements between stages; verify upstream. */
763{
764 {
765 PassSimple &pass = surfel_light_eval_ps_;
766 pass.init();
767 /* Apply lights contribution to scene surfel representation. */
768 pass.shader_set(inst_.shaders.static_shader_get(SURFEL_LIGHT));
769 pass.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
770 pass.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
771 pass.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);
772 pass.bind_resources(inst_.uniform_data);
773 pass.bind_resources(inst_.lights);
774 pass.bind_resources(inst_.shadows);
775 /* Sync with the surfel creation stage. */
779 pass.dispatch(&dispatch_per_surfel_);
780 }
781 {
782 PassSimple &pass = surfel_cluster_build_ps_;
783 pass.init();
784 pass.shader_set(inst_.shaders.static_shader_get(SURFEL_CLUSTER_BUILD));
785 pass.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
786 pass.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
787 pass.bind_image("cluster_list_img", &cluster_list_tx_);
789 pass.dispatch(&dispatch_per_surfel_);
791 }
792 {
793 PassSimple &pass = surfel_ray_build_ps_;
794 pass.init();
795 {
796 /* Count number of surfel per list. */
797 PassSimple::Sub &sub = pass.sub("ListPrepare");
798 sub.shader_set(inst_.shaders.static_shader_get(SURFEL_LIST_PREPARE));
799 sub.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
800 sub.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
801 sub.bind_ssbo("list_counter_buf", &list_counter_buf_);
802 sub.bind_ssbo("list_info_buf", &list_info_buf_);
804 sub.dispatch(&dispatch_per_surfel_);
805 }
806 {
807 /* Prefix sum of list sizes. Outputs an IndexRange per list. */
808 PassSimple::Sub &sub = pass.sub("ListPrefix");
809 sub.shader_set(inst_.shaders.static_shader_get(SURFEL_LIST_PREFIX));
810 sub.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
811 sub.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
812 sub.bind_ssbo("list_counter_buf", &list_counter_buf_);
813 sub.bind_ssbo("list_range_buf", &list_range_buf_);
814 sub.bind_ssbo("list_info_buf", &list_info_buf_);
816 sub.dispatch(&dispatch_per_list_);
817 }
818 {
819 /* Copy surfel list sorting data into a flat array.
820 * All lists data are contiguous in memory using the IndexRange from previous pass. */
821 PassSimple::Sub &sub = pass.sub("ListFlatten");
822 sub.shader_set(inst_.shaders.static_shader_get(SURFEL_LIST_FLATTEN));
823 sub.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
824 sub.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
825 sub.bind_ssbo("list_counter_buf", &list_counter_buf_);
826 sub.bind_ssbo("list_range_buf", &list_range_buf_);
827 sub.bind_ssbo("list_item_distance_buf", &list_item_distance_buf_);
828 sub.bind_ssbo("list_item_surfel_id_buf", &list_item_surfel_id_buf_);
829 sub.bind_ssbo("list_info_buf", &list_info_buf_);
831 sub.dispatch(&dispatch_per_surfel_);
832 }
833 {
834 /* Radix sort of the list. Output surfel index in the sorted list. */
835 PassSimple::Sub &sub = pass.sub("ListSort");
836 sub.shader_set(inst_.shaders.static_shader_get(SURFEL_LIST_SORT));
837 sub.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
838 sub.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
839 sub.bind_ssbo("list_range_buf", &list_range_buf_);
840 sub.bind_ssbo("list_item_surfel_id_buf", &list_item_surfel_id_buf_);
841 sub.bind_ssbo("list_item_distance_buf", &list_item_distance_buf_);
842 sub.bind_ssbo("sorted_surfel_id_buf", &sorted_surfel_id_buf_);
843 sub.bind_ssbo("list_info_buf", &list_info_buf_);
845 sub.dispatch(&dispatch_per_surfel_);
846 }
847 {
848 /* Take the sorted lists array and copy adjacent surfel indices back to the Surfels.
849 * Also relink coplanar surfels to avoid over shadowing. */
850 PassSimple::Sub &sub = pass.sub("ListBuild");
851 sub.shader_set(inst_.shaders.static_shader_get(SURFEL_LIST_BUILD));
852 sub.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
853 sub.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
854 sub.bind_ssbo("list_start_buf", &list_start_buf_);
855 sub.bind_ssbo("list_range_buf", &list_range_buf_);
856 sub.bind_ssbo("sorted_surfel_id_buf", &sorted_surfel_id_buf_);
857 sub.bind_ssbo("list_info_buf", &list_info_buf_);
859 sub.dispatch(&dispatch_per_list_);
860 }
861 }
862 {
863 PassSimple &pass = surfel_light_propagate_ps_;
864 pass.init();
865 {
866 PassSimple::Sub &sub = pass.sub("RayEval");
867 sub.shader_set(inst_.shaders.static_shader_get(SURFEL_RAY));
868 sub.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
869 sub.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
870 sub.bind_resources(inst_.sphere_probes);
/* Ping-pong radiance buffers: read `radiance_src`, write `radiance_dst`. */
871 sub.push_constant("radiance_src", &radiance_src_);
872 sub.push_constant("radiance_dst", &radiance_dst_);
874 sub.dispatch(&dispatch_per_surfel_);
875 }
876 }
877 {
878 PassSimple &pass = irradiance_capture_ps_;
879 pass.init();
880 pass.shader_set(inst_.shaders.static_shader_get(LIGHTPROBE_IRRADIANCE_RAY));
881 pass.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
882 pass.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
883 pass.bind_resources(inst_.sphere_probes);
884 pass.bind_ssbo("list_start_buf", &list_start_buf_);
885 pass.bind_ssbo("list_info_buf", &list_info_buf_);
886 pass.push_constant("radiance_src", &radiance_src_);
887 pass.bind_image("irradiance_L0_img", &irradiance_L0_tx_);
888 pass.bind_image("irradiance_L1_a_img", &irradiance_L1_a_tx_);
889 pass.bind_image("irradiance_L1_b_img", &irradiance_L1_b_tx_);
890 pass.bind_image("irradiance_L1_c_img", &irradiance_L1_c_tx_);
891 pass.bind_image("validity_img", &validity_tx_);
892 pass.bind_image("virtual_offset_img", &virtual_offset_tx_);
894 pass.dispatch(&dispatch_per_grid_sample_);
895 }
896 {
897 PassSimple &pass = irradiance_offset_ps_;
898 pass.init();
899 pass.shader_set(inst_.shaders.static_shader_get(LIGHTPROBE_IRRADIANCE_OFFSET));
900 pass.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
901 pass.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
902 pass.bind_ssbo("list_start_buf", &list_start_buf_);
903 pass.bind_ssbo("list_info_buf", &list_info_buf_);
904 pass.bind_image("cluster_list_img", &cluster_list_tx_);
905 pass.bind_image("virtual_offset_img", &virtual_offset_tx_);
907 pass.dispatch(&dispatch_per_grid_sample_);
908 }
909}
910
/* NOTE(review): Doxygen extraction artifact — the opening signature line (original
 * line 911, presumably `void IrradianceBake::surfel_raster_views_sync(const float3
 * &scene_min,`) was stripped by the generator; the lines below are otherwise verbatim.
 * Purpose (from the visible body): builds three orthographic raster views (one per
 * major axis) that tightly cover the intersection of the scene bounds with the
 * probe's clipped local extent, and caches the scene bounding sphere. */
912 const float3 &scene_max,
913 const float4x4 &probe_to_world)
914{
915 using namespace blender::math;
916
917 float3 location, scale;
918 Quaternion rotation;
919 to_loc_rot_scale(probe_to_world, location, rotation, scale);
/* Scale is stripped from the view matrix so the view stays orthonormal; the grid
 * scale is re-applied below as `target_extent`. */
920 /* Remove scale from view matrix. */
921 float4x4 viewinv = from_loc_rot_scale<float4x4>(location, rotation, float3(1.0f));
922 float4x4 viewmat = invert(viewinv);
923
/* Transform all 8 corners of the scene AABB into grid-local space and take the
 * min/max to get a local-space bounding box of the scene. */
924 /* Compute the intersection between the grid and the scene extents. */
925 float3 extent_min = float3(FLT_MAX);
926 float3 extent_max = float3(-FLT_MAX);
927 for (int x : {0, 1}) {
928 for (int y : {0, 1}) {
929 for (int z : {0, 1}) {
930 float3 ws_corner = scene_min + ((scene_max - scene_min) * float3(x, y, z));
931 float3 ls_corner = transform_point(viewmat, ws_corner);
932 extent_min = min(extent_min, ls_corner);
933 extent_max = max(extent_max, ls_corner);
934 }
935 }
936 }
937 /* Clip distance is added to every axis in both directions, not just Z. */
938 float3 target_extent = scale + clip_distance_;
939 extent_min = max(extent_min, -target_extent);
940 extent_max = min(extent_max, target_extent);
941
/* Raster resolution proportional to surfel density; clamped to a 16384 texture
 * dimension limit. */
942 grid_pixel_extent_ = max(int3(1), int3(surfel_density_ * (extent_max - extent_min)));
943 grid_pixel_extent_ = min(grid_pixel_extent_, int3(16384));
944
/* Bounding sphere of the captured region, stored in world space (xyz = center,
 * w = radius). Used later by the ray-list projection. */
945 float3 ls_midpoint = midpoint(extent_min, extent_max);
946 scene_bound_sphere_ = float4(transform_point(viewinv, ls_midpoint),
947 distance(extent_min, extent_max) / 2.0f);
948
949 /* We could use multi-view rendering here to avoid multiple submissions but it is unlikely to
950 * make any difference. The bottleneck is still the light propagation loop. */
951 auto sync_view = [&](View &view, CartesianBasis basis) {
952 float4x4 capture_viewinv = viewinv * from_rotation<float4x4>(basis);
953
/* Rotate the local extents into the per-axis capture basis before building the
 * orthographic projection. Z is negated because the ortho projection looks down -Z. */
954 float3 capture_extent_min = transform_point(invert(basis), extent_min);
955 float3 capture_extent_max = transform_point(invert(basis), extent_max);
956
957 float4x4 capture_winmat = projection::orthographic(capture_extent_min.x,
958 capture_extent_max.x,
959 capture_extent_min.y,
960 capture_extent_max.y,
961 -capture_extent_min.z,
962 -capture_extent_max.z);
963
964 view.visibility_test(false);
965 view.sync(invert(capture_viewinv), capture_winmat);
966 };
967
968 sync_view(view_x_, basis_x_);
969 sync_view(view_y_, basis_y_);
970 sync_view(view_z_, basis_z_);
971}
972
/* NOTE(review): Doxygen extraction artifact — gaps in the embedded original line
 * numbers (e.g. 975-979, 1021-1022, 1065, 1085, 1111, 1135, 1138, 1150, 1218, 1222)
 * mark lines the generator stripped: likely the doc-comment, the `texture_usage`
 * declaration, `GPU_memory_barrier()` / `GPU_debug_group_end()` calls, a dispatch
 * argument, and the `if (GPU_mem_stats_supported()) {` header whose closing brace
 * is still visible at original line 1160. Code below is otherwise verbatim.
 *
 * Purpose: fills `capture_info_buf_` from the probe settings, allocates and clears
 * the per-grid accumulation textures, computes the scene bounds on GPU, rasters the
 * scene once to count surfels, then (re)allocates the surfel buffer within memory
 * limits and rasters again to actually emit the surfels. Sets `do_break_` and
 * returns early on any failure. */
973void IrradianceBake::surfels_create(const Object &probe_object)
974{
980 using namespace blender::math;
981
982 const ::LightProbe &lightprobe = DRW_object_get_data_for_drawing<::LightProbe>(probe_object);
983
984 int3 grid_resolution = int3(&lightprobe.grid_resolution_x);
985 float4x4 grid_local_to_world = invert(probe_object.world_to_object());
986 float3 grid_scale = math::to_scale(probe_object.object_to_world());
987
988 /* TODO(fclem): Options. */
989 capture_info_buf_.capture_world_direct = capture_world_;
990 capture_info_buf_.capture_world_indirect = capture_world_ && capture_indirect_;
991 capture_info_buf_.capture_visibility_direct = !capture_world_;
992 capture_info_buf_.capture_visibility_indirect = !(capture_world_ && capture_indirect_);
993 capture_info_buf_.capture_indirect = capture_indirect_;
994 capture_info_buf_.capture_emission = capture_emission_;
995
996 LightProbeModule &light_probes = inst_.light_probes;
997 SphereProbeData &world_data = *static_cast<SphereProbeData *>(&light_probes.world_sphere_);
998 capture_info_buf_.world_atlas_coord = world_data.atlas_coord;
999
1000 dispatch_per_grid_sample_ = math::divide_ceil(grid_resolution, int3(IRRADIANCE_GRID_GROUP_SIZE));
1001 capture_info_buf_.irradiance_grid_size = grid_resolution;
1002 capture_info_buf_.irradiance_grid_local_to_world = grid_local_to_world;
1003 capture_info_buf_.irradiance_grid_world_to_local = probe_object.world_to_object();
1004 capture_info_buf_.irradiance_grid_world_to_local_rotation = float4x4(
1005 invert(normalize(float3x3(grid_local_to_world))));
1006
1007 capture_info_buf_.min_distance_to_surface = min_distance_to_surface_;
1008 capture_info_buf_.max_virtual_offset = max_virtual_offset_;
1009 capture_info_buf_.surfel_radius = 0.5f / surfel_density_;
1010 /* Make virtual offset distances scale relative. */
1011 float min_distance_between_grid_samples = math::reduce_min(grid_scale / float3(grid_resolution));
1012 capture_info_buf_.min_distance_to_surface *= min_distance_between_grid_samples;
1013 capture_info_buf_.max_virtual_offset *= min_distance_between_grid_samples;
/* A clamp of 0 in the UI means "no clamping": substitute a huge value. */
1014 capture_info_buf_.clamp_direct = (lightprobe.grid_clamp_direct > 0.0) ?
1015 lightprobe.grid_clamp_direct :
1016 1e20f;
1017 capture_info_buf_.clamp_indirect = (lightprobe.grid_clamp_indirect > 0.0) ?
1018 lightprobe.grid_clamp_indirect :
1019 1e20f;
1020
1023
1024 /* 32bit float is needed here otherwise we lose too much energy from rounding error during the
1025 * accumulation when the sample count is above 500. */
1026 irradiance_L0_tx_.ensure_3d(
1027 gpu::TextureFormat::SFLOAT_32_32_32_32, grid_resolution, texture_usage);
1028 irradiance_L1_a_tx_.ensure_3d(
1029 gpu::TextureFormat::SFLOAT_32_32_32_32, grid_resolution, texture_usage);
1030 irradiance_L1_b_tx_.ensure_3d(
1031 gpu::TextureFormat::SFLOAT_32_32_32_32, grid_resolution, texture_usage);
1032 irradiance_L1_c_tx_.ensure_3d(
1033 gpu::TextureFormat::SFLOAT_32_32_32_32, grid_resolution, texture_usage);
1034 validity_tx_.ensure_3d(gpu::TextureFormat::SFLOAT_32, grid_resolution, texture_usage);
1035 virtual_offset_tx_.ensure_3d(
1036 gpu::TextureFormat::SFLOAT_16_16_16_16, grid_resolution, texture_usage);
1037
/* Any allocation failure aborts the bake with a user-visible message. */
1038 if (!irradiance_L0_tx_.is_valid() || !irradiance_L1_a_tx_.is_valid() ||
1039 !irradiance_L1_b_tx_.is_valid() || !irradiance_L1_c_tx_.is_valid() ||
1040 !validity_tx_.is_valid() || !virtual_offset_tx_.is_valid())
1041 {
1042 inst_.info_append_i18n("Error: Not enough memory to bake {}.", probe_object.id.name);
1043 do_break_ = true;
1044 return;
1045 }
1046
1047 irradiance_L0_tx_.clear(float4(0.0f));
1048 irradiance_L1_a_tx_.clear(float4(0.0f));
1049 irradiance_L1_b_tx_.clear(float4(0.0f));
1050 irradiance_L1_c_tx_.clear(float4(0.0f));
1051 validity_tx_.clear(float4(0.0f));
1052 virtual_offset_tx_.clear(float4(0.0f));
1053
1054 GPU_debug_group_begin("IrradianceBake.SceneBounds");
1055
/* Compute scene AABB on GPU by reducing all resource bounds into the ordered-int
 * min/max fields of `capture_info_buf_`. */
1056 {
1057 draw::Manager &manager = *inst_.manager;
1058 PassSimple &pass = irradiance_bounds_ps_;
1059 pass.init();
1060 pass.shader_set(inst_.shaders.static_shader_get(LIGHTPROBE_IRRADIANCE_BOUNDS));
1061 pass.bind_ssbo("capture_info_buf", &capture_info_buf_);
1062 pass.bind_ssbo("bounds_buf", &manager.bounds_buf.current());
1063 pass.push_constant("resource_len", int(manager.resource_handle_count()));
1064 pass.dispatch(
1066 }
1067
1068 /* Raster the scene to query the number of surfel needed. */
1069 capture_info_buf_.do_surfel_count = false;
1070 capture_info_buf_.do_surfel_output = false;
1071
/* Seed the atomic min/max fields with the ordered-int encodings of +/-FLT_MAX. */
1072 const int neg_flt_max = int(0xFF7FFFFFu ^ 0x7FFFFFFFu); /* floatBitsToOrderedInt(-FLT_MAX) */
1073 const int pos_flt_max = 0x7F7FFFFF; /* floatBitsToOrderedInt(FLT_MAX) */
1074 capture_info_buf_.scene_bound_x_min = pos_flt_max;
1075 capture_info_buf_.scene_bound_y_min = pos_flt_max;
1076 capture_info_buf_.scene_bound_z_min = pos_flt_max;
1077 capture_info_buf_.scene_bound_x_max = neg_flt_max;
1078 capture_info_buf_.scene_bound_y_max = neg_flt_max;
1079 capture_info_buf_.scene_bound_z_max = neg_flt_max;
1080
1081 capture_info_buf_.push_update();
1082
1083 inst_.manager->submit(irradiance_bounds_ps_);
1084
1086 capture_info_buf_.read();
1087
1088 if (capture_info_buf_.scene_bound_x_min == pos_flt_max) {
1089 /* No valid object has been found. */
1090 do_break_ = true;
1091 return;
1092 }
1093
/* Inverse of the GLSL floatBitsToOrderedInt encoding used by the bounds shader. */
1094 auto ordered_int_bits_to_float = [](int32_t int_value) -> float {
1095 int32_t float_bits = (int_value < 0) ? (int_value ^ 0x7FFFFFFF) : int_value;
1096 return *reinterpret_cast<float *>(&float_bits);
1097 };
1098
1099 float3 scene_min = float3(ordered_int_bits_to_float(capture_info_buf_.scene_bound_x_min),
1100 ordered_int_bits_to_float(capture_info_buf_.scene_bound_y_min),
1101 ordered_int_bits_to_float(capture_info_buf_.scene_bound_z_min));
1102 float3 scene_max = float3(ordered_int_bits_to_float(capture_info_buf_.scene_bound_x_max),
1103 ordered_int_bits_to_float(capture_info_buf_.scene_bound_y_max),
1104 ordered_int_bits_to_float(capture_info_buf_.scene_bound_z_max));
1105 /* To avoid losing any surface to the clipping planes, add some padding. */
1106 float epsilon = 1.0f / surfel_density_;
1107 scene_min -= epsilon;
1108 scene_max += epsilon;
1109 surfel_raster_views_sync(scene_min, scene_max, probe_object.object_to_world());
1110
1112
1113 /* WORKAROUND: Sync camera with correct bounds for light culling. */
1114 inst_.camera.sync();
1115 /* WORKAROUND: Sync shadows tile-maps count again with new camera bounds. Fixes issues with sun
1116 * lights. */
1117 inst_.shadows.end_sync();
1118 inst_.lights.end_sync();
1119
1120 GPU_debug_group_begin("IrradianceBake.SurfelsCount");
1121
/* First pass: count only — `do_surfel_output` stays false so nothing is written. */
1122 /* Raster the scene to query the number of surfel needed. */
1123 capture_info_buf_.do_surfel_count = true;
1124 capture_info_buf_.do_surfel_output = false;
1125 capture_info_buf_.surfel_len = 0u;
1126 capture_info_buf_.push_update();
1127
1128 empty_raster_fb_.ensure(math::abs(transform_point(invert(basis_x_), grid_pixel_extent_).xy()));
1129 inst_.pipelines.capture.render(view_x_);
1130 empty_raster_fb_.ensure(math::abs(transform_point(invert(basis_y_), grid_pixel_extent_).xy()));
1131 inst_.pipelines.capture.render(view_y_);
1132 empty_raster_fb_.ensure(math::abs(transform_point(invert(basis_z_), grid_pixel_extent_).xy()));
1133 inst_.pipelines.capture.render(view_z_);
1134
1136
1137 /* Allocate surfel pool. */
1139 capture_info_buf_.read();
1140 if (capture_info_buf_.surfel_len == 0) {
1141 /* No surfel to allocate. */
1142 return;
1143 }
1144
/* Grow-only allocation: only (re)allocate when the requested count exceeds the
 * current buffer. The brace at original line 1160 closes a stripped
 * `if (GPU_mem_stats_supported())` header (original line 1150) — TODO confirm. */
1145 if (capture_info_buf_.surfel_len > surfels_buf_.size()) {
1146 CLOG_INFO(
1147 &Instance::log, "IrradianceBake: Allocating %u surfels.", capture_info_buf_.surfel_len);
1148
1149 size_t max_size = GPU_max_storage_buffer_size();
1151 int total_mem_kb, free_mem_kb;
1152 GPU_mem_stats_get(&total_mem_kb, &free_mem_kb);
1153 /* Leave at least 128MByte for OS and stuffs.
1154 * Try to avoid crashes because of OUT_OF_MEMORY errors. */
1155 size_t max_alloc = (size_t(total_mem_kb) - 128 * 1024) * 1024;
1156 /* Cap to 95% of available memory. */
1157 size_t max_free = size_t((size_t(free_mem_kb) * 1024) * 0.95f);
1158
1159 max_size = min(max_size, min(max_alloc, max_free));
1160 }
1161
1162 size_t required_mem = sizeof(Surfel) * (capture_info_buf_.surfel_len - surfels_buf_.size());
1163 if (required_mem > max_size) {
/* Two different messages: one when the SSBO size limit is the bound, one when
 * available VRAM is the bound. */
1164 const bool is_ssbo_bound = (max_size == GPU_max_storage_buffer_size());
1165 const uint req_mb = required_mem / (1024 * 1024);
1166 const uint max_mb = max_size / (1024 * 1024);
1167
1168 if (is_ssbo_bound) {
1169 inst_.info_append_i18n(
1170 "Cannot allocate enough video memory to bake \"{}\" ({} / {} MBytes).\n"
1171 "Try reducing surfel resolution or capture distance to lower the size of the "
1172 "allocation.",
1173 probe_object.id.name,
1174 req_mb,
1175 max_mb);
1176 }
1177 else {
1178 inst_.info_append_i18n(
1179 "Not enough available video memory to bake \"{}\" ({} / {} MBytes).\n"
1180 "Try reducing surfel resolution or capture distance to lower the size of the "
1181 "allocation.",
1182 probe_object.id.name,
1183 req_mb,
1184 max_mb);
1185 }
1186
1187 if (G.background) {
1188 /* Print something in background mode instead of failing silently. */
1189 fprintf(stderr, "%s", inst_.info_get());
1190 }
1191
1192 do_break_ = true;
1193 return;
1194 }
1195 }
1196
1197 surfels_buf_.resize(capture_info_buf_.surfel_len);
1198 surfels_buf_.clear_to_zero();
1199
1200 dispatch_per_surfel_.x = divide_ceil_u(surfels_buf_.size(), SURFEL_GROUP_SIZE);
1201
1202 GPU_debug_group_begin("IrradianceBake.SurfelsCreate");
1203
/* Second pass: same three raster views, now writing the surfels into the buffer. */
1204 /* Raster the scene to generate the surfels. */
1205 capture_info_buf_.do_surfel_count = true;
1206 capture_info_buf_.do_surfel_output = true;
1207 capture_info_buf_.surfel_len = 0u;
1208 capture_info_buf_.push_update();
1209
1210 empty_raster_fb_.ensure(math::abs(transform_point(invert(basis_x_), grid_pixel_extent_).xy()));
1211 inst_.pipelines.capture.render(view_x_);
1212 empty_raster_fb_.ensure(math::abs(transform_point(invert(basis_y_), grid_pixel_extent_).xy()));
1213 inst_.pipelines.capture.render(view_y_);
1214 empty_raster_fb_.ensure(math::abs(transform_point(invert(basis_z_), grid_pixel_extent_).xy()));
1215 inst_.pipelines.capture.render(view_z_);
1216
1217 /* Sync with any other following pass using the surfel buffer. */
1219 /* Read back so that following push_update will contain correct surfel count. */
1220 capture_info_buf_.read();
1221
1223}
1224
/* NOTE(review): signature line (original 1225) stripped by the doc generator —
 * presumably `void IrradianceBake::surfels_lights_eval()` given the submitted pass;
 * TODO confirm against the real source. The brace at original line 1238 closes a
 * stripped conditional header (original 1233) — likely a `GPU_type_matches(...)`
 * platform check, judging by the symbols in the generated index; confirm.
 * Purpose: sets up light/shadow views and evaluates direct lighting on surfels. */
1226{
1227 /* Use the last setup view. This should work since the view is orthographic. */
1228 /* TODO(fclem): Remove this. It is only present to avoid crash inside `shadows.set_view` */
1229 inst_.render_buffers.acquire(int2(1));
1230 inst_.hiz_buffer.set_source(&inst_.render_buffers.depth_tx);
1231 inst_.lights.set_view(view_z_, grid_pixel_extent_.xy());
1232 inst_.shadows.set_view(view_z_, grid_pixel_extent_.xy());
1234 /* There seems to be a synchronization issue with shadow rendering pass. If not waiting, the
1235 * surfels are lit without shadows. Waiting for sync here shouldn't be a huge bottleneck
1236 * anyway. */
1237 GPU_finish();
1238 }
1239 inst_.render_buffers.release();
1240
1241 inst_.manager->submit(surfel_light_eval_ps_, view_z_);
1242}
1243
/* NOTE(review): signature line (original 1244) stripped — presumably
 * `void IrradianceBake::clusters_build()` given `surfel_cluster_build_ps_`; confirm.
 * Builds the per-grid-sample surfel cluster texture used by the virtual offset
 * computation. Skipped entirely when virtual offsets are disabled. The gap at
 * original lines 1249-1250 likely held the `texture_usage` declaration — confirm. */
1245{
1246 if (max_virtual_offset_ == 0.0f) {
1247 return;
1248 }
1251
1252 cluster_list_tx_.ensure_3d(
1253 gpu::TextureFormat::SINT_32, capture_info_buf_.irradiance_grid_size, texture_usage);
/* -1 marks "no surfel" entries. */
1254 cluster_list_tx_.clear(int4(-1));
1255 /* View is not important here. It is only for validation. */
1256 inst_.manager->submit(surfel_cluster_build_ps_, view_z_);
1257}
1258
/* NOTE(review): signature line (original 1259) stripped — presumably
 * `void IrradianceBake::irradiance_offset()` given `irradiance_offset_ps_`; confirm.
 * Computes per-sample virtual offsets from the cluster texture, then frees the
 * cluster texture which is no longer needed. */
1260{
1261 if (max_virtual_offset_ == 0.0f) {
1262 /* NOTE: Virtual offset texture should already have been cleared to 0. */
1263 return;
1264 }
1265
1266 inst_.manager->submit(irradiance_offset_ps_, view_z_);
1267
1268 /* Not needed after this point. */
1269 cluster_list_tx_.free();
1270}
1271
/* NOTE(review): signature line (original 1272) stripped — presumably
 * `void IrradianceBake::raylists_build()` given `surfel_ray_build_ps_`; confirm.
 * Picks a random ray direction, builds an orthographic view along it, sizes a 2D
 * grid of surfel lists covering the scene bounding sphere, resizes the list
 * buffers, and dispatches the list-build pass. */
1273{
1274 using namespace blender::math;
1275
/* Random direction on the unit sphere; `up` is the ray direction, `forward` any
 * perpendicular axis. */
1276 float2 rand_uv = inst_.sampling.rng_2d_get(eSamplingDimension::SAMPLING_LENS_U);
1277 const float3 ray_direction = Sampling::sample_sphere(rand_uv);
1278 const float3 up = ray_direction;
1279 const float3 forward = cross(up, normalize(orthogonal(up)));
1280 const float4x4 viewinv = from_orthonormal_axes<float4x4>(float3(0.0f), forward, up);
1281 const float4x4 viewmat = invert(viewinv);
1282
/* Project the scene bounding sphere to get the 2D extent the list grid must cover. */
1283 /* Compute projection bounds. */
1284 float2 min, max;
1285 min = max = transform_point(viewmat, scene_bound_sphere_.xyz()).xy();
1286 min -= scene_bound_sphere_.w;
1287 max += scene_bound_sphere_.w;
1288
1289 /* This avoids light leaking by making sure that for one surface there will always be at least 1
1290 * surfel capture inside a ray list. Since the surface with the maximum distance (after
1291 * projection) between adjacent surfels is a slope that goes through 3 corners of a cube,
1292 * the distance the grid needs to cover is the diagonal of a cube face.
1293 *
1294 * The lower the number the more surfels it clumps together in the same surfel-list.
1295 * Biasing the grid_density like that will create many invalid links between coplanar surfels.
1296 * These are dealt with during the list sorting pass.
1297 *
1298 * This has a side effect of inflating shadows and emissive surfaces.
1299 *
1300 * We add an extra epsilon just in case. We really need this step to be leak free. */
1301 const float max_distance_between_neighbor_surfels_inv = M_SQRT1_2 - 1e-4;
1302 /* Surfel list per unit distance. */
1303 const float ray_grid_density = surfel_density_ * max_distance_between_neighbor_surfels_inv;
1304 /* Surfel list size in unit distance. */
1305 const float pixel_size = 1.0f / ray_grid_density;
1306 list_info_buf_.ray_grid_size = math::max(int2(1), int2(ray_grid_density * (max - min)));
1307
1308 /* Add a 2 pixels margin to have empty lists for irradiance grid samples to fall into (as they
1309 * are not considered by the scene bounds). The first pixel margin is because we are jittering
1310 * the grid position. */
1311 list_info_buf_.ray_grid_size += int2(4);
1312 min -= pixel_size * 2.0f;
1313 max += pixel_size * 2.0f;
1314
1315 /* Randomize grid center to avoid uneven inflating of corners in some directions. */
1316 const float2 aa_rand = inst_.sampling.rng_2d_get(eSamplingDimension::SAMPLING_FILTER_U);
1317 /* Offset in surfel list "pixel". */
1318 const float2 aa_offset = (aa_rand - 0.5f) * 0.499f;
1319 min += pixel_size * aa_offset;
1320
1321 list_info_buf_.list_max = list_info_buf_.ray_grid_size.x * list_info_buf_.ray_grid_size.y;
1322 list_info_buf_.push_update();
1323
1324 /* NOTE: Z values do not really matter since we are not doing any rasterization. */
1325 const float4x4 winmat = projection::orthographic<float>(min.x, max.x, min.y, max.y, 0, 1);
1326
1327 ray_view_.sync(viewmat, winmat);
1328
1329 dispatch_per_list_.x = divide_ceil_u(list_info_buf_.list_max, SURFEL_LIST_GROUP_SIZE);
1330
/* Buffer sizes are rounded up to a multiple of 4 (padding for the GPU layout). */
1331 list_start_buf_.resize(ceil_to_multiple_u(list_info_buf_.list_max, 4));
1332 list_counter_buf_.resize(ceil_to_multiple_u(list_info_buf_.list_max, 4));
1333 list_range_buf_.resize(ceil_to_multiple_u(list_info_buf_.list_max * 2, 4));
1334
/* `max_ii(1, ...)` keeps the buffers non-empty even when there are no surfels. */
1335 list_item_distance_buf_.resize(ceil_to_multiple_u(max_ii(1, capture_info_buf_.surfel_len), 4));
1336 list_item_surfel_id_buf_.resize(ceil_to_multiple_u(max_ii(1, capture_info_buf_.surfel_len), 4));
1337 sorted_surfel_id_buf_.resize(ceil_to_multiple_u(max_ii(1, capture_info_buf_.surfel_len), 4));
1338
1339 GPU_storagebuf_clear(list_counter_buf_, 0);
1340 /* Clear for the case where there are no list or no surfel.
1341 * Otherwise the irradiance_capture stage will have broken lists. */
1342 GPU_storagebuf_clear(list_start_buf_, -1);
1343 inst_.manager->submit(surfel_ray_build_ps_, ray_view_);
1344}
1345
/* NOTE(review): signature line (original 1346) stripped — presumably
 * `void IrradianceBake::propagate_light()` given `surfel_light_propagate_ps_`;
 * confirm. Runs one bounce of surfel-to-surfel light propagation, then ping-pongs
 * the source/destination radiance buffers. */
1347{
1348 /* NOTE: Subtract 1 because after `sampling.step()`. */
1349 capture_info_buf_.sample_index = inst_.sampling.sample_index() - 1;
1350 capture_info_buf_.sample_count = inst_.sampling.sample_count();
1351 capture_info_buf_.push_update();
1352
1353 inst_.manager->submit(surfel_light_propagate_ps_, ray_view_);
1354
/* Swap so the next iteration reads what this one wrote. */
1355 std::swap(radiance_src_, radiance_dst_);
1356}
1357
/* NOTE(review): signature line (original 1358) stripped — presumably
 * `void IrradianceBake::irradiance_capture()` given the submitted pass; confirm.
 * Accumulates the current ray batch into the irradiance grid textures. */
1359{
1360 inst_.manager->submit(irradiance_capture_ps_, ray_view_);
1361}
1362
/* Debug-only readback: copies the GPU surfel buffer into `cache_frame->surfels`
 * for CPU-side display. NOTE(review): the `ELEM(...)` argument lines (original
 * 1366-1369) were stripped by the doc generator — presumably the surfel debug
 * modes (e.g. DEBUG_IRRADIANCE_CACHE_SURFELS_IRRADIANCE / _VISIBILITY per the
 * generated index); outside those modes the function is a no-op. A line between
 * original 1373 and 1375 (likely a memory barrier) was also stripped. */
1363void IrradianceBake::read_surfels(LightProbeGridCacheFrame *cache_frame)
1364{
1365 if (!ELEM(inst_.debug_mode,
1370 {
1371 return;
1372 }
1373
1375 capture_info_buf_.read();
1376 surfels_buf_.read();
1377
/* Allocate the CPU copy sized to the actual surfel count, then copy. */
1378 cache_frame->surfels_len = capture_info_buf_.surfel_len;
1379 cache_frame->surfels = MEM_malloc_arrayN<Surfel>(cache_frame->surfels_len, __func__);
1380
1381 MutableSpan<Surfel> surfels_dst((Surfel *)cache_frame->surfels, cache_frame->surfels_len);
1382 Span<Surfel> surfels_src(surfels_buf_.data(), cache_frame->surfels_len);
1383 surfels_dst.copy_from(surfels_src);
1384}
1385
/* Debug-only readback of the virtual offset texture into the cache frame.
 * NOTE(review): lines at original 1392 (likely a barrier) and 1395 (the read's
 * data-format argument) were stripped by the doc generator. */
1386void IrradianceBake::read_virtual_offset(LightProbeGridCacheFrame *cache_frame)
1387{
1388 if (!ELEM(inst_.debug_mode, eDebugMode::DEBUG_IRRADIANCE_CACHE_VIRTUAL_OFFSET)) {
1389 return;
1390 }
1391
1393
1394 cache_frame->baking.virtual_offset = (float (*)[4])virtual_offset_tx_.read<float4>(
1396}
1397
/* NOTE(review): signature line (original 1398) stripped — presumably
 * `LightProbeGridCacheFrame *IrradianceBake::read_result_packed()` per the class
 * declaration in the generated index; the cache-frame creation line (original
 * 1400, likely `BKE_lightprobe_grid_cache_frame_create()`) and a barrier line
 * (original 1409) were also stripped. Reads the raw float4 baking textures back
 * into a newly created cache frame without unpacking. */
1399{
1401
1402 read_surfels(cache_frame);
1403 read_virtual_offset(cache_frame);
1404
1405 cache_frame->size[0] = irradiance_L0_tx_.width();
1406 cache_frame->size[1] = irradiance_L0_tx_.height();
1407 cache_frame->size[2] = irradiance_L0_tx_.depth();
1408
1410
/* Ownership of the returned allocations transfers to the cache frame. */
1411 cache_frame->baking.L0 = (float (*)[4])irradiance_L0_tx_.read<float4>(GPU_DATA_FLOAT);
1412 cache_frame->baking.L1_a = (float (*)[4])irradiance_L1_a_tx_.read<float4>(GPU_DATA_FLOAT);
1413 cache_frame->baking.L1_b = (float (*)[4])irradiance_L1_b_tx_.read<float4>(GPU_DATA_FLOAT);
1414 cache_frame->baking.L1_c = (float (*)[4])irradiance_L1_c_tx_.read<float4>(GPU_DATA_FLOAT);
1415 cache_frame->baking.validity = (float *)validity_tx_.read<float>(GPU_DATA_FLOAT);
1416
1417 return cache_frame;
1418}
1419
/* NOTE(review): signature line (original 1420) stripped — presumably
 * `LightProbeGridCacheFrame *IrradianceBake::read_result_unpacked()`; the
 * cache-frame creation line (original 1422) and a barrier (original 1431) were
 * also stripped, as was the `connectivity.validity[i] = ...` assignment head at
 * original line 1466 (its trailing argument remains at 1467).
 * Reads the float4 baking textures back, then splits them on the CPU into
 * separate RGB irradiance, scalar visibility (the alpha channels), and byte
 * validity arrays, freeing the packed staging copies afterwards. */
1421{
1423
1424 read_surfels(cache_frame);
1425 read_virtual_offset(cache_frame);
1426
1427 cache_frame->size[0] = irradiance_L0_tx_.width();
1428 cache_frame->size[1] = irradiance_L0_tx_.height();
1429 cache_frame->size[2] = irradiance_L0_tx_.depth();
1430
1432
1433 cache_frame->baking.L0 = (float (*)[4])irradiance_L0_tx_.read<float4>(GPU_DATA_FLOAT);
1434 cache_frame->baking.L1_a = (float (*)[4])irradiance_L1_a_tx_.read<float4>(GPU_DATA_FLOAT);
1435 cache_frame->baking.L1_b = (float (*)[4])irradiance_L1_b_tx_.read<float4>(GPU_DATA_FLOAT);
1436 cache_frame->baking.L1_c = (float (*)[4])irradiance_L1_c_tx_.read<float4>(GPU_DATA_FLOAT);
1437 cache_frame->baking.validity = (float *)validity_tx_.read<float>(GPU_DATA_FLOAT);
1438
/* int64_t avoids overflow for large grids when multiplying the three dimensions. */
1439 int64_t sample_count = int64_t(irradiance_L0_tx_.width()) * irradiance_L0_tx_.height() *
1440 irradiance_L0_tx_.depth();
1441 size_t coefficient_texture_size = sizeof(*cache_frame->irradiance.L0) * sample_count;
1442 size_t validity_texture_size = sizeof(*cache_frame->connectivity.validity) * sample_count;
1443 cache_frame->irradiance.L0 = (float (*)[3])MEM_mallocN(coefficient_texture_size, __func__);
1444 cache_frame->irradiance.L1_a = (float (*)[3])MEM_mallocN(coefficient_texture_size, __func__);
1445 cache_frame->irradiance.L1_b = (float (*)[3])MEM_mallocN(coefficient_texture_size, __func__);
1446 cache_frame->irradiance.L1_c = (float (*)[3])MEM_mallocN(coefficient_texture_size, __func__);
1447 cache_frame->connectivity.validity = (uint8_t *)MEM_mallocN(validity_texture_size, __func__);
1448
1449 size_t visibility_texture_size = sizeof(*cache_frame->irradiance.L0) * sample_count;
1450 cache_frame->visibility.L0 = (float *)MEM_mallocN(visibility_texture_size, __func__);
1451 cache_frame->visibility.L1_a = (float *)MEM_mallocN(visibility_texture_size, __func__);
1452 cache_frame->visibility.L1_b = (float *)MEM_mallocN(visibility_texture_size, __func__);
1453 cache_frame->visibility.L1_c = (float *)MEM_mallocN(visibility_texture_size, __func__);
1454
1455 /* TODO(fclem): This could be done on GPU if that's faster. */
1456 for (auto i : IndexRange(sample_count)) {
1457 copy_v3_v3(cache_frame->irradiance.L0[i], cache_frame->baking.L0[i]);
1458 copy_v3_v3(cache_frame->irradiance.L1_a[i], cache_frame->baking.L1_a[i]);
1459 copy_v3_v3(cache_frame->irradiance.L1_b[i], cache_frame->baking.L1_b[i]);
1460 copy_v3_v3(cache_frame->irradiance.L1_c[i], cache_frame->baking.L1_c[i]);
1461
/* Visibility rides in the alpha channel of each packed coefficient texture. */
1462 cache_frame->visibility.L0[i] = cache_frame->baking.L0[i][3];
1463 cache_frame->visibility.L1_a[i] = cache_frame->baking.L1_a[i][3];
1464 cache_frame->visibility.L1_b[i] = cache_frame->baking.L1_b[i][3];
1465 cache_frame->visibility.L1_c[i] = cache_frame->baking.L1_c[i][3];
1467 cache_frame->baking.validity[i]);
1468 }
1469
/* Packed staging copies are no longer needed once unpacked. */
1470 MEM_SAFE_FREE(cache_frame->baking.L0);
1471 MEM_SAFE_FREE(cache_frame->baking.L1_a);
1472 MEM_SAFE_FREE(cache_frame->baking.L1_b);
1473 MEM_SAFE_FREE(cache_frame->baking.L1_c);
1474 MEM_SAFE_FREE(cache_frame->baking.validity);
1475
1476 return cache_frame;
1477}
1478
1480
1481} // namespace blender::eevee
General operations for probes.
struct LightProbeGridCacheFrame * BKE_lightprobe_grid_cache_frame_create(void)
int64_t BKE_lightprobe_grid_cache_frame_sample_count(const struct LightProbeGridCacheFrame *cache)
#define BLI_assert(a)
Definition BLI_assert.h:46
MINLINE uint ceil_to_multiple_u(uint a, uint b)
MINLINE uint divide_ceil_u(uint a, uint b)
MINLINE int max_ii(int a, int b)
#define M_SQRT1_2
MINLINE void copy_v3_v3(float r[3], const float a[3])
unsigned int uint
#define UNPACK3(a)
#define ELEM(...)
#define CLOG_INFO(clg_ref,...)
Definition CLG_log.h:190
struct LightProbeGridCacheFrame LightProbeGridCacheFrame
@ LIGHTPROBE_GRID_CAPTURE_EMISSION
@ LIGHTPROBE_GRID_CAPTURE_WORLD
@ LIGHTPROBE_GRID_CAPTURE_INDIRECT
T & DRW_object_get_data_for_drawing(const Object &object)
static AppView * view
static void View(GHOST_IWindow *window, bool stereo, int eye=0)
void GPU_mem_stats_get(int *r_totalmem, int *r_freemem)
bool GPU_mem_stats_supported()
size_t GPU_max_storage_buffer_size()
void GPU_debug_group_end()
Definition gpu_debug.cc:33
void GPU_debug_group_begin(const char *name)
Definition gpu_debug.cc:22
@ GPU_DEVICE_ANY
bool GPU_type_matches(GPUDeviceType device, GPUOSType os, GPUDriverType driver)
@ GPU_DRIVER_ANY
@ GPU_OS_MAC
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
@ GPU_PRIM_TRI_STRIP
@ GPU_PRIM_TRIS
@ GPU_BARRIER_SHADER_STORAGE
Definition GPU_state.hh:48
@ GPU_BARRIER_TEXTURE_FETCH
Definition GPU_state.hh:37
@ GPU_BARRIER_BUFFER_UPDATE
Definition GPU_state.hh:56
@ GPU_BARRIER_SHADER_IMAGE_ACCESS
Definition GPU_state.hh:35
@ GPU_BARRIER_TEXTURE_UPDATE
Definition GPU_state.hh:39
void GPU_finish()
Definition gpu_state.cc:310
void GPU_memory_barrier(GPUBarrier barrier)
Definition gpu_state.cc:326
void GPU_storagebuf_clear(blender::gpu::StorageBuf *ssbo, uint32_t clear_value)
void GPU_texture_swizzle_set(blender::gpu::Texture *texture, const char swizzle[4])
void GPU_texture_update_sub(blender::gpu::Texture *texture, eGPUDataFormat data_format, const void *pixels, int offset_x, int offset_y, int offset_z, int width, int height, int depth)
@ GPU_DATA_UBYTE
@ GPU_DATA_FLOAT
eGPUTextureUsage
@ GPU_TEXTURE_USAGE_SHADER_READ
@ GPU_TEXTURE_USAGE_SHADER_WRITE
@ GPU_TEXTURE_USAGE_HOST_READ
@ GPU_TEXTURE_USAGE_ATTACHMENT
@ GPU_TEXTURE_USAGE_ATOMIC
#define MEM_SAFE_FREE(v)
BMesh const char void * data
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
long long int int64_t
SIMD_FORCE_INLINE const btScalar & z() const
Return the z value.
Definition btQuadWord.h:117
constexpr void copy_from(Span< T > values) const
Definition BLI_span.hh:739
int64_t size() const
void append(const T &value)
MutableSpan< T > as_mutable_span()
void extend(Span< T > array)
Span< T > as_span() const
SwapChain< ObjectBoundsBuf, 2 > bounds_buf
uint resource_handle_count() const
void bind_resources(U &resources)
Definition draw_pass.hh:449
void shader_set(gpu::Shader *shader)
void bind_texture(const char *name, gpu::Texture *texture, GPUSamplerState state=sampler_auto)
void bind_image(const char *name, gpu::Texture *image)
PassBase< DrawCommandBufType > & sub(const char *name)
Definition draw_pass.hh:690
void dispatch(int group_len)
void barrier(GPUBarrier type)
void push_constant(const char *name, const float &data)
void bind_ssbo(const char *name, gpu::StorageBuf *buffer)
detail::PassBase< command::DrawCommandBuf > Sub
Definition draw_pass.hh:499
void init(const Object &probe_object)
LightProbeGridCacheFrame * read_result_packed()
LightProbeGridCacheFrame * read_result_unpacked()
void surfels_create(const Object &probe_object)
void surfel_raster_views_sync(const float3 &scene_min, const float3 &scene_max, const float4x4 &probe_to_world)
static float3 sample_sphere(const float2 &rand)
Vector< IrradianceBrickPacked > bricks_alloc(int brick_len)
void bricks_free(Vector< IrradianceBrickPacked > &bricks)
void viewport_draw(View &view, gpu::FrameBuffer *view_fb)
nullptr float
@ DRW_STATE_WRITE_DEPTH
Definition draw_state.hh:29
@ DRW_STATE_CLIP_CONTROL_UNIT_RANGE
Definition draw_state.hh:68
@ DRW_STATE_WRITE_COLOR
Definition draw_state.hh:30
@ DRW_STATE_CULL_BACK
Definition draw_state.hh:43
#define SURFEL_GROUP_SIZE
#define IRRADIANCE_BOUNDS_GROUP_SIZE
#define RBUFS_UTILITY_TEX_SLOT
#define CAPTURE_BUF_SLOT
#define SURFEL_LIST_GROUP_SIZE
#define IRRADIANCE_GRID_MAX
#define IRRADIANCE_GRID_BRICK_SIZE
#define IRRADIANCE_GRID_GROUP_SIZE
#define SURFEL_BUF_SLOT
VecBase< float, D > normalize(VecOp< float, D >) RET
VecBase< float, 3 > cross(VecOp< float, 3 >, VecOp< float, 3 >) RET
float distance(VecOp< float, D >, VecOp< float, D >) RET
BLI_INLINE void grid_to_world(HairGrid *grid, float vecw[3], const float vec[3])
CCL_NAMESPACE_BEGIN ccl_device float invert(const float color, const float factor)
Definition invert.h:11
void * MEM_mallocN(size_t len, const char *str)
Definition mallocn.cc:128
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
MINLINE unsigned char unit_float_to_uchar_clamp(float val)
#define G(x, y, z)
detail::Pass< command::DrawCommandBuf > PassSimple
static IrradianceBrickPacked irradiance_brick_pack(IrradianceBrick brick)
@ DEBUG_IRRADIANCE_CACHE_SURFELS_VISIBILITY
@ DEBUG_IRRADIANCE_CACHE_SURFELS_IRRADIANCE
MatBase< T, 4, 4 > orthographic(T left, T right, T bottom, T top, T near_clip, T far_clip)
Create an orthographic projection matrix using OpenGL coordinate convention: Maps each axis range to ...
QuaternionBase< float > Quaternion
MatBase< T, NumCol, NumRow > transpose(const MatBase< T, NumRow, NumCol > &mat)
MatBase< T, NumCol, NumRow > scale(const MatBase< T, NumCol, NumRow > &mat, const VectorT &scale)
T reduce_max(const VecBase< T, Size > &a)
VecBase< T, Size > divide_ceil(const VecBase< T, Size > &a, const VecBase< T, Size > &b)
T reduce_min(const VecBase< T, Size > &a)
T min(const T &a, const T &b)
CartesianBasis invert(const CartesianBasis &basis)
T midpoint(const T &a, const T &b)
MatT from_scale(const VecBase< typename MatT::base_type, ScaleDim > &scale)
T reduce_mul(const VecBase< T, Size > &a)
VecBase< T, 3 > to_scale(const MatBase< T, NumCol, NumRow > &mat)
VecBase< T, 3 > orthogonal(const VecBase< T, 3 > &v)
T max(const T &a, const T &b)
T abs(const T &a)
MatT from_location(const typename MatT::loc_type &location)
void to_loc_rot_scale(const MatBase< T, 3, 3 > &mat, VecBase< T, 2 > &r_location, AngleRadianBase< T > &r_rotation, VecBase< T, 2 > &r_scale)
CartesianBasis from_orthonormal_axes(const AxisSigned forward, const AxisSigned up)
MatT from_rotation(const RotationT &rotation)
MatT from_loc_rot_scale(const typename MatT::loc_type &location, const RotationT &rotation, const VecBase< typename MatT::base_type, ScaleDim > &scale)
VecBase< uint32_t, 2 > uint2
VecBase< int32_t, 4 > int4
bool assign_if_different(T &old_value, T new_value)
MatBase< float, 4, 4 > float4x4
MatBase< float, 3, 4 > float3x4
VecBase< float, 4 > float4
VecBase< int32_t, 2 > int2
VecBase< float, 2 > float2
VecBase< int32_t, 3 > int3
MatBase< float, 3, 3 > float3x3
VecBase< float, 3 > float3
#define min(a, b)
Definition sort.cc:36
#define FLT_MAX
Definition stdcycles.h:14
char name[258]
Definition DNA_ID.h:432
LightProbeVisibilityData visibility
LightProbeConnectivityData connectivity
LightProbeIrradianceData irradiance
VecBase< T, 2 > xy() const
i
Definition text_draw.cc:230
max
Definition text_draw.cc:251
ccl_device_inline float3 transform_point(const ccl_private Transform *t, const float3 a)
Definition transform.h:56
int xy[2]
Definition wm_draw.cc:178