Blender V5.0
eevee_shader.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2021 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
11
12#include "GPU_capabilities.hh"
13
14#include "BKE_material.hh"
15#include "BKE_node_runtime.hh"
16
17#include "DNA_world_types.h"
18
20
21#include "eevee_shader.hh"
22
23#include "eevee_shadow.hh"
24
25#include "BLI_assert.h"
26#include "BLI_math_bits.h"
27
28namespace blender::eevee {
29
30/* -------------------------------------------------------------------- */
34
36{
37 return &get_static_cache().get();
38}
39
41{
42 get_static_cache().release();
43}
44
46{
47 for (auto i : IndexRange(MAX_SHADER_TYPE)) {
48 const char *name = static_shader_create_info_name_get(eShaderType(i));
49#ifndef NDEBUG
50 if (name == nullptr) {
51 std::cerr << "EEVEE: Missing case for eShaderType(" << i
52 << ") in static_shader_create_info_name_get().";
53 BLI_assert(0);
54 }
55 const GPUShaderCreateInfo *create_info = GPU_shader_create_info_get(name);
56 BLI_assert_msg(create_info != nullptr, "EEVEE: Missing create info for static shader.");
57#endif
58 shaders_[i] = StaticShader(name);
59 }
60}
61
63{
64 /* Cancel compilation to avoid asserts on exit at ShaderCompiler destructor. */
65
66 /* Specializations first, to avoid releasing the base shader while the specialization compilation
67 * is still in flight. */
68 for (SpecializationBatchHandle &handle : specialization_handles_.values()) {
69 if (handle) {
71 }
72 }
73}
74
76
77/* -------------------------------------------------------------------- */
81
82ShaderGroups ShaderModule::static_shaders_load(const ShaderGroups request_bits,
83 bool block_until_ready)
84{
85 std::lock_guard lock(mutex_);
86
88 auto request = [&](ShaderGroups bit, Span<eShaderType> shader_types) {
89 if (request_bits & bit) {
90 bool all_loaded = true;
91 for (eShaderType shader : shader_types) {
92 if (shaders_[shader].is_ready()) {
93 /* Noop. */
94 }
95 else if (block_until_ready) {
96 shaders_[shader].get();
97 }
98 else {
99 shaders_[shader].ensure_compile_async();
100 all_loaded = false;
101 }
102 }
103 if (all_loaded) {
104 ready |= bit;
105 }
106 }
107 };
108
109#define AS_SPAN(arr) Span<eShaderType>(arr, ARRAY_SIZE(arr))
110 {
111 /* These are the slowest shaders by far. Submitting them first makes sure they overlap with
112 * the other shaders' compilation. */
113 const eShaderType shader_list[] = {DEFERRED_LIGHT_TRIPLE,
119 request(DEFERRED_LIGHTING_SHADERS, AS_SPAN(shader_list));
120 }
121 {
122 const eShaderType shader_list[] = {AMBIENT_OCCLUSION_PASS};
123 request(AMBIENT_OCCLUSION_SHADERS, AS_SPAN(shader_list));
124 }
125 {
126 const eShaderType shader_list[] = {RENDERPASS_CLEAR,
127 FILM_COPY,
128 FILM_COMP,
130 FILM_FRAG,
136 request(FILM_SHADERS, AS_SPAN(shader_list));
137 }
138 {
139 const eShaderType shader_list[] = {DEFERRED_CAPTURE_EVAL};
140 request(DEFERRED_CAPTURE_SHADERS, AS_SPAN(shader_list));
141 }
142 {
143 const eShaderType shader_list[] = {DEFERRED_PLANAR_EVAL};
144 request(DEFERRED_PLANAR_SHADERS, AS_SPAN(shader_list));
145 }
146 {
147 const eShaderType shader_list[] = {DOF_BOKEH_LUT,
159 DOF_SETUP,
164 request(DEPTH_OF_FIELD_SHADERS, AS_SPAN(shader_list));
165 }
166 {
167 const eShaderType shader_list[] = {HIZ_UPDATE, HIZ_UPDATE_LAYER};
168 request(HIZ_SHADERS, AS_SPAN(shader_list));
169 }
170 {
171 const eShaderType shader_list[] = {
173 request(HORIZON_SCAN_SHADERS, AS_SPAN(shader_list));
174 }
175 {
176 const eShaderType shader_list[] = {LIGHT_CULLING_DEBUG,
182 request(LIGHT_CULLING_SHADERS, AS_SPAN(shader_list));
183 }
184 {
185 const eShaderType shader_list[] = {
187 request(IRRADIANCE_BAKE_SHADERS, AS_SPAN(shader_list));
188 }
189 {
190 const eShaderType shader_list[] = {MOTION_BLUR_GATHER,
194 request(MOTION_BLUR_SHADERS, AS_SPAN(shader_list));
195 }
196 {
197 const eShaderType shader_list[] = {RAY_DENOISE_BILATERAL,
206 request(RAYTRACING_SHADERS, AS_SPAN(shader_list));
207 }
208 {
209 const eShaderType shader_list[] = {SPHERE_PROBE_CONVOLVE,
214 request(SPHERE_PROBE_SHADERS, AS_SPAN(shader_list));
215 }
216 {
218 request(VOLUME_PROBE_SHADERS, AS_SPAN(shader_list));
219 }
220 {
221 const eShaderType shader_list[] = {SHADOW_CLIPMAP_CLEAR,
238 request(SHADOW_SHADERS, AS_SPAN(shader_list));
239 }
240 {
241 const eShaderType shader_list[] = {SUBSURFACE_CONVOLVE, SUBSURFACE_SETUP};
242 request(SUBSURFACE_SHADERS, AS_SPAN(shader_list));
243 }
244 {
245 const eShaderType shader_list[] = {SURFEL_CLUSTER_BUILD,
250 SURFEL_RAY};
251 request(SURFEL_SHADERS, AS_SPAN(shader_list));
252 }
253 {
254 const eShaderType shader_list[] = {VERTEX_COPY};
255 request(VERTEX_COPY_SHADERS, AS_SPAN(shader_list));
256 }
257 {
258 const eShaderType shader_list[] = {SHADOW_TILEMAP_TAG_USAGE_VOLUME,
264 request(VOLUME_EVAL_SHADERS, AS_SPAN(shader_list));
265 }
266#undef AS_SPAN
267 return ready;
268}
269
270bool ShaderModule::request_specializations(bool block_until_ready,
271 int render_buffers_shadow_id,
272 int shadow_ray_count,
273 int shadow_ray_step_count,
274 bool use_split_indirect,
275 bool use_lightprobe_eval)
276{
277 std::lock_guard lock(mutex_);
278
279 SpecializationBatchHandle &specialization_handle = specialization_handles_.lookup_or_add_cb(
280 {render_buffers_shadow_id,
281 shadow_ray_count,
282 shadow_ray_step_count,
283 use_split_indirect,
284 use_lightprobe_eval},
285 [&]() {
286 Vector<ShaderSpecialization> specializations;
287 for (int i : IndexRange(3)) {
289 int render_pass_shadow_id_index = GPU_shader_get_constant(sh, "render_pass_shadow_id");
290 int use_split_indirect_index = GPU_shader_get_constant(sh, "use_split_indirect");
291 int use_lightprobe_eval_index = GPU_shader_get_constant(sh, "use_lightprobe_eval");
292 int use_transmission_index = GPU_shader_get_constant(sh, "use_transmission");
293 int shadow_ray_count_index = GPU_shader_get_constant(sh, "shadow_ray_count");
294 int shadow_ray_step_count_index = GPU_shader_get_constant(sh, "shadow_ray_step_count");
295
297
298 for (bool use_transmission : {false, true}) {
299 sp.set_value(render_pass_shadow_id_index, render_buffers_shadow_id);
300 sp.set_value(use_split_indirect_index, use_split_indirect);
301 sp.set_value(use_lightprobe_eval_index, use_lightprobe_eval);
302 sp.set_value(use_transmission_index, use_transmission);
303 sp.set_value(shadow_ray_count_index, shadow_ray_count);
304 sp.set_value(shadow_ray_step_count_index, shadow_ray_step_count);
305
306 specializations.append({sh, sp});
307 }
308 }
309
310 return GPU_shader_batch_specializations(specializations);
311 });
312
313 if (specialization_handle) {
314 while (!GPU_shader_batch_specializations_is_ready(specialization_handle) && block_until_ready)
315 {
316 /* Block until ready. */
317 }
318 }
319
320 return specialization_handle == 0;
321}
322
323const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_type)
324{
325 switch (shader_type) {
327 return "eevee_ambient_occlusion_pass";
328 case FILM_COPY:
329 return "eevee_film_copy_frag";
330 case FILM_COMP:
331 return "eevee_film_comp";
333 return "eevee_film_cryptomatte_post";
334 case FILM_FRAG:
335 return "eevee_film_frag";
337 return "eevee_film_pass_convert_combined";
339 return "eevee_film_pass_convert_depth";
341 return "eevee_film_pass_convert_value";
343 return "eevee_film_pass_convert_color";
345 return "eevee_film_pass_convert_cryptomatte";
346 case DEFERRED_COMBINE:
347 return "eevee_deferred_combine";
349 return "eevee_deferred_light_single";
351 return "eevee_deferred_light_double";
353 return "eevee_deferred_light_triple";
355 return "eevee_deferred_aov_clear";
357 return "eevee_deferred_capture_eval";
359 return "eevee_deferred_planar_eval";
361 return "eevee_deferred_thickness_amend";
363 return "eevee_deferred_tile_classify";
364 case HIZ_DEBUG:
365 return "eevee_hiz_debug";
366 case HIZ_UPDATE:
367 return "eevee_hiz_update";
368 case HIZ_UPDATE_LAYER:
369 return "eevee_hiz_update_layer";
370 case HORIZON_DENOISE:
371 return "eevee_horizon_denoise";
372 case HORIZON_RESOLVE:
373 return "eevee_horizon_resolve";
374 case HORIZON_SCAN:
375 return "eevee_horizon_scan";
376 case HORIZON_SETUP:
377 return "eevee_horizon_setup";
378 case LOOKDEV_DISPLAY:
379 return "eevee_lookdev_display";
381 return "eevee_motion_blur_gather";
383 return "eevee_motion_blur_tiles_dilate";
385 return "eevee_motion_blur_tiles_flatten_rgba";
387 return "eevee_motion_blur_tiles_flatten_rg";
388 case DEBUG_SURFELS:
389 return "eevee_debug_surfels";
391 return "eevee_debug_irradiance_grid";
392 case DEBUG_GBUFFER:
393 return "eevee_debug_gbuffer";
395 return "eevee_display_lightprobe_volume";
397 return "eevee_display_lightprobe_sphere";
399 return "eevee_display_lightprobe_planar";
400 case DOF_BOKEH_LUT:
401 return "eevee_depth_of_field_bokeh_lut";
402 case DOF_DOWNSAMPLE:
403 return "eevee_depth_of_field_downsample";
404 case DOF_FILTER:
405 return "eevee_depth_of_field_filter";
407 return "eevee_depth_of_field_gather_foreground_lut";
409 return "eevee_depth_of_field_gather_foreground_no_lut";
411 return "eevee_depth_of_field_gather_background_lut";
413 return "eevee_depth_of_field_gather_background_no_lut";
415 return "eevee_depth_of_field_hole_fill";
416 case DOF_REDUCE:
417 return "eevee_depth_of_field_reduce";
418 case DOF_RESOLVE:
419 return "eevee_depth_of_field_resolve_no_lut";
420 case DOF_RESOLVE_LUT:
421 return "eevee_depth_of_field_resolve_lut";
422 case DOF_SETUP:
423 return "eevee_depth_of_field_setup";
424 case DOF_SCATTER:
425 return "eevee_depth_of_field_scatter";
426 case DOF_STABILIZE:
427 return "eevee_depth_of_field_stabilize";
429 return "eevee_depth_of_field_tiles_dilate_minabs";
431 return "eevee_depth_of_field_tiles_dilate_minmax";
433 return "eevee_depth_of_field_tiles_flatten";
435 return "eevee_light_culling_debug";
437 return "eevee_light_culling_select";
439 return "eevee_light_culling_sort";
441 return "eevee_light_culling_tile";
443 return "eevee_light_culling_zbin";
445 return "eevee_light_shadow_setup";
447 return "eevee_ray_denoise_spatial";
449 return "eevee_ray_denoise_temporal";
451 return "eevee_ray_denoise_bilateral";
452 case RAY_GENERATE:
453 return "eevee_ray_generate";
455 return "eevee_ray_trace_fallback";
456 case RAY_TRACE_PLANAR:
457 return "eevee_ray_trace_planar";
458 case RAY_TRACE_SCREEN:
459 return "eevee_ray_trace_screen";
461 return "eevee_ray_tile_classify";
462 case RAY_TILE_COMPACT:
463 return "eevee_ray_tile_compact";
464 case RENDERPASS_CLEAR:
465 return "eevee_renderpass_clear";
467 return "eevee_lightprobe_volume_bounds";
469 return "eevee_lightprobe_volume_offset";
471 return "eevee_lightprobe_volume_ray";
473 return "eevee_lightprobe_volume_load";
475 return "eevee_lightprobe_volume_world";
477 return "eevee_lightprobe_sphere_convolve";
479 return "eevee_lightprobe_sphere_remap";
481 return "eevee_lightprobe_sphere_irradiance";
483 return "eevee_lightprobe_sphere_select";
485 return "eevee_lightprobe_sphere_sunlight";
487 return "eevee_shadow_clipmap_clear";
488 case SHADOW_DEBUG:
489 return "eevee_shadow_debug";
491 return "eevee_shadow_page_allocate";
493 return "eevee_shadow_page_clear";
495 return "eevee_shadow_page_defrag";
496 case SHADOW_PAGE_FREE:
497 return "eevee_shadow_page_free";
498 case SHADOW_PAGE_MASK:
499 return "eevee_shadow_page_mask";
501 return "eevee_shadow_tilemap_amend";
503 return "eevee_shadow_tilemap_bounds";
505 return "eevee_shadow_tilemap_finalize";
507 return "eevee_shadow_tilemap_rendermap";
509 return "eevee_shadow_tilemap_init";
511 return "eevee_shadow_tag_update";
513 return "eevee_shadow_tag_usage_opaque";
515 return "eevee_shadow_tag_usage_surfels";
517 return "eevee_shadow_tag_usage_transparent";
519 return "eevee_shadow_page_tile_clear";
521 return "eevee_shadow_page_tile_store";
523 return "eevee_shadow_tag_usage_volume";
525 return "eevee_shadow_view_visibility";
527 return "eevee_subsurface_convolve";
528 case SUBSURFACE_SETUP:
529 return "eevee_subsurface_setup";
531 return "eevee_surfel_cluster_build";
532 case SURFEL_LIGHT:
533 return "eevee_surfel_light";
535 return "eevee_surfel_list_build";
537 return "eevee_surfel_list_flatten";
539 return "eevee_surfel_list_prefix";
541 return "eevee_surfel_list_prepare";
542 case SURFEL_LIST_SORT:
543 return "eevee_surfel_list_sort";
544 case SURFEL_RAY:
545 return "eevee_surfel_ray";
546 case VERTEX_COPY:
547 return "eevee_vertex_copy";
549 return "eevee_volume_integration";
551 return "eevee_volume_occupancy_convert";
552 case VOLUME_RESOLVE:
553 return "eevee_volume_resolve";
554 case VOLUME_SCATTER:
555 return "eevee_volume_scatter";
557 return "eevee_volume_scatter_with_lights";
558 /* To avoid compiler warning about missing case. */
559 case MAX_SHADER_TYPE:
560 return "";
561 }
562 return "";
563}
564
566{
567 return shaders_[shader_type].get();
568}
569
571
572/* -------------------------------------------------------------------- */
576
577/* Helper class to get free sampler slots for materials. */
579 int first_reserved_;
580 int last_reserved_;
581 int index_;
582
583 public:
585 eMaterialGeometry geometry_type,
586 bool has_shader_to_rgba)
587 {
588 index_ = 0;
589 if (ELEM(geometry_type, MAT_GEOM_POINTCLOUD, MAT_GEOM_CURVES)) {
590 index_ = 2;
591 }
592
593 first_reserved_ = MATERIAL_TEXTURE_RESERVED_SLOT_FIRST;
595 if (geometry_type == MAT_GEOM_WORLD) {
597 }
598 else if (pipeline_type == MAT_PIPE_DEFERRED && has_shader_to_rgba) {
600 }
601 else if (pipeline_type == MAT_PIPE_FORWARD) {
603 }
604 }
605
606 int get()
607 {
608 if (index_ == first_reserved_) {
609 index_ = last_reserved_ + 1;
610 }
611 return index_++;
612 }
613};
614
616{
617 using namespace blender::gpu::shader;
618
619 uint64_t shader_uuid = GPU_material_uuid_get(gpumat);
620
621 eMaterialPipeline pipeline_type;
622 eMaterialGeometry geometry_type;
623 eMaterialDisplacement displacement_type;
624 eMaterialThickness thickness_type;
625 bool transparent_shadows;
627 pipeline_type,
628 geometry_type,
629 displacement_type,
630 thickness_type,
631 transparent_shadows);
632
633 GPUCodegenOutput &codegen = *codegen_;
634 ShaderCreateInfo &info = *reinterpret_cast<ShaderCreateInfo *>(codegen.create_info);
635
636 /* WORKAROUND: Add new ob attr buffer. */
637 if (GPU_material_uniform_attributes(gpumat) != nullptr) {
638 info.additional_info("draw_object_attributes");
639
640 /* Search and remove the old object attribute UBO which would create a bind point collision. */
641 for (auto &resource_info : info.batch_resources_) {
642 if (resource_info.bind_type == ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER &&
643 resource_info.uniformbuf.name == GPU_ATTRIBUTE_UBO_BLOCK_NAME "[512]")
644 {
645 info.batch_resources_.remove_first_occurrence_and_reorder(resource_info);
646 break;
647 }
648 }
649 /* Remove references to the UBO. */
650 info.define("UNI_ATTR(a)", "float4(0.0)");
651 }
652
653 SamplerSlots sampler_slots(
654 pipeline_type, geometry_type, GPU_material_flag_get(gpumat, GPU_MATFLAG_SHADER_TO_RGBA));
655
656 for (auto &resource : info.batch_resources_) {
658 resource.slot = sampler_slots.get();
659 }
660 }
661
662 bool use_ao_node = false;
663
665 ELEM(pipeline_type, MAT_PIPE_FORWARD, MAT_PIPE_DEFERRED) &&
666 geometry_type_has_surface(geometry_type))
667 {
668 info.define("MAT_AMBIENT_OCCLUSION");
669 use_ao_node = true;
670 }
671
673 if (pipeline_type != MAT_PIPE_SHADOW || transparent_shadows) {
674 info.define("MAT_TRANSPARENT");
675 }
676 /* Transparent material do not have any velocity specific pipeline. */
677 if (pipeline_type == MAT_PIPE_PREPASS_FORWARD_VELOCITY) {
678 pipeline_type = MAT_PIPE_PREPASS_FORWARD;
679 }
680 }
681
682 /* Only deferred material allow use of cryptomatte and render passes. */
683 if (pipeline_type == MAT_PIPE_DEFERRED) {
684 info.additional_info("eevee_render_pass_out");
685 info.additional_info("eevee_cryptomatte_out");
686 }
687
689 info.define("MAT_DIFFUSE");
690 }
692 info.define("MAT_SUBSURFACE");
693 }
695 info.define("MAT_REFRACTION");
696 }
698 info.define("MAT_TRANSLUCENT");
699 }
701 info.define("MAT_REFLECTION");
702 }
704 info.define("MAT_CLEARCOAT");
705 }
707 info.define("MAT_REFLECTION_COLORLESS");
708 }
710 info.define("MAT_REFRACTION_COLORLESS");
711 }
712
713 const eClosureBits closure_bits = shader_closure_bits_from_flag(gpumat);
714
715 int32_t closure_bin_count = to_gbuffer_bin_count(closure_bits);
716 switch (closure_bin_count) {
717 /* These need to be separated since the strings need to be static. */
718 case 0:
719 case 1:
720 info.define("CLOSURE_BIN_COUNT", "1");
721 break;
722 case 2:
723 info.define("CLOSURE_BIN_COUNT", "2");
724 break;
725 case 3:
726 info.define("CLOSURE_BIN_COUNT", "3");
727 break;
728 default:
730 break;
731 }
732
733 if (pipeline_type == MAT_PIPE_DEFERRED) {
734 switch (closure_bin_count) {
735 /* These need to be separated since the strings need to be static. */
736 case 0:
737 case 1:
738 info.define("GBUFFER_LAYER_MAX", "1");
739 break;
740 case 2:
741 info.define("GBUFFER_LAYER_MAX", "2");
742 break;
743 case 3:
744 info.define("GBUFFER_LAYER_MAX", "3");
745 break;
746 default:
748 break;
749 }
750
751 if (closure_bin_count == 2) {
752 /* In a lot of cases, we can predict that we do not need the extra GBuffer layers. This
753 * simplifies the shader code and improves compilation time (see #145347). */
754 const bool colorless_reflection = !GPU_material_flag_get(
756 const bool colorless_refraction = !GPU_material_flag_get(
758 int closure_layer_count = 0;
759 if (closure_bits & CLOSURE_DIFFUSE) {
760 closure_layer_count += 1;
761 }
762 if (closure_bits & CLOSURE_SSS) {
763 closure_layer_count += 2;
764 }
765 if (closure_bits & CLOSURE_REFLECTION) {
766 closure_layer_count += colorless_reflection ? 1 : 2;
767 }
768 if (closure_bits & CLOSURE_REFRACTION) {
769 closure_layer_count += colorless_refraction ? 1 : 2;
770 }
771 if (closure_bits & CLOSURE_TRANSLUCENT) {
772 closure_layer_count += 1;
773 }
774 if (closure_bits & CLOSURE_CLEARCOAT) {
775 closure_layer_count += colorless_reflection ? 1 : 2;
776 }
777
778 if (closure_layer_count <= 2) {
779 info.define("GBUFFER_SIMPLE_CLOSURE_LAYOUT");
780 }
781 }
782 }
783
784 if ((pipeline_type == MAT_PIPE_FORWARD) ||
786 {
787 switch (closure_bin_count) {
788 case 0:
789 /* Define nothing. This will in turn define SKIP_LIGHT_EVAL. */
790 break;
791 /* These need to be separated since the strings need to be static. */
792 case 1:
793 info.define("LIGHT_CLOSURE_EVAL_COUNT", "1");
794 break;
795 case 2:
796 info.define("LIGHT_CLOSURE_EVAL_COUNT", "2");
797 break;
798 case 3:
799 info.define("LIGHT_CLOSURE_EVAL_COUNT", "3");
800 break;
801 default:
803 break;
804 }
805 }
806
808 switch (geometry_type) {
809 case MAT_GEOM_MESH:
810 /* Support using gpu builtin barycentrics. */
811 info.define("USE_BARYCENTRICS");
813 break;
814 case MAT_GEOM_CURVES:
815 /* Support using one float2 attribute. See #hair_get_barycentric(). */
816 info.define("USE_BARYCENTRICS");
817 break;
818 default:
819 /* No support */
820 break;
821 }
822 }
823
824 /* Allow to use Reverse-Z on OpenGL. Does nothing in other backend. */
826
827 std::stringstream global_vars;
828 switch (geometry_type) {
829 case MAT_GEOM_MESH:
830 if (pipeline_type == MAT_PIPE_VOLUME_MATERIAL) {
831 /* If mesh has a volume output, it can receive volume grid attributes from smoke
832 * simulation modifier. But the vertex shader might still need access to the vertex
833 * attribute for displacement. */
834 /* TODO(fclem): Eventually, we could add support for loading both. For now, remove the
835 * vertex inputs after conversion (avoid name collision). */
836 for (auto &input : info.vertex_inputs_) {
837 info.sampler(sampler_slots.get(), ImageType::Float3D, input.name, Frequency::BATCH);
838 }
839 info.vertex_inputs_.clear();
840 /* Volume materials require these for loading the grid attributes from smoke sims. */
841 info.additional_info("draw_volume_infos");
842 }
843 break;
845 case MAT_GEOM_CURVES:
847 for (auto &input : info.vertex_inputs_) {
848 if (input.name == "orco") {
850 global_vars << input.type << " " << input.name << ";\n";
851 }
852 else {
853 info.sampler(sampler_slots.get(), ImageType::FloatBuffer, input.name, Frequency::BATCH);
854 }
855 }
856 info.vertex_inputs_.clear();
857 break;
858 case MAT_GEOM_WORLD:
859 if (pipeline_type == MAT_PIPE_VOLUME_MATERIAL) {
860 /* Even if the world does not have grid attributes, we use dummy texture binds to pass correct
861 * defaults. So we have to replace all attributes as samplers. */
862 for (auto &input : info.vertex_inputs_) {
863 info.sampler(sampler_slots.get(), ImageType::Float3D, input.name, Frequency::BATCH);
864 }
865 info.vertex_inputs_.clear();
866 }
871 for (auto &input : info.vertex_inputs_) {
872 global_vars << input.type << " " << input.name << ";\n";
873 }
874 info.vertex_inputs_.clear();
875 break;
876 case MAT_GEOM_VOLUME:
878 for (auto &input : info.vertex_inputs_) {
879 info.sampler(sampler_slots.get(), ImageType::Float3D, input.name, Frequency::BATCH);
880 }
881 info.vertex_inputs_.clear();
882 break;
883 }
884
885 const bool support_volume_attributes = ELEM(geometry_type, MAT_GEOM_MESH, MAT_GEOM_VOLUME);
886 const bool do_vertex_attrib_load = !ELEM(geometry_type, MAT_GEOM_WORLD, MAT_GEOM_VOLUME) &&
887 (pipeline_type != MAT_PIPE_VOLUME_MATERIAL ||
888 !support_volume_attributes);
889
890 if (!do_vertex_attrib_load && !info.vertex_out_interfaces_.is_empty()) {
891 /* Codegen outputs only one interface. */
892 const StageInterfaceInfo &iface = *info.vertex_out_interfaces_.first();
893 /* Globals the attrib_load() can write to when it is in the fragment shader. */
894 global_vars << "struct " << iface.name << " {\n";
895 for (const auto &inout : iface.inouts) {
896 global_vars << " " << inout.type << " " << inout.name << ";\n";
897 }
898 global_vars << "};\n";
899 global_vars << iface.name << " " << iface.instance_name << ";\n";
900
901 info.vertex_out_interfaces_.clear();
902 }
903
904 const char *domain_type_frag = "";
905 const char *domain_type_vert = "";
906 switch (geometry_type) {
907 case MAT_GEOM_MESH:
908 domain_type_frag = (pipeline_type == MAT_PIPE_VOLUME_MATERIAL) ? "VolumePoint" :
909 "MeshVertex";
910 domain_type_vert = "MeshVertex";
911 break;
913 domain_type_frag = domain_type_vert = "PointCloudPoint";
914 break;
915 case MAT_GEOM_CURVES:
916 domain_type_frag = domain_type_vert = "CurvesPoint";
917 break;
918 case MAT_GEOM_WORLD:
919 domain_type_frag = (pipeline_type == MAT_PIPE_VOLUME_MATERIAL) ? "VolumePoint" :
920 "WorldPoint";
921 domain_type_vert = "WorldPoint";
922 break;
923 case MAT_GEOM_VOLUME:
924 domain_type_frag = domain_type_vert = "VolumePoint";
925 break;
926 }
927
928 std::stringstream attr_load;
929 attr_load << "{\n";
930 attr_load << (!codegen.attr_load.empty() ? codegen.attr_load : "");
931 attr_load << "}\n\n";
932
933 std::stringstream vert_gen, frag_gen;
934
935 if (do_vertex_attrib_load) {
936 vert_gen << global_vars.str() << "void attrib_load(" << domain_type_vert << " domain)"
937 << attr_load.str();
938 frag_gen << "void attrib_load(" << domain_type_frag << " domain) {}\n"; /* Placeholder. */
939 }
940 else {
941 vert_gen << "void attrib_load(" << domain_type_vert << " domain) {}\n"; /* Placeholder. */
942 frag_gen << global_vars.str() << "void attrib_load(" << domain_type_frag << " domain)"
943 << attr_load.str();
944 }
945
946 {
947 const bool use_vertex_displacement = !codegen.displacement.empty() &&
948 (displacement_type != MAT_DISPLACEMENT_BUMP) &&
949 !ELEM(geometry_type, MAT_GEOM_WORLD, MAT_GEOM_VOLUME);
950
951 vert_gen << "float3 nodetree_displacement()\n";
952 vert_gen << "{\n";
953 vert_gen << ((use_vertex_displacement) ? codegen.displacement.serialized :
954 "return float3(0);\n");
955 vert_gen << "}\n\n";
956
957 Vector<StringRefNull> dependencies = {};
958 if (use_vertex_displacement) {
959 dependencies.append("eevee_geom_types_lib.glsl");
960 dependencies.append("eevee_nodetree_lib.glsl");
961 dependencies.extend(codegen.displacement.dependencies);
962 }
963
964 info.generated_sources.append({"eevee_nodetree_vert_lib.glsl", dependencies, vert_gen.str()});
965 }
966
967 if (pipeline_type != MAT_PIPE_VOLUME_OCCUPANCY) {
968 Vector<StringRefNull> dependencies;
969 if (use_ao_node) {
970 dependencies.append("eevee_ambient_occlusion_lib.glsl");
971 }
972 dependencies.append("eevee_geom_types_lib.glsl");
973 dependencies.append("eevee_nodetree_lib.glsl");
974
975 for (const auto &graph : codegen.material_functions) {
976 frag_gen << graph.serialized;
977 dependencies.extend(graph.dependencies);
978 }
979
980 if (!codegen.displacement.empty()) {
981 /* Bump displacement. Needed to recompute normals after displacement. */
982 info.define("MAT_DISPLACEMENT_BUMP");
983
984 frag_gen << "float3 nodetree_displacement()\n";
985 frag_gen << "{\n";
986 frag_gen << codegen.displacement.serialized;
987 dependencies.extend(codegen.displacement.dependencies);
988 frag_gen << "}\n\n";
989 }
990
991 frag_gen << "Closure nodetree_surface(float closure_rand)\n";
992 frag_gen << "{\n";
993 frag_gen << " closure_weights_reset(closure_rand);\n";
994 frag_gen << codegen.surface.serialized_or_default("return Closure(0);\n");
995 dependencies.extend(codegen.surface.dependencies);
996 frag_gen << "}\n\n";
997
998 /* TODO(fclem): Find a way to pass material parameters inside the material UBO. */
999 info.define("thickness_mode", thickness_type == MAT_THICKNESS_SLAB ? "-1.0" : "1.0");
1000
1001 frag_gen << "float nodetree_thickness()\n";
1002 frag_gen << "{\n";
1003 if (codegen.thickness.empty()) {
1004 /* Check presence of closure needing thickness to not add mandatory dependency on obinfos. */
1007 {
1008 frag_gen << "return 0.0;\n";
1009 }
1010 else {
1011 if (info.additional_infos_.first_index_of_try("draw_object_infos") == -1) {
1012 info.additional_info("draw_object_infos");
1013 }
1014 /* TODO(fclem): Should use `to_scale` but the gpu_shader_math_matrix_lib.glsl isn't
1015 * included everywhere yet. */
1016 frag_gen << "float3 ob_scale;\n";
1017 frag_gen << "ob_scale.x = length(drw_modelmat()[0].xyz);\n";
1018 frag_gen << "ob_scale.y = length(drw_modelmat()[1].xyz);\n";
1019 frag_gen << "ob_scale.z = length(drw_modelmat()[2].xyz);\n";
1020 frag_gen << "float3 ls_dimensions = safe_rcp(abs(drw_object_infos().orco_mul.xyz));\n";
1021 frag_gen << "float3 ws_dimensions = ob_scale * ls_dimensions;\n";
1022 /* Choose the minimum axis so that cuboids are better represented. */
1023 frag_gen << "return reduce_min(ws_dimensions);\n";
1024 }
1025 }
1026 else {
1027 frag_gen << codegen.thickness.serialized;
1028 dependencies.extend(codegen.thickness.dependencies);
1029 }
1030 frag_gen << "}\n\n";
1031
1032 frag_gen << "Closure nodetree_volume()\n";
1033 frag_gen << "{\n";
1034 frag_gen << " closure_weights_reset(0.0);\n";
1035 frag_gen << codegen.volume.serialized_or_default("return Closure(0);\n");
1036 dependencies.extend(codegen.volume.dependencies);
1037 frag_gen << "}\n\n";
1038
1039 info.generated_sources.append({"eevee_nodetree_frag_lib.glsl", dependencies, frag_gen.str()});
1040 }
1041
1042 int reserved_attr_slots = 0;
1043
1044 /* Geometry Info. */
1045 switch (geometry_type) {
1046 case MAT_GEOM_WORLD:
1047 info.additional_info("eevee_geom_world");
1048 break;
1049 case MAT_GEOM_CURVES:
1050 info.additional_info("eevee_geom_curves");
1051 break;
1052 case MAT_GEOM_MESH:
1053 info.additional_info("eevee_geom_mesh");
1054 reserved_attr_slots = 2; /* Number of vertex attributes inside eevee_geom_mesh. */
1055 break;
1057 info.additional_info("eevee_geom_pointcloud");
1058 break;
1059 case MAT_GEOM_VOLUME:
1060 info.additional_info("eevee_geom_volume");
1061 reserved_attr_slots = 1; /* Number of vertex attributes inside eevee_geom_volume. */
1062 break;
1063 }
1064
1065 /* Make shaders that have too many attributes fail compilation and have a correct error
1066 * report instead of raising an error. */
1067 if (info.vertex_inputs_.size() > 0) {
1068 const int last_attr_index = info.vertex_inputs_.last().index;
1069 if (last_attr_index - reserved_attr_slots < 0) {
1070 const char *material_name = (info.name_.c_str() + 2);
1071 std::cerr << "Error: EEVEE: Material " << material_name << " uses too many attributes."
1072 << std::endl;
1073 /* Avoid assert in ShaderCreateInfo::finalize. */
1074 info.vertex_inputs_.clear();
1075 }
1076 }
1077
1078 /* Pipeline Info. */
1079 switch (geometry_type) {
1080 case MAT_GEOM_WORLD:
1081 switch (pipeline_type) {
1083 info.additional_info("eevee_surf_volume");
1084 break;
1085 default:
1086 info.additional_info("eevee_surf_world");
1087 break;
1088 }
1089 break;
1090 default:
1091 switch (pipeline_type) {
1094 info.additional_info("eevee_surf_depth", "eevee_velocity_geom");
1095 break;
1099 info.additional_info("eevee_surf_depth");
1100 break;
1102 info.additional_info("eevee_surf_depth", "eevee_clip_plane");
1103 break;
1104 case MAT_PIPE_SHADOW:
1105 /* Determine surface shadow shader depending on used update technique. */
1108 info.additional_info("eevee_surf_shadow_atomic");
1109 } break;
1111 info.additional_info("eevee_surf_shadow_tbdr");
1112 } break;
1113 default: {
1115 } break;
1116 }
1117 break;
1119 info.additional_info("eevee_surf_occupancy");
1120 break;
1122 info.additional_info("eevee_surf_volume");
1123 break;
1124 case MAT_PIPE_CAPTURE:
1125 info.additional_info("eevee_surf_capture");
1126 break;
1127 case MAT_PIPE_DEFERRED:
1129 info.additional_info("eevee_surf_deferred_hybrid");
1130 }
1131 else {
1132 info.additional_info("eevee_surf_deferred");
1133 }
1134 break;
1135 case MAT_PIPE_FORWARD:
1136 info.additional_info("eevee_surf_forward");
1137 break;
1138 default:
1140 break;
1141 }
1142 break;
1143 }
1144}
1145
1150
1151/* WATCH: This can be called from another thread! Needs to not touch the shader module in any
1152 * thread unsafe manner. */
1153static void codegen_callback(void *void_thunk, GPUMaterial *mat, GPUCodegenOutput *codegen)
1154{
1155 CallbackThunk *thunk = static_cast<CallbackThunk *>(void_thunk);
1156 thunk->shader_module->material_create_info_amend(mat, codegen);
1157}
1158
1159static GPUPass *pass_replacement_cb(void *void_thunk, GPUMaterial *mat)
1160{
1161 using namespace blender::gpu::shader;
1162
1163 CallbackThunk *thunk = static_cast<CallbackThunk *>(void_thunk);
1164
1165 const ::Material *blender_mat = GPU_material_get_material(mat);
1166
1167 uint64_t shader_uuid = GPU_material_uuid_get(mat);
1168
1169 eMaterialPipeline pipeline_type;
1170 eMaterialGeometry geometry_type;
1171 eMaterialDisplacement displacement_type;
1172 eMaterialThickness thickness_type;
1173 bool transparent_shadows;
1175 pipeline_type,
1176 geometry_type,
1177 displacement_type,
1178 thickness_type,
1179 transparent_shadows);
1180
1181 bool is_shadow_pass = pipeline_type == eMaterialPipeline::MAT_PIPE_SHADOW;
1182 bool is_prepass = ELEM(pipeline_type,
1189
1190 bool has_vertex_displacement = GPU_material_has_displacement_output(mat) &&
1192 bool has_transparency = GPU_material_flag_get(mat, GPU_MATFLAG_TRANSPARENT);
1193 bool has_shadow_transparency = has_transparency && transparent_shadows;
1194 bool has_raytraced_transmission = blender_mat && (blender_mat->blend_flag & MA_BL_SS_REFRACTION);
1195
1196 bool can_use_default = (is_shadow_pass &&
1197 (!has_vertex_displacement && !has_shadow_transparency)) ||
1198 (is_prepass && (!has_vertex_displacement && !has_transparency &&
1199 !has_raytraced_transmission));
1200 if (can_use_default) {
1202 thunk->default_mat->nodetree,
1203 pipeline_type,
1204 geometry_type,
1205 false,
1206 nullptr);
1207 return GPU_material_get_pass(mat);
1208 }
1209
1210 return nullptr;
1211}
1212
1214{
1215 Depsgraph *depsgraph = DRW_context_get()->depsgraph;
1216 if (!depsgraph) {
1217 return;
1218 }
1219 if (!DEG_is_active(depsgraph)) {
1220 return;
1221 }
1222 for (const GPUMaterialFromNodeTreeResult::Error &error : material_from_tree.errors) {
1223 const bNodeTree &tree = error.node->owner_tree();
1224 if (const bNodeTree *tree_orig = DEG_get_original(&tree)) {
1225 std::lock_guard lock(tree_orig->runtime->shader_node_errors_mutex);
1226 tree_orig->runtime->shader_node_errors.lookup_or_add_default(error.node->identifier)
1227 .add(error.message);
1228 }
1229 }
1230}
1231
1233 bNodeTree *nodetree,
1234 eMaterialPipeline pipeline_type,
1235 eMaterialGeometry geometry_type,
1236 bool deferred_compilation,
1237 ::Material *default_mat)
1238{
1239 eMaterialDisplacement displacement_type = to_displacement_type(blender_mat->displacement_method);
1240 eMaterialThickness thickness_type = to_thickness_type(blender_mat->thickness_mode);
1241
1243 pipeline_type, geometry_type, displacement_type, thickness_type, blender_mat->blend_flag);
1244
1245 bool is_default_material = default_mat == nullptr;
1246 BLI_assert(blender_mat != default_mat);
1247
1248 CallbackThunk thunk = {this, default_mat};
1249
1251 blender_mat,
1252 nodetree,
1253 &blender_mat->gpumaterial,
1254 blender_mat->id.name,
1256 shader_uuid,
1257 deferred_compilation,
1259 &thunk,
1260 is_default_material ? nullptr : pass_replacement_cb);
1261 store_node_tree_errors(material_from_tree);
1262 return material_from_tree.material;
1263}
1264
1266 bNodeTree *nodetree,
1267 eMaterialPipeline pipeline_type,
1268 bool deferred_compilation)
1269{
1270 uint64_t shader_uuid = shader_uuid_from_material_type(pipeline_type, MAT_GEOM_WORLD);
1271
1272 CallbackThunk thunk = {this, nullptr};
1273
1275 nullptr,
1276 nodetree,
1277 &blender_world->gpumaterial,
1278 blender_world->id.name,
1280 shader_uuid,
1281 deferred_compilation,
1283 &thunk);
1284 store_node_tree_errors(material_from_tree);
1285 return material_from_tree.material;
1286}
1287
1289
1290} // namespace blender::eevee
General operations, lookup, etc. for materials.
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:53
#define ELEM(...)
bool DEG_is_active(const Depsgraph *depsgraph)
Definition depsgraph.cc:323
T * DEG_get_original(T *id)
@ MA_BL_SS_REFRACTION
uint64_t GPU_material_uuid_get(GPUMaterial *mat)
GPUPass * GPU_material_get_pass(GPUMaterial *material)
@ GPU_MAT_EEVEE
GPUMaterialFromNodeTreeResult GPU_material_from_nodetree(Material *ma, bNodeTree *ntree, ListBase *gpumaterials, const char *name, eGPUMaterialEngine engine, uint64_t shader_uuid, bool deferred_compilation, GPUCodegenCallbackFn callback, void *thunk, GPUMaterialPassReplacementCallbackFn pass_replacement_cb=nullptr)
bool GPU_material_flag_get(const GPUMaterial *mat, eGPUMaterialFlag flag)
@ GPU_MATFLAG_REFRACTION_MAYBE_COLORED
@ GPU_MATFLAG_SHADER_TO_RGBA
@ GPU_MATFLAG_REFLECTION_MAYBE_COLORED
@ GPU_MATFLAG_GLOSSY
@ GPU_MATFLAG_COAT
@ GPU_MATFLAG_AO
@ GPU_MATFLAG_REFRACT
@ GPU_MATFLAG_BARYCENTRIC
@ GPU_MATFLAG_TRANSLUCENT
@ GPU_MATFLAG_DIFFUSE
@ GPU_MATFLAG_TRANSPARENT
@ GPU_MATFLAG_SUBSURFACE
Material * GPU_material_get_material(GPUMaterial *material)
const GPUUniformAttrList * GPU_material_uniform_attributes(const GPUMaterial *material)
bool GPU_material_has_displacement_output(GPUMaterial *mat)
int GPU_shader_get_constant(blender::gpu::Shader *shader, const char *name)
const blender::gpu::shader::SpecializationConstants & GPU_shader_get_default_constant_state(blender::gpu::Shader *sh)
SpecializationBatchHandle GPU_shader_batch_specializations(blender::Span< ShaderSpecialization > specializations, CompilationPriority priority=CompilationPriority::High)
const GPUShaderCreateInfo * GPU_shader_create_info_get(const char *info_name)
void GPU_shader_batch_specializations_cancel(SpecializationBatchHandle &handle)
int64_t SpecializationBatchHandle
bool GPU_shader_batch_specializations_is_ready(SpecializationBatchHandle &handle)
#define GPU_ATTRIBUTE_UBO_BLOCK_NAME
volatile int lock
BPy_StructRNA * depsgraph
unsigned long long int uint64_t
constexpr const char * c_str() const
void append(const T &value)
void extend(Span< T > array)
SamplerSlots(eMaterialPipeline pipeline_type, eMaterialGeometry geometry_type, bool has_shader_to_rgba)
bool request_specializations(bool block_until_ready, int render_buffers_shadow_id, int shadow_ray_count, int shadow_ray_step_count, bool use_split_indirect, bool use_lightprobe_eval)
void material_create_info_amend(GPUMaterial *mat, GPUCodegenOutput *codegen)
gpu::Shader * static_shader_get(eShaderType shader_type)
GPUMaterial * material_shader_get(::Material *blender_mat, bNodeTree *nodetree, eMaterialPipeline pipeline_type, eMaterialGeometry geometry_type, bool deferred_compilation, ::Material *default_mat)
static ShaderModule * module_get()
GPUMaterial * world_shader_get(::World *blender_world, bNodeTree *nodetree, eMaterialPipeline pipeline_type, bool deferred_compilation)
static ShadowTechnique shadow_technique
const DRWContext * DRW_context_get()
KDTree_3d * tree
#define MATERIAL_TEXTURE_RESERVED_SLOT_LAST_NO_EVAL
#define MATERIAL_TEXTURE_RESERVED_SLOT_LAST_HYBRID
#define MATERIAL_TEXTURE_RESERVED_SLOT_FIRST
#define MATERIAL_TEXTURE_RESERVED_SLOT_LAST_FORWARD
#define MATERIAL_TEXTURE_RESERVED_SLOT_LAST_WORLD
#define AS_SPAN(arr)
#define inout
#define resource
#define input
static void error(const char *str)
@ SHADOW_TILEMAP_TAG_USAGE_SURFELS
@ SHADOW_TILEMAP_TAG_USAGE_OPAQUE
@ SHADOW_TILEMAP_TAG_USAGE_TRANSPARENT
@ FILM_PASS_CONVERT_CRYPTOMATTE
@ SHADOW_TILEMAP_TAG_USAGE_VOLUME
static GPUPass * pass_replacement_cb(void *void_thunk, GPUMaterial *mat)
static eMaterialDisplacement to_displacement_type(int displacement_method)
gpu::StaticShader StaticShader
static bool geometry_type_has_surface(eMaterialGeometry geometry_type)
static void store_node_tree_errors(GPUMaterialFromNodeTreeResult &material_from_tree)
static eClosureBits shader_closure_bits_from_flag(const GPUMaterial *gpumat)
static void material_type_from_shader_uuid(uint64_t shader_uuid, eMaterialPipeline &pipeline_type, eMaterialGeometry &geometry_type, eMaterialDisplacement &displacement_type, eMaterialThickness &thickness_type, bool &transparent_shadows)
static uint64_t shader_uuid_from_material_type(eMaterialPipeline pipeline_type, eMaterialGeometry geometry_type, eMaterialDisplacement displacement_type=MAT_DISPLACEMENT_BUMP, eMaterialThickness thickness_type=MAT_THICKNESS_SPHERE, char blend_flags=0)
static void codegen_callback(void *void_thunk, GPUMaterial *mat, GPUCodegenOutput *codegen)
static int to_gbuffer_bin_count(const eClosureBits closure_bits)
static eMaterialThickness to_thickness_type(int thickness_mode)
const char * name
Depsgraph * depsgraph
blender::Vector< GPUGraphOutput > material_functions
GPUGraphOutput displacement
GPUGraphOutput volume
GPUGraphOutput thickness
std::string attr_load
GPUShaderCreateInfo * create_info
GPUGraphOutput surface
std::string serialized
std::string serialized_or_default(std::string value) const
blender::Vector< blender::StringRefNull > dependencies
bool empty() const
blender::Vector< Error > errors
char name[258]
Definition DNA_ID.h:432
ListBase gpumaterial
Describe inputs & outputs, stage interfaces, resources and sources of a shader. If all data is correc...
Vector< StageInterfaceInfo * > vertex_out_interfaces_
GeneratedSourceList generated_sources
Self & builtins(BuiltinBits builtin)
Vector< VertIn > vertex_inputs_
Vector< Resource > batch_resources_
Vector< StringRefNull > additional_infos_
Self & additional_info(StringRefNull info_name)
Self & sampler(int slot, ImageType type, StringRefNull name, Frequency freq=Frequency::PASS, GPUSamplerState sampler=GPUSamplerState::internal_sampler())
Self & define(StringRefNull name, StringRefNull value="")
ListBase gpumaterial
i
Definition text_draw.cc:230