Blender V4.3
eevee_shader.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2021 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
12#include "GPU_capabilities.hh"
13
15
16#include "eevee_shader.hh"
17
18#include "eevee_shadow.hh"
19
20#include "BLI_assert.h"
21#include "BLI_math_bits.h"
22
23namespace blender::eevee {
24
25/* -------------------------------------------------------------------- */
30ShaderModule *ShaderModule::g_shader_module = nullptr;
31
33{
34 if (g_shader_module == nullptr) {
35 /* TODO(@fclem) thread-safety. */
36 g_shader_module = new ShaderModule();
37 }
38 return g_shader_module;
39}
40
42{
43 if (g_shader_module != nullptr) {
44 /* TODO(@fclem) thread-safety. */
45 delete g_shader_module;
46 g_shader_module = nullptr;
47 }
48}
49
51{
52 for (GPUShader *&shader : shaders_) {
53 shader = nullptr;
54 }
55
58
59 for (auto i : IndexRange(MAX_SHADER_TYPE)) {
60 const char *name = static_shader_create_info_name_get(eShaderType(i));
61 const GPUShaderCreateInfo *create_info = GPU_shader_create_info_get(name);
62 infos.append(create_info);
63
64#ifndef NDEBUG
65 if (name == nullptr) {
66 std::cerr << "EEVEE: Missing case for eShaderType(" << i
67 << ") in static_shader_create_info_name_get().";
68 BLI_assert(0);
69 }
70 BLI_assert_msg(create_info != nullptr, "EEVEE: Missing create info for static shader.");
71#endif
72 }
73
75 compilation_handle_ = GPU_shader_batch_create_from_infos(infos);
76 }
77}
78
80{
81 for (GPUShader *&shader : shaders_) {
83 }
84}
85
88/* -------------------------------------------------------------------- */
/* Pre-warm all specialization-constant permutations of the deferred light
 * shaders so material rendering does not stall on first use.
 * NOTE(review): this listing is a Doxygen extraction; the number fused at the
 * start of each line is the upstream source line number and gaps mean dropped
 * lines. Code kept byte-identical; see the inline notes at the gaps. */
93void ShaderModule::precompile_specializations(int render_buffers_shadow_id,
94 int shadow_ray_count,
95 int shadow_ray_step_count)
96{
 /* Only valid once, before any specialization batch is in flight. */
97 BLI_assert(specialization_handle_ == 0);
98
 /* NOTE(review): upstream line 99 dropped by extraction — presumably the guard
  * `if (!GPU_use_parallel_compilation()) {` (the function is in this file's
  * cross references). Confirm against upstream before using this listing. */
100 return;
101 }
102
103 Vector<ShaderSpecialization> specializations;
 /* Three iterations: presumably one per deferred-light variant
  * (single/double/triple) — TODO confirm. */
104 for (int i = 0; i < 3; i++) {
 /* NOTE(review): upstream line 105 dropped — it defined `sh`, the shader
  * handle used in the append below. Confirm against upstream. */
106 for (bool use_split_indirect : {false, true}) {
107 for (bool use_lightprobe_eval : {false, true}) {
108 for (bool use_transmission : {false, true}) {
109 specializations.append({sh,
110 {{"render_pass_shadow_id", render_buffers_shadow_id},
111 {"use_split_indirect", use_split_indirect},
112 {"use_lightprobe_eval", use_lightprobe_eval},
113 {"use_transmission", use_transmission},
114 {"shadow_ray_count", shadow_ray_count},
115 {"shadow_ray_step_count", shadow_ray_step_count}}});
116 }
117 }
118 }
119 }
120
 /* Queue the whole permutation set as one asynchronous batch. */
121 specialization_handle_ = GPU_shader_batch_specializations(specializations);
122}
123
125{
126 if (compilation_handle_) {
127 if (GPU_shader_batch_is_ready(compilation_handle_) || block) {
128 Vector<GPUShader *> shaders = GPU_shader_batch_finalize(compilation_handle_);
129 for (int i : IndexRange(MAX_SHADER_TYPE)) {
130 shaders_[i] = shaders[i];
131 }
132 }
133 }
134
135 if (specialization_handle_) {
136 while (!GPU_shader_batch_specializations_is_ready(specialization_handle_) && block) {
137 /* Block until ready. */
138 }
139 }
140
141 return compilation_handle_ == 0 && specialization_handle_ == 0;
142}
143
/* Map each static shader enum value to its GPUShaderCreateInfo name.
 * NOTE(review): this listing is a Doxygen extraction; the number fused at the
 * start of each line is the upstream source line number. Every gap in that
 * numbering here is a dropped `case` label (e.g. upstream line 147 before
 * "eevee_ambient_occlusion_pass"). The returned strings are intact; the missing
 * labels must be restored from the eShaderType enum in eevee_shader.hh.
 * Code below kept byte-identical. */
144const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_type)
145{
146 switch (shader_type) {
 /* NOTE(review): dropped `case` label (upstream line 147) — likely the
  * ambient-occlusion pass enum value; same pattern for every unlabeled
  * `return` below. */
148 return "eevee_ambient_occlusion_pass";
149 case FILM_COPY:
150 return "eevee_film_copy_frag";
151 case FILM_COMP:
152 return "eevee_film_comp";
154 return "eevee_film_cryptomatte_post";
155 case FILM_FRAG:
156 return "eevee_film_frag";
158 return "eevee_film_pass_convert_combined";
160 return "eevee_film_pass_convert_depth";
162 return "eevee_film_pass_convert_value";
164 return "eevee_film_pass_convert_color";
166 return "eevee_film_pass_convert_cryptomatte";
167 case DEFERRED_COMBINE:
168 return "eevee_deferred_combine";
170 return "eevee_deferred_light_single";
172 return "eevee_deferred_light_double";
174 return "eevee_deferred_light_triple";
176 return "eevee_deferred_capture_eval";
178 return "eevee_deferred_planar_eval";
180 return "eevee_deferred_thickness_amend";
182 return "eevee_deferred_tile_classify";
183 case HIZ_DEBUG:
184 return "eevee_hiz_debug";
185 case HIZ_UPDATE:
186 return "eevee_hiz_update";
187 case HIZ_UPDATE_LAYER:
188 return "eevee_hiz_update_layer";
189 case HORIZON_DENOISE:
190 return "eevee_horizon_denoise";
191 case HORIZON_RESOLVE:
192 return "eevee_horizon_resolve";
193 case HORIZON_SCAN:
194 return "eevee_horizon_scan";
195 case HORIZON_SETUP:
196 return "eevee_horizon_setup";
197 case LOOKDEV_DISPLAY:
198 return "eevee_lookdev_display";
200 return "eevee_motion_blur_gather";
202 return "eevee_motion_blur_tiles_dilate";
204 return "eevee_motion_blur_tiles_flatten_rgba";
206 return "eevee_motion_blur_tiles_flatten_rg";
207 case DEBUG_SURFELS:
208 return "eevee_debug_surfels";
210 return "eevee_debug_irradiance_grid";
211 case DEBUG_GBUFFER:
212 return "eevee_debug_gbuffer";
214 return "eevee_display_lightprobe_volume";
216 return "eevee_display_lightprobe_sphere";
218 return "eevee_display_lightprobe_planar";
219 case DOF_BOKEH_LUT:
220 return "eevee_depth_of_field_bokeh_lut";
221 case DOF_DOWNSAMPLE:
222 return "eevee_depth_of_field_downsample";
223 case DOF_FILTER:
224 return "eevee_depth_of_field_filter";
226 return "eevee_depth_of_field_gather_foreground_lut";
228 return "eevee_depth_of_field_gather_foreground_no_lut";
230 return "eevee_depth_of_field_gather_background_lut";
232 return "eevee_depth_of_field_gather_background_no_lut";
234 return "eevee_depth_of_field_hole_fill";
235 case DOF_REDUCE:
236 return "eevee_depth_of_field_reduce";
237 case DOF_RESOLVE:
238 return "eevee_depth_of_field_resolve_no_lut";
239 case DOF_RESOLVE_LUT:
240 return "eevee_depth_of_field_resolve_lut";
241 case DOF_SETUP:
242 return "eevee_depth_of_field_setup";
243 case DOF_SCATTER:
244 return "eevee_depth_of_field_scatter";
245 case DOF_STABILIZE:
246 return "eevee_depth_of_field_stabilize";
248 return "eevee_depth_of_field_tiles_dilate_minabs";
250 return "eevee_depth_of_field_tiles_dilate_minmax";
252 return "eevee_depth_of_field_tiles_flatten";
254 return "eevee_light_culling_debug";
256 return "eevee_light_culling_select";
258 return "eevee_light_culling_sort";
260 return "eevee_light_culling_tile";
262 return "eevee_light_culling_zbin";
264 return "eevee_light_shadow_setup";
266 return "eevee_ray_denoise_spatial";
268 return "eevee_ray_denoise_temporal";
270 return "eevee_ray_denoise_bilateral";
271 case RAY_GENERATE:
272 return "eevee_ray_generate";
274 return "eevee_ray_trace_fallback";
275 case RAY_TRACE_PLANAR:
276 return "eevee_ray_trace_planar";
277 case RAY_TRACE_SCREEN:
278 return "eevee_ray_trace_screen";
280 return "eevee_ray_tile_classify";
281 case RAY_TILE_COMPACT:
282 return "eevee_ray_tile_compact";
283 case RENDERPASS_CLEAR:
284 return "eevee_renderpass_clear";
286 return "eevee_lightprobe_volume_bounds";
288 return "eevee_lightprobe_volume_offset";
290 return "eevee_lightprobe_volume_ray";
292 return "eevee_lightprobe_volume_load";
294 return "eevee_lightprobe_volume_world";
296 return "eevee_lightprobe_sphere_convolve";
298 return "eevee_lightprobe_sphere_remap";
300 return "eevee_lightprobe_sphere_irradiance";
302 return "eevee_lightprobe_sphere_select";
304 return "eevee_lightprobe_sphere_sunlight";
306 return "eevee_shadow_clipmap_clear";
307 case SHADOW_DEBUG:
308 return "eevee_shadow_debug";
310 return "eevee_shadow_page_allocate";
312 return "eevee_shadow_page_clear";
314 return "eevee_shadow_page_defrag";
315 case SHADOW_PAGE_FREE:
316 return "eevee_shadow_page_free";
317 case SHADOW_PAGE_MASK:
318 return "eevee_shadow_page_mask";
320 return "eevee_shadow_tilemap_amend";
322 return "eevee_shadow_tilemap_bounds";
324 return "eevee_shadow_tilemap_finalize";
326 return "eevee_shadow_tilemap_rendermap";
328 return "eevee_shadow_tilemap_init";
330 return "eevee_shadow_tag_update";
332 return "eevee_shadow_tag_usage_opaque";
334 return "eevee_shadow_tag_usage_surfels";
336 return "eevee_shadow_tag_usage_transparent";
338 return "eevee_shadow_page_tile_clear";
340 return "eevee_shadow_page_tile_store";
342 return "eevee_shadow_tag_usage_volume";
344 return "eevee_shadow_view_visibility";
346 return "eevee_subsurface_convolve";
347 case SUBSURFACE_SETUP:
348 return "eevee_subsurface_setup";
350 return "eevee_surfel_cluster_build";
351 case SURFEL_LIGHT:
352 return "eevee_surfel_light";
354 return "eevee_surfel_list_build";
355 case SURFEL_LIST_SORT:
356 return "eevee_surfel_list_sort";
357 case SURFEL_RAY:
358 return "eevee_surfel_ray";
359 case VERTEX_COPY:
360 return "eevee_vertex_copy";
362 return "eevee_volume_integration";
364 return "eevee_volume_occupancy_convert";
365 case VOLUME_RESOLVE:
366 return "eevee_volume_resolve";
367 case VOLUME_SCATTER:
368 return "eevee_volume_scatter";
370 return "eevee_volume_scatter_with_lights";
371 /* To avoid compiler warning about missing case. */
372 case MAX_SHADER_TYPE:
373 return "";
374 }
375 return "";
376}
377
/* Lazily create (and cache) the static shader for `shader_type`.
 * NOTE(review): Doxygen extraction — the fused leading numbers are upstream
 * line numbers; gaps are dropped lines. The function signature (upstream 378,
 * `GPUShader *ShaderModule::static_shader_get(eShaderType shader_type)` per the
 * class declaration) and interior lines 380 and 383 were dropped; line 383
 * presumably held the condition whose failure branch prints the error below.
 * Code kept byte-identical; confirm against upstream. */
379{
 /* NOTE(review): upstream line 380 dropped — content unknown (possibly a
  * comment or a blocking wait on the async compilation batch). */
381 if (shaders_[shader_type] == nullptr) {
382 const char *shader_name = static_shader_create_info_name_get(shader_type);
 /* NOTE(review): upstream line 383 dropped — presumably an `if (...) {`
  * checking that the create info / compilation is valid before the error
  * path below. */
384 fprintf(stderr, "EEVEE: error: Could not compile static shader \"%s\"\n", shader_name);
385 BLI_assert(0);
386 }
387 else {
388 shaders_[shader_type] = GPU_shader_create_from_info_name(shader_name);
389 }
390 }
391 return shaders_[shader_type];
392}
393
396/* -------------------------------------------------------------------- */
401/* Helper class to get free sampler slots for materials. */
403 int first_reserved_;
404 int last_reserved_;
405 int index_;
406
407 public:
409 eMaterialGeometry geometry_type,
410 bool has_shader_to_rgba)
411 {
412 index_ = 0;
413 if (ELEM(geometry_type, MAT_GEOM_POINT_CLOUD, MAT_GEOM_CURVES)) {
414 index_ = 1;
415 }
416 else if (geometry_type == MAT_GEOM_GPENCIL) {
417 index_ = 2;
418 }
419
420 first_reserved_ = MATERIAL_TEXTURE_RESERVED_SLOT_FIRST;
422 if (geometry_type == MAT_GEOM_WORLD) {
424 }
425 else if (pipeline_type == MAT_PIPE_DEFERRED && has_shader_to_rgba) {
427 }
428 else if (pipeline_type == MAT_PIPE_FORWARD) {
430 }
431 }
432
433 int get()
434 {
435 if (index_ == first_reserved_) {
436 index_ = last_reserved_ + 1;
437 }
438 return index_++;
439 }
440};
441
/* Amend the codegen'd ShaderCreateInfo of a node-tree material: assign sampler
 * slots, add the defines/additional-infos matching the material's closure
 * flags, and wrap the generated GLSL into the nodetree_*() entry points.
 * NOTE(review): Doxygen extraction — the fused leading numbers are upstream
 * line numbers; every gap in the numbering is a dropped line (the function
 * signature at 442, and many `if (GPU_material_flag_get(...))` conditions,
 * `case` labels and `BLI_assert_unreachable()` default bodies). Code kept
 * byte-identical; inline notes mark the most important gaps. */
443{
444 using namespace blender::gpu::shader;
445
446 uint64_t shader_uuid = GPU_material_uuid_get(gpumat);
447
448 eMaterialPipeline pipeline_type;
449 eMaterialGeometry geometry_type;
450 eMaterialDisplacement displacement_type;
451 eMaterialThickness thickness_type;
452 bool transparent_shadows;
 /* NOTE(review): upstream line 453 dropped — presumably the call
  * `material_type_from_shader_uuid(shader_uuid,` that fills the outputs below
  * (the function appears in this file's cross references). */
454 pipeline_type,
455 geometry_type,
456 displacement_type,
457 thickness_type,
458 transparent_shadows);
459
460 GPUCodegenOutput &codegen = *codegen_;
461 ShaderCreateInfo &info = *reinterpret_cast<ShaderCreateInfo *>(codegen.create_info);
462
463 /* WORKAROUND: Add new ob attr buffer. */
464 if (GPU_material_uniform_attributes(gpumat) != nullptr) {
465 info.additional_info("draw_object_attribute_new");
466
467 /* Search and remove the old object attribute UBO which would creating bind point collision. */
468 for (auto &resource_info : info.batch_resources_) {
469 if (resource_info.bind_type == ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER &&
470 resource_info.uniformbuf.name == GPU_ATTRIBUTE_UBO_BLOCK_NAME "[512]")
471 {
472 info.batch_resources_.remove_first_occurrence_and_reorder(resource_info);
473 break;
474 }
475 }
476 /* Remove references to the UBO. */
477 info.define("UNI_ATTR(a)", "vec4(0.0)");
478 }
479
480 SamplerSlots sampler_slots(
481 pipeline_type, geometry_type, GPU_material_flag_get(gpumat, GPU_MATFLAG_SHADER_TO_RGBA));
482
 /* Re-slot every generated sampler using the free-slot allocator above. */
483 for (auto &resource : info.batch_resources_) {
484 if (resource.bind_type == ShaderCreateInfo::Resource::BindType::SAMPLER) {
485 resource.slot = sampler_slots.get();
486 }
487 }
488
 /* NOTE(review): upstream line 489 dropped — presumably
  * `if (GPU_material_flag_get(gpumat, GPU_MATFLAG_AO) &&` (GPU_MATFLAG_AO is
  * in the cross references). */
490 ELEM(pipeline_type, MAT_PIPE_FORWARD, MAT_PIPE_DEFERRED) &&
491 geometry_type_has_surface(geometry_type))
492 {
493 info.define("MAT_AMBIENT_OCCLUSION");
494 }
495
 /* NOTE(review): upstream line 496 dropped — presumably
  * `if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {`. */
497 if (pipeline_type != MAT_PIPE_SHADOW || transparent_shadows) {
498 info.define("MAT_TRANSPARENT");
499 }
500 /* Transparent material do not have any velocity specific pipeline. */
501 if (pipeline_type == MAT_PIPE_PREPASS_FORWARD_VELOCITY) {
502 pipeline_type = MAT_PIPE_PREPASS_FORWARD;
503 }
504 }
505
506 /* Only deferred material allow use of cryptomatte and render passes. */
507 if (pipeline_type == MAT_PIPE_DEFERRED) {
508 info.additional_info("eevee_render_pass_out");
509 info.additional_info("eevee_cryptomatte_out");
510 }
511
 /* Bitmask of closure evaluation bins used by this material. Each dropped
  * line below (513, 515-516, 526, 530, 534, 538, 542) presumably held the
  * `GPU_material_flag_get(...)` condition for the define that follows it —
  * the flag enums (GPU_MATFLAG_DIFFUSE, _SUBSURFACE, _REFRACT, _TRANSLUCENT,
  * _GLOSSY, _COAT) are all in the cross references. */
512 int32_t closure_data_slots = 0;
514 info.define("MAT_DIFFUSE");
517 {
518 /* Special case to allow translucent with diffuse without noise.
519 * Revert back to noise if clear coat is present. */
520 closure_data_slots |= (1 << 2);
521 }
522 else {
523 closure_data_slots |= (1 << 0);
524 }
525 }
527 info.define("MAT_SUBSURFACE");
528 closure_data_slots |= (1 << 0);
529 }
531 info.define("MAT_REFRACTION");
532 closure_data_slots |= (1 << 0);
533 }
535 info.define("MAT_TRANSLUCENT");
536 closure_data_slots |= (1 << 0);
537 }
539 info.define("MAT_REFLECTION");
540 closure_data_slots |= (1 << 1);
541 }
543 info.define("MAT_CLEARCOAT");
544 closure_data_slots |= (1 << 2);
545 }
546
547 int32_t closure_bin_count = count_bits_i(closure_data_slots);
548 switch (closure_bin_count) {
549 /* These need to be separated since the strings need to be static. */
550 case 0:
551 case 1:
552 info.define("CLOSURE_BIN_COUNT", "1");
553 break;
554 case 2:
555 info.define("CLOSURE_BIN_COUNT", "2");
556 break;
557 case 3:
558 info.define("CLOSURE_BIN_COUNT", "3");
559 break;
560 default:
 /* NOTE(review): upstream line 561 dropped — presumably
  * `BLI_assert_unreachable();` (it is in the cross references); same for the
  * dropped default bodies at 579 and 602 below. */
562 break;
563 }
564
565 if (pipeline_type == MAT_PIPE_DEFERRED) {
566 switch (closure_bin_count) {
567 /* These need to be separated since the strings need to be static. */
568 case 0:
569 case 1:
570 info.define("GBUFFER_LAYER_MAX", "1");
571 break;
572 case 2:
573 info.define("GBUFFER_LAYER_MAX", "2");
574 break;
575 case 3:
576 info.define("GBUFFER_LAYER_MAX", "3");
577 break;
578 default:
580 break;
581 }
582 }
583
584 if ((pipeline_type == MAT_PIPE_FORWARD) ||
 /* NOTE(review): upstream line 585 dropped — the second operand of this `||`,
  * presumably a SHADER_TO_RGBA condition. */
586 {
587 switch (closure_bin_count) {
588 case 0:
589 /* Define nothing. This will in turn define SKIP_LIGHT_EVAL. */
590 break;
591 /* These need to be separated since the strings need to be static. */
592 case 1:
593 info.define("LIGHT_CLOSURE_EVAL_COUNT", "1");
594 break;
595 case 2:
596 info.define("LIGHT_CLOSURE_EVAL_COUNT", "2");
597 break;
598 case 3:
599 info.define("LIGHT_CLOSURE_EVAL_COUNT", "3");
600 break;
601 default:
603 break;
604 }
605 }
606
 /* NOTE(review): upstream line 607 dropped — presumably
  * `if (GPU_material_flag_get(gpumat, GPU_MATFLAG_BARYCENTRIC)) {` (the flag
  * is in the cross references and matches the defines below). */
608 switch (geometry_type) {
609 case MAT_GEOM_MESH:
610 /* Support using gpu builtin barycentrics. */
611 info.define("USE_BARYCENTRICS");
612 info.builtins(BuiltinBits::BARYCENTRIC_COORD);
613 break;
614 case MAT_GEOM_CURVES:
615 /* Support using one vec2 attribute. See #hair_get_barycentric(). */
616 info.define("USE_BARYCENTRICS");
617 break;
618 default:
619 /* No support */
620 break;
621 }
622 }
623
624 std::stringstream global_vars;
625 switch (geometry_type) {
626 case MAT_GEOM_MESH:
627 if (pipeline_type == MAT_PIPE_VOLUME_MATERIAL) {
628 /* If mesh has a volume output, it can receive volume grid attributes from smoke
629 * simulation modifier. But the vertex shader might still need access to the vertex
630 * attribute for displacement. */
631 /* TODO(fclem): Eventually, we could add support for loading both. For now, remove the
632 * vertex inputs after conversion (avoid name collision). */
633 for (auto &input : info.vertex_inputs_) {
634 info.sampler(sampler_slots.get(), ImageType::FLOAT_3D, input.name, Frequency::BATCH);
635 }
636 info.vertex_inputs_.clear();
637 /* Volume materials require these for loading the grid attributes from smoke sims. */
638 info.additional_info("draw_volume_infos");
639 }
640 break;
 /* NOTE(review): upstream line 641 dropped — presumably
  * `case MAT_GEOM_POINT_CLOUD:` falling through to the curves handling;
  * dropped lines 643 and 646 were presumably comments. */
642 case MAT_GEOM_CURVES:
644 for (auto &input : info.vertex_inputs_) {
645 if (input.name == "orco") {
647 global_vars << input.type << " " << input.name << ";\n";
648 }
649 else {
650 info.sampler(sampler_slots.get(), ImageType::FLOAT_BUFFER, input.name, Frequency::BATCH);
651 }
652 }
653 info.vertex_inputs_.clear();
654 break;
655 case MAT_GEOM_WORLD:
656 if (pipeline_type == MAT_PIPE_VOLUME_MATERIAL) {
657 /* Even if world do not have grid attributes, we use dummy texture binds to pass correct
658 * defaults. So we have to replace all attributes as samplers. */
659 for (auto &input : info.vertex_inputs_) {
660 info.sampler(sampler_slots.get(), ImageType::FLOAT_3D, input.name, Frequency::BATCH);
661 }
662 info.vertex_inputs_.clear();
663 }
 /* NOTE(review): upstream lines 664-668 dropped — presumably a comment plus
  * an explicit fall-through into the grease-pencil case (ATTR_FALLTHROUGH is
  * in the cross references). Dropped 670-674 were presumably comments. */
669 case MAT_GEOM_GPENCIL:
675 for (auto &input : info.vertex_inputs_) {
676 global_vars << input.type << " " << input.name << ";\n";
677 }
678 info.vertex_inputs_.clear();
679 break;
680 case MAT_GEOM_VOLUME:
 /* NOTE(review): upstream line 681 dropped — presumably a comment. */
682 for (auto &input : info.vertex_inputs_) {
683 info.sampler(sampler_slots.get(), ImageType::FLOAT_3D, input.name, Frequency::BATCH);
684 }
685 info.vertex_inputs_.clear();
686 break;
687 }
688
689 const bool do_vertex_attrib_load = !ELEM(geometry_type, MAT_GEOM_WORLD, MAT_GEOM_VOLUME) &&
690 (pipeline_type != MAT_PIPE_VOLUME_MATERIAL);
691
692 if (!do_vertex_attrib_load && !info.vertex_out_interfaces_.is_empty()) {
693 /* Codegen outputs only one interface. */
694 const StageInterfaceInfo &iface = *info.vertex_out_interfaces_.first();
695 /* Globals the attrib_load() can write to when it is in the fragment shader. */
696 global_vars << "struct " << iface.name << " {\n";
697 for (const auto &inout : iface.inouts) {
698 global_vars << " " << inout.type << " " << inout.name << ";\n";
699 }
700 global_vars << "};\n";
701 global_vars << iface.name << " " << iface.instance_name << ";\n";
702
703 info.vertex_out_interfaces_.clear();
704 }
705
706 std::stringstream attr_load;
707 attr_load << "void attrib_load()\n";
708 attr_load << "{\n";
709 attr_load << (!codegen.attr_load.empty() ? codegen.attr_load : "");
710 attr_load << "}\n\n";
711
712 std::stringstream vert_gen, frag_gen, comp_gen;
713
 /* attrib_load() is emitted in exactly one stage; the other gets a stub. */
714 if (do_vertex_attrib_load) {
715 vert_gen << global_vars.str() << attr_load.str();
716 frag_gen << "void attrib_load() {}\n"; /* Placeholder. */
717 }
718 else {
719 vert_gen << "void attrib_load() {}\n"; /* Placeholder. */
720 frag_gen << global_vars.str() << attr_load.str();
721 }
722
723 {
724 const bool use_vertex_displacement = !codegen.displacement.empty() &&
725 (displacement_type != MAT_DISPLACEMENT_BUMP) &&
726 !ELEM(geometry_type, MAT_GEOM_WORLD, MAT_GEOM_VOLUME);
727
728 vert_gen << "vec3 nodetree_displacement()\n";
729 vert_gen << "{\n";
730 vert_gen << ((use_vertex_displacement) ? codegen.displacement : "return vec3(0);\n");
731 vert_gen << "}\n\n";
732
733 info.vertex_source_generated = vert_gen.str();
734 }
735
736 if (pipeline_type != MAT_PIPE_VOLUME_OCCUPANCY) {
737 frag_gen << (!codegen.material_functions.empty() ? codegen.material_functions : "\n");
738
739 if (!codegen.displacement.empty()) {
740 /* Bump displacement. Needed to recompute normals after displacement. */
741 info.define("MAT_DISPLACEMENT_BUMP");
742
743 frag_gen << "vec3 nodetree_displacement()\n";
744 frag_gen << "{\n";
745 frag_gen << codegen.displacement;
746 frag_gen << "}\n\n";
747 }
748
749 frag_gen << "Closure nodetree_surface(float closure_rand)\n";
750 frag_gen << "{\n";
751 frag_gen << " closure_weights_reset(closure_rand);\n";
752 frag_gen << (!codegen.surface.empty() ? codegen.surface : "return Closure(0);\n");
753 frag_gen << "}\n\n";
754
755 /* TODO(fclem): Find a way to pass material parameters inside the material UBO. */
756 info.define("thickness_mode", thickness_type == MAT_THICKNESS_SLAB ? "-1.0" : "1.0");
757
758 frag_gen << "float nodetree_thickness()\n";
759 frag_gen << "{\n";
760 if (codegen.thickness.empty()) {
761 /* Check presence of closure needing thickness to not add mandatory dependency on obinfos. */
 /* NOTE(review): upstream lines 762-763 dropped — presumably the
  * `if (...)` condition testing for those closure flags. */
764 {
765 frag_gen << "return 0.0;\n";
766 }
767 else {
768 if (info.additional_infos_.first_index_of_try("draw_object_infos_new") == -1) {
769 info.additional_info("draw_object_infos_new");
770 }
771 /* TODO(fclem): Should use `to_scale` but the gpu_shader_math_matrix_lib.glsl isn't
772 * included everywhere yet. */
773 frag_gen << "vec3 ob_scale;\n";
774 frag_gen << "ob_scale.x = length(ModelMatrix[0].xyz);\n";
775 frag_gen << "ob_scale.y = length(ModelMatrix[1].xyz);\n";
776 frag_gen << "ob_scale.z = length(ModelMatrix[2].xyz);\n";
777 frag_gen << "vec3 ls_dimensions = safe_rcp(abs(OrcoTexCoFactors[1].xyz));\n";
778 frag_gen << "vec3 ws_dimensions = ob_scale * ls_dimensions;\n";
779 /* Choose the minimum axis so that cuboids are better represented. */
780 frag_gen << "return reduce_min(ws_dimensions);\n";
781 }
782 }
783 else {
784 frag_gen << codegen.thickness;
785 }
786 frag_gen << "}\n\n";
787
788 frag_gen << "Closure nodetree_volume()\n";
789 frag_gen << "{\n";
790 frag_gen << " closure_weights_reset(0.0);\n";
791 frag_gen << (!codegen.volume.empty() ? codegen.volume : "return Closure(0);\n");
792 frag_gen << "}\n\n";
793
794 info.fragment_source_generated = frag_gen.str();
795 }
796
797 /* Geometry Info. */
798 switch (geometry_type) {
799 case MAT_GEOM_WORLD:
800 info.additional_info("eevee_geom_world");
801 break;
802 case MAT_GEOM_GPENCIL:
803 info.additional_info("eevee_geom_gpencil");
804 break;
805 case MAT_GEOM_CURVES:
806 info.additional_info("eevee_geom_curves");
807 break;
808 case MAT_GEOM_MESH:
809 info.additional_info("eevee_geom_mesh");
810 break;
 /* NOTE(review): upstream line 811 dropped — presumably
  * `case MAT_GEOM_POINT_CLOUD:`. */
812 info.additional_info("eevee_geom_point_cloud");
813 break;
814 case MAT_GEOM_VOLUME:
815 info.additional_info("eevee_geom_volume");
816 break;
817 }
818 /* Pipeline Info. */
819 switch (geometry_type) {
820 case MAT_GEOM_WORLD:
821 switch (pipeline_type) {
 /* NOTE(review): upstream line 822 dropped — presumably
  * `case MAT_PIPE_VOLUME_MATERIAL:`. Several `case` labels and `if`
  * conditions are likewise dropped in the pipeline switch below (832-833,
  * 836-838, 841, 846-847, 850, 854, 858, 861, 867-868, 879). */
823 info.additional_info("eevee_surf_volume");
824 break;
825 default:
826 info.additional_info("eevee_surf_world");
827 break;
828 }
829 break;
830 default:
831 switch (pipeline_type) {
834 info.additional_info("eevee_surf_depth", "eevee_velocity_geom");
835 break;
839 info.additional_info("eevee_surf_depth");
840 break;
842 info.additional_info("eevee_surf_depth", "eevee_clip_plane");
843 break;
844 case MAT_PIPE_SHADOW:
845 /* Determine surface shadow shader depending on used update technique. */
848 info.additional_info("eevee_surf_shadow_atomic");
849 } break;
851 info.additional_info("eevee_surf_shadow_tbdr");
852 } break;
853 default: {
855 } break;
856 }
857 break;
859 info.additional_info("eevee_surf_occupancy");
860 break;
862 info.additional_info("eevee_surf_volume");
863 break;
864 case MAT_PIPE_CAPTURE:
865 info.additional_info("eevee_surf_capture");
866 break;
869 info.additional_info("eevee_surf_deferred_hybrid");
870 }
871 else {
872 info.additional_info("eevee_surf_deferred");
873 }
874 break;
875 case MAT_PIPE_FORWARD:
876 info.additional_info("eevee_surf_forward");
877 break;
878 default:
880 break;
881 }
882 break;
883 }
884}
885
886/* WATCH: This can be called from another thread! Needs to not touch the shader module in any
887 * thread unsafe manner. */
888static void codegen_callback(void *thunk, GPUMaterial *mat, GPUCodegenOutput *codegen)
889{
890 reinterpret_cast<ShaderModule *>(thunk)->material_create_info_amend(mat, codegen);
891}
892
/* Decide whether a material's shadow/prepass pipeline can reuse the default
 * material's pass instead of compiling its own (possible when the material has
 * no vertex displacement, no relevant transparency and no raytraced
 * transmission). Returns nullptr when a dedicated pass is required.
 * NOTE(review): Doxygen extraction — fused leading numbers are upstream line
 * numbers; gaps are dropped lines (see inline notes). Code kept byte-identical. */
static GPUPass *pass_replacement_cb(void *thunk, GPUMaterial *mat)
{
895 using namespace blender::gpu::shader;
896
897 const ::Material *blender_mat = GPU_material_get_material(mat);
898
899 uint64_t shader_uuid = GPU_material_uuid_get(mat);
900
901 eMaterialPipeline pipeline_type;
902 eMaterialGeometry geometry_type;
903 eMaterialDisplacement displacement_type;
904 eMaterialThickness thickness_type;
905 bool transparent_shadows;
 /* NOTE(review): upstream line 906 dropped — presumably the call
  * `material_type_from_shader_uuid(shader_uuid,` filling the outputs below. */
907 pipeline_type,
908 geometry_type,
909 displacement_type,
910 thickness_type,
911 transparent_shadows);
912
913 bool is_shadow_pass = pipeline_type == eMaterialPipeline::MAT_PIPE_SHADOW;
914 bool is_prepass = ELEM(pipeline_type,
 /* NOTE(review): upstream lines 915-920 dropped — the ELEM list of prepass
  * pipeline enums (MAT_PIPE_PREPASS_DEFERRED_VELOCITY and
  * MAT_PIPE_PREPASS_FORWARD_VELOCITY appear in the cross references; the
  * full list must be restored from upstream). */
921
922 bool has_vertex_displacement = GPU_material_has_displacement_output(mat) &&
 /* NOTE(review): upstream line 923 dropped — presumably
  * `displacement_type != MAT_DISPLACEMENT_BUMP;`. */
924 bool has_transparency = GPU_material_flag_get(mat, GPU_MATFLAG_TRANSPARENT);
925 bool has_shadow_transparency = has_transparency && transparent_shadows;
926 bool has_raytraced_transmission = blender_mat && (blender_mat->blend_flag & MA_BL_SS_REFRACTION);
927
928 bool can_use_default = (is_shadow_pass &&
929 (!has_vertex_displacement && !has_shadow_transparency)) ||
930 (is_prepass && (!has_vertex_displacement && !has_transparency &&
931 !has_raytraced_transmission));
932 if (can_use_default) {
 /* Reuse the pre-compiled default material's pass for this pipeline. */
933 GPUMaterial *mat = reinterpret_cast<ShaderModule *>(thunk)->material_default_shader_get(
934 pipeline_type, geometry_type);
935 return GPU_material_get_pass(mat);
936 }
937
938 return nullptr;
939}
940
942 eMaterialGeometry geometry_type)
943{
944 bool is_volume = ELEM(pipeline_type, MAT_PIPE_VOLUME_MATERIAL, MAT_PIPE_VOLUME_OCCUPANCY);
945 ::Material *blender_mat = (is_volume) ? BKE_material_default_volume() :
947
948 return material_shader_get(
949 blender_mat, blender_mat->nodetree, pipeline_type, geometry_type, false);
950}
951
953 bNodeTree *nodetree,
954 eMaterialPipeline pipeline_type,
955 eMaterialGeometry geometry_type,
956 bool deferred_compilation)
957{
958 bool is_volume = ELEM(pipeline_type, MAT_PIPE_VOLUME_MATERIAL, MAT_PIPE_VOLUME_OCCUPANCY);
959
960 eMaterialDisplacement displacement_type = to_displacement_type(blender_mat->displacement_method);
961 eMaterialThickness thickness_type = to_thickness_type(blender_mat->thickness_mode);
962
964 pipeline_type, geometry_type, displacement_type, thickness_type, blender_mat->blend_flag);
965
966 bool is_default_material = ELEM(
968
969 GPUMaterial *mat = DRW_shader_from_material(blender_mat,
970 nodetree,
972 shader_uuid,
973 is_volume,
974 deferred_compilation,
976 this,
977 is_default_material ? nullptr : pass_replacement_cb);
978
979 return mat;
980}
981
983 bNodeTree *nodetree,
984 eMaterialPipeline pipeline_type)
985{
986 bool is_volume = (pipeline_type == MAT_PIPE_VOLUME_MATERIAL);
987 bool defer_compilation = is_volume;
988
989 uint64_t shader_uuid = shader_uuid_from_material_type(pipeline_type, MAT_GEOM_WORLD);
990
991 return DRW_shader_from_world(blender_world,
992 nodetree,
994 shader_uuid,
995 is_volume,
996 defer_compilation,
998 this);
999}
1000
1002 ListBase &materials,
1003 bNodeTree *nodetree,
1004 eMaterialPipeline pipeline_type,
1005 eMaterialGeometry geometry_type)
1006{
1007 uint64_t shader_uuid = shader_uuid_from_material_type(pipeline_type, geometry_type);
1008
1009 bool is_volume = ELEM(pipeline_type, MAT_PIPE_VOLUME_MATERIAL, MAT_PIPE_VOLUME_OCCUPANCY);
1010
1011 GPUMaterial *gpumat = GPU_material_from_nodetree(nullptr,
1012 nullptr,
1013 nodetree,
1014 &materials,
1015 name,
1017 shader_uuid,
1018 is_volume,
1019 false,
1021 this);
1023 GPU_material_compile(gpumat);
1024 /* Queue deferred material optimization. */
1026 return gpumat;
1027}
1028
1031} // namespace blender::eevee
struct Material * BKE_material_default_surface(void)
struct Material * BKE_material_default_volume(void)
#define BLI_assert_unreachable()
Definition BLI_assert.h:97
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
#define ATTR_FALLTHROUGH
MINLINE int count_bits_i(unsigned int n)
#define ELEM(...)
@ MA_BL_SS_REFRACTION
#define DRW_SHADER_FREE_SAFE(shader)
bool GPU_use_parallel_compilation()
uint64_t GPU_material_uuid_get(GPUMaterial *mat)
void GPU_material_compile(GPUMaterial *mat)
GPUPass * GPU_material_get_pass(GPUMaterial *material)
void GPU_material_status_set(GPUMaterial *mat, eGPUMaterialStatus status)
@ GPU_MAT_EEVEE
bool GPU_material_flag_get(const GPUMaterial *mat, eGPUMaterialFlag flag)
GPUMaterial * GPU_material_from_nodetree(Scene *scene, Material *ma, bNodeTree *ntree, ListBase *gpumaterials, const char *name, eGPUMaterialEngine engine, uint64_t shader_uuid, bool is_volume_shader, bool is_lookdev, GPUCodegenCallbackFn callback, void *thunk, GPUMaterialPassReplacementCallbackFn pass_replacement_cb=nullptr)
@ GPU_MATFLAG_SHADER_TO_RGBA
@ GPU_MATFLAG_GLOSSY
@ GPU_MATFLAG_COAT
@ GPU_MATFLAG_AO
@ GPU_MATFLAG_REFRACT
@ GPU_MATFLAG_BARYCENTRIC
@ GPU_MATFLAG_TRANSLUCENT
@ GPU_MATFLAG_DIFFUSE
@ GPU_MATFLAG_TRANSPARENT
@ GPU_MATFLAG_SUBSURFACE
@ GPU_MAT_CREATED
Material * GPU_material_get_material(GPUMaterial *material)
const GPUUniformAttrList * GPU_material_uniform_attributes(const GPUMaterial *material)
bool GPU_material_has_displacement_output(GPUMaterial *mat)
blender::Vector< GPUShader * > GPU_shader_batch_finalize(BatchHandle &handle)
SpecializationBatchHandle GPU_shader_batch_specializations(blender::Span< ShaderSpecialization > specializations)
GPUShader * GPU_shader_create_from_info_name(const char *info_name)
bool GPU_shader_batch_is_ready(BatchHandle handle)
const GPUShaderCreateInfo * GPU_shader_create_info_get(const char *info_name)
BatchHandle GPU_shader_batch_create_from_infos(blender::Span< const GPUShaderCreateInfo * > infos)
bool GPU_shader_batch_specializations_is_ready(SpecializationBatchHandle &handle)
#define GPU_ATTRIBUTE_UBO_BLOCK_NAME
struct GPUShader GPUShader
void append(const T &value)
void reserve(const int64_t min_capacity)
SamplerSlots(eMaterialPipeline pipeline_type, eMaterialGeometry geometry_type, bool has_shader_to_rgba)
bool is_ready(bool block=false)
GPUMaterial * material_shader_get(::Material *blender_mat, bNodeTree *nodetree, eMaterialPipeline pipeline_type, eMaterialGeometry geometry_type, bool deferred_compilation)
GPUMaterial * material_default_shader_get(eMaterialPipeline pipeline_type, eMaterialGeometry geometry_type)
void material_create_info_amend(GPUMaterial *mat, GPUCodegenOutput *codegen)
void precompile_specializations(int render_buffers_shadow_id, int shadow_ray_count, int shadow_ray_step_count)
GPUMaterial * world_shader_get(::World *blender_world, bNodeTree *nodetree, eMaterialPipeline pipeline_type)
static ShaderModule * module_get()
GPUShader * static_shader_get(eShaderType shader_type)
static ShadowTechnique shadow_technique
GPUMaterial * DRW_shader_from_world(World *wo, bNodeTree *ntree, eGPUMaterialEngine engine, const uint64_t shader_id, const bool is_volume_shader, bool deferred, GPUCodegenCallbackFn callback, void *thunk)
void DRW_shader_queue_optimize_material(GPUMaterial *mat)
GPUMaterial * DRW_shader_from_material(Material *ma, bNodeTree *ntree, eGPUMaterialEngine engine, const uint64_t shader_id, const bool is_volume_shader, bool deferred, GPUCodegenCallbackFn callback, void *thunk, GPUMaterialPassReplacementCallbackFn pass_replacement_cb)
#define MATERIAL_TEXTURE_RESERVED_SLOT_LAST_NO_EVAL
#define MATERIAL_TEXTURE_RESERVED_SLOT_LAST_HYBRID
#define MATERIAL_TEXTURE_RESERVED_SLOT_FIRST
#define MATERIAL_TEXTURE_RESERVED_SLOT_LAST_FORWARD
#define MATERIAL_TEXTURE_RESERVED_SLOT_LAST_WORLD
@ SHADOW_TILEMAP_TAG_USAGE_SURFELS
@ SHADOW_TILEMAP_TAG_USAGE_OPAQUE
@ SHADOW_TILEMAP_TAG_USAGE_TRANSPARENT
@ FILM_PASS_CONVERT_CRYPTOMATTE
@ SHADOW_TILEMAP_TAG_USAGE_VOLUME
static eMaterialDisplacement to_displacement_type(int displacement_method)
static GPUPass * pass_replacement_cb(void *thunk, GPUMaterial *mat)
static bool geometry_type_has_surface(eMaterialGeometry geometry_type)
static void material_type_from_shader_uuid(uint64_t shader_uuid, eMaterialPipeline &pipeline_type, eMaterialGeometry &geometry_type, eMaterialDisplacement &displacement_type, eMaterialThickness &thickness_type, bool &transparent_shadows)
static uint64_t shader_uuid_from_material_type(eMaterialPipeline pipeline_type, eMaterialGeometry geometry_type, eMaterialDisplacement displacement_type=MAT_DISPLACEMENT_BUMP, eMaterialThickness thickness_type=MAT_THICKNESS_SPHERE, char blend_flags=0)
static eMaterialThickness to_thickness_type(int thickness_mode)
@ MAT_PIPE_PREPASS_FORWARD_VELOCITY
@ MAT_PIPE_PREPASS_DEFERRED_VELOCITY
static void codegen_callback(void *thunk, GPUMaterial *mat, GPUCodegenOutput *codegen)
signed int int32_t
Definition stdint.h:77
unsigned __int64 uint64_t
Definition stdint.h:90
std::string surface
std::string thickness
std::string displacement
std::string volume
std::string attr_load
std::string material_functions
GPUShaderCreateInfo * create_info
struct bNodeTree * nodetree
Describe inputs & outputs, stage interfaces, resources and sources of a shader. If all data is correc...
Vector< StageInterfaceInfo * > vertex_out_interfaces_
Self & additional_info(StringRefNull info_name)
Self & sampler(int slot, ImageType type, StringRefNull name, Frequency freq=Frequency::PASS, GPUSamplerState sampler=GPUSamplerState::internal_sampler())
Self & define(StringRefNull name, StringRefNull value="")