Blender V4.3
draw_command.cc
/* SPDX-FileCopyrightText: 2022 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include "GPU_batch.hh"
#include "GPU_capabilities.hh"
#include "GPU_compute.hh"
#include "GPU_debug.hh"

#include "draw_command.hh"
#include "draw_pass.hh"
#include "draw_shader.hh"
#include "draw_view.hh"

#include <bitset>
#include <sstream>

namespace blender::draw::command {

static gpu::Batch *procedural_batch_get(GPUPrimType primitive)
{
  switch (primitive) {
    case GPU_PRIM_POINTS:
      return drw_cache_procedural_points_get();
    case GPU_PRIM_LINES:
      return drw_cache_procedural_lines_get();
    case GPU_PRIM_TRIS:
      return drw_cache_procedural_triangles_get();
    case GPU_PRIM_TRI_STRIP:
      return drw_cache_procedural_triangle_strips_get();
    default:
      /* Add new one as needed. */
      BLI_assert_unreachable();
      return nullptr;
  }
}
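
/* Note: the procedural batches returned above carry no vertex attributes. They only supply a
 * primitive type, so a draw of N vertices can be issued while the bound shader fetches the
 * actual geometry data itself (see the expanded draw-call paths below). */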

/* -------------------------------------------------------------------- */
/** \name Commands Execution
 * \{ */

void ShaderBind::execute(RecordingState &state) const
{
  if (assign_if_different(state.shader, shader)) {
    GPU_shader_bind(shader);
  }
}

void FramebufferBind::execute() const
{
  GPU_framebuffer_bind(*framebuffer);
}

void SubPassTransition::execute() const
{
  /* TODO(fclem): Require framebuffer bind to always be part of the pass so that we can track it
   * inside RecordingState. */
  GPUFrameBuffer *framebuffer = GPU_framebuffer_active_get();
  /* Unpack to the real enum type. */
  const GPUAttachmentState states[9] = {
      GPUAttachmentState(depth_state),
      GPUAttachmentState(color_states[0]),
      GPUAttachmentState(color_states[1]),
      GPUAttachmentState(color_states[2]),
      GPUAttachmentState(color_states[3]),
      GPUAttachmentState(color_states[4]),
      GPUAttachmentState(color_states[5]),
      GPUAttachmentState(color_states[6]),
      GPUAttachmentState(color_states[7]),
  };
  GPU_framebuffer_subpass_transition_array(framebuffer, states, ARRAY_SIZE(states));
}
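
/* Note: the `states` array above holds one entry per possible attachment: the depth attachment
 * first, followed by the 8 color attachments, mirroring the `depth_state` and `color_states`
 * members used by `SubPassTransition::serialize` below. */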

void ResourceBind::execute() const
{
  if (slot == -1) {
    return;
  }
  switch (type) {
    case Type::Sampler:
      GPU_texture_bind_ex(is_reference ? *texture_ref : texture, sampler, slot);
      break;
    case Type::BufferSampler:
      GPU_vertbuf_bind_as_texture(is_reference ? *vertex_buf_ref : vertex_buf, slot);
      break;
    case Type::Image:
      GPU_texture_image_bind(is_reference ? *texture_ref : texture, slot);
      break;
    case Type::UniformBuf:
      GPU_uniformbuf_bind(is_reference ? *uniform_buf_ref : uniform_buf, slot);
      break;
    case Type::StorageBuf:
      GPU_storagebuf_bind(is_reference ? *storage_buf_ref : storage_buf, slot);
      break;
    case Type::UniformAsStorageBuf:
      GPU_uniformbuf_bind_as_ssbo(is_reference ? *uniform_buf_ref : uniform_buf, slot);
      break;
    case Type::VertexAsStorageBuf:
      GPU_vertbuf_bind_as_ssbo(is_reference ? *vertex_buf_ref : vertex_buf, slot);
      break;
    case Type::IndexAsStorageBuf:
      GPU_indexbuf_bind_as_ssbo(is_reference ? *index_buf_ref : index_buf, slot);
      break;
  }
}

void PushConstant::execute(RecordingState &state) const
{
  if (location == -1) {
    return;
  }
  switch (type) {
    case Type::IntValue:
      GPU_shader_uniform_int_ex(state.shader, location, comp_len, array_len, int4_value);
      break;
    case Type::IntReference:
      GPU_shader_uniform_int_ex(state.shader, location, comp_len, array_len, int_ref);
      break;
    case Type::FloatValue:
      GPU_shader_uniform_float_ex(state.shader, location, comp_len, array_len, float4_value);
      break;
    case Type::FloatReference:
      GPU_shader_uniform_float_ex(state.shader, location, comp_len, array_len, float_ref);
      break;
  }
}

void SpecializeConstant::execute() const
{
  /* All specialization constants should exist as they are not optimized out like uniforms. */
  BLI_assert(location != -1);

  switch (type) {
    case Type::IntValue:
      GPU_shader_constant_int_ex(shader, location, int_value);
      break;
    case Type::IntReference:
      GPU_shader_constant_int_ex(shader, location, *int_ref);
      break;
    case Type::UintValue:
      GPU_shader_constant_uint_ex(shader, location, uint_value);
      break;
    case Type::UintReference:
      GPU_shader_constant_uint_ex(shader, location, *uint_ref);
      break;
    case Type::FloatValue:
      GPU_shader_constant_float_ex(shader, location, float_value);
      break;
    case Type::FloatReference:
      GPU_shader_constant_float_ex(shader, location, *float_ref);
      break;
    case Type::BoolValue:
      GPU_shader_constant_bool_ex(shader, location, bool_value);
      break;
    case Type::BoolReference:
      GPU_shader_constant_bool_ex(shader, location, *bool_ref);
      break;
  }
}

void Draw::execute(RecordingState &state) const
{
  state.front_facing_set(handle.has_inverted_handedness());

  if (GPU_shader_draw_parameters_support() == false) {
    GPU_batch_resource_id_buf_set(batch, state.resource_id_buf);
  }

  if (GPUPrimType(expand_prim_type) != GPU_PRIM_NONE) {
    /* Expanded drawcall. */
    IndexRange vert_range = GPU_batch_draw_expanded_parameter_get(
        batch, GPUPrimType(expand_prim_type), vertex_len, vertex_first);
    IndexRange expanded_range = {vert_range.start() * expand_prim_len,
                                 vert_range.size() * expand_prim_len};

    if (expanded_range.is_empty()) {
      /* Nothing to draw, and can lead to asserts in GPU_batch_bind_as_resources. */
      return;
    }

    GPU_batch_bind_as_resources(batch, state.shader);

    gpu::Batch *gpu_batch = procedural_batch_get(GPUPrimType(expand_prim_type));
    GPU_batch_set_shader(gpu_batch, state.shader);
    GPU_batch_draw_advanced(
        gpu_batch, expanded_range.start(), expanded_range.size(), 0, instance_len);
  }
  else {
    /* Regular drawcall. */
    GPU_batch_set_shader(batch, state.shader);
    GPU_batch_draw_advanced(batch, vertex_first, vertex_len, 0, instance_len);
  }
}
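
/* Illustration of the expansion math above (made-up numbers): with `expand_prim_len = 6` and a
 * `vert_range` of 10 primitives starting at primitive 4, the procedural batch is drawn over the
 * vertex range [24, 84), while the original `batch` is only read as a resource through
 * `GPU_batch_bind_as_resources`. */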

void DrawMulti::execute(RecordingState &state) const
{
  DrawMultiBuf::DrawCommandBuf &indirect_buf = multi_draw_buf->command_buf_;
  DrawMultiBuf::DrawGroupBuf &groups = multi_draw_buf->group_buf_;

  uint group_index = this->group_first;
  while (group_index != uint(-1)) {
    const DrawGroup &group = groups[group_index];

    if (group.vertex_len > 0) {
      gpu::Batch *batch = group.desc.gpu_batch;

      if (GPUPrimType(group.desc.expand_prim_type) != GPU_PRIM_NONE) {
        /* Bind original batch as resource and use a procedural batch to issue the draw-call. */
        GPU_batch_bind_as_resources(group.desc.gpu_batch, state.shader);
        batch = procedural_batch_get(GPUPrimType(group.desc.expand_prim_type));
      }

      if (GPU_shader_draw_parameters_support() == false) {
        GPU_batch_resource_id_buf_set(batch, state.resource_id_buf);
      }

      GPU_batch_set_shader(batch, state.shader);

      constexpr intptr_t stride = sizeof(DrawCommand);
      /* We have 2 indirect commands reserved per draw group. */
      intptr_t offset = stride * group_index * 2;

      /* Draw negatively scaled geometry first. */
      if (group.len - group.front_facing_len > 0) {
        state.front_facing_set(true);
        GPU_batch_draw_indirect(batch, indirect_buf, offset);
      }

      if (group.front_facing_len > 0) {
        state.front_facing_set(false);
        GPU_batch_draw_indirect(batch, indirect_buf, offset + stride);
      }
    }

    group_index = group.next;
  }
}
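
/* Layout note: `command_buf_` stores two `DrawCommand` slots per `DrawGroup` at byte offset
 * `group_index * 2 * sizeof(DrawCommand)`: the first slot covers the inverted handedness
 * (negatively scaled) part of the group, the second the front facing part, hence the
 * `offset + stride` of the second indirect draw above. */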

void DrawIndirect::execute(RecordingState &state) const
{
  state.front_facing_set(handle.has_inverted_handedness());

  GPU_batch_draw_indirect(batch, *indirect_buf, 0);
}

void Dispatch::execute(RecordingState &state) const
{
  if (is_reference) {
    GPU_compute_dispatch(state.shader, size_ref->x, size_ref->y, size_ref->z);
  }
  else {
    GPU_compute_dispatch(state.shader, size.x, size.y, size.z);
  }
}
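
/* Note: the `is_reference` path dereferences `size_ref` at command execution time, so a dispatch
 * size that is only known after the pass has been recorded (e.g. computed by an earlier pass)
 * can still be used. */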

void DispatchIndirect::execute(RecordingState &state) const
{
  GPU_compute_dispatch_indirect(state.shader, *indirect_buf);
}

void Barrier::execute() const
{
  GPU_memory_barrier(type);
}

void Clear::execute() const
{
  GPUFrameBuffer *fb = GPU_framebuffer_active_get();
  GPU_framebuffer_clear(fb, eGPUFrameBufferBits(clear_channels), color, depth, stencil);
}

void ClearMulti::execute() const
{
  GPUFrameBuffer *fb = GPU_framebuffer_active_get();
  GPU_framebuffer_multi_clear(fb, (const float(*)[4])colors);
}

void StateSet::execute(RecordingState &recording_state) const
{
  /**
   * Does not support locked state for the moment and never should.
   * Better implement a less hacky selection!
   */
  BLI_assert(DST.state_lock == 0);

  bool state_changed = assign_if_different(recording_state.pipeline_state, new_state);
  bool clip_changed = assign_if_different(recording_state.clip_plane_count, clip_plane_count);

  if (!state_changed && !clip_changed) {
    return;
  }

  /* Keep old API working. Keep the state tracking in sync. */
  /* TODO(fclem): Move at the end of a pass. */
  DST.state = new_state;

  GPU_state_set(to_write_mask(new_state),
                to_blend(new_state),
                to_face_cull_test(new_state),
                to_depth_test(new_state),
                to_stencil_test(new_state),
                to_stencil_op(new_state),
                to_provoking_vertex(new_state));

  if (new_state & DRW_STATE_SHADOW_OFFSET) {
    GPU_shadow_offset(true);
  }
  else {
    GPU_shadow_offset(false);
  }

  /* TODO: this should be part of shader state. */
  GPU_clip_distances(recording_state.clip_plane_count);

  if (new_state & DRW_STATE_IN_FRONT_SELECT) {
    /* XXX `GPU_depth_range` is not a perfect solution
     * since very distant geometries can still be occluded.
     * Also the depth test precision of these geometries is impaired.
     * However, it solves the selection for the vast majority of cases. */
    GPU_depth_range(0.0f, 0.01f);
  }
  else {
    GPU_depth_range(0.0f, 1.0f);
  }

  if (new_state & DRW_STATE_PROGRAM_POINT_SIZE) {
    GPU_program_point_size(true);
  }
  else {
    GPU_program_point_size(false);
  }
}

void StencilSet::execute() const
{
  GPU_stencil_write_mask_set(write_mask);
  GPU_stencil_compare_mask_set(compare_mask);
  GPU_stencil_reference_set(reference);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Commands Serialization
 * \{ */

std::string ShaderBind::serialize() const
{
  return std::string(".shader_bind(") + GPU_shader_get_name(shader) + ")";
}

std::string FramebufferBind::serialize() const
{
  return std::string(".framebuffer_bind(") +
         (*framebuffer == nullptr ? "nullptr" : GPU_framebuffer_get_name(*framebuffer)) + ")";
}

std::string SubPassTransition::serialize() const
{
  auto to_str = [](GPUAttachmentState state) {
    return (state != GPU_ATTACHMENT_IGNORE) ?
               ((state == GPU_ATTACHMENT_WRITE) ? "write" : "read") :
               "ignore";
  };

  return std::string(".subpass_transition(\n") +
         "depth=" + to_str(GPUAttachmentState(depth_state)) + ",\n" +
         "color0=" + to_str(GPUAttachmentState(color_states[0])) + ",\n" +
         "color1=" + to_str(GPUAttachmentState(color_states[1])) + ",\n" +
         "color2=" + to_str(GPUAttachmentState(color_states[2])) + ",\n" +
         "color3=" + to_str(GPUAttachmentState(color_states[3])) + ",\n" +
         "color4=" + to_str(GPUAttachmentState(color_states[4])) + ",\n" +
         "color5=" + to_str(GPUAttachmentState(color_states[5])) + ",\n" +
         "color6=" + to_str(GPUAttachmentState(color_states[6])) + ",\n" +
         "color7=" + to_str(GPUAttachmentState(color_states[7])) + "\n)";
}

std::string ResourceBind::serialize() const
{
  switch (type) {
    case Type::Sampler:
      return std::string(".bind_texture") + (is_reference ? "_ref" : "") + "(" +
             std::to_string(slot) + ", sampler=" + sampler.to_string() + ")";
    case Type::BufferSampler:
      return std::string(".bind_vertbuf_as_texture") + (is_reference ? "_ref" : "") + "(" +
             std::to_string(slot) + ")";
    case Type::Image:
      return std::string(".bind_image") + (is_reference ? "_ref" : "") + "(" +
             std::to_string(slot) + ")";
    case Type::UniformBuf:
      return std::string(".bind_uniform_buf") + (is_reference ? "_ref" : "") + "(" +
             std::to_string(slot) + ")";
    case Type::StorageBuf:
      return std::string(".bind_storage_buf") + (is_reference ? "_ref" : "") + "(" +
             std::to_string(slot) + ")";
    case Type::UniformAsStorageBuf:
      return std::string(".bind_uniform_as_ssbo") + (is_reference ? "_ref" : "") + "(" +
             std::to_string(slot) + ")";
    case Type::VertexAsStorageBuf:
      return std::string(".bind_vertbuf_as_ssbo") + (is_reference ? "_ref" : "") + "(" +
             std::to_string(slot) + ")";
    case Type::IndexAsStorageBuf:
      return std::string(".bind_indexbuf_as_ssbo") + (is_reference ? "_ref" : "") + "(" +
             std::to_string(slot) + ")";
    default:
      BLI_assert_unreachable();
      return "";
  }
}

std::string PushConstant::serialize() const
{
  std::stringstream ss;
  for (int i = 0; i < array_len; i++) {
    switch (comp_len) {
      case 1:
        switch (type) {
          case Type::IntValue:
            ss << int1_value;
            break;
          case Type::IntReference:
            ss << int_ref[i];
            break;
          case Type::FloatValue:
            ss << float1_value;
            break;
          case Type::FloatReference:
            ss << float_ref[i];
            break;
        }
        break;
      case 2:
        switch (type) {
          case Type::IntValue:
            ss << int2_value;
            break;
          case Type::IntReference:
            ss << int2_ref[i];
            break;
          case Type::FloatValue:
            ss << float2_value;
            break;
          case Type::FloatReference:
            ss << float2_ref[i];
            break;
        }
        break;
      case 3:
        switch (type) {
          case Type::IntValue:
            ss << int3_value;
            break;
          case Type::IntReference:
            ss << int3_ref[i];
            break;
          case Type::FloatValue:
            ss << float3_value;
            break;
          case Type::FloatReference:
            ss << float3_ref[i];
            break;
        }
        break;
      case 4:
        switch (type) {
          case Type::IntValue:
            ss << int4_value;
            break;
          case Type::IntReference:
            ss << int4_ref[i];
            break;
          case Type::FloatValue:
            ss << float4_value;
            break;
          case Type::FloatReference:
            ss << float4_ref[i];
            break;
        }
        break;
      case 16:
        switch (type) {
          case Type::IntValue:
            ss << int4x4(
                (&int4_value)[0], (&int4_value)[1], (&int4_value)[2], (&int4_value)[3]);
            break;
          case Type::FloatValue:
            ss << float4x4(
                (&float4_value)[0], (&float4_value)[1], (&float4_value)[2], (&float4_value)[3]);
            break;
          case Type::FloatReference:
            ss << *float4x4_ref;
            break;
        }
        break;
    }
    if (i < array_len - 1) {
      ss << ", ";
    }
  }

  return std::string(".push_constant(") + std::to_string(location) + ", data=" + ss.str() + ")";
}
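
/* For reference, the produced string resembles `.push_constant(8, data=(0.5, 0.5, 1.0))` for a
 * `float3` constant at location 8 (illustrative values, not a real capture). */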

std::string SpecializeConstant::serialize() const
{
  std::stringstream ss;
  switch (type) {
    case Type::IntValue:
      ss << int_value;
      break;
    case Type::UintValue:
      ss << uint_value;
      break;
    case Type::FloatValue:
      ss << float_value;
      break;
    case Type::BoolValue:
      ss << bool_value;
      break;
    case Type::IntReference:
      ss << *int_ref;
      break;
    case Type::UintReference:
      ss << *uint_ref;
      break;
    case Type::FloatReference:
      ss << *float_ref;
      break;
    case Type::BoolReference:
      ss << *bool_ref;
      break;
  }
  return std::string(".specialize_constant(") + std::to_string(location) + ", data=" + ss.str() +
         ")";
}

std::string Draw::serialize() const
{
  std::string inst_len = std::to_string(instance_len);
  std::string vert_len = (vertex_len == uint(-1)) ? "from_batch" : std::to_string(vertex_len);
  std::string vert_first = (vertex_first == uint(-1)) ? "from_batch" :
                                                        std::to_string(vertex_first);
  return std::string(".draw(inst_len=") + inst_len + ", vert_len=" + vert_len +
         ", vert_first=" + vert_first + ", res_id=" + std::to_string(handle.resource_index()) +
         ")";
}

std::string DrawMulti::serialize(const std::string &line_prefix) const
{
  DrawMultiBuf::DrawGroupBuf &groups = multi_draw_buf->group_buf_;

  MutableSpan<DrawPrototype> prototypes(multi_draw_buf->prototype_buf_.data(),
                                        multi_draw_buf->prototype_count_);

  /* This emulates the GPU sorting but without the unstable draw order. */
  std::sort(
      prototypes.begin(), prototypes.end(), [](const DrawPrototype &a, const DrawPrototype &b) {
        return (a.group_id < b.group_id) ||
               (a.group_id == b.group_id && a.resource_handle > b.resource_handle);
      });

  /* Compute prefix sum to have correct offsets. */
  uint prefix_sum = 0u;
  for (DrawGroup &group : groups) {
    group.start = prefix_sum;
    prefix_sum += group.front_facing_counter + group.back_facing_counter;
  }

  std::stringstream ss;

  uint group_len = 0;
  uint group_index = this->group_first;
  while (group_index != uint(-1)) {
    const DrawGroup &grp = groups[group_index];

    ss << std::endl << line_prefix << "  .group(id=" << group_index << ", len=" << grp.len << ")";

    intptr_t offset = grp.start;

    if (grp.back_facing_counter > 0) {
      for (DrawPrototype &proto : prototypes.slice_safe({offset, grp.back_facing_counter})) {
        BLI_assert(proto.group_id == group_index);
        ResourceHandle handle(proto.resource_handle);
        BLI_assert(handle.has_inverted_handedness());
        ss << std::endl
           << line_prefix << "    .proto(instance_len=" << std::to_string(proto.instance_len)
           << ", resource_id=" << std::to_string(handle.resource_index()) << ", back_face)";
      }
      offset += grp.back_facing_counter;
    }

    if (grp.front_facing_counter > 0) {
      for (DrawPrototype &proto : prototypes.slice_safe({offset, grp.front_facing_counter})) {
        BLI_assert(proto.group_id == group_index);
        ResourceHandle handle(proto.resource_handle);
        BLI_assert(!handle.has_inverted_handedness());
        ss << std::endl
           << line_prefix << "    .proto(instance_len=" << std::to_string(proto.instance_len)
           << ", resource_id=" << std::to_string(handle.resource_index()) << ", front_face)";
      }
    }

    group_index = grp.next;
    group_len++;
  }

  ss << std::endl;

  return line_prefix + ".draw_multi(" + std::to_string(group_len) + ")" + ss.str();
}

std::string DrawIndirect::serialize() const
{
  return std::string(".draw_indirect()");
}

std::string Dispatch::serialize() const
{
  int3 sz = is_reference ? *size_ref : size;
  return std::string(".dispatch") + (is_reference ? "_ref" : "") + "(" + std::to_string(sz.x) +
         ", " + std::to_string(sz.y) + ", " + std::to_string(sz.z) + ")";
}

std::string DispatchIndirect::serialize() const
{
  return std::string(".dispatch_indirect()");
}

std::string Barrier::serialize() const
{
  /* TODO(@fclem): Better serialization... */
  return std::string(".barrier(") + std::to_string(type) + ")";
}

std::string Clear::serialize() const
{
  std::stringstream ss;
  if (eGPUFrameBufferBits(clear_channels) & GPU_COLOR_BIT) {
    ss << "color=" << color;
    if (eGPUFrameBufferBits(clear_channels) & (GPU_DEPTH_BIT | GPU_STENCIL_BIT)) {
      ss << ", ";
    }
  }
  if (eGPUFrameBufferBits(clear_channels) & GPU_DEPTH_BIT) {
    ss << "depth=" << depth;
    if (eGPUFrameBufferBits(clear_channels) & GPU_STENCIL_BIT) {
      ss << ", ";
    }
  }
  if (eGPUFrameBufferBits(clear_channels) & GPU_STENCIL_BIT) {
    ss << "stencil=0b" << std::bitset<8>(stencil);
  }
  return std::string(".clear(") + ss.str() + ")";
}

std::string ClearMulti::serialize() const
{
  std::stringstream ss;
  for (float4 color : Span<float4>(colors, colors_len)) {
    ss << color << ", ";
  }
  return std::string(".clear_multi(colors={") + ss.str() + "})";
}

std::string StateSet::serialize() const
{
  /* TODO(@fclem): Better serialization... */
  return std::string(".state_set(") + std::to_string(new_state) + ")";
}

std::string StencilSet::serialize() const
{
  std::stringstream ss;
  ss << ".stencil_set(write_mask=0b" << std::bitset<8>(write_mask) << ", reference=0b"
     << std::bitset<8>(reference) << ", compare_mask=0b" << std::bitset<8>(compare_mask) << ")";
  return ss.str();
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Commands buffers binding / command generation
 * \{ */

void DrawCommandBuf::finalize_commands(Vector<Header, 0> &headers,
                                       Vector<Undetermined, 0> &commands,
                                       SubPassVector &sub_passes,
                                       uint &resource_id_count,
                                       ResourceIdBuf &resource_id_buf)
{
  for (const Header &header : headers) {
    if (header.type == Type::SubPass) {
      /* Recurse into the sub-pass commands. */
      auto &sub = sub_passes[int64_t(header.index)];
      finalize_commands(
          sub.headers_, sub.commands_, sub_passes, resource_id_count, resource_id_buf);
    }

    if (header.type != Type::Draw) {
      continue;
    }

    Draw &cmd = commands[header.index].draw;

    int batch_vert_len, batch_vert_first, batch_base_index, batch_inst_len;
    /* Now that GPUBatches are guaranteed to be finished, extract their parameters. */
    GPU_batch_draw_parameter_get(
        cmd.batch, &batch_vert_len, &batch_vert_first, &batch_base_index, &batch_inst_len);
    /* Instancing attributes are not supported using the new pipeline since we use the base
     * instance to set the correct resource_id. Workaround is a storage_buf + gl_InstanceID. */
    BLI_assert(batch_inst_len == 1);

    if (cmd.vertex_len == uint(-1)) {
      cmd.vertex_len = batch_vert_len;
    }

#ifdef WITH_METAL_BACKEND
    /* For SSBO vertex fetch, mutate output vertex count by ssbo vertex fetch expansion factor. */
    if (cmd.shader) {
      int num_input_primitives = gpu_get_prim_count_from_type(cmd.vertex_len,
                                                              cmd.batch->prim_type);
      cmd.vertex_len = num_input_primitives *
                       GPU_shader_get_ssbo_vertex_fetch_num_verts_per_prim(cmd.shader);
    }
#endif

    if (cmd.handle.raw > 0) {
      /* Save correct offset to start of resource_id buffer region for this draw. */
      uint instance_first = resource_id_count;
      resource_id_count += cmd.instance_len;
      /* Ensure the buffer is big enough. */
      resource_id_buf.get_or_resize(resource_id_count - 1);

      /* Copy the resource id for all instances. */
      uint index = cmd.handle.resource_index();
      for (int i = instance_first; i < (instance_first + cmd.instance_len); i++) {
        resource_id_buf[i] = index;
      }
    }
  }
}
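
/* Illustration of the mapping built above (hypothetical values): a draw with
 * `handle.resource_index() == 7` and `instance_len == 3` appends `{7, 7, 7}` to
 * `resource_id_buf`, letting every instance recover its resource id from `gl_InstanceID` when
 * base-instance draw parameters are not supported. */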

void DrawCommandBuf::bind(RecordingState &state,
                          Vector<Header, 0> &headers,
                          Vector<Undetermined, 0> &commands,
                          SubPassVector &sub_passes)
{
  resource_id_count_ = 0;

  finalize_commands(headers, commands, sub_passes, resource_id_count_, resource_id_buf_);

  resource_id_buf_.push_update();

  if (GPU_shader_draw_parameters_support() == false) {
    state.resource_id_buf = resource_id_buf_;
  }
  else {
    GPU_storagebuf_bind(resource_id_buf_, DRW_RESOURCE_ID_SLOT);
  }
}

void DrawMultiBuf::bind(RecordingState &state,
                        Vector<Header, 0> & /*headers*/,
                        Vector<Undetermined, 0> & /*commands*/,
                        VisibilityBuf &visibility_buf,
                        int visibility_word_per_draw,
                        int view_len,
                        bool use_custom_ids)
{
  GPU_debug_group_begin("DrawMultiBuf.bind");

  resource_id_count_ = 0u;
  for (DrawGroup &group : MutableSpan<DrawGroup>(group_buf_.data(), group_count_)) {
    /* Compute prefix sum of all instances of previous groups. */
    group.start = resource_id_count_;
    resource_id_count_ += group.len;

    int batch_vert_len, batch_vert_first, batch_base_index, batch_inst_len;
    /* Now that GPUBatches are guaranteed to be finished, extract their parameters. */
    GPU_batch_draw_parameter_get(group.desc.gpu_batch,
                                 &batch_vert_len,
                                 &batch_vert_first,
                                 &batch_base_index,
                                 &batch_inst_len);

    group.vertex_len = group.desc.vertex_len == 0 ? batch_vert_len : group.desc.vertex_len;
    group.vertex_first = group.desc.vertex_first == -1 ? batch_vert_first :
                                                         group.desc.vertex_first;
    group.base_index = batch_base_index;
    /* Instancing attributes are not supported using the new pipeline since we use the base
     * instance to set the correct resource_id. Workaround is a storage_buf + gl_InstanceID. */
    BLI_assert(batch_inst_len == 1);
    UNUSED_VARS_NDEBUG(batch_inst_len);

    if (group.desc.expand_prim_type != GPU_PRIM_NONE) {
      /* Expanded drawcall. */
      IndexRange vert_range = GPU_batch_draw_expanded_parameter_get(
          group.desc.gpu_batch,
          GPUPrimType(group.desc.expand_prim_type),
          group.vertex_len,
          group.vertex_first);

      group.vertex_first = vert_range.start() * group.desc.expand_prim_len;
      group.vertex_len = vert_range.size() * group.desc.expand_prim_len;
      /* Override base index to -1 as the generated drawcall will not use an index buffer and do
       * the indirection manually inside the shader. */
      group.base_index = -1;
    }

#ifdef WITH_METAL_BACKEND
    /* For SSBO vertex fetch, mutate output vertex count by ssbo vertex fetch expansion factor. */
    if (group.desc.gpu_shader) {
      int num_input_primitives = gpu_get_prim_count_from_type(group.vertex_len,
                                                              group.desc.gpu_batch->prim_type);
      group.vertex_len = num_input_primitives *
                         GPU_shader_get_ssbo_vertex_fetch_num_verts_per_prim(
                             group.desc.gpu_shader);
      /* Override base index to -1, as all SSBO calls are submitted as non-indexed, with the
       * index buffer indirection handled within the implementation. This is to ensure
       * command generation can correctly assign baseInstance in the non-indexed formatting. */
      group.base_index = -1;
    }
#endif

    /* Reset counters to 0 for the GPU. */
    group.total_counter = group.front_facing_counter = group.back_facing_counter = 0;
  }

  group_buf_.push_update();
  prototype_buf_.push_update();
  /* Allocate enough for the expansion pass. */
  resource_id_buf_.get_or_resize(resource_id_count_ * view_len * (use_custom_ids ? 2 : 1));
  /* Two commands per group (inverted and non-inverted scale). */
  command_buf_.get_or_resize(group_count_ * 2);

  if (prototype_count_ > 0) {
    GPUShader *shader = DRW_shader_draw_command_generate_get();
    GPU_shader_bind(shader);
    GPU_shader_uniform_1i(shader, "prototype_len", prototype_count_);
    GPU_shader_uniform_1i(shader, "visibility_word_per_draw", visibility_word_per_draw);
    GPU_shader_uniform_1i(shader, "view_len", view_len);
    GPU_shader_uniform_1i(shader, "view_shift", log2_ceil_u(view_len));
    GPU_shader_uniform_1b(shader, "use_custom_ids", use_custom_ids);
    GPU_storagebuf_bind(group_buf_, GPU_shader_get_ssbo_binding(shader, "group_buf"));
    GPU_storagebuf_bind(visibility_buf, GPU_shader_get_ssbo_binding(shader, "visibility_buf"));
    GPU_storagebuf_bind(prototype_buf_, GPU_shader_get_ssbo_binding(shader, "prototype_buf"));
    GPU_storagebuf_bind(command_buf_, GPU_shader_get_ssbo_binding(shader, "command_buf"));
    GPU_storagebuf_bind(resource_id_buf_, DRW_RESOURCE_ID_SLOT);
    GPU_compute_dispatch(shader, divide_ceil_u(prototype_count_, DRW_COMMAND_GROUP_SIZE), 1, 1);
    if (GPU_shader_draw_parameters_support() == false) {
      GPU_memory_barrier(GPU_BARRIER_VERTEX_ATTRIB_ARRAY);
      state.resource_id_buf = resource_id_buf_;
    }
    else {
      GPU_memory_barrier(GPU_BARRIER_SHADER_STORAGE);
    }
    GPU_storagebuf_sync_as_indirect_buffer(command_buf_);
  }

  GPU_debug_group_end();
}
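
/* Summary of the GPU path set up above (behavior of the command generation shader, inferred from
 * the bindings): one thread per `DrawPrototype`, dispatched in groups of
 * `DRW_COMMAND_GROUP_SIZE`, tests visibility against `visibility_buf`, accumulates the per-group
 * front/back facing counters, writes one resource id per visible instance into
 * `resource_id_buf_`, and fills the two indirect `DrawCommand` slots per group that
 * `DrawMulti::execute` consumes. */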

/** \} */

}  // namespace blender::draw::command