Blender V4.3
draw_manager_exec.cc
/* SPDX-FileCopyrightText: 2016 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup draw
 */

#include "draw_manager_c.hh"

#include "BLI_alloca.h"
#include "BLI_math_bits.h"
#include "BLI_memblock.h"

#include "BKE_global.hh"

#include "GPU_compute.hh"
#include "GPU_platform.hh"
#include "GPU_shader.hh"
#include "GPU_state.hh"

#ifdef USE_GPU_SELECT
#  include "GPU_select.hh"
#endif

void DRW_select_load_id(uint id)
{
#ifdef USE_GPU_SELECT
  BLI_assert(G.f & G_FLAG_PICKSEL);
  DST.select_id = id;
#endif
}

#define DEBUG_UBO_BINDING

struct DRWCommandsState {
  blender::gpu::Batch *batch;
  int resource_chunk;
  int resource_id;
  int base_inst;
  int inst_count;
  bool neg_scale;
  /* Resource location. */
  int obmats_loc;
  int obinfos_loc;
  int obattrs_loc;
  int vlattrs_loc;
  int baseinst_loc;
  int chunkid_loc;
  int resourceid_loc;
  /* Legacy matrix support. */
  int obmat_loc;
  int obinv_loc;
  /* Uniform Attributes. */
  DRWSparseUniformBuf *obattrs_ubo;
  /* Selection ID state. */
  blender::gpu::VertBuf *select_buf;
  uint select_id;
  /* Drawing State */
  DRWState drw_state_enabled;
  DRWState drw_state_disabled;
};

/* -------------------------------------------------------------------- */
/** \name Draw State (DRW_state)
 * \{ */

void drw_state_set(DRWState state)
{
  /* Mask locked state. */
  state = (~DST.state_lock & state) | (DST.state_lock & DST.state);

  if (DST.state == state) {
    return;
  }

  eGPUWriteMask write_mask = eGPUWriteMask(0);
  eGPUBlend blend = eGPUBlend(0);
  eGPUFaceCullTest culling_test = eGPUFaceCullTest(0);
  eGPUDepthTest depth_test = eGPUDepthTest(0);
  eGPUStencilTest stencil_test = eGPUStencilTest(0);
  eGPUStencilOp stencil_op = eGPUStencilOp(0);
  eGPUProvokingVertex provoking_vert = eGPUProvokingVertex(0);

  if (state & DRW_STATE_WRITE_DEPTH) {
    write_mask |= GPU_WRITE_DEPTH;
  }
  if (state & DRW_STATE_WRITE_COLOR) {
    write_mask |= GPU_WRITE_COLOR;
  }
  if (state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    write_mask |= GPU_WRITE_STENCIL;
  }

  switch (state & (DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT)) {
    case DRW_STATE_CULL_BACK:
      culling_test = GPU_CULL_BACK;
      break;
    case DRW_STATE_CULL_FRONT:
      culling_test = GPU_CULL_FRONT;
      break;
    default:
      culling_test = GPU_CULL_NONE;
      break;
  }

  switch (state & DRW_STATE_DEPTH_TEST_ENABLED) {
    case DRW_STATE_DEPTH_LESS:
      depth_test = GPU_DEPTH_LESS;
      break;
    case DRW_STATE_DEPTH_LESS_EQUAL:
      depth_test = GPU_DEPTH_LESS_EQUAL;
      break;
    case DRW_STATE_DEPTH_EQUAL:
      depth_test = GPU_DEPTH_EQUAL;
      break;
    case DRW_STATE_DEPTH_GREATER:
      depth_test = GPU_DEPTH_GREATER;
      break;
    case DRW_STATE_DEPTH_GREATER_EQUAL:
      depth_test = GPU_DEPTH_GREATER_EQUAL;
      break;
    case DRW_STATE_DEPTH_ALWAYS:
      depth_test = GPU_DEPTH_ALWAYS;
      break;
    default:
      depth_test = GPU_DEPTH_NONE;
      break;
  }

  switch (state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    case DRW_STATE_WRITE_STENCIL:
      stencil_op = GPU_STENCIL_OP_REPLACE;
      GPU_stencil_write_mask_set(0xFF);
      break;
    case DRW_STATE_WRITE_STENCIL_SHADOW_PASS:
      stencil_op = GPU_STENCIL_OP_COUNT_DEPTH_PASS;
      GPU_stencil_write_mask_set(0xFF);
      break;
    case DRW_STATE_WRITE_STENCIL_SHADOW_FAIL:
      stencil_op = GPU_STENCIL_OP_COUNT_DEPTH_FAIL;
      GPU_stencil_write_mask_set(0xFF);
      break;
    default:
      stencil_op = GPU_STENCIL_OP_NONE;
      GPU_stencil_write_mask_set(0x00);
      break;
  }

  switch (state & DRW_STATE_STENCIL_TEST_ENABLED) {
    case DRW_STATE_STENCIL_ALWAYS:
      stencil_test = GPU_STENCIL_ALWAYS;
      break;
    case DRW_STATE_STENCIL_EQUAL:
      stencil_test = GPU_STENCIL_EQUAL;
      break;
    case DRW_STATE_STENCIL_NEQUAL:
      stencil_test = GPU_STENCIL_NEQUAL;
      break;
    default:
      stencil_test = GPU_STENCIL_NONE;
      break;
  }

  switch (state & DRW_STATE_BLEND_ENABLED) {
    case DRW_STATE_BLEND_ADD:
      blend = GPU_BLEND_ADDITIVE;
      break;
    case DRW_STATE_BLEND_ADD_FULL:
      blend = GPU_BLEND_ADDITIVE_PREMULT;
      break;
    case DRW_STATE_BLEND_ALPHA:
      blend = GPU_BLEND_ALPHA;
      break;
    case DRW_STATE_BLEND_ALPHA_PREMUL:
      blend = GPU_BLEND_ALPHA_PREMULT;
      break;
    case DRW_STATE_BLEND_BACKGROUND:
      blend = GPU_BLEND_BACKGROUND;
      break;
    case DRW_STATE_BLEND_OIT:
      blend = GPU_BLEND_OIT;
      break;
    case DRW_STATE_BLEND_MUL:
      blend = GPU_BLEND_MULTIPLY;
      break;
    case DRW_STATE_BLEND_SUB:
      blend = GPU_BLEND_SUBTRACT;
      break;
    case DRW_STATE_BLEND_CUSTOM:
      blend = GPU_BLEND_CUSTOM;
      break;
    case DRW_STATE_LOGIC_INVERT:
      blend = GPU_BLEND_INVERT;
      break;
    case DRW_STATE_BLEND_ALPHA_UNDER_PREMUL:
      blend = GPU_BLEND_ALPHA_UNDER_PREMUL;
      break;
    default:
      blend = GPU_BLEND_NONE;
      break;
  }

  GPU_state_set(
      write_mask, blend, culling_test, depth_test, stencil_test, stencil_op, provoking_vert);

  if (state & DRW_STATE_SHADOW_OFFSET) {
    GPU_shadow_offset(true);
  }
  else {
    GPU_shadow_offset(false);
  }

  /* TODO: this should be part of shader state. */
  if (state & DRW_STATE_CLIP_PLANES) {
    GPU_clip_distances(DST.view_active->clip_planes_len);
  }
  else {
    GPU_clip_distances(0);
  }

  if (state & DRW_STATE_IN_FRONT_SELECT) {
    /* XXX `GPU_depth_range` is not a perfect solution
     * since very distant geometries can still be occluded.
     * Also the depth test precision of these geometries is impaired.
     * However, it solves the selection for the vast majority of cases. */
    GPU_depth_range(0.0f, 0.01f);
  }
  else {
    GPU_depth_range(0.0f, 1.0f);
  }

  if (state & DRW_STATE_FIRST_VERTEX_CONVENTION) {
    GPU_provoking_vertex(GPU_VERTEX_FIRST);
  }
  else {
    GPU_provoking_vertex(GPU_VERTEX_LAST);
  }

  if (state & DRW_STATE_PROGRAM_POINT_SIZE) {
    GPU_program_point_size(true);
  }
  else {
    GPU_program_point_size(false);
  }

  DST.state = state;
}

static void drw_stencil_state_set(uint write_mask, uint reference, uint compare_mask)
{
  /* Reminders:
   * - (compare_mask & reference) is what is tested against (compare_mask & stencil_value)
   *   stencil_value being the value stored in the stencil buffer.
   * - (write-mask & reference) is what gets written if the test condition is fulfilled.
   */
  GPU_stencil_write_mask_set(write_mask);
  GPU_stencil_reference_set(reference);
  GPU_stencil_compare_mask_set(compare_mask);
}

void DRW_state_reset_ex(DRWState state)
{
  DST.state = ~state;
  drw_state_set(state);
}

static void drw_state_validate()
{
  /* Cannot write to stencil buffer without stencil test. */
  if (DST.state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    BLI_assert(DST.state & DRW_STATE_STENCIL_TEST_ENABLED);
  }
  /* Cannot write to depth buffer without depth test. */
  if (DST.state & DRW_STATE_WRITE_DEPTH) {
    BLI_assert(DST.state & DRW_STATE_DEPTH_TEST_ENABLED);
  }
}

void DRW_state_lock(DRWState state)
{
  DST.state_lock = state;

  /* We must get the current state to avoid overriding it. */
  /* Not complete, but that just what we need for now. */
  if (state & DRW_STATE_WRITE_DEPTH) {
    SET_FLAG_FROM_TEST(DST.state, GPU_depth_mask_get(), DRW_STATE_WRITE_DEPTH);
  }
  if (state & DRW_STATE_DEPTH_TEST_ENABLED) {
    DST.state &= ~DRW_STATE_DEPTH_TEST_ENABLED;

    switch (GPU_depth_test_get()) {
      case GPU_DEPTH_ALWAYS:
        DST.state |= DRW_STATE_DEPTH_ALWAYS;
        break;
      case GPU_DEPTH_LESS:
        DST.state |= DRW_STATE_DEPTH_LESS;
        break;
      case GPU_DEPTH_LESS_EQUAL:
        DST.state |= DRW_STATE_DEPTH_LESS_EQUAL;
        break;
      case GPU_DEPTH_EQUAL:
        DST.state |= DRW_STATE_DEPTH_EQUAL;
        break;
      case GPU_DEPTH_GREATER:
        DST.state |= DRW_STATE_DEPTH_GREATER;
        break;
      case GPU_DEPTH_GREATER_EQUAL:
        DST.state |= DRW_STATE_DEPTH_GREATER_EQUAL;
        break;
      default:
        break;
    }
  }
}

void DRW_state_reset()
{
  DRW_state_reset_ex(DRW_STATE_DEFAULT);

  GPU_texture_unbind_all();
  GPU_texture_image_unbind_all();
  GPU_uniformbuf_debug_unbind_all();
  GPU_storagebuf_debug_unbind_all();

  /* Should stay constant during the whole rendering. */
  GPU_point_size(5);
  GPU_line_smooth(false);
  /* Bypass #U.pixelsize factor by using a factor of 0.0f. Will be clamped to 1.0f. */
  GPU_line_width(0.0f);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Culling (DRW_culling)
 * \{ */

static bool draw_call_is_culled(const DRWResourceHandle *handle, DRWView *view)
{
  DRWCullingState *culling = static_cast<DRWCullingState *>(
      DRW_memblock_elem_from_handle(DST.vmempool->cullstates, handle));
  return (culling->mask & view->culling_mask) != 0;
}

void DRW_view_set_active(const DRWView *view)
{
  DST.view_active = (view != nullptr) ? ((DRWView *)view) : DST.view_default;
}

const DRWView *DRW_view_get_active()
{
  return DST.view_active;
}

/* Return True if the given BoundSphere intersect the current view frustum */
static bool draw_culling_sphere_test(const BoundSphere *frustum_bsphere,
                                     const float (*frustum_planes)[4],
                                     const BoundSphere *bsphere)
{
  /* Bypass test if radius is negative. */
  if (bsphere->radius < 0.0f) {
    return true;
  }

  /* Do a rough test first: Sphere VS Sphere intersect. */
  float center_dist_sq = len_squared_v3v3(bsphere->center, frustum_bsphere->center);
  float radius_sum = bsphere->radius + frustum_bsphere->radius;
  if (center_dist_sq > square_f(radius_sum)) {
    return false;
  }
  /* TODO: we could test against the inscribed sphere of the frustum to early out positively. */

  /* Test against the 6 frustum planes. */
  /* TODO: order planes with sides first then far then near clip. Should be better culling
   * heuristic when sculpting. */
  for (int p = 0; p < 6; p++) {
    float dist = plane_point_side_v3(frustum_planes[p], bsphere->center);
    if (dist < -bsphere->radius) {
      return false;
    }
  }
  return true;
}

static bool draw_culling_box_test(const float (*frustum_planes)[4], const BoundBox *bbox)
{
  /* 6 view frustum planes */
  for (int p = 0; p < 6; p++) {
    /* 8 box vertices. */
    for (int v = 0; v < 8; v++) {
      float dist = plane_point_side_v3(frustum_planes[p], bbox->vec[v]);
      if (dist > 0.0f) {
        /* At least one point in front of this plane.
         * Go to next plane. */
        break;
      }
      if (v == 7) {
        /* 8 points behind this plane. */
        return false;
      }
    }
  }
  return true;
}

static bool draw_culling_plane_test(const BoundBox *corners, const float plane[4])
{
  /* Test against the 8 frustum corners. */
  for (int c = 0; c < 8; c++) {
    float dist = plane_point_side_v3(plane, corners->vec[c]);
    if (dist < 0.0f) {
      return true;
    }
  }
  return false;
}

bool DRW_culling_sphere_test(const DRWView *view, const BoundSphere *bsphere)
{
  view = view ? view : DST.view_default;
  return draw_culling_sphere_test(&view->frustum_bsphere, view->frustum_planes, bsphere);
}

bool DRW_culling_box_test(const DRWView *view, const BoundBox *bbox)
{
  view = view ? view : DST.view_default;
  return draw_culling_box_test(view->frustum_planes, bbox);
}

bool DRW_culling_plane_test(const DRWView *view, const float plane[4])
{
  view = view ? view : DST.view_default;
  return draw_culling_plane_test(&view->frustum_corners, plane);
}

bool DRW_culling_min_max_test(const DRWView *view, float obmat[4][4], float min[3], float max[3])
{
  view = view ? view : DST.view_default;
  float tobmat[4][4];
  transpose_m4_m4(tobmat, obmat);
  for (int i = 6; i--;) {
    float frustum_plane_local[4], bb_near[3], bb_far[3];
    mul_v4_m4v4(frustum_plane_local, tobmat, view->frustum_planes[i]);
    aabb_get_near_far_from_plane(frustum_plane_local, min, max, bb_near, bb_far);

    if (plane_point_side_v3(frustum_plane_local, bb_far) < 0.0f) {
      return false;
    }
  }

  return true;
}

void DRW_culling_frustum_corners_get(const DRWView *view, BoundBox *corners)
{
  view = view ? view : DST.view_default;
  *corners = view->frustum_corners;
}

void DRW_culling_frustum_planes_get(const DRWView *view, float planes[6][4])
{
  view = view ? view : DST.view_default;
  memcpy(planes, view->frustum_planes, sizeof(float[6][4]));
}

static void draw_compute_culling(DRWView *view)
{
  view = view->parent ? view->parent : view;

  /* TODO(fclem): multi-thread this. */
  /* TODO(fclem): compute all dirty views at once. */
  if (!view->is_dirty) {
    return;
  }

  BLI_memblock_iter iter;
  BLI_memblock_iternew(DST.vmempool->cullstates, &iter);
  DRWCullingState *cull;
  while ((cull = static_cast<DRWCullingState *>(BLI_memblock_iterstep(&iter)))) {
    if (cull->bsphere.radius < 0.0) {
      cull->mask = 0;
    }
    else {
      bool culled = !draw_culling_sphere_test(
          &view->frustum_bsphere, view->frustum_planes, &cull->bsphere);

#ifdef DRW_DEBUG_CULLING
      if (G.debug_value != 0) {
        if (culled) {
          DRW_debug_sphere(
              cull->bsphere.center, cull->bsphere.radius, blender::float4{1, 0, 0, 1});
        }
        else {
          DRW_debug_sphere(
              cull->bsphere.center, cull->bsphere.radius, blender::float4{0, 1, 0, 1});
        }
      }
#endif

      if (view->visibility_fn) {
        culled = !view->visibility_fn(!culled, cull->user_data);
      }

      SET_FLAG_FROM_TEST(cull->mask, culled, view->culling_mask);
    }
  }

  view->is_dirty = false;
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Draw (DRW_draw)
 * \{ */

BLI_INLINE void draw_legacy_matrix_update(DRWShadingGroup *shgroup,
                                          DRWResourceHandle *handle,
                                          float obmat_loc,
                                          float obinv_loc)
{
  /* Still supported for compatibility with gpu_shader_* but should be forbidden. */
  DRWObjectMatrix *ob_mats = static_cast<DRWObjectMatrix *>(
      DRW_memblock_elem_from_handle(DST.vmempool->obmats, handle));
  if (obmat_loc != -1) {
    GPU_shader_uniform_float_ex(shgroup->shader, obmat_loc, 16, 1, (float *)ob_mats->model);
  }
  if (obinv_loc != -1) {
    GPU_shader_uniform_float_ex(shgroup->shader, obinv_loc, 16, 1, (float *)ob_mats->modelinverse);
  }
}

BLI_INLINE void draw_geometry_bind(DRWShadingGroup *shgroup, blender::gpu::Batch *geom)
{
  DST.batch = geom;

  GPU_batch_set_shader(geom, shgroup->shader);
}

BLI_INLINE void draw_geometry_execute(DRWShadingGroup *shgroup,
                                      blender::gpu::Batch *geom,
                                      int vert_first,
                                      int vert_count,
                                      int inst_first,
                                      int inst_count,
                                      int baseinst_loc)
{
  /* inst_count can be -1. */
  inst_count = max_ii(0, inst_count);

  if (baseinst_loc != -1) {
    /* Fallback when ARB_shader_draw_parameters is not supported. */
    GPU_shader_uniform_int_ex(shgroup->shader, baseinst_loc, 1, 1, (int *)&inst_first);
    /* Avoids VAO reconfiguration on older hardware. (see GPU_batch_draw_advanced) */
    inst_first = 0;
  }

  /* bind vertex array */
  if (DST.batch != geom) {
    draw_geometry_bind(shgroup, geom);
  }

  GPU_batch_draw_advanced(geom, vert_first, vert_count, inst_first, inst_count);
}

BLI_INLINE void draw_indirect_call(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  if (state->inst_count == 0) {
    return;
  }
  if (state->baseinst_loc == -1) {
    /* bind vertex array */
    if (DST.batch != state->batch) {
      GPU_draw_list_submit(DST.draw_list);
      draw_geometry_bind(shgroup, state->batch);
    }
    GPU_draw_list_append(DST.draw_list, state->batch, state->base_inst, state->inst_count);
  }
  /* Fallback when unsupported */
  else {
    draw_geometry_execute(
        shgroup, state->batch, 0, 0, state->base_inst, state->inst_count, state->baseinst_loc);
  }
}

static void draw_update_uniforms(DRWShadingGroup *shgroup,
                                 DRWCommandsState *state,
                                 bool *use_tfeedback)
{
#define MAX_UNIFORM_STACK_SIZE 64

  /* Uniform array elements stored as separate entries. We need to batch these together */
  int array_uniform_loc = -1;
  int array_index = 0;
  float mat4_stack[4 * 4];

  /* Loop through uniforms in reverse order. */
  for (DRWUniformChunk *unichunk = shgroup->uniforms; unichunk; unichunk = unichunk->next) {
    DRWUniform *uni = unichunk->uniforms + unichunk->uniform_used - 1;

    for (int i = 0; i < unichunk->uniform_used; i++, uni--) {
      /* For uniform array copies, copy per-array-element data into local buffer before upload. */
      if (uni->arraysize > 1 && uni->type == DRW_UNIFORM_FLOAT_COPY) {
        /* Only written for mat4 copy for now and is not meant to become generalized. */
        /* TODO(@fclem): Use UBOs/SSBOs instead of inline mat4 copies. */
        BLI_assert(uni->arraysize == 4 && uni->length == 4);
        /* Begin copying uniform array. */
        if (array_uniform_loc == -1) {
          array_uniform_loc = uni->location;
          array_index = uni->arraysize * uni->length;
        }
        /* Debug check same array loc. */
        BLI_assert(array_uniform_loc > -1 && array_uniform_loc == uni->location);
        /* Copy array element data to local buffer. */
        array_index -= uni->length;
        memcpy(&mat4_stack[array_index], uni->fvalue, sizeof(float) * uni->length);
        /* Flush array data to shader. */
        if (array_index <= 0) {
          GPU_shader_uniform_float_ex(shgroup->shader, uni->location, 16, 1, mat4_stack);
          array_uniform_loc = -1;
        }
        continue;
      }

      /* Handle standard cases. */
      switch (uni->type) {
        case DRW_UNIFORM_INT_COPY:
          BLI_assert(uni->arraysize == 1);
          if (uni->arraysize == 1) {
            GPU_shader_uniform_int_ex(
                shgroup->shader, uni->location, uni->length, uni->arraysize, uni->ivalue);
          }
          break;
        case DRW_UNIFORM_INT:
          GPU_shader_uniform_int_ex(shgroup->shader,
                                    uni->location,
                                    uni->length,
                                    uni->arraysize,
                                    static_cast<const int *>(uni->pvalue));
          break;
        case DRW_UNIFORM_FLOAT_COPY:
          BLI_assert(uni->arraysize == 1);
          if (uni->arraysize == 1) {
            GPU_shader_uniform_float_ex(
                shgroup->shader, uni->location, uni->length, uni->arraysize, uni->fvalue);
          }
          break;
        case DRW_UNIFORM_FLOAT:
          GPU_shader_uniform_float_ex(shgroup->shader,
                                      uni->location,
                                      uni->length,
                                      uni->arraysize,
                                      static_cast<const float *>(uni->pvalue));
          break;
        case DRW_UNIFORM_TEXTURE:
          GPU_texture_bind_ex(uni->texture, uni->sampler_state, uni->location);
          break;
        case DRW_UNIFORM_TEXTURE_REF:
          GPU_texture_bind_ex(*uni->texture_ref, uni->sampler_state, uni->location);
          break;
        case DRW_UNIFORM_IMAGE:
          GPU_texture_image_bind(uni->texture, uni->location);
          break;
        case DRW_UNIFORM_IMAGE_REF:
          GPU_texture_image_bind(*uni->texture_ref, uni->location);
          break;
        case DRW_UNIFORM_BLOCK:
          GPU_uniformbuf_bind(uni->block, uni->location);
          break;
        case DRW_UNIFORM_BLOCK_REF:
          GPU_uniformbuf_bind(*uni->block_ref, uni->location);
          break;
        case DRW_UNIFORM_STORAGE_BLOCK:
          GPU_storagebuf_bind(uni->ssbo, uni->location);
          break;
        case DRW_UNIFORM_STORAGE_BLOCK_REF:
          GPU_storagebuf_bind(*uni->ssbo_ref, uni->location);
          break;
        case DRW_UNIFORM_BLOCK_OBMATS:
          state->obmats_loc = uni->location;
          GPU_uniformbuf_bind(DST.vmempool->matrices_ubo[0], uni->location);
          break;
        case DRW_UNIFORM_BLOCK_OBINFOS:
          state->obinfos_loc = uni->location;
          GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[0], uni->location);
          break;
        case DRW_UNIFORM_BLOCK_OBATTRS:
          state->obattrs_loc = uni->location;
          state->obattrs_ubo = DRW_uniform_attrs_pool_find_ubo(DST.vmempool->obattrs_ubo_pool,
                                                               uni->uniform_attrs);
          DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, 0, uni->location);
          break;
        case DRW_UNIFORM_BLOCK_VLATTRS:
          state->vlattrs_loc = uni->location;
          GPU_uniformbuf_bind(drw_ensure_layer_attribute_buffer(), uni->location);
          break;
        case DRW_UNIFORM_RESOURCE_CHUNK: {
          state->chunkid_loc = uni->location;
          int zero = 0;
          GPU_shader_uniform_int_ex(shgroup->shader, uni->location, 1, 1, &zero);
          break;
        }
        case DRW_UNIFORM_RESOURCE_ID:
          state->resourceid_loc = uni->location;
          break;
        case DRW_UNIFORM_TFEEDBACK_TARGET:
          BLI_assert(uni->pvalue && (*use_tfeedback == false));
          *use_tfeedback = GPU_shader_transform_feedback_enable(
              shgroup->shader, ((blender::gpu::VertBuf *)uni->pvalue));
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE:
          GPU_vertbuf_bind_as_ssbo(uni->vertbuf, uni->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF:
          GPU_vertbuf_bind_as_ssbo(*uni->vertbuf_ref, uni->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE:
          GPU_vertbuf_bind_as_texture(uni->vertbuf, uni->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF:
          GPU_vertbuf_bind_as_texture(*uni->vertbuf_ref, uni->location);
          break;
          /* Legacy/Fallback support. */
        case DRW_UNIFORM_BASE_INSTANCE:
          state->baseinst_loc = uni->location;
          break;
        case DRW_UNIFORM_MODEL_MATRIX:
          state->obmat_loc = uni->location;
          break;
        case DRW_UNIFORM_MODEL_MATRIX_INVERSE:
          state->obinv_loc = uni->location;
          break;
      }
    }
  }
  /* Ensure uniform arrays copied. */
  BLI_assert(array_index == 0);
  BLI_assert(array_uniform_loc == -1);
  UNUSED_VARS_NDEBUG(array_uniform_loc);
}

BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup,
                                   DRWCommandsState *state,
                                   blender::gpu::Batch *batch,
                                   const DRWResourceHandle *handle)
{
  const bool is_instancing = (batch->inst[0] != nullptr);
  int start = 0;
  int count = 1;
  int tot = is_instancing ? GPU_vertbuf_get_vertex_len(batch->inst[0]) :
                            GPU_vertbuf_get_vertex_len(batch->verts[0]);
  /* HACK: get VBO data without actually drawing. */
  int *select_id = state->select_buf->data<int>().data();

  /* Batching */
  if (!is_instancing) {
    /* FIXME: Meh a bit nasty. */
    if (batch->prim_type == GPU_PRIM_TRIS) {
      count = 3;
    }
    else if (batch->prim_type == GPU_PRIM_LINES) {
      count = 2;
    }
  }

  while (start < tot) {
    GPU_select_load_id(select_id[start]);
    if (is_instancing) {
      draw_geometry_execute(shgroup, batch, 0, 0, start, count, state->baseinst_loc);
    }
    else {
      draw_geometry_execute(
          shgroup, batch, start, count, DRW_handle_id_get(handle), 0, state->baseinst_loc);
    }
    start += count;
  }
}

struct DRWCommandIterator {
  DRWCommandChunk *curr_chunk;
  int cmd_index;
};

static void draw_command_iter_begin(DRWCommandIterator *iter, DRWShadingGroup *shgroup)
{
  iter->curr_chunk = shgroup->cmd.first;
  iter->cmd_index = 0;
}

static DRWCommand *draw_command_iter_step(DRWCommandIterator *iter, eDRWCommandType *cmd_type)
{
  if (iter->curr_chunk) {
    if (iter->cmd_index == iter->curr_chunk->command_len) {
      iter->curr_chunk = iter->curr_chunk->next;
      iter->cmd_index = 0;
    }
    if (iter->curr_chunk) {
      *cmd_type = command_type_get(iter->curr_chunk->command_type, iter->cmd_index);
      if (iter->cmd_index < iter->curr_chunk->command_used) {
        return iter->curr_chunk->commands + iter->cmd_index++;
      }
    }
  }
  return nullptr;
}

static void draw_call_resource_bind(DRWCommandsState *state, const DRWResourceHandle *handle)
{
  /* Front face is not a resource but it is inside the resource handle. */
  bool neg_scale = DRW_handle_negative_scale_get(handle);
  if (neg_scale != state->neg_scale) {
    state->neg_scale = neg_scale;
    GPU_front_facing(neg_scale != 0);
  }

  int chunk = DRW_handle_chunk_get(handle);
  if (state->resource_chunk != chunk) {
    if (state->chunkid_loc != -1) {
      GPU_shader_uniform_int_ex(DST.shader, state->chunkid_loc, 1, 1, &chunk);
    }
    if (state->obmats_loc != -1) {
      GPU_uniformbuf_unbind(DST.vmempool->matrices_ubo[state->resource_chunk]);
      GPU_uniformbuf_bind(DST.vmempool->matrices_ubo[chunk], state->obmats_loc);
    }
    if (state->obinfos_loc != -1) {
      GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
      GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[chunk], state->obinfos_loc);
    }
    if (state->obattrs_loc != -1) {
      DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
      DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, chunk, state->obattrs_loc);
    }
    state->resource_chunk = chunk;
  }

  if (state->resourceid_loc != -1) {
    int id = DRW_handle_id_get(handle);
    if (state->resource_id != id) {
      GPU_shader_uniform_int_ex(DST.shader, state->resourceid_loc, 1, 1, &id);
      state->resource_id = id;
    }
  }
}

static void draw_call_batching_flush(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  draw_indirect_call(shgroup, state);
  GPU_draw_list_submit(DST.draw_list);

  state->batch = nullptr;
  state->inst_count = 0;
  state->base_inst = -1;
}

static void draw_call_single_do(DRWShadingGroup *shgroup,
                                DRWCommandsState *state,
                                blender::gpu::Batch *batch,
                                DRWResourceHandle handle,
                                int vert_first,
                                int vert_count,
                                int inst_first,
                                int inst_count,
                                bool do_base_instance)
{
  draw_call_batching_flush(shgroup, state);

  draw_call_resource_bind(state, &handle);

  /* TODO: This is Legacy. Need to be removed. */
  if (state->obmats_loc == -1 && (state->obmat_loc != -1 || state->obinv_loc != -1)) {
    draw_legacy_matrix_update(shgroup, &handle, state->obmat_loc, state->obinv_loc);
  }

  if (G.f & G_FLAG_PICKSEL) {
    if (state->select_buf != nullptr) {
      draw_select_buffer(shgroup, state, batch, &handle);
      return;
    }

    GPU_select_load_id(state->select_id);
  }

  draw_geometry_execute(shgroup,
                        batch,
                        vert_first,
                        vert_count,
                        do_base_instance ? DRW_handle_id_get(&handle) : inst_first,
                        inst_count,
                        state->baseinst_loc);
}

/* Not to be mistaken with draw_indirect_call which does batch many drawcalls together. This one
 * only execute an indirect drawcall with user indirect buffer. */
static void draw_call_indirect(DRWShadingGroup *shgroup,
                               DRWCommandsState *state,
                               blender::gpu::Batch *batch,
                               DRWResourceHandle handle,
                               GPUStorageBuf *indirect_buf)
{
  draw_call_batching_flush(shgroup, state);
  draw_call_resource_bind(state, &handle);

  if (G.f & G_FLAG_PICKSEL) {
    GPU_select_load_id(state->select_id);
  }

  GPU_batch_set_shader(batch, shgroup->shader);
  GPU_batch_draw_indirect(batch, indirect_buf, 0);
}

static void draw_call_batching_start(DRWCommandsState *state)
{
  state->neg_scale = false;
  state->resource_chunk = 0;
  state->resource_id = -1;
  state->base_inst = 0;
  state->inst_count = 0;
  state->batch = nullptr;

  state->select_id = -1;
  state->select_buf = nullptr;
}

/* NOTE: Does not support batches with instancing VBOs. */
static void draw_call_batching_do(DRWShadingGroup *shgroup,
                                  DRWCommandsState *state,
                                  DRWCommandDraw *call)
{
  /* If any condition requires to interrupt the merging. */
  bool neg_scale = DRW_handle_negative_scale_get(&call->handle);
  int chunk = DRW_handle_chunk_get(&call->handle);
  int id = DRW_handle_id_get(&call->handle);
  if ((state->neg_scale != neg_scale) ||  /* Need to change state. */
      (state->resource_chunk != chunk) || /* Need to change UBOs. */
      (state->batch != call->batch)       /* Need to change VAO. */
  )
  {
    draw_call_batching_flush(shgroup, state);

    state->batch = call->batch;
    state->inst_count = 1;
    state->base_inst = id;

    draw_call_resource_bind(state, &call->handle);
  }
  /* Is the id consecutive? */
  else if (id != state->base_inst + state->inst_count) {
    /* We need to add a draw command for the pending instances. */
    draw_indirect_call(shgroup, state);
    state->inst_count = 1;
    state->base_inst = id;
  }
  /* We avoid a drawcall by merging with the precedent
   * drawcall using instancing. */
  else {
    state->inst_count++;
  }
}

/* Flush remaining pending drawcalls. */
static void draw_call_batching_finish(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  draw_call_batching_flush(shgroup, state);

  /* Reset state */
  if (state->neg_scale) {
    GPU_front_facing(false);
  }
  if (state->obmats_loc != -1) {
    GPU_uniformbuf_unbind(DST.vmempool->matrices_ubo[state->resource_chunk]);
  }
  if (state->obinfos_loc != -1) {
    GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
  }
  if (state->obattrs_loc != -1) {
    DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
  }
  if (state->vlattrs_loc != -1) {
    GPU_uniformbuf_unbind(DST.vmempool->vlattrs_ubo);
  }
}

static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
  BLI_assert(shgroup->shader);

  DRWCommandsState state = {};
  state.obmats_loc = -1;
  state.obinfos_loc = -1;
  state.obattrs_loc = -1;
  state.vlattrs_loc = -1;
  state.baseinst_loc = -1;
  state.chunkid_loc = -1;
  state.resourceid_loc = -1;
  state.obmat_loc = -1;
  state.obinv_loc = -1;
  state.obattrs_ubo = nullptr;
  state.drw_state_enabled = DRWState(0);
  state.drw_state_disabled = DRWState(0);

  const bool shader_changed = (DST.shader != shgroup->shader);
  bool use_tfeedback = false;

  if (shader_changed) {
    if (DST.shader) {
      GPU_shader_unbind();

      /* Unbinding can be costly. Skip in normal condition. */
      if (G.debug & G_DEBUG_GPU) {
        GPU_texture_unbind_all();
        GPU_texture_image_unbind_all();
        GPU_uniformbuf_debug_unbind_all();
        GPU_storagebuf_debug_unbind_all();
      }
    }
    GPU_shader_bind(shgroup->shader);
    DST.shader = shgroup->shader;
    DST.batch = nullptr;
  }

  draw_update_uniforms(shgroup, &state, &use_tfeedback);

  drw_state_set(pass_state);

  /* Rendering Calls */
  {
    DRWCommandIterator iter;
    DRWCommand *cmd;
    eDRWCommandType cmd_type;

    draw_command_iter_begin(&iter, shgroup);

    draw_call_batching_start(&state);

    while ((cmd = draw_command_iter_step(&iter, &cmd_type))) {

      switch (cmd_type) {
        /* Commands that interrupt draw-call merging. */
        case DRW_CMD_DRWSTATE:
        case DRW_CMD_STENCIL:
          draw_call_batching_flush(shgroup, &state);
          break;
        case DRW_CMD_DRAW:
        case DRW_CMD_DRAW_PROCEDURAL:
        case DRW_CMD_DRAW_INSTANCE:
          if (draw_call_is_culled(&cmd->instance.handle, DST.view_active)) {
            continue;
          }
          break;
        default:
          break;
      }

      switch (cmd_type) {
        case DRW_CMD_CLEAR:
          GPU_framebuffer_clear(GPU_framebuffer_active_get(),
                                cmd->clear.clear_channels,
                                blender::float4{cmd->clear.r / 255.0f,
                                                cmd->clear.g / 255.0f,
                                                cmd->clear.b / 255.0f,
                                                cmd->clear.a / 255.0f},
                                cmd->clear.depth,
                                cmd->clear.stencil);
          break;
        case DRW_CMD_DRWSTATE:
          state.drw_state_enabled |= cmd->state.enable;
          state.drw_state_disabled |= cmd->state.disable;
          drw_state_set((pass_state & ~state.drw_state_disabled) | state.drw_state_enabled);
          break;
        case DRW_CMD_STENCIL:
          drw_stencil_state_set(cmd->stencil.write_mask, cmd->stencil.ref, cmd->stencil.comp_mask);
          break;
        case DRW_CMD_SELECTID:
          state.select_id = cmd->select_id.select_id;
          state.select_buf = cmd->select_id.select_buf;
          break;
        case DRW_CMD_DRAW:
          if (!USE_BATCHING || state.obmats_loc == -1 || (G.f & G_FLAG_PICKSEL) ||
              cmd->draw.batch->inst[0])
          {
            draw_call_single_do(
                shgroup, &state, cmd->draw.batch, cmd->draw.handle, 0, 0, 0, 0, true);
          }
          else {
            draw_call_batching_do(shgroup, &state, &cmd->draw);
          }
          break;
        case DRW_CMD_DRAW_PROCEDURAL:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->procedural.batch,
                              cmd->procedural.handle,
                              0,
                              cmd->procedural.vert_count,
                              0,
                              1,
                              true);
          break;
        case DRW_CMD_DRAW_INDIRECT:
          draw_call_indirect(shgroup,
                             &state,
                             cmd->draw_indirect.batch,
                             cmd->draw_indirect.handle,
                             cmd->draw_indirect.indirect_buf);
          break;
        case DRW_CMD_DRAW_INSTANCE:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->instance.batch,
                              cmd->instance.handle,
                              0,
                              0,
                              0,
                              cmd->instance.inst_count,
                              cmd->instance.use_attrs == 0);
          break;
        case DRW_CMD_DRAW_RANGE:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->range.batch,
                              cmd->range.handle,
                              cmd->range.vert_first,
                              cmd->range.vert_count,
                              0,
                              1,
                              true);
          break;
        case DRW_CMD_DRAW_INSTANCE_RANGE:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->instance_range.batch,
                              cmd->instance_range.handle,
                              0,
                              0,
                              cmd->instance_range.inst_first,
                              cmd->instance_range.inst_count,
                              false);
          break;
        case DRW_CMD_COMPUTE:
          GPU_compute_dispatch(shgroup->shader,
                               cmd->compute.groups_x_len,
                               cmd->compute.groups_y_len,
                               cmd->compute.groups_z_len);
          break;
        case DRW_CMD_COMPUTE_REF:
          GPU_compute_dispatch(shgroup->shader,
                               cmd->compute_ref.groups_ref[0],
                               cmd->compute_ref.groups_ref[1],
                               cmd->compute_ref.groups_ref[2]);
          break;
        case DRW_CMD_COMPUTE_INDIRECT:
          GPU_compute_dispatch_indirect(shgroup->shader, cmd->compute_indirect.indirect_buf);
          break;
        case DRW_CMD_BARRIER:
          GPU_memory_barrier(cmd->barrier.type);
          break;
      }
    }

    draw_call_batching_finish(shgroup, &state);
  }

  if (use_tfeedback) {
    GPU_shader_transform_feedback_disable(shgroup->shader);
  }
}

static void drw_update_view()
{
  /* TODO(fclem): update a big UBO and only bind ranges here. */
  GPU_uniformbuf_update(DST.view_active->view_ubo, &DST.view_active->storage);
  GPU_uniformbuf_update(DST.view_active->clipping_ubo, &DST.view_active->clip_planes);

  draw_compute_culling(DST.view_active);
}

static void drw_draw_pass_ex(DRWPass *pass,
                             DRWShadingGroup *start_group,
                             DRWShadingGroup *end_group)
{
  if (pass->original) {
    start_group = pass->original->shgroups.first;
    end_group = pass->original->shgroups.last;
  }

  if (start_group == nullptr) {
    return;
  }

  DST.shader = nullptr;

  BLI_assert_msg(DST.buffer_finish_called,
                 "DRW_render_instance_buffer_finish had not been called before drawing");

  if (DST.view_previous != DST.view_active || DST.view_active->is_dirty) {
    drw_update_view();
    DST.view_active->is_dirty = false;
    DST.view_previous = DST.view_active;
  }

  /* GPU_framebuffer_clear calls can change the state outside the DRW module.
   * Force reset the affected states to avoid problems later. */
  drw_state_set(DST.state | DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR);

  drw_state_set(pass->state);
  drw_state_validate();

  if (DST.view_active->is_inverted) {
    GPU_front_facing(true);
  }

  DRW_stats_query_start(pass->name);

  for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
    draw_shgroup(shgroup, pass->state);
    /* break if upper limit */
    if (shgroup == end_group) {
      break;
    }
  }

  if (DST.shader) {
    GPU_shader_unbind();
    DST.shader = nullptr;
  }

  if (DST.batch) {
    DST.batch = nullptr;
  }

  /* Fix #67342 for some reason. AMD Pro driver bug. */
  if ((DST.state & DRW_STATE_BLEND_CUSTOM) != 0 &&
      GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_OFFICIAL))
  {
    drw_state_set(DST.state & ~DRW_STATE_BLEND_CUSTOM);
  }

  /* HACK: Rasterized discard can affect clear commands which are not
   * part of a DRWPass (as of now). So disable rasterized discard here
   * if it has been enabled. */
  if ((DST.state & DRW_STATE_RASTERIZER_ENABLED) == 0) {
    drw_state_set((DST.state & ~DRW_STATE_RASTERIZER_ENABLED) | DRW_STATE_DEFAULT);
  }

  /* Reset default. */
  if (DST.view_active->is_inverted) {
    GPU_front_facing(false);
  }

  DRW_stats_query_end();
}

void DRW_draw_pass(DRWPass *pass)
{
  for (; pass; pass = pass->next) {
    drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
  }
}

void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
  drw_draw_pass_ex(pass, start_group, end_group);
}

/** \} */