Blender V4.3
gl_drawlist.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2016 by Mike Erwin. All rights reserved.
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
12#include "BLI_assert.h"
13
14#include "GPU_batch.hh"
15
16#include "GPU_vertex_buffer.hh"
19
20#include "gl_backend.hh"
21#include "gl_drawlist.hh"
22#include "gl_primitive.hh"
23
24#include <climits>
25
26using namespace blender::gpu;
27
29 GLuint v_count;
30 GLuint i_count;
31 GLuint v_first;
32 GLuint i_first;
33};
34
36 GLuint v_count;
37 GLuint i_count;
38 GLuint v_first;
39 GLuint base_index;
40 GLuint i_first;
41};
42
/* Convenience predicates over the member state of `GLDrawList`. MDI support is
 * encoded in `buffer_size_` (0 means MultiDrawIndirect is unavailable), and
 * `base_index_ == UINT_MAX` marks the current batch as non-indexed. */
#define MDI_ENABLED (buffer_size_ != 0)
#define MDI_DISABLED (buffer_size_ == 0)
#define MDI_INDEXED (base_index_ != UINT_MAX)
46
48{
49 BLI_assert(length > 0);
50 batch_ = nullptr;
51 buffer_id_ = 0;
52 command_len_ = 0;
53 base_index_ = 0;
54 command_offset_ = 0;
55 data_size_ = 0;
56 data_ = nullptr;
57
59 /* Alloc the biggest possible command list, which is indexed. */
60 buffer_size_ = sizeof(GLDrawCommandIndexed) * length;
61 }
62 else {
63 /* Indicates MDI is not supported. */
64 buffer_size_ = 0;
65 }
66 /* Force buffer specification on first init. */
67 data_offset_ = buffer_size_;
68}
69
74
75void GLDrawList::init()
76{
79 BLI_assert(data_ == nullptr);
80 batch_ = nullptr;
81 command_len_ = 0;
82
83 if (buffer_id_ == 0) {
84 /* Allocate on first use. */
85 glGenBuffers(1, &buffer_id_);
86 context_ = GLContext::get();
87 }
88
89 glBindBuffer(GL_DRAW_INDIRECT_BUFFER, buffer_id_);
90 /* If buffer is full, orphan buffer data and start fresh. */
91 size_t command_size = MDI_INDEXED ? sizeof(GLDrawCommandIndexed) : sizeof(GLDrawCommand);
92 if (data_offset_ + command_size > buffer_size_) {
93 glBufferData(GL_DRAW_INDIRECT_BUFFER, buffer_size_, nullptr, GL_DYNAMIC_DRAW);
94 data_offset_ = 0;
95 }
96 /* Map the remaining range. */
97 GLbitfield flag = GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_FLUSH_EXPLICIT_BIT;
98 data_size_ = buffer_size_ - data_offset_;
99 data_ = (GLbyte *)glMapBufferRange(GL_DRAW_INDIRECT_BUFFER, data_offset_, data_size_, flag);
100 command_offset_ = 0;
101}
102
103void GLDrawList::append(Batch *gpu_batch, int i_first, int i_count)
104{
105 /* Fallback when MultiDrawIndirect is not supported/enabled. */
106 if (MDI_DISABLED) {
107 GPU_batch_draw_advanced(gpu_batch, 0, 0, i_first, i_count);
108 return;
109 }
110
111 if (data_ == nullptr) {
112 this->init();
113 }
114
115 GLBatch *batch = static_cast<GLBatch *>(gpu_batch);
116 if (batch != batch_) {
117 // BLI_assert(batch->flag | GPU_BATCH_INIT);
118 this->submit();
119 batch_ = batch;
120 /* Cached for faster access. */
121 GLIndexBuf *el = batch_->elem_();
122 base_index_ = el ? el->index_base_ : UINT_MAX;
123 v_first_ = el ? el->index_start_ : 0;
124 v_count_ = el ? el->index_len_ : batch->verts_(0)->vertex_len;
125 }
126
127 if (v_count_ == 0) {
128 /* Nothing to draw. */
129 return;
130 }
131
132 if (MDI_INDEXED) {
133 GLDrawCommandIndexed *cmd = reinterpret_cast<GLDrawCommandIndexed *>(data_ + command_offset_);
134 cmd->v_first = v_first_;
135 cmd->v_count = v_count_;
136 cmd->i_count = i_count;
137 cmd->base_index = base_index_;
138 cmd->i_first = i_first;
139 }
140 else {
141 GLDrawCommand *cmd = reinterpret_cast<GLDrawCommand *>(data_ + command_offset_);
142 cmd->v_first = v_first_;
143 cmd->v_count = v_count_;
144 cmd->i_count = i_count;
145 cmd->i_first = i_first;
146 }
147
148 size_t command_size = MDI_INDEXED ? sizeof(GLDrawCommandIndexed) : sizeof(GLDrawCommand);
149
150 command_offset_ += command_size;
151 command_len_++;
152
153 /* Check if we can fit at least one other command. */
154 if (command_offset_ + command_size > data_size_) {
155 this->submit();
156 }
157}
158
160{
161 if (command_len_ == 0) {
162 return;
163 }
164 /* Something's wrong if we get here without MDI support. */
166 BLI_assert(data_);
167 BLI_assert(GLContext::get()->shader != nullptr);
168
169 size_t command_size = MDI_INDEXED ? sizeof(GLDrawCommandIndexed) : sizeof(GLDrawCommand);
170
171 /* Only do multi-draw indirect if doing more than 2 drawcall. This avoids the overhead of
172 * buffer mapping if scene is not very instance friendly. BUT we also need to take into
173 * account the case where only a few instances are needed to finish filling a call buffer. */
174 const bool is_finishing_a_buffer = (command_offset_ + command_size > data_size_);
175 if (command_len_ > 2 || is_finishing_a_buffer) {
176 GLenum prim = to_gl(batch_->prim_type);
177 void *offset = (void *)data_offset_;
178
179 glBindBuffer(GL_DRAW_INDIRECT_BUFFER, buffer_id_);
180 glFlushMappedBufferRange(GL_DRAW_INDIRECT_BUFFER, 0, command_offset_);
181 glUnmapBuffer(GL_DRAW_INDIRECT_BUFFER);
182 data_ = nullptr; /* Unmapped */
183 data_offset_ += command_offset_;
184
185 batch_->bind();
186
187 if (MDI_INDEXED) {
188 GLenum gl_type = to_gl(batch_->elem_()->index_type_);
189 glMultiDrawElementsIndirect(prim, gl_type, offset, command_len_, 0);
190 }
191 else {
192 glMultiDrawArraysIndirect(prim, offset, command_len_, 0);
193 }
194 }
195 else {
196 /* Fallback do simple drawcalls, and don't unmap the buffer. */
197 if (MDI_INDEXED) {
199 for (int i = 0; i < command_len_; i++, cmd++) {
200 /* Index start was already added. Avoid counting it twice. */
201 cmd->v_first -= v_first_;
202 batch_->draw(cmd->v_first, cmd->v_count, cmd->i_first, cmd->i_count);
203 }
204 /* Reuse the same data. */
205 command_offset_ -= command_len_ * sizeof(GLDrawCommandIndexed);
206 }
207 else {
208 GLDrawCommand *cmd = (GLDrawCommand *)data_;
209 for (int i = 0; i < command_len_; i++, cmd++) {
210 batch_->draw(cmd->v_first, cmd->v_count, cmd->i_first, cmd->i_count);
211 }
212 /* Reuse the same data. */
213 command_offset_ -= command_len_ * sizeof(GLDrawCommand);
214 }
215 }
216 /* Do not submit this buffer again. */
217 command_len_ = 0;
218 /* Avoid keeping reference to the batch. */
219 batch_ = nullptr;
220}
#define BLI_assert(a)
Definition BLI_assert.h:50
void GPU_batch_draw_advanced(blender::gpu::Batch *batch, int vertex_first, int vertex_count, int instance_first, int instance_count)
GLIndexBuf * elem_() const
Definition gl_batch.hh:103
void draw(int v_first, int v_count, int i_first, int i_count) override
Definition gl_batch.cc:250
static void buf_free(GLuint buf_id)
static GLContext * get()
static bool multi_draw_indirect_support
Definition gl_context.hh:59
void append(Batch *batch, int i_first, int i_count) override
GPUIndexBufType index_type_
struct @620::@622 batch
#define UINT_MAX
Definition hash_md5.cc:44
#define MDI_INDEXED
#define MDI_ENABLED
#define MDI_DISABLED
static GLenum to_gl(const GPUAttachmentType type)
uint8_t flag
Definition wm_window.cc:138