/* Doxygen source listing: Blender 4.3 -- gpu_py_vertex_buffer.cc
 * (page chrome from the generated documentation; see upstream for the canonical file). */
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
12#include <Python.h>
13
14#include "GPU_vertex_buffer.hh"
15
16#include "MEM_guardedalloc.h"
17
21
22#include "gpu_py.hh"
23#include "gpu_py_vertex_buffer.hh" /* own include */
25
26/* -------------------------------------------------------------------- */
/**
 * Expand the caller-defined #PY_AS_NATIVE(ty_dst, py_as_native) macro once for the
 * component type of \a attr, pairing each #GPUVertCompType with its fixed-width C type
 * and the matching Python -> native conversion function.
 *
 * Callers `#define PY_AS_NATIVE` immediately before use and `#undef` it after.
 */
#define PYGPU_AS_NATIVE_SWITCH(attr) \
  switch (attr->comp_type) { \
    case GPU_COMP_I8: { \
      PY_AS_NATIVE(int8_t, PyC_Long_AsI8); \
      break; \
    } \
    case GPU_COMP_U8: { \
      PY_AS_NATIVE(uint8_t, PyC_Long_AsU8); \
      break; \
    } \
    case GPU_COMP_I16: { \
      PY_AS_NATIVE(int16_t, PyC_Long_AsI16); \
      break; \
    } \
    case GPU_COMP_U16: { \
      PY_AS_NATIVE(uint16_t, PyC_Long_AsU16); \
      break; \
    } \
    case GPU_COMP_I32: { \
      PY_AS_NATIVE(int32_t, PyC_Long_AsI32); \
      break; \
    } \
    case GPU_COMP_U32: { \
      PY_AS_NATIVE(uint32_t, PyC_Long_AsU32); \
      break; \
    } \
    case GPU_COMP_F32: { \
      PY_AS_NATIVE(float, PyFloat_AsDouble); \
      break; \
    } \
    default: \
      BLI_assert_unreachable(); \
      break; \
  } \
  ((void)0)
65
66/* No error checking, callers must run PyErr_Occurred */
67static void pygpu_fill_format_elem(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
68{
69#define PY_AS_NATIVE(ty_dst, py_as_native) \
70 { \
71 ty_dst *data_dst = static_cast<ty_dst *>(data_dst_void); \
72 *data_dst = py_as_native(py_src); \
73 } \
74 ((void)0)
75
77
78#undef PY_AS_NATIVE
79}
80
81/* No error checking, callers must run PyErr_Occurred */
82static void pygpu_fill_format_sequence(void *data_dst_void,
83 PyObject *py_seq_fast,
84 const GPUVertAttr *attr)
85{
86 const uint len = attr->comp_len;
87 PyObject **value_fast_items = PySequence_Fast_ITEMS(py_seq_fast);
88
92#define PY_AS_NATIVE(ty_dst, py_as_native) \
93 ty_dst *data_dst = static_cast<ty_dst *>(data_dst_void); \
94 for (uint i = 0; i < len; i++) { \
95 data_dst[i] = py_as_native(value_fast_items[i]); \
96 } \
97 ((void)0)
98
100
101#undef PY_AS_NATIVE
102}
103
104#undef PYGPU_AS_NATIVE_SWITCH
105#undef WARN_TYPE_LIMIT_PUSH
106#undef WARN_TYPE_LIMIT_POP
107
109 uint data_id,
110 PyObject *seq,
111 const char *error_prefix)
112{
113 const char *exc_str_size_mismatch = "Expected a %s of size %d, got %u";
114
115 bool ok = true;
116 const GPUVertAttr *attr = &GPU_vertbuf_get_format(vbo)->attrs[data_id];
117 uint vert_len = GPU_vertbuf_get_vertex_len(vbo);
118
119 if (PyObject_CheckBuffer(seq)) {
120 Py_buffer pybuffer;
121
122 if (PyObject_GetBuffer(seq, &pybuffer, PyBUF_STRIDES | PyBUF_ND) == -1) {
123 /* PyObject_GetBuffer raise a PyExc_BufferError */
124 return false;
125 }
126
127 const uint comp_len = pybuffer.ndim == 1 ? 1 : uint(pybuffer.shape[1]);
128
129 if (pybuffer.shape[0] != vert_len) {
130 PyErr_Format(
131 PyExc_ValueError, exc_str_size_mismatch, "sequence", vert_len, pybuffer.shape[0]);
132 ok = false;
133 }
134 else if (comp_len != attr->comp_len) {
135 PyErr_Format(PyExc_ValueError, exc_str_size_mismatch, "component", attr->comp_len, comp_len);
136 ok = false;
137 }
138 else {
139 GPU_vertbuf_attr_fill_stride(vbo, data_id, pybuffer.strides[0], pybuffer.buf);
140 }
141
142 PyBuffer_Release(&pybuffer);
143 }
144 else {
145 GPUVertBufRaw data_step;
146 GPU_vertbuf_attr_get_raw_data(vbo, data_id, &data_step);
147
148 PyObject *seq_fast = PySequence_Fast(seq, "Vertex buffer fill");
149 if (seq_fast == nullptr) {
150 return false;
151 }
152
153 const uint seq_len = PySequence_Fast_GET_SIZE(seq_fast);
154
155 if (seq_len != vert_len) {
156 PyErr_Format(PyExc_ValueError, exc_str_size_mismatch, "sequence", vert_len, seq_len);
157 }
158
159 PyObject **seq_items = PySequence_Fast_ITEMS(seq_fast);
160
161 if (attr->comp_len == 1) {
162 for (uint i = 0; i < seq_len; i++) {
163 uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
164 PyObject *item = seq_items[i];
165 pygpu_fill_format_elem(data, item, attr);
166 }
167 }
168 else {
169 for (uint i = 0; i < seq_len; i++) {
170 uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
171 PyObject *seq_fast_item = PySequence_Fast(seq_items[i], error_prefix);
172
173 if (seq_fast_item == nullptr) {
174 ok = false;
175 goto finally;
176 }
177 if (PySequence_Fast_GET_SIZE(seq_fast_item) != attr->comp_len) {
178 PyErr_Format(PyExc_ValueError,
179 exc_str_size_mismatch,
180 "sequence",
181 attr->comp_len,
182 PySequence_Fast_GET_SIZE(seq_fast_item));
183 ok = false;
184 Py_DECREF(seq_fast_item);
185 goto finally;
186 }
187
188 /* May trigger error, check below */
189 pygpu_fill_format_sequence(data, seq_fast_item, attr);
190 Py_DECREF(seq_fast_item);
191 }
192 }
193
194 if (PyErr_Occurred()) {
195 ok = false;
196 }
197
198 finally:
199
200 Py_DECREF(seq_fast);
201 }
202 return ok;
203}
204
206 int id,
207 PyObject *py_seq_data,
208 const char *error_prefix)
209{
210 if (id < 0 || id >= GPU_vertbuf_get_format(buf)->attr_len) {
211 PyErr_Format(PyExc_ValueError, "Format id %d out of range", id);
212 return 0;
213 }
214
215 if (buf->data<char>().data() == nullptr) {
216 PyErr_SetString(PyExc_ValueError, "Can't fill, static buffer already in use");
217 return 0;
218 }
219
220 if (!pygpu_vertbuf_fill_impl(buf, uint(id), py_seq_data, error_prefix)) {
221 return 0;
222 }
223
224 return 1;
225}
226
229/* -------------------------------------------------------------------- */
233static PyObject *pygpu_vertbuf__tp_new(PyTypeObject * /*type*/, PyObject *args, PyObject *kwds)
234{
236
237 struct {
238 PyObject *py_fmt;
239 uint len;
240 } params;
241
242 static const char *_keywords[] = {"format", "len", nullptr};
243 static _PyArg_Parser _parser = {
245 "O!" /* `format` */
246 "I" /* `len` */
247 ":GPUVertBuf.__new__",
248 _keywords,
249 nullptr,
250 };
251 if (!_PyArg_ParseTupleAndKeywordsFast(
252 args, kwds, &_parser, &BPyGPUVertFormat_Type, &params.py_fmt, &params.len))
253 {
254 return nullptr;
255 }
256
257 const GPUVertFormat &fmt = ((BPyGPUVertFormat *)params.py_fmt)->fmt;
259
261
263}
264
266 /* Wrap. */
267 pygpu_vertbuf_attr_fill_doc,
268 ".. method:: attr_fill(id, data)\n"
269 "\n"
270 " Insert data into the buffer for a single attribute.\n"
271 "\n"
272 " :arg id: Either the name or the id of the attribute.\n"
273 " :type id: int | str\n"
274 " :arg data: Buffer or sequence of data that should be stored in the buffer\n"
275 " :type data: Buffer | "
276 "Sequence[float] | Sequence[int] | Sequence[Sequence[float]] | Sequence[Sequence[int]]\n");
277static PyObject *pygpu_vertbuf_attr_fill(BPyGPUVertBuf *self, PyObject *args, PyObject *kwds)
278{
279 PyObject *data;
280 PyObject *identifier;
281
282 static const char *_keywords[] = {"id", "data", nullptr};
283 static _PyArg_Parser _parser = {
285 "O" /* `id` */
286 "O" /* `data` */
287 ":attr_fill",
288 _keywords,
289 nullptr,
290 };
291 if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &identifier, &data)) {
292 return nullptr;
293 }
294
295 int id;
296
297 if (PyLong_Check(identifier)) {
298 id = PyLong_AsLong(identifier);
299 }
300 else if (PyUnicode_Check(identifier)) {
302 const char *name = PyUnicode_AsUTF8(identifier);
304 if (id == -1) {
305 PyErr_Format(PyExc_ValueError, "Unknown attribute '%s'", name);
306 return nullptr;
307 }
308 }
309 else {
310 PyErr_SetString(PyExc_TypeError, "expected int or str type as identifier");
311 return nullptr;
312 }
313
314 if (!pygpu_vertbuf_fill(self->buf, id, data, "GPUVertBuf.attr_fill")) {
315 return nullptr;
316 }
317
318 Py_RETURN_NONE;
319}
320
321#if (defined(__GNUC__) && !defined(__clang__))
322# pragma GCC diagnostic push
323# pragma GCC diagnostic ignored "-Wcast-function-type"
324#endif
325
326static PyMethodDef pygpu_vertbuf__tp_methods[] = {
327 {"attr_fill",
328 (PyCFunction)pygpu_vertbuf_attr_fill,
329 METH_VARARGS | METH_KEYWORDS,
330 pygpu_vertbuf_attr_fill_doc},
331 {nullptr, nullptr, 0, nullptr},
332};
333
334#if (defined(__GNUC__) && !defined(__clang__))
335# pragma GCC diagnostic pop
336#endif
337
339{
341 Py_TYPE(self)->tp_free(self);
342}
343
345 /* Wrap. */
346 pygpu_vertbuf__tp_doc,
347 ".. class:: GPUVertBuf(format, len)\n"
348 "\n"
349 " Contains a VBO.\n"
350 "\n"
351 " :arg format: Vertex format.\n"
352 " :type format: :class:`gpu.types.GPUVertFormat`\n"
353 " :arg len: Amount of vertices that will fit into this buffer.\n"
354 " :type len: int\n");
355PyTypeObject BPyGPUVertBuf_Type = {
356 /*ob_base*/ PyVarObject_HEAD_INIT(nullptr, 0)
357 /*tp_name*/ "GPUVertBuf",
358 /*tp_basicsize*/ sizeof(BPyGPUVertBuf),
359 /*tp_itemsize*/ 0,
360 /*tp_dealloc*/ (destructor)pygpu_vertbuf__tp_dealloc,
361 /*tp_vectorcall_offset*/ 0,
362 /*tp_getattr*/ nullptr,
363 /*tp_setattr*/ nullptr,
364 /*tp_as_async*/ nullptr,
365 /*tp_repr*/ nullptr,
366 /*tp_as_number*/ nullptr,
367 /*tp_as_sequence*/ nullptr,
368 /*tp_as_mapping*/ nullptr,
369 /*tp_hash*/ nullptr,
370 /*tp_call*/ nullptr,
371 /*tp_str*/ nullptr,
372 /*tp_getattro*/ nullptr,
373 /*tp_setattro*/ nullptr,
374 /*tp_as_buffer*/ nullptr,
375 /*tp_flags*/ Py_TPFLAGS_DEFAULT,
376 /*tp_doc*/ pygpu_vertbuf__tp_doc,
377 /*tp_traverse*/ nullptr,
378 /*tp_clear*/ nullptr,
379 /*tp_richcompare*/ nullptr,
380 /*tp_weaklistoffset*/ 0,
381 /*tp_iter*/ nullptr,
382 /*tp_iternext*/ nullptr,
383 /*tp_methods*/ pygpu_vertbuf__tp_methods,
384 /*tp_members*/ nullptr,
385 /*tp_getset*/ nullptr,
386 /*tp_base*/ nullptr,
387 /*tp_dict*/ nullptr,
388 /*tp_descr_get*/ nullptr,
389 /*tp_descr_set*/ nullptr,
390 /*tp_dictoffset*/ 0,
391 /*tp_init*/ nullptr,
392 /*tp_alloc*/ nullptr,
393 /*tp_new*/ pygpu_vertbuf__tp_new,
394 /*tp_free*/ nullptr,
395 /*tp_is_gc*/ nullptr,
396 /*tp_bases*/ nullptr,
397 /*tp_mro*/ nullptr,
398 /*tp_cache*/ nullptr,
399 /*tp_subclasses*/ nullptr,
400 /*tp_weaklist*/ nullptr,
401 /*tp_del*/ nullptr,
402 /*tp_version_tag*/ 0,
403 /*tp_finalize*/ nullptr,
404 /*tp_vectorcall*/ nullptr,
405};
406
409/* -------------------------------------------------------------------- */
414{
416
417 self = PyObject_New(BPyGPUVertBuf, &BPyGPUVertBuf_Type);
418 self->buf = buf;
419
420 return (PyObject *)self;
421}
422
unsigned char uchar
unsigned int uint
void GPU_vertbuf_attr_get_raw_data(blender::gpu::VertBuf *, uint a_idx, GPUVertBufRaw *access)
GPU_INLINE void * GPU_vertbuf_raw_step(GPUVertBufRaw *a)
void GPU_vertbuf_attr_fill_stride(blender::gpu::VertBuf *, uint a_idx, uint stride, const void *data)
#define GPU_vertbuf_create_with_format(format)
const GPUVertFormat * GPU_vertbuf_get_format(const blender::gpu::VertBuf *verts)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
uint GPU_vertbuf_get_vertex_len(const blender::gpu::VertBuf *verts)
void GPU_vertbuf_discard(blender::gpu::VertBuf *)
int GPU_vertformat_attr_id_get(const GPUVertFormat *, const char *name)
Read Guarded memory(de)allocation.
PyObject * self
MutableSpan< T > data()
int len
#define BPYGPU_IS_INIT_OR_ERROR_OBJ
Definition gpu_py.hh:18
PyTypeObject BPyGPUVertBuf_Type
PyObject * BPyGPUVertBuf_CreatePyObject(blender::gpu::VertBuf *buf)
static bool pygpu_vertbuf_fill_impl(blender::gpu::VertBuf *vbo, uint data_id, PyObject *seq, const char *error_prefix)
static PyObject * pygpu_vertbuf__tp_new(PyTypeObject *, PyObject *args, PyObject *kwds)
#define PYGPU_AS_NATIVE_SWITCH(attr)
static void pygpu_fill_format_sequence(void *data_dst_void, PyObject *py_seq_fast, const GPUVertAttr *attr)
static PyObject * pygpu_vertbuf_attr_fill(BPyGPUVertBuf *self, PyObject *args, PyObject *kwds)
static void pygpu_fill_format_elem(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
static int pygpu_vertbuf_fill(blender::gpu::VertBuf *buf, int id, PyObject *py_seq_data, const char *error_prefix)
static PyMethodDef pygpu_vertbuf__tp_methods[]
PyDoc_STRVAR(pygpu_vertbuf_attr_fill_doc, ".. method:: attr_fill(id, data)\n" "\n" " Insert data into the buffer for a single attribute.\n" "\n" " :arg id: Either the name or the id of the attribute.\n" " :type id: int | str\n" " :arg data: Buffer or sequence of data that should be stored in the buffer\n" " :type data: Buffer | " "Sequence[float] | Sequence[int] | Sequence[Sequence[float]] | Sequence[Sequence[int]]\n")
static void pygpu_vertbuf__tp_dealloc(BPyGPUVertBuf *self)
PyTypeObject BPyGPUVertFormat_Type
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
format
header-only compatibility defines.
#define PY_ARG_PARSER_HEAD_COMPAT()
header-only utilities
GPUVertAttr attrs[GPU_VERT_ATTR_MAX_LEN]