Blender V4.3 — gpu_py_buffer.cc: implementation of the `gpu.types.Buffer` Python type.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
#include <Python.h>

#include "BLI_utildefines.h"

#include "MEM_guardedalloc.h"

#include "GPU_texture.hh"

#include "../generic/py_capi_utils.hh"

#include "gpu_py.hh"

#include "gpu_py_buffer.hh"
27
28#define PYGPU_BUFFER_PROTOCOL
29#define MAX_DIMENSIONS 64
30
31/* -------------------------------------------------------------------- */
35static Py_ssize_t pygpu_buffer_dimensions_tot_elem(const Py_ssize_t *shape, Py_ssize_t shape_len)
36{
37 Py_ssize_t tot = shape[0];
38 for (int i = 1; i < shape_len; i++) {
39 tot *= shape[i];
40 }
41
42 return tot;
43}
44
45static bool pygpu_buffer_dimensions_tot_len_compare(const Py_ssize_t *shape_a,
46 const Py_ssize_t shape_a_len,
47 const Py_ssize_t *shape_b,
48 const Py_ssize_t shape_b_len)
49{
50 if (pygpu_buffer_dimensions_tot_elem(shape_a, shape_a_len) !=
51 pygpu_buffer_dimensions_tot_elem(shape_b, shape_b_len))
52 {
53 PyErr_Format(PyExc_BufferError, "array size does not match");
54 return false;
55 }
56
57 return true;
58}
59
60static bool pygpu_buffer_pyobj_as_shape(PyObject *shape_obj,
61 Py_ssize_t r_shape[MAX_DIMENSIONS],
62 Py_ssize_t *r_shape_len)
63{
64 Py_ssize_t shape_len = 0;
65 if (PyLong_Check(shape_obj)) {
66 shape_len = 1;
67 if ((r_shape[0] = PyLong_AsSsize_t(shape_obj)) < 1) {
68 PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1");
69 return false;
70 }
71 }
72 else if (PySequence_Check(shape_obj)) {
73 shape_len = PySequence_Size(shape_obj);
74 if (shape_len > MAX_DIMENSIONS) {
75 PyErr_SetString(PyExc_AttributeError,
76 "too many dimensions, max is " STRINGIFY(MAX_DIMENSIONS));
77 return false;
78 }
79 if (shape_len < 1) {
80 PyErr_SetString(PyExc_AttributeError, "sequence must have at least one dimension");
81 return false;
82 }
83
84 for (int i = 0; i < shape_len; i++) {
85 PyObject *ob = PySequence_GetItem(shape_obj, i);
86 if (!PyLong_Check(ob)) {
87 PyErr_Format(PyExc_TypeError,
88 "invalid dimension %i, expected an int, not a %.200s",
89 i,
90 Py_TYPE(ob)->tp_name);
91 Py_DECREF(ob);
92 return false;
93 }
94
95 r_shape[i] = PyLong_AsSsize_t(ob);
96 Py_DECREF(ob);
97
98 if (r_shape[i] < 1) {
99 PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1");
100 return false;
101 }
102 }
103 }
104 else {
105 PyErr_Format(PyExc_TypeError,
106 "invalid second argument expected a sequence "
107 "or an int, not a %.200s",
108 Py_TYPE(shape_obj)->tp_name);
109 }
110
111 *r_shape_len = shape_len;
112 return true;
113}
114
115static const char *pygpu_buffer_formatstr(eGPUDataFormat data_format)
116{
117 switch (data_format) {
118 case GPU_DATA_FLOAT:
119 return "f";
120 case GPU_DATA_INT:
121 return "i";
122 case GPU_DATA_UINT:
123 return "I";
124 case GPU_DATA_UBYTE:
125 return "B";
128 return "I";
129 default:
130 break;
131 }
132 return nullptr;
133}
134
137/* -------------------------------------------------------------------- */
143 const int shape_len,
144 const Py_ssize_t *shape,
145 void *buf)
146{
147 BPyGPUBuffer *buffer = (BPyGPUBuffer *)_PyObject_GC_New(&BPyGPU_BufferType);
148
149 buffer->parent = nullptr;
150 buffer->format = format;
151 buffer->shape_len = shape_len;
152 buffer->shape = static_cast<Py_ssize_t *>(
153 MEM_mallocN(shape_len * sizeof(*buffer->shape), "BPyGPUBuffer shape"));
154 memcpy(buffer->shape, shape, shape_len * sizeof(*buffer->shape));
155 buffer->buf.as_void = buf;
156
157 if (parent) {
158 Py_INCREF(parent);
159 buffer->parent = parent;
160 BLI_assert(!PyObject_GC_IsTracked((PyObject *)buffer));
161 PyObject_GC_Track(buffer);
162 }
163 return buffer;
164}
165
166static PyObject *pygpu_buffer__sq_item(BPyGPUBuffer *self, Py_ssize_t i)
167{
168 if (i >= self->shape[0] || i < 0) {
169 PyErr_SetString(PyExc_IndexError, "array index out of range");
170 return nullptr;
171 }
172
173 const char *formatstr = pygpu_buffer_formatstr(eGPUDataFormat(self->format));
174
175 if (self->shape_len == 1) {
176 switch (self->format) {
177 case GPU_DATA_FLOAT:
178 return Py_BuildValue(formatstr, self->buf.as_float[i]);
179 case GPU_DATA_INT:
180 return Py_BuildValue(formatstr, self->buf.as_int[i]);
181 case GPU_DATA_UBYTE:
182 return Py_BuildValue(formatstr, self->buf.as_byte[i]);
183 case GPU_DATA_UINT:
186 return Py_BuildValue(formatstr, self->buf.as_uint[i]);
187 }
188 }
189 else {
190 int offset = i * GPU_texture_dataformat_size(eGPUDataFormat(self->format));
191 for (int j = 1; j < self->shape_len; j++) {
192 offset *= self->shape[j];
193 }
194
195 return (PyObject *)pygpu_buffer_make_from_data((PyObject *)self,
196 eGPUDataFormat(self->format),
197 self->shape_len - 1,
198 self->shape + 1,
199 self->buf.as_byte + offset);
200 }
201
202 return nullptr;
203}
204
206{
207 const Py_ssize_t len = self->shape[0];
208 PyObject *list = PyList_New(len);
209
210 for (Py_ssize_t i = 0; i < len; i++) {
211 PyList_SET_ITEM(list, i, pygpu_buffer__sq_item(self, i));
212 }
213
214 return list;
215}
216
218{
219 PyObject *list;
220
221 if (self->shape_len > 1) {
222 int i, len = self->shape[0];
223 list = PyList_New(len);
224
225 for (i = 0; i < len; i++) {
226 /* "BPyGPUBuffer *sub_tmp" is a temporary object created just to be read for nested lists.
227 * That is why it is decremented/freed soon after.
228 * TODO: For efficiency, avoid creating #BPyGPUBuffer when creating nested lists. */
230 PyList_SET_ITEM(list, i, pygpu_buffer_to_list_recursive(sub_tmp));
231 Py_DECREF(sub_tmp);
232 }
233 }
234 else {
236 }
237
238 return list;
239}
240
241static PyObject *pygpu_buffer_dimensions_get(BPyGPUBuffer *self, void * /*arg*/)
242{
243 PyObject *list = PyList_New(self->shape_len);
244 int i;
245
246 for (i = 0; i < self->shape_len; i++) {
247 PyList_SET_ITEM(list, i, PyLong_FromLong(self->shape[i]));
248 }
249
250 return list;
251}
252
253static int pygpu_buffer_dimensions_set(BPyGPUBuffer *self, PyObject *value, void * /*type*/)
254{
255 Py_ssize_t shape[MAX_DIMENSIONS];
256 Py_ssize_t shape_len = 0;
257
258 if (!pygpu_buffer_pyobj_as_shape(value, shape, &shape_len)) {
259 return -1;
260 }
261
262 if (!pygpu_buffer_dimensions_tot_len_compare(shape, shape_len, self->shape, self->shape_len)) {
263 return -1;
264 }
265
266 size_t size = shape_len * sizeof(*self->shape);
267 if (shape_len != self->shape_len) {
268 MEM_freeN(self->shape);
269 self->shape = static_cast<Py_ssize_t *>(MEM_mallocN(size, __func__));
270 }
271
272 self->shape_len = shape_len;
273 memcpy(self->shape, shape, size);
274 return 0;
275}
276
/** GC traverse: the only Python object this buffer references is its (optional) parent. */
static int pygpu_buffer__tp_traverse(BPyGPUBuffer *self, visitproc visit, void *arg)
{
  Py_VISIT(self->parent);
  return 0;
}
282
284{
285 if (self->parent) {
286 Py_CLEAR(self->parent);
287 self->buf.as_void = nullptr;
288 }
289 return 0;
290}
291
293{
294 if (self->parent) {
295 PyObject_GC_UnTrack(self);
296 Py_CLEAR(self->parent);
297 }
298 else if (self->buf.as_void) {
299 MEM_freeN(self->buf.as_void);
300 }
301
302 MEM_freeN(self->shape);
303
304 PyObject_GC_Del(self);
305}
306
308{
309 PyObject *repr;
310
311 PyObject *list = pygpu_buffer_to_list_recursive(self);
312 const char *typestr = PyC_StringEnum_FindIDFromValue(bpygpu_dataformat_items, self->format);
313
314 repr = PyUnicode_FromFormat("Buffer(%s, %R)", typestr, list);
315 Py_DECREF(list);
316
317 return repr;
318}
319
320static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, Py_ssize_t i, PyObject *v);
321
323 Py_ssize_t begin,
324 Py_ssize_t end,
325 PyObject *seq)
326{
327 PyObject *item;
328 int count, err = 0;
329
330 if (begin < 0) {
331 begin = 0;
332 }
333 if (end > self->shape[0]) {
334 end = self->shape[0];
335 }
336 if (begin > end) {
337 begin = end;
338 }
339
340 if (!PySequence_Check(seq)) {
341 PyErr_Format(PyExc_TypeError,
342 "buffer[:] = value, invalid assignment. "
343 "Expected a sequence, not an %.200s type",
344 Py_TYPE(seq)->tp_name);
345 return -1;
346 }
347
348 /* re-use count var */
349 if ((count = PySequence_Size(seq)) != (end - begin)) {
350 PyErr_Format(PyExc_TypeError,
351 "buffer[:] = value, size mismatch in assignment. "
352 "Expected: %d (given: %d)",
353 count,
354 end - begin);
355 return -1;
356 }
357
358 for (count = begin; count < end; count++) {
359 item = PySequence_GetItem(seq, count - begin);
360 if (item) {
362 Py_DECREF(item);
363 }
364 else {
365 err = -1;
366 }
367 if (err) {
368 break;
369 }
370 }
371 return err;
372}
373
374static PyObject *pygpu_buffer__tp_new(PyTypeObject * /*type*/, PyObject *args, PyObject *kwds)
375{
377
378 PyObject *length_ob, *init = nullptr;
379 BPyGPUBuffer *buffer = nullptr;
380 Py_ssize_t shape[MAX_DIMENSIONS];
381
382 Py_ssize_t shape_len = 0;
383
384 if (kwds && PyDict_Size(kwds)) {
385 PyErr_SetString(PyExc_TypeError, "Buffer(): takes no keyword args");
386 return nullptr;
387 }
388
389 const PyC_StringEnum pygpu_dataformat = {bpygpu_dataformat_items, GPU_DATA_FLOAT};
390 if (!PyArg_ParseTuple(
391 args, "O&O|O: Buffer", PyC_ParseStringEnum, &pygpu_dataformat, &length_ob, &init))
392 {
393 return nullptr;
394 }
395
396 if (!pygpu_buffer_pyobj_as_shape(length_ob, shape, &shape_len)) {
397 return nullptr;
398 }
399
400 if (init && PyObject_CheckBuffer(init)) {
401 Py_buffer pybuffer;
402
403 if (PyObject_GetBuffer(init, &pybuffer, PyBUF_ND | PyBUF_FORMAT) == -1) {
404 /* PyObject_GetBuffer raise a PyExc_BufferError */
405 return nullptr;
406 }
407
408 Py_ssize_t *pybuffer_shape = pybuffer.shape;
409 Py_ssize_t pybuffer_ndim = pybuffer.ndim;
410 if (!pybuffer_shape) {
411 pybuffer_shape = &pybuffer.len;
412 pybuffer_ndim = 1;
413 }
414
415 if (pygpu_buffer_dimensions_tot_len_compare(shape, shape_len, pybuffer_shape, pybuffer_ndim)) {
417 init, eGPUDataFormat(pygpu_dataformat.value_found), shape_len, shape, pybuffer.buf);
418 }
419
420 PyBuffer_Release(&pybuffer);
421 }
422 else {
423 buffer = BPyGPU_Buffer_CreatePyObject(pygpu_dataformat.value_found, shape, shape_len, nullptr);
424 if (init && pygpu_buffer_ass_slice(buffer, 0, shape[0], init)) {
425 Py_DECREF(buffer);
426 return nullptr;
427 }
428 }
429
430 return (PyObject *)buffer;
431}
432
434{
435 return self->parent != nullptr;
436}
437
438/* BPyGPUBuffer sequence methods */
439
441{
442 return self->shape[0];
443}
444
445static PyObject *pygpu_buffer_slice(BPyGPUBuffer *self, Py_ssize_t begin, Py_ssize_t end)
446{
447 PyObject *list;
448 Py_ssize_t count;
449
450 if (begin < 0) {
451 begin = 0;
452 }
453 if (end > self->shape[0]) {
454 end = self->shape[0];
455 }
456 if (begin > end) {
457 begin = end;
458 }
459
460 list = PyList_New(end - begin);
461
462 for (count = begin; count < end; count++) {
463 PyList_SET_ITEM(list, count - begin, pygpu_buffer__sq_item(self, count));
464 }
465 return list;
466}
467
468static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, Py_ssize_t i, PyObject *v)
469{
470 if (i >= self->shape[0] || i < 0) {
471 PyErr_SetString(PyExc_IndexError, "array assignment index out of range");
472 return -1;
473 }
474
475 if (self->shape_len != 1) {
477
478 if (row) {
479 const int ret = pygpu_buffer_ass_slice(row, 0, self->shape[1], v);
480 Py_DECREF(row);
481 return ret;
482 }
483
484 return -1;
485 }
486
487 switch (self->format) {
488 case GPU_DATA_FLOAT:
489 return PyArg_Parse(v, "f:Expected floats", &self->buf.as_float[i]) ? 0 : -1;
490 case GPU_DATA_INT:
491 return PyArg_Parse(v, "i:Expected ints", &self->buf.as_int[i]) ? 0 : -1;
492 case GPU_DATA_UBYTE:
493 return PyArg_Parse(v, "b:Expected ints", &self->buf.as_byte[i]) ? 0 : -1;
494 case GPU_DATA_UINT:
497 return PyArg_Parse(v, "I:Expected unsigned ints", &self->buf.as_uint[i]) ? 0 : -1;
498 default:
499 return 0; /* should never happen */
500 }
501}
502
503static PyObject *pygpu_buffer__mp_subscript(BPyGPUBuffer *self, PyObject *item)
504{
505 if (PyIndex_Check(item)) {
506 Py_ssize_t i;
507 i = PyNumber_AsSsize_t(item, PyExc_IndexError);
508 if (i == -1 && PyErr_Occurred()) {
509 return nullptr;
510 }
511 if (i < 0) {
512 i += self->shape[0];
513 }
514 return pygpu_buffer__sq_item(self, i);
515 }
516 if (PySlice_Check(item)) {
517 Py_ssize_t start, stop, step, slicelength;
518
519 if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) {
520 return nullptr;
521 }
522
523 if (slicelength <= 0) {
524 return PyTuple_New(0);
525 }
526 if (step == 1) {
527 return pygpu_buffer_slice(self, start, stop);
528 }
529
530 PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors");
531 return nullptr;
532 }
533
534 PyErr_Format(
535 PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
536 return nullptr;
537}
538
539static int pygpu_buffer__mp_ass_subscript(BPyGPUBuffer *self, PyObject *item, PyObject *value)
540{
541 if (PyIndex_Check(item)) {
542 Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
543 if (i == -1 && PyErr_Occurred()) {
544 return -1;
545 }
546 if (i < 0) {
547 i += self->shape[0];
548 }
549 return pygpu_buffer__sq_ass_item(self, i, value);
550 }
551 if (PySlice_Check(item)) {
552 Py_ssize_t start, stop, step, slicelength;
553
554 if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) {
555 return -1;
556 }
557
558 if (step == 1) {
559 return pygpu_buffer_ass_slice(self, start, stop, value);
560 }
561
562 PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors");
563 return -1;
564 }
565
566 PyErr_Format(
567 PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
568 return -1;
569}
570
571#if (defined(__GNUC__) && !defined(__clang__))
572# pragma GCC diagnostic push
573# pragma GCC diagnostic ignored "-Wcast-function-type"
574#endif
575
576static PyMethodDef pygpu_buffer__tp_methods[] = {
577 {"to_list",
579 METH_NOARGS,
580 "return the buffer as a list"},
581 {nullptr, nullptr, 0, nullptr},
582};
583
584#if (defined(__GNUC__) && !defined(__clang__))
585# pragma GCC diagnostic pop
586#endif
587
588static PyGetSetDef pygpu_buffer_getseters[] = {
589 {"dimensions",
592 nullptr,
593 nullptr},
594 {nullptr, nullptr, nullptr, nullptr, nullptr},
595};
596
/** Sequence protocol slots: `len()`, item get/set. Slot-based slicing is deprecated. */
static PySequenceMethods pygpu_buffer__tp_as_sequence = {
    /*sq_length*/ (lenfunc)pygpu_buffer__sq_length,
    /*sq_concat*/ nullptr,
    /*sq_repeat*/ nullptr,
    /*sq_item*/ (ssizeargfunc)pygpu_buffer__sq_item,
    /*was_sq_slice*/ nullptr, /* DEPRECATED. Handled by #pygpu_buffer__sq_item. */
    /*sq_ass_item*/ (ssizeobjargproc)pygpu_buffer__sq_ass_item,
    /*was_sq_ass_slice*/ nullptr, /* DEPRECATED. Handled by #pygpu_buffer__sq_ass_item. */
    /*sq_contains*/ nullptr,
    /*sq_inplace_concat*/ nullptr,
    /*sq_inplace_repeat*/ nullptr,
};
609
/** Mapping protocol slots: integer & slice subscript get/set. */
static PyMappingMethods pygpu_buffer__tp_as_mapping = {
    /*mp_length*/ (lenfunc)pygpu_buffer__sq_length,
    /*mp_subscript*/ (binaryfunc)pygpu_buffer__mp_subscript,
    /*mp_ass_subscript*/ (objobjargproc)pygpu_buffer__mp_ass_subscript,
};
615
616#ifdef PYGPU_BUFFER_PROTOCOL
618 const int shape_len,
619 const Py_ssize_t *shape,
620 Py_ssize_t *r_strides)
621{
622 r_strides[0] = GPU_texture_dataformat_size(format);
623 for (int i = 1; i < shape_len; i++) {
624 r_strides[i] = r_strides[i - 1] * shape[i - 1];
625 }
626}
627
628/* Here is the buffer interface function */
629static int pygpu_buffer__bf_getbuffer(BPyGPUBuffer *self, Py_buffer *view, int flags)
630{
631 if (view == nullptr) {
632 PyErr_SetString(PyExc_ValueError, "nullptr view in getbuffer");
633 return -1;
634 }
635
636 memset(view, 0, sizeof(*view));
637
638 view->obj = (PyObject *)self;
639 view->buf = (void *)self->buf.as_void;
640 view->len = bpygpu_Buffer_size(self);
641 view->readonly = 0;
642 view->itemsize = GPU_texture_dataformat_size(eGPUDataFormat(self->format));
643 if (flags & PyBUF_FORMAT) {
644 view->format = (char *)pygpu_buffer_formatstr(eGPUDataFormat(self->format));
645 }
646 if (flags & PyBUF_ND) {
647 view->ndim = self->shape_len;
648 view->shape = self->shape;
649 }
650 if (flags & PyBUF_STRIDES) {
651 view->strides = static_cast<Py_ssize_t *>(
652 MEM_mallocN(view->ndim * sizeof(*view->strides), "BPyGPUBuffer strides"));
654 eGPUDataFormat(self->format), view->ndim, view->shape, view->strides);
655 }
656 view->suboffsets = nullptr;
657 view->internal = nullptr;
658
659 Py_INCREF(self);
660 return 0;
661}
662
/** Release a buffer view: only the strides array is owned by the view (may be null). */
static void pygpu_buffer__bf_releasebuffer(PyObject * /*exporter*/, Py_buffer *view)
{
  MEM_SAFE_FREE(view->strides);
}
667
/** Buffer-protocol slots, only compiled with #PYGPU_BUFFER_PROTOCOL defined. */
static PyBufferProcs pygpu_buffer__tp_as_buffer = {
    /*bf_getbuffer*/ (getbufferproc)pygpu_buffer__bf_getbuffer,
    /*bf_releasebuffer*/ (releasebufferproc)pygpu_buffer__bf_releasebuffer,
};
672#endif
673
675 /* Wrap. */
676 pygpu_buffer__tp_doc,
677 ".. class:: Buffer(format, dimensions, data)\n"
678 "\n"
679 " For Python access to GPU functions requiring a pointer.\n"
680 "\n"
681 " :arg format: Format type to interpret the buffer.\n"
682 " Possible values are `FLOAT`, `INT`, `UINT`, `UBYTE`, `UINT_24_8` and `10_11_11_REV`.\n"
683 " :type format: str\n"
684 " :arg dimensions: Array describing the dimensions.\n"
685 " :type dimensions: int\n"
686 " :arg data: Optional data array.\n"
687 " :type data: Buffer | Sequence[float] | Sequence[int]\n");
688PyTypeObject BPyGPU_BufferType = {
689 /*ob_base*/ PyVarObject_HEAD_INIT(nullptr, 0)
690 /*tp_name*/ "Buffer",
691 /*tp_basicsize*/ sizeof(BPyGPUBuffer),
692 /*tp_itemsize*/ 0,
693 /*tp_dealloc*/ (destructor)pygpu_buffer__tp_dealloc,
694 /*tp_vectorcall_offset*/ 0,
695 /*tp_getattr*/ nullptr,
696 /*tp_setattr*/ nullptr,
697 /*tp_compare*/ nullptr,
698 /*tp_repr*/ (reprfunc)pygpu_buffer__tp_repr,
699 /*tp_as_number*/ nullptr,
700 /*tp_as_sequence*/ &pygpu_buffer__tp_as_sequence,
701 /*tp_as_mapping*/ &pygpu_buffer__tp_as_mapping,
702 /*tp_hash*/ nullptr,
703 /*tp_call*/ nullptr,
704 /*tp_str*/ nullptr,
705 /*tp_getattro*/ nullptr,
706 /*tp_setattro*/ nullptr,
708 /*tp_as_buffer*/ &pygpu_buffer__tp_as_buffer,
709#else
710 /*tp_as_buffer*/ nullptr,
711#endif
712 /*tp_flags*/ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
713 /*tp_doc*/ pygpu_buffer__tp_doc,
714 /*tp_traverse*/ (traverseproc)pygpu_buffer__tp_traverse,
715 /*tp_clear*/ (inquiry)pygpu_buffer__tp_clear,
716 /*tp_richcompare*/ nullptr,
717 /*tp_weaklistoffset*/ 0,
718 /*tp_iter*/ nullptr,
719 /*tp_iternext*/ nullptr,
720 /*tp_methods*/ pygpu_buffer__tp_methods,
721 /*tp_members*/ nullptr,
722 /*tp_getset*/ pygpu_buffer_getseters,
723 /*tp_base*/ nullptr,
724 /*tp_dict*/ nullptr,
725 /*tp_descr_get*/ nullptr,
726 /*tp_descr_set*/ nullptr,
727 /*tp_dictoffset*/ 0,
728 /*tp_init*/ nullptr,
729 /*tp_alloc*/ nullptr,
730 /*tp_new*/ pygpu_buffer__tp_new,
731 /*tp_free*/ nullptr,
732 /*tp_is_gc*/ (inquiry)pygpu_buffer__tp_is_gc,
733 /*tp_bases*/ nullptr,
734 /*tp_mro*/ nullptr,
735 /*tp_cache*/ nullptr,
736 /*tp_subclasses*/ nullptr,
737 /*tp_weaklist*/ nullptr,
738 /*tp_del*/ nullptr,
739 /*tp_version_tag*/ 0,
740 /*tp_finalize*/ nullptr,
741 /*tp_vectorcall*/ nullptr,
742};
743
744static size_t pygpu_buffer_calc_size(const int format,
745 const int shape_len,
746 const Py_ssize_t *shape)
747{
748 return pygpu_buffer_dimensions_tot_elem(shape, shape_len) *
750}
751
753{
754 return pygpu_buffer_calc_size(buffer->format, buffer->shape_len, buffer->shape);
755}
756
758 const Py_ssize_t *shape,
759 const int shape_len,
760 void *buffer)
761{
762 if (buffer == nullptr) {
763 size_t size = pygpu_buffer_calc_size(format, shape_len, shape);
764 buffer = MEM_callocN(size, "BPyGPUBuffer buffer");
765 }
766
767 return pygpu_buffer_make_from_data(nullptr, eGPUDataFormat(format), shape_len, shape, buffer);
768}
769
#define BLI_assert(a)
Definition BLI_assert.h:50
#define STRINGIFY(x)
size_t GPU_texture_dataformat_size(eGPUDataFormat data_format)
eGPUDataFormat
@ GPU_DATA_UINT_24_8
@ GPU_DATA_INT
@ GPU_DATA_10_11_11_REV
@ GPU_DATA_UBYTE
@ GPU_DATA_UINT
@ GPU_DATA_FLOAT
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
ATTR_WARN_UNUSED_RESULT const BMVert * v
PyObject * self
void init()
int len
PyC_StringEnumItems bpygpu_dataformat_items[]
Definition gpu_py.cc:40
#define BPYGPU_IS_INIT_OR_ERROR_OBJ
Definition gpu_py.hh:18
static Py_ssize_t pygpu_buffer_dimensions_tot_elem(const Py_ssize_t *shape, Py_ssize_t shape_len)
static size_t pygpu_buffer_calc_size(const int format, const int shape_len, const Py_ssize_t *shape)
static PyObject * pygpu_buffer_to_list(BPyGPUBuffer *self)
static PyBufferProcs pygpu_buffer__tp_as_buffer
static int pygpu_buffer__tp_traverse(BPyGPUBuffer *self, visitproc visit, void *arg)
static void pygpu_buffer_strides_calc(const eGPUDataFormat format, const int shape_len, const Py_ssize_t *shape, Py_ssize_t *r_strides)
static PyObject * pygpu_buffer_to_list_recursive(BPyGPUBuffer *self)
static int pygpu_buffer_dimensions_set(BPyGPUBuffer *self, PyObject *value, void *)
static PyObject * pygpu_buffer_dimensions_get(BPyGPUBuffer *self, void *)
PyTypeObject BPyGPU_BufferType
#define MAX_DIMENSIONS
static PyMappingMethods pygpu_buffer__tp_as_mapping
#define PYGPU_BUFFER_PROTOCOL
static int pygpu_buffer__tp_is_gc(BPyGPUBuffer *self)
static PySequenceMethods pygpu_buffer__tp_as_sequence
static PyObject * pygpu_buffer__sq_item(BPyGPUBuffer *self, Py_ssize_t i)
static bool pygpu_buffer_dimensions_tot_len_compare(const Py_ssize_t *shape_a, const Py_ssize_t shape_a_len, const Py_ssize_t *shape_b, const Py_ssize_t shape_b_len)
static PyObject * pygpu_buffer__tp_repr(BPyGPUBuffer *self)
static Py_ssize_t pygpu_buffer__sq_length(BPyGPUBuffer *self)
static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, Py_ssize_t i, PyObject *v)
static void pygpu_buffer__bf_releasebuffer(PyObject *, Py_buffer *view)
static PyObject * pygpu_buffer__mp_subscript(BPyGPUBuffer *self, PyObject *item)
BPyGPUBuffer * BPyGPU_Buffer_CreatePyObject(const int format, const Py_ssize_t *shape, const int shape_len, void *buffer)
PyDoc_STRVAR(pygpu_buffer__tp_doc, ".. class:: Buffer(format, dimensions, data)\n" "\n" " For Python access to GPU functions requiring a pointer.\n" "\n" " :arg format: Format type to interpret the buffer.\n" " Possible values are `FLOAT`, `INT`, `UINT`, `UBYTE`, `UINT_24_8` and `10_11_11_REV`.\n" " :type format: str\n" " :arg dimensions: Array describing the dimensions.\n" " :type dimensions: int\n" " :arg data: Optional data array.\n" " :type data: Buffer | Sequence[float] | Sequence[int]\n")
static int pygpu_buffer__mp_ass_subscript(BPyGPUBuffer *self, PyObject *item, PyObject *value)
static PyObject * pygpu_buffer__tp_new(PyTypeObject *, PyObject *args, PyObject *kwds)
static PyMethodDef pygpu_buffer__tp_methods[]
static PyGetSetDef pygpu_buffer_getseters[]
static int pygpu_buffer__bf_getbuffer(BPyGPUBuffer *self, Py_buffer *view, int flags)
static bool pygpu_buffer_pyobj_as_shape(PyObject *shape_obj, Py_ssize_t r_shape[MAX_DIMENSIONS], Py_ssize_t *r_shape_len)
static BPyGPUBuffer * pygpu_buffer_make_from_data(PyObject *parent, const eGPUDataFormat format, const int shape_len, const Py_ssize_t *shape, void *buf)
static int pygpu_buffer_ass_slice(BPyGPUBuffer *self, Py_ssize_t begin, Py_ssize_t end, PyObject *seq)
static const char * pygpu_buffer_formatstr(eGPUDataFormat data_format)
size_t bpygpu_Buffer_size(BPyGPUBuffer *buffer)
static PyObject * pygpu_buffer_slice(BPyGPUBuffer *self, Py_ssize_t begin, Py_ssize_t end)
static void pygpu_buffer__tp_dealloc(BPyGPUBuffer *self)
static int pygpu_buffer__tp_clear(BPyGPUBuffer *self)
int count
format
void *(* MEM_mallocN)(size_t len, const char *str)
Definition mallocn.cc:44
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void *(* MEM_callocN)(size_t len, const char *str)
Definition mallocn.cc:42
int PyC_ParseStringEnum(PyObject *o, void *p)
const char * PyC_StringEnum_FindIDFromValue(const PyC_StringEnumItems *items, const int value)
return ret
Py_ssize_t * shape
PyObject_VAR_HEAD PyObject * parent
union BPyGPUBuffer::@1342 buf