Blender V5.0
gpu_py_buffer.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
13
14#include <Python.h>
15
16#include <algorithm>
17
18#include "BLI_utildefines.h"
19
20#include "MEM_guardedalloc.h"
21
22#include "GPU_texture.hh"
23
25
26#include "gpu_py.hh"
27
28#include "gpu_py_buffer.hh"
29
30#define PYGPU_BUFFER_PROTOCOL
31#define MAX_DIMENSIONS 64
32
33/* -------------------------------------------------------------------- */
36
37static Py_ssize_t pygpu_buffer_dimensions_tot_elem(const Py_ssize_t *shape, Py_ssize_t shape_len)
38{
39 Py_ssize_t tot = shape[0];
40 for (int i = 1; i < shape_len; i++) {
41 tot *= shape[i];
42 }
43
44 return tot;
45}
46
47static bool pygpu_buffer_dimensions_tot_len_compare(const Py_ssize_t *shape_a,
48 const Py_ssize_t shape_a_len,
49 const Py_ssize_t *shape_b,
50 const Py_ssize_t shape_b_len)
51{
52 if (pygpu_buffer_dimensions_tot_elem(shape_a, shape_a_len) !=
53 pygpu_buffer_dimensions_tot_elem(shape_b, shape_b_len))
54 {
55 PyErr_SetString(PyExc_BufferError, "array size does not match");
56 return false;
57 }
58
59 return true;
60}
61
62static bool pygpu_buffer_pyobj_as_shape(PyObject *shape_obj,
63 Py_ssize_t r_shape[MAX_DIMENSIONS],
64 Py_ssize_t *r_shape_len)
65{
66 Py_ssize_t shape_len = 0;
67 if (PyLong_Check(shape_obj)) {
68 shape_len = 1;
69 if ((r_shape[0] = PyLong_AsSsize_t(shape_obj)) < 1) {
70 PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1");
71 return false;
72 }
73 }
74 else if (PySequence_Check(shape_obj)) {
75 shape_len = PySequence_Size(shape_obj);
76 if (shape_len > MAX_DIMENSIONS) {
77 PyErr_SetString(PyExc_AttributeError,
78 "too many dimensions, max is " STRINGIFY(MAX_DIMENSIONS));
79 return false;
80 }
81 if (shape_len < 1) {
82 PyErr_SetString(PyExc_AttributeError, "sequence must have at least one dimension");
83 return false;
84 }
85
86 for (int i = 0; i < shape_len; i++) {
87 PyObject *ob = PySequence_GetItem(shape_obj, i);
88 if (!PyLong_Check(ob)) {
89 PyErr_Format(PyExc_TypeError,
90 "invalid dimension %i, expected an int, not a %.200s",
91 i,
92 Py_TYPE(ob)->tp_name);
93 Py_DECREF(ob);
94 return false;
95 }
96
97 r_shape[i] = PyLong_AsSsize_t(ob);
98 Py_DECREF(ob);
99
100 if (r_shape[i] < 1) {
101 PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1");
102 return false;
103 }
104 }
105 }
106 else {
107 PyErr_Format(PyExc_TypeError,
108 "invalid second argument expected a sequence "
109 "or an int, not a %.200s",
110 Py_TYPE(shape_obj)->tp_name);
111 }
112
113 *r_shape_len = shape_len;
114 return true;
115}
116
117static const char *pygpu_buffer_formatstr(eGPUDataFormat data_format)
118{
119 switch (data_format) {
120 case GPU_DATA_FLOAT:
121 return "f";
122 case GPU_DATA_INT:
123 return "i";
124 case GPU_DATA_UINT:
125 return "I";
126 case GPU_DATA_UBYTE:
127 return "B";
130 return "I";
131 default:
132 break;
133 }
134 return nullptr;
135}
136
138
139/* -------------------------------------------------------------------- */
142
145 const int shape_len,
146 const Py_ssize_t *shape,
147 void *buf)
148{
149 BPyGPUBuffer *buffer = (BPyGPUBuffer *)_PyObject_GC_New(&BPyGPU_BufferType);
150
151 buffer->parent = nullptr;
152 buffer->format = format;
153 buffer->shape_len = shape_len;
154 buffer->shape = MEM_malloc_arrayN<Py_ssize_t>(size_t(shape_len), "BPyGPUBuffer shape");
155 memcpy(buffer->shape, shape, sizeof(*buffer->shape) * size_t(shape_len));
156 buffer->buf.as_void = buf;
157
158 if (parent) {
159 Py_INCREF(parent);
160 buffer->parent = parent;
161 BLI_assert(!PyObject_GC_IsTracked((PyObject *)buffer));
162 PyObject_GC_Track(buffer);
163 }
164 return buffer;
165}
166
167static PyObject *pygpu_buffer__sq_item(BPyGPUBuffer *self, Py_ssize_t i)
168{
169 if (i >= self->shape[0] || i < 0) {
170 PyErr_SetString(PyExc_IndexError, "array index out of range");
171 return nullptr;
172 }
173
174 const char *formatstr = pygpu_buffer_formatstr(eGPUDataFormat(self->format));
175
176 if (self->shape_len == 1) {
177 switch (self->format) {
178 case GPU_DATA_FLOAT:
179 return Py_BuildValue(formatstr, self->buf.as_float[i]);
180 case GPU_DATA_INT:
181 return Py_BuildValue(formatstr, self->buf.as_int[i]);
182 case GPU_DATA_UBYTE:
183 return Py_BuildValue(formatstr, self->buf.as_byte[i]);
184 case GPU_DATA_UINT:
187 return Py_BuildValue(formatstr, self->buf.as_uint[i]);
188 }
189 }
190 else {
191 int offset = i * GPU_texture_dataformat_size(eGPUDataFormat(self->format));
192 for (int j = 1; j < self->shape_len; j++) {
193 offset *= self->shape[j];
194 }
195
196 return (PyObject *)pygpu_buffer_make_from_data((PyObject *)self,
197 eGPUDataFormat(self->format),
198 self->shape_len - 1,
199 self->shape + 1,
200 self->buf.as_byte + offset);
201 }
202
203 return nullptr;
204}
205
207{
208 const Py_ssize_t len = self->shape[0];
209 PyObject *list = PyList_New(len);
210
211 for (Py_ssize_t i = 0; i < len; i++) {
212 PyList_SET_ITEM(list, i, pygpu_buffer__sq_item(self, i));
213 }
214
215 return list;
216}
217
219{
220 PyObject *list;
221
222 if (self->shape_len > 1) {
223 int i, len = self->shape[0];
224 list = PyList_New(len);
225
226 for (i = 0; i < len; i++) {
227 /* "BPyGPUBuffer *sub_tmp" is a temporary object created just to be read for nested lists.
228 * That is why it is decremented/freed soon after.
229 * TODO: For efficiency, avoid creating #BPyGPUBuffer when creating nested lists. */
231 PyList_SET_ITEM(list, i, pygpu_buffer_to_list_recursive(sub_tmp));
232 Py_DECREF(sub_tmp);
233 }
234 }
235 else {
237 }
238
239 return list;
240}
241
242static PyObject *pygpu_buffer_dimensions_get(BPyGPUBuffer *self, void * /*arg*/)
243{
244 PyObject *list = PyList_New(self->shape_len);
245 int i;
246
247 for (i = 0; i < self->shape_len; i++) {
248 PyList_SET_ITEM(list, i, PyLong_FromLong(self->shape[i]));
249 }
250
251 return list;
252}
253
254static int pygpu_buffer_dimensions_set(BPyGPUBuffer *self, PyObject *value, void * /*type*/)
255{
256 Py_ssize_t shape[MAX_DIMENSIONS];
257 Py_ssize_t shape_len = 0;
258
259 if (!pygpu_buffer_pyobj_as_shape(value, shape, &shape_len)) {
260 return -1;
261 }
262
263 if (!pygpu_buffer_dimensions_tot_len_compare(shape, shape_len, self->shape, self->shape_len)) {
264 return -1;
265 }
266
267 if (shape_len != self->shape_len) {
268 MEM_freeN(self->shape);
269 self->shape = MEM_malloc_arrayN<Py_ssize_t>(size_t(shape_len), __func__);
270 }
271
272 self->shape_len = shape_len;
273 memcpy(self->shape, shape, sizeof(*self->shape) * size_t(shape_len));
274 return 0;
275}
276
/* GC traversal: the only owned Python reference that can participate in a
 * reference cycle is `parent` (set when this buffer is a view into another). */
static int pygpu_buffer__tp_traverse(BPyGPUBuffer *self, visitproc visit, void *arg)
{
  Py_VISIT(self->parent);
  return 0;
}
282
284{
285 if (self->parent) {
286 Py_CLEAR(self->parent);
287 self->buf.as_void = nullptr;
288 }
289 return 0;
290}
291
293{
294 if (self->parent) {
295 PyObject_GC_UnTrack(self);
296 Py_CLEAR(self->parent);
297 }
298 else if (self->buf.as_void) {
299 MEM_freeN(self->buf.as_void);
300 }
301
302 MEM_freeN(self->shape);
303
304 PyObject_GC_Del(self);
305}
306
308{
309 PyObject *repr;
310
311 PyObject *list = pygpu_buffer_to_list_recursive(self);
312 const char *typestr = PyC_StringEnum_FindIDFromValue(bpygpu_dataformat_items, self->format);
313
314 repr = PyUnicode_FromFormat("Buffer(%s, %R)", typestr, list);
315 Py_DECREF(list);
316
317 return repr;
318}
319
320static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, Py_ssize_t i, PyObject *v);
321
323 Py_ssize_t begin,
324 Py_ssize_t end,
325 PyObject *seq)
326{
327 PyObject *item;
328 int count, err = 0;
329
330 begin = std::max<Py_ssize_t>(begin, 0);
331 end = std::min(end, self->shape[0]);
332 begin = std::min(begin, end);
333
334 if (!PySequence_Check(seq)) {
335 PyErr_Format(PyExc_TypeError,
336 "buffer[:] = value, invalid assignment. "
337 "Expected a sequence, not an %.200s type",
338 Py_TYPE(seq)->tp_name);
339 return -1;
340 }
341
342 /* re-use count var */
343 if ((count = PySequence_Size(seq)) != (end - begin)) {
344 PyErr_Format(PyExc_TypeError,
345 "buffer[:] = value, size mismatch in assignment. "
346 "Expected: %d (given: %d)",
347 count,
348 end - begin);
349 return -1;
350 }
351
352 for (count = begin; count < end; count++) {
353 item = PySequence_GetItem(seq, count - begin);
354 if (item) {
356 Py_DECREF(item);
357 }
358 else {
359 err = -1;
360 }
361 if (err) {
362 break;
363 }
364 }
365 return err;
366}
367
368static PyObject *pygpu_buffer__tp_new(PyTypeObject * /*type*/, PyObject *args, PyObject *kwds)
369{
371
372 PyObject *length_ob, *init = nullptr;
373 BPyGPUBuffer *buffer = nullptr;
374 Py_ssize_t shape[MAX_DIMENSIONS];
375
376 Py_ssize_t shape_len = 0;
377
378 if (kwds && PyDict_Size(kwds)) {
379 PyErr_SetString(PyExc_TypeError, "Buffer(): takes no keyword args");
380 return nullptr;
381 }
382
384 if (!PyArg_ParseTuple(
385 args, "O&O|O: Buffer", PyC_ParseStringEnum, &pygpu_dataformat, &length_ob, &init))
386 {
387 return nullptr;
388 }
389 if (pygpu_dataformat.value_found == GPU_DATA_UINT_24_8_DEPRECATED) {
390 PyErr_WarnEx(PyExc_DeprecationWarning, "`UINT_24_8` is deprecated, use `FLOAT` instead", 1);
391 }
392
393 if (!pygpu_buffer_pyobj_as_shape(length_ob, shape, &shape_len)) {
394 return nullptr;
395 }
396
397 if (init && PyObject_CheckBuffer(init)) {
398 Py_buffer pybuffer;
399
400 if (PyObject_GetBuffer(init, &pybuffer, PyBUF_ND | PyBUF_FORMAT) == -1) {
401 /* PyObject_GetBuffer raise a PyExc_BufferError */
402 return nullptr;
403 }
404
405 Py_ssize_t *pybuffer_shape = pybuffer.shape;
406 Py_ssize_t pybuffer_ndim = pybuffer.ndim;
407 if (!pybuffer_shape) {
408 pybuffer_shape = &pybuffer.len;
409 pybuffer_ndim = 1;
410 }
411
412 if (pygpu_buffer_dimensions_tot_len_compare(shape, shape_len, pybuffer_shape, pybuffer_ndim)) {
414 init, eGPUDataFormat(pygpu_dataformat.value_found), shape_len, shape, pybuffer.buf);
415 }
416
417 PyBuffer_Release(&pybuffer);
418 }
419 else {
420 buffer = BPyGPU_Buffer_CreatePyObject(pygpu_dataformat.value_found, shape, shape_len, nullptr);
421 if (init && pygpu_buffer_ass_slice(buffer, 0, shape[0], init)) {
422 Py_DECREF(buffer);
423 return nullptr;
424 }
425 }
426
427 return (PyObject *)buffer;
428}
429
431{
432 return self->parent != nullptr;
433}
434
435/* BPyGPUBuffer sequence methods */
436
438{
439 return self->shape[0];
440}
441
442static PyObject *pygpu_buffer_slice(BPyGPUBuffer *self, Py_ssize_t begin, Py_ssize_t end)
443{
444 PyObject *list;
445 Py_ssize_t count;
446
447 begin = std::max<Py_ssize_t>(begin, 0);
448 end = std::min(end, self->shape[0]);
449 begin = std::min(begin, end);
450
451 list = PyList_New(end - begin);
452
453 for (count = begin; count < end; count++) {
454 PyList_SET_ITEM(list, count - begin, pygpu_buffer__sq_item(self, count));
455 }
456 return list;
457}
458
459static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, Py_ssize_t i, PyObject *v)
460{
461 if (i >= self->shape[0] || i < 0) {
462 PyErr_SetString(PyExc_IndexError, "array assignment index out of range");
463 return -1;
464 }
465
466 if (self->shape_len != 1) {
468
469 if (row) {
470 const int ret = pygpu_buffer_ass_slice(row, 0, self->shape[1], v);
471 Py_DECREF(row);
472 return ret;
473 }
474
475 return -1;
476 }
477
478 switch (self->format) {
479 case GPU_DATA_FLOAT:
480 return PyArg_Parse(v, "f:Expected floats", &self->buf.as_float[i]) ? 0 : -1;
481 case GPU_DATA_INT:
482 return PyArg_Parse(v, "i:Expected ints", &self->buf.as_int[i]) ? 0 : -1;
483 case GPU_DATA_UBYTE:
484 return PyArg_Parse(v, "b:Expected ints", &self->buf.as_byte[i]) ? 0 : -1;
485 case GPU_DATA_UINT:
488 return PyArg_Parse(v, "I:Expected unsigned ints", &self->buf.as_uint[i]) ? 0 : -1;
489 default:
490 return 0; /* should never happen */
491 }
492}
493
494static PyObject *pygpu_buffer__mp_subscript(BPyGPUBuffer *self, PyObject *item)
495{
496 if (PyIndex_Check(item)) {
497 Py_ssize_t i;
498 i = PyNumber_AsSsize_t(item, PyExc_IndexError);
499 if (i == -1 && PyErr_Occurred()) {
500 return nullptr;
501 }
502 if (i < 0) {
503 i += self->shape[0];
504 }
506 }
507 if (PySlice_Check(item)) {
508 Py_ssize_t start, stop, step, slicelength;
509
510 if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) {
511 return nullptr;
512 }
513
514 if (slicelength <= 0) {
515 return PyTuple_New(0);
516 }
517 if (step == 1) {
518 return pygpu_buffer_slice(self, start, stop);
519 }
520
521 PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors");
522 return nullptr;
523 }
524
525 PyErr_Format(
526 PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
527 return nullptr;
528}
529
530static int pygpu_buffer__mp_ass_subscript(BPyGPUBuffer *self, PyObject *item, PyObject *value)
531{
532 if (PyIndex_Check(item)) {
533 Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
534 if (i == -1 && PyErr_Occurred()) {
535 return -1;
536 }
537 if (i < 0) {
538 i += self->shape[0];
539 }
540 return pygpu_buffer__sq_ass_item(self, i, value);
541 }
542 if (PySlice_Check(item)) {
543 Py_ssize_t start, stop, step, slicelength;
544
545 if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) {
546 return -1;
547 }
548
549 if (step == 1) {
550 return pygpu_buffer_ass_slice(self, start, stop, value);
551 }
552
553 PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors");
554 return -1;
555 }
556
557 PyErr_Format(
558 PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
559 return -1;
560}
561
562#ifdef __GNUC__
563# ifdef __clang__
564# pragma clang diagnostic push
565# pragma clang diagnostic ignored "-Wcast-function-type"
566# else
567# pragma GCC diagnostic push
568# pragma GCC diagnostic ignored "-Wcast-function-type"
569# endif
570#endif
571
572static PyMethodDef pygpu_buffer__tp_methods[] = {
573 {"to_list",
575 METH_NOARGS,
576 "return the buffer as a list"},
577 {nullptr, nullptr, 0, nullptr},
578};
579
580#ifdef __GNUC__
581# ifdef __clang__
582# pragma clang diagnostic pop
583# else
584# pragma GCC diagnostic pop
585# endif
586#endif
587
588static PyGetSetDef pygpu_buffer_getseters[] = {
589 {"dimensions",
592 nullptr,
593 nullptr},
594 {nullptr, nullptr, nullptr, nullptr, nullptr},
595};
596
/* Sequence protocol: `len(buf)`, `buf[i]`, `buf[i] = v`.
 * Slicing goes through the mapping protocol table below. */
static PySequenceMethods pygpu_buffer__tp_as_sequence = {
    /*sq_length*/ (lenfunc)pygpu_buffer__sq_length,
    /*sq_concat*/ nullptr,
    /*sq_repeat*/ nullptr,
    /*sq_item*/ (ssizeargfunc)pygpu_buffer__sq_item,
    /*was_sq_slice*/ nullptr, /* DEPRECATED. Handled by #pygpu_buffer__sq_item. */
    /*sq_ass_item*/ (ssizeobjargproc)pygpu_buffer__sq_ass_item,
    /*was_sq_ass_slice*/ nullptr, /* DEPRECATED. Handled by #pygpu_buffer__sq_ass_item. */
    /*sq_contains*/ nullptr,
    /*sq_inplace_concat*/ nullptr,
    /*sq_inplace_repeat*/ nullptr,
};
609
/* Mapping protocol: handles both integer and slice subscripts
 * (`buf[i]`, `buf[a:b]`, and their assignment forms). */
static PyMappingMethods pygpu_buffer__tp_as_mapping = {
    /*mp_length*/ (lenfunc)pygpu_buffer__sq_length,
    /*mp_subscript*/ (binaryfunc)pygpu_buffer__mp_subscript,
    /*mp_ass_subscript*/ (objobjargproc)pygpu_buffer__mp_ass_subscript,
};
615
616#ifdef PYGPU_BUFFER_PROTOCOL
618 const int shape_len,
619 const Py_ssize_t *shape,
620 Py_ssize_t *r_strides)
621{
622 r_strides[0] = GPU_texture_dataformat_size(format);
623 for (int i = 1; i < shape_len; i++) {
624 r_strides[i] = r_strides[i - 1] * shape[i - 1];
625 }
626}
627
628/* Here is the buffer interface function */
629static int pygpu_buffer__bf_getbuffer(BPyGPUBuffer *self, Py_buffer *view, int flags)
630{
631 if (UNLIKELY(view == nullptr)) {
632 PyErr_SetString(PyExc_ValueError, "null view in get-buffer is obsolete");
633 return -1;
634 }
635
636 memset(view, 0, sizeof(*view));
637
638 view->obj = (PyObject *)self;
639 view->buf = self->buf.as_void;
641 view->readonly = 0;
643 if (flags & PyBUF_FORMAT) {
644 view->format = (char *)pygpu_buffer_formatstr(eGPUDataFormat(self->format));
645 }
646 if (flags & PyBUF_ND) {
647 view->ndim = self->shape_len;
648 view->shape = self->shape;
649 }
650 if (flags & PyBUF_STRIDES) {
651 view->strides = MEM_malloc_arrayN<Py_ssize_t>(size_t(view->ndim), "BPyGPUBuffer strides");
653 eGPUDataFormat(self->format), view->ndim, view->shape, view->strides);
654 }
655 view->suboffsets = nullptr;
656 view->internal = nullptr;
657
658 Py_INCREF(self);
659 return 0;
660}
661
/* Buffer-protocol release: free the strides array allocated in
 * #pygpu_buffer__bf_getbuffer (the exporter itself is released by CPython). */
static void pygpu_buffer__bf_releasebuffer(PyObject * /*exporter*/, Py_buffer *view)
{
  MEM_SAFE_FREE(view->strides);
}
666
/* Buffer protocol slots, only compiled in when #PYGPU_BUFFER_PROTOCOL is defined. */
static PyBufferProcs pygpu_buffer__tp_as_buffer = {
    /*bf_getbuffer*/ (getbufferproc)pygpu_buffer__bf_getbuffer,
    /*bf_releasebuffer*/ (releasebufferproc)pygpu_buffer__bf_releasebuffer,
};
671#endif
672
674 /* Wrap. */
675 pygpu_buffer__tp_doc,
676 ".. class:: Buffer(format, dimensions, data)\n"
677 "\n"
678 " For Python access to GPU functions requiring a pointer.\n"
679 "\n"
680 " :arg format: Format type to interpret the buffer.\n"
681 " Possible values are ``FLOAT``, ``INT``, ``UINT``, ``UBYTE``, ``UINT_24_8`` & "
682 "``10_11_11_REV``.\n"
683 " ``UINT_24_8`` is deprecated, use ``FLOAT`` instead.\n"
684 " :type format: str\n"
685 " :arg dimensions: Array describing the dimensions.\n"
686 " :type dimensions: int\n"
687 " :arg data: Optional data array.\n"
688 " :type data: Buffer | Sequence[float] | Sequence[int]\n");
689PyTypeObject BPyGPU_BufferType = {
690 /*ob_base*/ PyVarObject_HEAD_INIT(nullptr, 0)
691 /*tp_name*/ "Buffer",
692 /*tp_basicsize*/ sizeof(BPyGPUBuffer),
693 /*tp_itemsize*/ 0,
694 /*tp_dealloc*/ (destructor)pygpu_buffer__tp_dealloc,
695 /*tp_vectorcall_offset*/ 0,
696 /*tp_getattr*/ nullptr,
697 /*tp_setattr*/ nullptr,
698 /*tp_compare*/ nullptr,
699 /*tp_repr*/ (reprfunc)pygpu_buffer__tp_repr,
700 /*tp_as_number*/ nullptr,
701 /*tp_as_sequence*/ &pygpu_buffer__tp_as_sequence,
702 /*tp_as_mapping*/ &pygpu_buffer__tp_as_mapping,
703 /*tp_hash*/ nullptr,
704 /*tp_call*/ nullptr,
705 /*tp_str*/ nullptr,
706 /*tp_getattro*/ nullptr,
707 /*tp_setattro*/ nullptr,
709 /*tp_as_buffer*/ &pygpu_buffer__tp_as_buffer,
710#else
711 /*tp_as_buffer*/ nullptr,
712#endif
713 /*tp_flags*/ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
714 /*tp_doc*/ pygpu_buffer__tp_doc,
715 /*tp_traverse*/ (traverseproc)pygpu_buffer__tp_traverse,
716 /*tp_clear*/ (inquiry)pygpu_buffer__tp_clear,
717 /*tp_richcompare*/ nullptr,
718 /*tp_weaklistoffset*/ 0,
719 /*tp_iter*/ nullptr,
720 /*tp_iternext*/ nullptr,
721 /*tp_methods*/ pygpu_buffer__tp_methods,
722 /*tp_members*/ nullptr,
723 /*tp_getset*/ pygpu_buffer_getseters,
724 /*tp_base*/ nullptr,
725 /*tp_dict*/ nullptr,
726 /*tp_descr_get*/ nullptr,
727 /*tp_descr_set*/ nullptr,
728 /*tp_dictoffset*/ 0,
729 /*tp_init*/ nullptr,
730 /*tp_alloc*/ nullptr,
731 /*tp_new*/ pygpu_buffer__tp_new,
732 /*tp_free*/ nullptr,
733 /*tp_is_gc*/ (inquiry)pygpu_buffer__tp_is_gc,
734 /*tp_bases*/ nullptr,
735 /*tp_mro*/ nullptr,
736 /*tp_cache*/ nullptr,
737 /*tp_subclasses*/ nullptr,
738 /*tp_weaklist*/ nullptr,
739 /*tp_del*/ nullptr,
740 /*tp_version_tag*/ 0,
741 /*tp_finalize*/ nullptr,
742 /*tp_vectorcall*/ nullptr,
743};
744
745static size_t pygpu_buffer_calc_size(const int format,
746 const int shape_len,
747 const Py_ssize_t *shape)
748{
749 return pygpu_buffer_dimensions_tot_elem(shape, shape_len) *
751}
752
754{
755 return pygpu_buffer_calc_size(buffer->format, buffer->shape_len, buffer->shape);
756}
757
759 const Py_ssize_t *shape,
760 const int shape_len,
761 void *buffer)
762{
763 if (buffer == nullptr) {
764 size_t size = pygpu_buffer_calc_size(format, shape_len, shape);
765 buffer = MEM_callocN(size, "BPyGPUBuffer buffer");
766 }
767
768 return pygpu_buffer_make_from_data(nullptr, eGPUDataFormat(format), shape_len, shape, buffer);
769}
770
#define BLI_assert(a)
Definition BLI_assert.h:46
#define STRINGIFY(x)
#define UNLIKELY(x)
static AppView * view
size_t GPU_texture_dataformat_size(eGPUDataFormat data_format)
eGPUDataFormat
@ GPU_DATA_INT
@ GPU_DATA_10_11_11_REV
@ GPU_DATA_UBYTE
@ GPU_DATA_UINT
@ GPU_DATA_UINT_24_8_DEPRECATED
@ GPU_DATA_FLOAT
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
iter begin(iter)
ATTR_WARN_UNUSED_RESULT const BMVert * v
PyObject * self
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
PyC_StringEnumItems bpygpu_dataformat_items[]
Definition gpu_py.cc:40
#define BPYGPU_IS_INIT_OR_ERROR_OBJ
Definition gpu_py.hh:20
static Py_ssize_t pygpu_buffer_dimensions_tot_elem(const Py_ssize_t *shape, Py_ssize_t shape_len)
static size_t pygpu_buffer_calc_size(const int format, const int shape_len, const Py_ssize_t *shape)
static PyObject * pygpu_buffer_to_list(BPyGPUBuffer *self)
static PyBufferProcs pygpu_buffer__tp_as_buffer
static int pygpu_buffer__tp_traverse(BPyGPUBuffer *self, visitproc visit, void *arg)
static void pygpu_buffer_strides_calc(const eGPUDataFormat format, const int shape_len, const Py_ssize_t *shape, Py_ssize_t *r_strides)
static PyObject * pygpu_buffer_to_list_recursive(BPyGPUBuffer *self)
static int pygpu_buffer_dimensions_set(BPyGPUBuffer *self, PyObject *value, void *)
static PyObject * pygpu_buffer_dimensions_get(BPyGPUBuffer *self, void *)
PyTypeObject BPyGPU_BufferType
#define MAX_DIMENSIONS
static PyMappingMethods pygpu_buffer__tp_as_mapping
#define PYGPU_BUFFER_PROTOCOL
static int pygpu_buffer__tp_is_gc(BPyGPUBuffer *self)
static PySequenceMethods pygpu_buffer__tp_as_sequence
static PyObject * pygpu_buffer__sq_item(BPyGPUBuffer *self, Py_ssize_t i)
static bool pygpu_buffer_dimensions_tot_len_compare(const Py_ssize_t *shape_a, const Py_ssize_t shape_a_len, const Py_ssize_t *shape_b, const Py_ssize_t shape_b_len)
static PyObject * pygpu_buffer__tp_repr(BPyGPUBuffer *self)
static Py_ssize_t pygpu_buffer__sq_length(BPyGPUBuffer *self)
static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, Py_ssize_t i, PyObject *v)
static void pygpu_buffer__bf_releasebuffer(PyObject *, Py_buffer *view)
static PyObject * pygpu_buffer__mp_subscript(BPyGPUBuffer *self, PyObject *item)
BPyGPUBuffer * BPyGPU_Buffer_CreatePyObject(const int format, const Py_ssize_t *shape, const int shape_len, void *buffer)
static int pygpu_buffer__mp_ass_subscript(BPyGPUBuffer *self, PyObject *item, PyObject *value)
static PyObject * pygpu_buffer__tp_new(PyTypeObject *, PyObject *args, PyObject *kwds)
static PyMethodDef pygpu_buffer__tp_methods[]
static PyGetSetDef pygpu_buffer_getseters[]
static int pygpu_buffer__bf_getbuffer(BPyGPUBuffer *self, Py_buffer *view, int flags)
static bool pygpu_buffer_pyobj_as_shape(PyObject *shape_obj, Py_ssize_t r_shape[MAX_DIMENSIONS], Py_ssize_t *r_shape_len)
PyDoc_STRVAR(pygpu_buffer__tp_doc, ".. class:: Buffer(format, dimensions, data)\n" "\n" " For Python access to GPU functions requiring a pointer.\n" "\n" " :arg format: Format type to interpret the buffer.\n" " Possible values are ``FLOAT``, ``INT``, ``UINT``, ``UBYTE``, ``UINT_24_8`` & " "``10_11_11_REV``.\n" " ``UINT_24_8`` is deprecated, use ``FLOAT`` instead.\n" " :type format: str\n" " :arg dimensions: Array describing the dimensions.\n" " :type dimensions: int\n" " :arg data: Optional data array.\n" " :type data: Buffer | Sequence[float] | Sequence[int]\n")
static BPyGPUBuffer * pygpu_buffer_make_from_data(PyObject *parent, const eGPUDataFormat format, const int shape_len, const Py_ssize_t *shape, void *buf)
static int pygpu_buffer_ass_slice(BPyGPUBuffer *self, Py_ssize_t begin, Py_ssize_t end, PyObject *seq)
static const char * pygpu_buffer_formatstr(eGPUDataFormat data_format)
size_t bpygpu_Buffer_size(BPyGPUBuffer *buffer)
static PyObject * pygpu_buffer_slice(BPyGPUBuffer *self, Py_ssize_t begin, Py_ssize_t end)
static void pygpu_buffer__tp_dealloc(BPyGPUBuffer *self)
static int pygpu_buffer__tp_clear(BPyGPUBuffer *self)
VecBase< float, D > step(VecOp< float, D >, VecOp< float, D >) RET
int count
format
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
static void init(bNodeTree *, bNode *node)
int PyC_ParseStringEnum(PyObject *o, void *p)
const char * PyC_StringEnum_FindIDFromValue(const PyC_StringEnumItems *items, const int value)
Py_DECREF(oname)
return ret
union BPyGPUBuffer::@121060215011127262115351146023070133300154361035 buf
Py_ssize_t * shape
PyObject_VAR_HEAD PyObject * parent
i
Definition text_draw.cc:230
uint len