1 /* C Extension module to test all aspects of PEP-3118.
2 Written by Stefan Krah. */
3
4
5 #define PY_SSIZE_T_CLEAN
6
7 #include "Python.h"
8
9
10 /* struct module */
11 static PyObject *structmodule = NULL;
12 static PyObject *Struct = NULL;
13 static PyObject *calcsize = NULL;
14
15 /* cache simple format string */
16 static const char *simple_fmt = "B";
17 static PyObject *simple_format = NULL;
18 #define SIMPLE_FORMAT(fmt) (fmt == NULL || strcmp(fmt, "B") == 0)
19 #define FIX_FORMAT(fmt) (fmt == NULL ? "B" : fmt)
20
21
22 /**************************************************************************/
23 /* NDArray Object */
24 /**************************************************************************/
25
26 static PyTypeObject NDArray_Type;
27 #define NDArray_Check(v) Py_IS_TYPE(v, &NDArray_Type)
28
29 #define CHECK_LIST_OR_TUPLE(v) \
30 if (!PyList_Check(v) && !PyTuple_Check(v)) { \
31 PyErr_SetString(PyExc_TypeError, \
32 #v " must be a list or a tuple"); \
33 return NULL; \
34 } \
35
36 #define PyMem_XFree(v) \
37 do { if (v) PyMem_Free(v); } while (0)
38
39 /* Maximum number of dimensions. */
40 #define ND_MAX_NDIM (2 * PyBUF_MAX_NDIM)
41
42 /* Check for the presence of suboffsets in the first dimension. */
43 #define HAVE_PTR(suboffsets) (suboffsets && suboffsets[0] >= 0)
44 /* Adjust ptr if suboffsets are present. */
45 #define ADJUST_PTR(ptr, suboffsets) \
46 (HAVE_PTR(suboffsets) ? *((char**)ptr) + suboffsets[0] : ptr)
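/* Illustrative sketch (values are hypothetical, not used by the module):
   for a PIL-style array the first dimension stores pointers to sub-arrays,
   so ADJUST_PTR() dereferences the pointer and adds suboffsets[0]:

       char block[16];
       char *table[1] = { block + 4 };
       Py_ssize_t suboffsets[1] = { 2 };
       char *p = ADJUST_PTR((char *)table, suboffsets);

   p then equals block + 4 + 2. If suboffsets is NULL or suboffsets[0] is
   negative, ADJUST_PTR() returns ptr unchanged. */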
47
48 /* Default: NumPy style (strides), read-only, no var-export, C-style layout */
49 #define ND_DEFAULT 0x000
50 /* User configurable flags for the ndarray */
51 #define ND_VAREXPORT 0x001 /* change layout while buffers are exported */
52 /* User configurable flags for each base buffer */
53 #define ND_WRITABLE 0x002 /* mark base buffer as writable */
54 #define ND_FORTRAN 0x004 /* Fortran contiguous layout */
55 #define ND_SCALAR 0x008 /* scalar: ndim = 0 */
56 #define ND_PIL 0x010 /* convert to PIL-style array (suboffsets) */
57 #define ND_REDIRECT 0x020 /* redirect buffer requests */
58 #define ND_GETBUF_FAIL 0x040 /* trigger getbuffer failure */
59 #define ND_GETBUF_UNDEFINED 0x080 /* undefined view.obj */
60 /* Internal flags for the base buffer */
61 #define ND_C 0x100 /* C contiguous layout (default) */
62 #define ND_OWN_ARRAYS 0x200 /* consumer owns arrays */
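/* Worked example (flag values assumed for illustration): a base buffer
   pushed with

       int flags = ND_WRITABLE | ND_FORTRAN;

   gets column-major strides from strides_from_shape(); if the result is
   also C-contiguous (e.g. ndim == 1), init_structure() sets the internal
   ND_C bit as well, so ND_C_CONTIGUOUS(flags) and
   ND_FORTRAN_CONTIGUOUS(flags) both evaluate to 1. */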
63
64 /* ndarray properties */
65 #define ND_IS_CONSUMER(nd) \
66 (((NDArrayObject *)nd)->head == &((NDArrayObject *)nd)->staticbuf)
67
68 /* ndbuf->flags properties */
69 #define ND_C_CONTIGUOUS(flags) (!!(flags&(ND_SCALAR|ND_C)))
70 #define ND_FORTRAN_CONTIGUOUS(flags) (!!(flags&(ND_SCALAR|ND_FORTRAN)))
71 #define ND_ANY_CONTIGUOUS(flags) (!!(flags&(ND_SCALAR|ND_C|ND_FORTRAN)))
72
73 /* getbuffer() requests */
74 #define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
75 #define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
76 #define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
77 #define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
78 #define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
79 #define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
80 #define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
81 #define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)
82
83
84 /* Single node of a list of base buffers. The list is needed to implement
85 changes in memory layout while exported buffers are active. */
86 static PyTypeObject NDArray_Type;
87
88 struct ndbuf;
89 typedef struct ndbuf {
90 struct ndbuf *next;
91 struct ndbuf *prev;
92 Py_ssize_t len; /* length of data */
93 Py_ssize_t offset; /* start of the array relative to data */
94 char *data; /* raw data */
95 int flags; /* capabilities of the base buffer */
96 Py_ssize_t exports; /* number of exports */
97 Py_buffer base; /* base buffer */
98 } ndbuf_t;
99
100 typedef struct {
101 PyObject_HEAD
102 int flags; /* ndarray flags */
103 ndbuf_t staticbuf; /* static buffer for re-exporting mode */
104 ndbuf_t *head; /* currently active base buffer */
105 } NDArrayObject;
106
107
108 static ndbuf_t *
109 ndbuf_new(Py_ssize_t nitems, Py_ssize_t itemsize, Py_ssize_t offset, int flags)
110 {
111 ndbuf_t *ndbuf;
112 Py_buffer *base;
113 Py_ssize_t len;
114
115 len = nitems * itemsize;
116 if (offset % itemsize) {
117 PyErr_SetString(PyExc_ValueError,
118 "offset must be a multiple of itemsize");
119 return NULL;
120 }
121 if (offset < 0 || offset+itemsize > len) {
122 PyErr_SetString(PyExc_ValueError, "offset out of bounds");
123 return NULL;
124 }
125
126 ndbuf = PyMem_Malloc(sizeof *ndbuf);
127 if (ndbuf == NULL) {
128 PyErr_NoMemory();
129 return NULL;
130 }
131
132 ndbuf->next = NULL;
133 ndbuf->prev = NULL;
134 ndbuf->len = len;
135 ndbuf->offset = offset;
136
137 ndbuf->data = PyMem_Malloc(len);
138 if (ndbuf->data == NULL) {
139 PyErr_NoMemory();
140 PyMem_Free(ndbuf);
141 return NULL;
142 }
143
144 ndbuf->flags = flags;
145 ndbuf->exports = 0;
146
147 base = &ndbuf->base;
148 base->obj = NULL;
149 base->buf = ndbuf->data;
150 base->len = len;
151 base->itemsize = 1;
152 base->readonly = 0;
153 base->format = NULL;
154 base->ndim = 1;
155 base->shape = NULL;
156 base->strides = NULL;
157 base->suboffsets = NULL;
158 base->internal = ndbuf;
159
160 return ndbuf;
161 }
162
163 static void
164 ndbuf_free(ndbuf_t *ndbuf)
165 {
166 Py_buffer *base = &ndbuf->base;
167
168 PyMem_XFree(ndbuf->data);
169 PyMem_XFree(base->format);
170 PyMem_XFree(base->shape);
171 PyMem_XFree(base->strides);
172 PyMem_XFree(base->suboffsets);
173
174 PyMem_Free(ndbuf);
175 }
176
177 static void
178 ndbuf_push(NDArrayObject *nd, ndbuf_t *elt)
179 {
180 elt->next = nd->head;
181 if (nd->head) nd->head->prev = elt;
182 nd->head = elt;
183 elt->prev = NULL;
184 }
185
186 static void
187 ndbuf_delete(NDArrayObject *nd, ndbuf_t *elt)
188 {
189 if (elt->prev)
190 elt->prev->next = elt->next;
191 else
192 nd->head = elt->next;
193
194 if (elt->next)
195 elt->next->prev = elt->prev;
196
197 ndbuf_free(elt);
198 }
199
200 static void
201 ndbuf_pop(NDArrayObject *nd)
202 {
203 ndbuf_delete(nd, nd->head);
204 }
205
206
207 static PyObject *
208 ndarray_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
209 {
210 NDArrayObject *nd;
211
212 nd = PyObject_New(NDArrayObject, &NDArray_Type);
213 if (nd == NULL)
214 return NULL;
215
216 nd->flags = 0;
217 nd->head = NULL;
218 return (PyObject *)nd;
219 }
220
221 static void
222 ndarray_dealloc(NDArrayObject *self)
223 {
224 if (self->head) {
225 if (ND_IS_CONSUMER(self)) {
226 Py_buffer *base = &self->head->base;
227 if (self->head->flags & ND_OWN_ARRAYS) {
228 PyMem_XFree(base->shape);
229 PyMem_XFree(base->strides);
230 PyMem_XFree(base->suboffsets);
231 }
232 PyBuffer_Release(base);
233 }
234 else {
235 while (self->head)
236 ndbuf_pop(self);
237 }
238 }
239 PyObject_Del(self);
240 }
241
242 static int
243 ndarray_init_staticbuf(PyObject *exporter, NDArrayObject *nd, int flags)
244 {
245 Py_buffer *base = &nd->staticbuf.base;
246
247 if (PyObject_GetBuffer(exporter, base, flags) < 0)
248 return -1;
249
250 nd->head = &nd->staticbuf;
251
252 nd->head->next = NULL;
253 nd->head->prev = NULL;
254 nd->head->len = -1;
255 nd->head->offset = -1;
256 nd->head->data = NULL;
257
258 nd->head->flags = base->readonly ? 0 : ND_WRITABLE;
259 nd->head->exports = 0;
260
261 return 0;
262 }
263
264 static void
265 init_flags(ndbuf_t *ndbuf)
266 {
267 if (ndbuf->base.ndim == 0)
268 ndbuf->flags |= ND_SCALAR;
269 if (ndbuf->base.suboffsets)
270 ndbuf->flags |= ND_PIL;
271 if (PyBuffer_IsContiguous(&ndbuf->base, 'C'))
272 ndbuf->flags |= ND_C;
273 if (PyBuffer_IsContiguous(&ndbuf->base, 'F'))
274 ndbuf->flags |= ND_FORTRAN;
275 }
276
277
278 /****************************************************************************/
279 /* Buffer/List conversions */
280 /****************************************************************************/
281
282 static Py_ssize_t *strides_from_shape(const ndbuf_t *, int flags);
283
284 /* Get number of members in a struct: see issue #12740 */
285 typedef struct {
286 PyObject_HEAD
287 Py_ssize_t s_size;
288 Py_ssize_t s_len;
289 } PyPartialStructObject;
290
291 static Py_ssize_t
292 get_nmemb(PyObject *s)
293 {
294 return ((PyPartialStructObject *)s)->s_len;
295 }
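/* Sketch (relies on the private struct module layout above): for a Struct
   instance created from the format "LLd", s_len is the number of format
   codes, so get_nmemb() returns 3; for the simple format "B" it returns 1. */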
296
297 /* Pack all items into the buffer of 'obj'. The 'format' parameter must be
298 in struct module syntax. For standard C types, a single item is an integer.
299 For compound types, a single item is a tuple of integers. */
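/* Example (values are illustrative): with format "LQ", nmemb is 2, so a
   valid initializer is a sequence of 2-tuples such as
   [(1, 2), (3, 4), (5, 6)], while the simple format "B" takes a flat list
   of integers such as [1, 2, 3]. */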
300 static int
301 pack_from_list(PyObject *obj, PyObject *items, PyObject *format,
302 Py_ssize_t itemsize)
303 {
304 PyObject *structobj, *pack_into;
305 PyObject *args, *offset;
306 PyObject *item, *tmp;
307 Py_ssize_t nitems; /* number of items */
308 Py_ssize_t nmemb; /* number of members in a single item */
309 Py_ssize_t i, j;
310 int ret = 0;
311
312 assert(PyObject_CheckBuffer(obj));
313 assert(PyList_Check(items) || PyTuple_Check(items));
314
315 structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL);
316 if (structobj == NULL)
317 return -1;
318
319 nitems = PySequence_Fast_GET_SIZE(items);
320 nmemb = get_nmemb(structobj);
321 assert(nmemb >= 1);
322
323 pack_into = PyObject_GetAttrString(structobj, "pack_into");
324 if (pack_into == NULL) {
325 Py_DECREF(structobj);
326 return -1;
327 }
328
329 /* nmemb >= 1 */
330 args = PyTuple_New(2 + nmemb);
331 if (args == NULL) {
332 Py_DECREF(pack_into);
333 Py_DECREF(structobj);
334 return -1;
335 }
336
337 offset = NULL;
338 for (i = 0; i < nitems; i++) {
339 /* Loop invariant: args[j] are borrowed references or NULL. */
340 PyTuple_SET_ITEM(args, 0, obj);
341 for (j = 1; j < 2+nmemb; j++)
342 PyTuple_SET_ITEM(args, j, NULL);
343
344 Py_XDECREF(offset);
345 offset = PyLong_FromSsize_t(i*itemsize);
346 if (offset == NULL) {
347 ret = -1;
348 break;
349 }
350 PyTuple_SET_ITEM(args, 1, offset);
351
352 item = PySequence_Fast_GET_ITEM(items, i);
353 if ((PyBytes_Check(item) || PyLong_Check(item) ||
354 PyFloat_Check(item)) && nmemb == 1) {
355 PyTuple_SET_ITEM(args, 2, item);
356 }
357 else if ((PyList_Check(item) || PyTuple_Check(item)) &&
358 PySequence_Length(item) == nmemb) {
359 for (j = 0; j < nmemb; j++) {
360 tmp = PySequence_Fast_GET_ITEM(item, j);
361 PyTuple_SET_ITEM(args, 2+j, tmp);
362 }
363 }
364 else {
365 PyErr_SetString(PyExc_ValueError,
366 "mismatch between initializer element and format string");
367 ret = -1;
368 break;
369 }
370
371 tmp = PyObject_CallObject(pack_into, args);
372 if (tmp == NULL) {
373 ret = -1;
374 break;
375 }
376 Py_DECREF(tmp);
377 }
378
379 Py_INCREF(obj); /* args[0] */
380 /* args[1]: offset is either NULL or should be dealloc'd */
381 for (i = 2; i < 2+nmemb; i++) {
382 tmp = PyTuple_GET_ITEM(args, i);
383 Py_XINCREF(tmp);
384 }
385 Py_DECREF(args);
386
387 Py_DECREF(pack_into);
388 Py_DECREF(structobj);
389 return ret;
390
391 }
392
393 /* Pack single element */
394 static int
395 pack_single(char *ptr, PyObject *item, const char *fmt, Py_ssize_t itemsize)
396 {
397 PyObject *structobj = NULL, *pack_into = NULL, *args = NULL;
398 PyObject *format = NULL, *mview = NULL, *zero = NULL;
399 Py_ssize_t i, nmemb;
400 int ret = -1;
401 PyObject *x;
402
403 if (fmt == NULL) fmt = "B";
404
405 format = PyUnicode_FromString(fmt);
406 if (format == NULL)
407 goto out;
408
409 structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL);
410 if (structobj == NULL)
411 goto out;
412
413 nmemb = get_nmemb(structobj);
414 assert(nmemb >= 1);
415
416 mview = PyMemoryView_FromMemory(ptr, itemsize, PyBUF_WRITE);
417 if (mview == NULL)
418 goto out;
419
420 zero = PyLong_FromLong(0);
421 if (zero == NULL)
422 goto out;
423
424 pack_into = PyObject_GetAttrString(structobj, "pack_into");
425 if (pack_into == NULL)
426 goto out;
427
428 args = PyTuple_New(2+nmemb);
429 if (args == NULL)
430 goto out;
431
432 PyTuple_SET_ITEM(args, 0, mview);
433 PyTuple_SET_ITEM(args, 1, zero);
434
435 if ((PyBytes_Check(item) || PyLong_Check(item) ||
436 PyFloat_Check(item)) && nmemb == 1) {
437 PyTuple_SET_ITEM(args, 2, item);
438 }
439 else if ((PyList_Check(item) || PyTuple_Check(item)) &&
440 PySequence_Length(item) == nmemb) {
441 for (i = 0; i < nmemb; i++) {
442 x = PySequence_Fast_GET_ITEM(item, i);
443 PyTuple_SET_ITEM(args, 2+i, x);
444 }
445 }
446 else {
447 PyErr_SetString(PyExc_ValueError,
448 "mismatch between initializer element and format string");
449 goto args_out;
450 }
451
452 x = PyObject_CallObject(pack_into, args);
453 if (x != NULL) {
454 Py_DECREF(x);
455 ret = 0;
456 }
457
458
459 args_out:
460 for (i = 0; i < 2+nmemb; i++)
461 Py_XINCREF(PyTuple_GET_ITEM(args, i));
462 Py_XDECREF(args);
463 out:
464 Py_XDECREF(pack_into);
465 Py_XDECREF(zero);
466 Py_XDECREF(mview);
467 Py_XDECREF(structobj);
468 Py_XDECREF(format);
469 return ret;
470 }
471
472 static void
473 copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
474 char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
475 char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
476 char *mem)
477 {
478 Py_ssize_t i;
479
480 assert(ndim >= 1);
481
482 if (ndim == 1) {
483 if (!HAVE_PTR(dsuboffsets) && !HAVE_PTR(ssuboffsets) &&
484 dstrides[0] == itemsize && sstrides[0] == itemsize) {
485 memmove(dptr, sptr, shape[0] * itemsize);
486 }
487 else {
488 char *p;
489 assert(mem != NULL);
490 for (i=0, p=mem; i<shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
491 char *xsptr = ADJUST_PTR(sptr, ssuboffsets);
492 memcpy(p, xsptr, itemsize);
493 }
494 for (i=0, p=mem; i<shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
495 char *xdptr = ADJUST_PTR(dptr, dsuboffsets);
496 memcpy(xdptr, p, itemsize);
497 }
498 }
499 return;
500 }
501
502 for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
503 char *xdptr = ADJUST_PTR(dptr, dsuboffsets);
504 char *xsptr = ADJUST_PTR(sptr, ssuboffsets);
505
506 copy_rec(shape+1, ndim-1, itemsize,
507 xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
508 xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
509 mem);
510 }
511 }
512
513 static int
514 cmp_structure(Py_buffer *dest, Py_buffer *src)
515 {
516 Py_ssize_t i;
517
518 if (strcmp(FIX_FORMAT(dest->format), FIX_FORMAT(src->format)) != 0 ||
519 dest->itemsize != src->itemsize ||
520 dest->ndim != src->ndim)
521 return -1;
522
523 for (i = 0; i < dest->ndim; i++) {
524 if (dest->shape[i] != src->shape[i])
525 return -1;
526 if (dest->shape[i] == 0)
527 break;
528 }
529
530 return 0;
531 }
532
533 /* Copy src to dest. Both buffers must have the same format, itemsize,
534 ndim and shape. Copying is atomic, the function never fails with
535 a partial copy. */
536 static int
537 copy_buffer(Py_buffer *dest, Py_buffer *src)
538 {
539 char *mem = NULL;
540
541 assert(dest->ndim > 0);
542
543 if (cmp_structure(dest, src) < 0) {
544 PyErr_SetString(PyExc_ValueError,
545 "ndarray assignment: lvalue and rvalue have different structures");
546 return -1;
547 }
548
549 if ((dest->suboffsets && dest->suboffsets[dest->ndim-1] >= 0) ||
550 (src->suboffsets && src->suboffsets[src->ndim-1] >= 0) ||
551 dest->strides[dest->ndim-1] != dest->itemsize ||
552 src->strides[src->ndim-1] != src->itemsize) {
553 mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
554 if (mem == NULL) {
555 PyErr_NoMemory();
556 return -1;
557 }
558 }
559
560 copy_rec(dest->shape, dest->ndim, dest->itemsize,
561 dest->buf, dest->strides, dest->suboffsets,
562 src->buf, src->strides, src->suboffsets,
563 mem);
564
565 PyMem_XFree(mem);
566 return 0;
567 }
568
569
570 /* Unpack single element */
571 static PyObject *
572 unpack_single(char *ptr, const char *fmt, Py_ssize_t itemsize)
573 {
574 PyObject *x, *unpack_from, *mview;
575
576 if (fmt == NULL) {
577 fmt = "B";
578 itemsize = 1;
579 }
580
581 unpack_from = PyObject_GetAttrString(structmodule, "unpack_from");
582 if (unpack_from == NULL)
583 return NULL;
584
585 mview = PyMemoryView_FromMemory(ptr, itemsize, PyBUF_READ);
586 if (mview == NULL) {
587 Py_DECREF(unpack_from);
588 return NULL;
589 }
590
591 x = PyObject_CallFunction(unpack_from, "sO", fmt, mview);
592 Py_DECREF(unpack_from);
593 Py_DECREF(mview);
594 if (x == NULL)
595 return NULL;
596
597 if (PyTuple_GET_SIZE(x) == 1) {
598 PyObject *tmp = PyTuple_GET_ITEM(x, 0);
599 Py_INCREF(tmp);
600 Py_DECREF(x);
601 return tmp;
602 }
603
604 return x;
605 }
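/* Sketch with assumed memory contents: if ptr points to the little-endian
   bytes 0x2a 0x00 0x00 0x00, then unpack_single(ptr, "<i", 4) returns the
   Python int 42; unpack_single(ptr, NULL, 0) falls back to format "B" with
   itemsize 1 and also returns 42. */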
606
607 /* Unpack a multi-dimensional matrix into a nested list. Return a scalar
608 for ndim = 0. */
609 static PyObject *
610 unpack_rec(PyObject *unpack_from, char *ptr, PyObject *mview, char *item,
611 const Py_ssize_t *shape, const Py_ssize_t *strides,
612 const Py_ssize_t *suboffsets, Py_ssize_t ndim, Py_ssize_t itemsize)
613 {
614 PyObject *lst, *x;
615 Py_ssize_t i;
616
617 assert(ndim >= 0);
618 assert(shape != NULL);
619 assert(strides != NULL);
620
621 if (ndim == 0) {
622 memcpy(item, ptr, itemsize);
623 x = PyObject_CallFunctionObjArgs(unpack_from, mview, NULL);
624 if (x == NULL)
625 return NULL;
626 if (PyTuple_GET_SIZE(x) == 1) {
627 PyObject *tmp = PyTuple_GET_ITEM(x, 0);
628 Py_INCREF(tmp);
629 Py_DECREF(x);
630 return tmp;
631 }
632 return x;
633 }
634
635 lst = PyList_New(shape[0]);
636 if (lst == NULL)
637 return NULL;
638
639 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
640 char *nextptr = ADJUST_PTR(ptr, suboffsets);
641
642 x = unpack_rec(unpack_from, nextptr, mview, item,
643 shape+1, strides+1, suboffsets ? suboffsets+1 : NULL,
644 ndim-1, itemsize);
645 if (x == NULL) {
646 Py_DECREF(lst);
647 return NULL;
648 }
649
650 PyList_SET_ITEM(lst, i, x);
651 }
652
653 return lst;
654 }
655
656
657 static PyObject *
658 ndarray_as_list(NDArrayObject *nd)
659 {
660 PyObject *structobj = NULL, *unpack_from = NULL;
661 PyObject *lst = NULL, *mview = NULL;
662 Py_buffer *base = &nd->head->base;
663 Py_ssize_t *shape = base->shape;
664 Py_ssize_t *strides = base->strides;
665 Py_ssize_t simple_shape[1];
666 Py_ssize_t simple_strides[1];
667 char *item = NULL;
668 PyObject *format;
669 char *fmt = base->format;
670
671 base = &nd->head->base;
672
673 if (fmt == NULL) {
674 PyErr_SetString(PyExc_ValueError,
675 "ndarray: tolist() does not support format=NULL, use "
676 "tobytes()");
677 return NULL;
678 }
679 if (shape == NULL) {
680 assert(ND_C_CONTIGUOUS(nd->head->flags));
681 assert(base->strides == NULL);
682 assert(base->ndim <= 1);
683 shape = simple_shape;
684 shape[0] = base->len;
685 strides = simple_strides;
686 strides[0] = base->itemsize;
687 }
688 else if (strides == NULL) {
689 assert(ND_C_CONTIGUOUS(nd->head->flags));
690 strides = strides_from_shape(nd->head, 0);
691 if (strides == NULL)
692 return NULL;
693 }
694
695 format = PyUnicode_FromString(fmt);
696 if (format == NULL)
697 goto out;
698
699 structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL);
700 Py_DECREF(format);
701 if (structobj == NULL)
702 goto out;
703
704 unpack_from = PyObject_GetAttrString(structobj, "unpack_from");
705 if (unpack_from == NULL)
706 goto out;
707
708 item = PyMem_Malloc(base->itemsize);
709 if (item == NULL) {
710 PyErr_NoMemory();
711 goto out;
712 }
713
714 mview = PyMemoryView_FromMemory(item, base->itemsize, PyBUF_WRITE);
715 if (mview == NULL)
716 goto out;
717
718 lst = unpack_rec(unpack_from, base->buf, mview, item,
719 shape, strides, base->suboffsets,
720 base->ndim, base->itemsize);
721
722 out:
723 Py_XDECREF(mview);
724 PyMem_XFree(item);
725 Py_XDECREF(unpack_from);
726 Py_XDECREF(structobj);
727 if (strides != base->strides && strides != simple_strides)
728 PyMem_XFree(strides);
729
730 return lst;
731 }
732
733
734 /****************************************************************************/
735 /* Initialize ndbuf */
736 /****************************************************************************/
737
738 /*
739 State of a new ndbuf during initialization. 'OK' means that initialization
740 is complete. 'PTR' means that a pointer has been initialized, but the
741 state of the memory is still undefined and ndbuf->offset is disregarded.
742
743 +-----------------+-----------+-------------+----------------+
744 | | ndbuf_new | init_simple | init_structure |
745 +-----------------+-----------+-------------+----------------+
746 | next | OK (NULL) | OK | OK |
747 +-----------------+-----------+-------------+----------------+
748 | prev | OK (NULL) | OK | OK |
749 +-----------------+-----------+-------------+----------------+
750 | len | OK | OK | OK |
751 +-----------------+-----------+-------------+----------------+
752 | offset | OK | OK | OK |
753 +-----------------+-----------+-------------+----------------+
754 | data | PTR | OK | OK |
755 +-----------------+-----------+-------------+----------------+
756 | flags | user | user | OK |
757 +-----------------+-----------+-------------+----------------+
758 | exports | OK (0) | OK | OK |
759 +-----------------+-----------+-------------+----------------+
760 | base.obj | OK (NULL) | OK | OK |
761 +-----------------+-----------+-------------+----------------+
762 | base.buf | PTR | PTR | OK |
763 +-----------------+-----------+-------------+----------------+
764 | base.len | len(data) | len(data) | OK |
765 +-----------------+-----------+-------------+----------------+
766 | base.itemsize | 1 | OK | OK |
767 +-----------------+-----------+-------------+----------------+
768 | base.readonly | 0 | OK | OK |
769 +-----------------+-----------+-------------+----------------+
770 | base.format | NULL | OK | OK |
771 +-----------------+-----------+-------------+----------------+
772 | base.ndim | 1 | 1 | OK |
773 +-----------------+-----------+-------------+----------------+
774 | base.shape | NULL | NULL | OK |
775 +-----------------+-----------+-------------+----------------+
776 | base.strides | NULL | NULL | OK |
777 +-----------------+-----------+-------------+----------------+
778 | base.suboffsets | NULL | NULL | OK |
779 +-----------------+-----------+-------------+----------------+
780 | base.internal | OK | OK | OK |
781 +-----------------+-----------+-------------+----------------+
782
783 */
784
785 static Py_ssize_t
786 get_itemsize(PyObject *format)
787 {
788 PyObject *tmp;
789 Py_ssize_t itemsize;
790
791 tmp = PyObject_CallFunctionObjArgs(calcsize, format, NULL);
792 if (tmp == NULL)
793 return -1;
794 itemsize = PyLong_AsSsize_t(tmp);
795 Py_DECREF(tmp);
796
797 return itemsize;
798 }
799
800 static char *
801 get_format(PyObject *format)
802 {
803 PyObject *tmp;
804 char *fmt;
805
806 tmp = PyUnicode_AsASCIIString(format);
807 if (tmp == NULL)
808 return NULL;
809 fmt = PyMem_Malloc(PyBytes_GET_SIZE(tmp)+1);
810 if (fmt == NULL) {
811 PyErr_NoMemory();
812 Py_DECREF(tmp);
813 return NULL;
814 }
815 strcpy(fmt, PyBytes_AS_STRING(tmp));
816 Py_DECREF(tmp);
817
818 return fmt;
819 }
820
821 static int
822 init_simple(ndbuf_t *ndbuf, PyObject *items, PyObject *format,
823 Py_ssize_t itemsize)
824 {
825 PyObject *mview;
826 Py_buffer *base = &ndbuf->base;
827 int ret;
828
829 mview = PyMemoryView_FromBuffer(base);
830 if (mview == NULL)
831 return -1;
832
833 ret = pack_from_list(mview, items, format, itemsize);
834 Py_DECREF(mview);
835 if (ret < 0)
836 return -1;
837
838 base->readonly = !(ndbuf->flags & ND_WRITABLE);
839 base->itemsize = itemsize;
840 base->format = get_format(format);
841 if (base->format == NULL)
842 return -1;
843
844 return 0;
845 }
846
847 static Py_ssize_t *
848 seq_as_ssize_array(PyObject *seq, Py_ssize_t len, int is_shape)
849 {
850 Py_ssize_t *dest;
851 Py_ssize_t x, i;
852
853 /* ndim = len <= ND_MAX_NDIM, so PyMem_New() is actually not needed. */
854 dest = PyMem_New(Py_ssize_t, len);
855 if (dest == NULL) {
856 PyErr_NoMemory();
857 return NULL;
858 }
859
860 for (i = 0; i < len; i++) {
861 PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
862 if (!PyLong_Check(tmp)) {
863 PyErr_Format(PyExc_ValueError,
864 "elements of %s must be integers",
865 is_shape ? "shape" : "strides");
866 PyMem_Free(dest);
867 return NULL;
868 }
869 x = PyLong_AsSsize_t(tmp);
870 if (PyErr_Occurred()) {
871 PyMem_Free(dest);
872 return NULL;
873 }
874 if (is_shape && x < 0) {
875 PyErr_Format(PyExc_ValueError,
876 "elements of shape must be integers >= 0");
877 PyMem_Free(dest);
878 return NULL;
879 }
880 dest[i] = x;
881 }
882
883 return dest;
884 }
885
886 static Py_ssize_t *
887 strides_from_shape(const ndbuf_t *ndbuf, int flags)
888 {
889 const Py_buffer *base = &ndbuf->base;
890 Py_ssize_t *s, i;
891
892 s = PyMem_Malloc(base->ndim * (sizeof *s));
893 if (s == NULL) {
894 PyErr_NoMemory();
895 return NULL;
896 }
897
898 if (flags & ND_FORTRAN) {
899 s[0] = base->itemsize;
900 for (i = 1; i < base->ndim; i++)
901 s[i] = s[i-1] * base->shape[i-1];
902 }
903 else {
904 s[base->ndim-1] = base->itemsize;
905 for (i = base->ndim-2; i >= 0; i--)
906 s[i] = s[i+1] * base->shape[i+1];
907 }
908
909 return s;
910 }
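/* Worked example (assumed values): for shape = {2, 3, 4} and itemsize 2,
   the default C-order strides are {24, 8, 2} (last index varies fastest);
   with ND_FORTRAN set they are {2, 4, 12} (first index varies fastest). */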
911
912 /* Bounds check:
913
914 len := complete length of allocated memory
915 offset := start of the array
916
917 A single array element is indexed by:
918
919 i = indices[0] * strides[0] + indices[1] * strides[1] + ...
920
921 imin is reached when all indices[n] combined with positive strides are 0
922 and all indices combined with negative strides are shape[n]-1, which is
923 the maximum index for the nth dimension.
924
925 imax is reached when all indices[n] combined with negative strides are 0
926 and all indices combined with positive strides are shape[n]-1.
927 */
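/* Worked example (assumed values): len = 10, itemsize = 1, offset = 8,
   shape = {2, 2, 3}, strides = {-6, 3, -1}. The negative strides give
   imin = 1*(-6) + 2*(-1) = -8 and the positive stride gives imax = 1*3 = 3.
   imin + offset = 0 satisfies the lower bound, but
   imax + offset + itemsize = 12 exceeds len, so verify_structure() rejects
   this combination unless len >= 12. */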
928 static int
929 verify_structure(Py_ssize_t len, Py_ssize_t itemsize, Py_ssize_t offset,
930 const Py_ssize_t *shape, const Py_ssize_t *strides,
931 Py_ssize_t ndim)
932 {
933 Py_ssize_t imin, imax;
934 Py_ssize_t n;
935
936 assert(ndim >= 0);
937
938 if (ndim == 0 && (offset < 0 || offset+itemsize > len))
939 goto invalid_combination;
940
941 for (n = 0; n < ndim; n++)
942 if (strides[n] % itemsize) {
943 PyErr_SetString(PyExc_ValueError,
944 "strides must be a multiple of itemsize");
945 return -1;
946 }
947
948 for (n = 0; n < ndim; n++)
949 if (shape[n] == 0)
950 return 0;
951
952 imin = imax = 0;
953 for (n = 0; n < ndim; n++)
954 if (strides[n] <= 0)
955 imin += (shape[n]-1) * strides[n];
956 else
957 imax += (shape[n]-1) * strides[n];
958
959 if (imin + offset < 0 || imax + offset + itemsize > len)
960 goto invalid_combination;
961
962 return 0;
963
964
965 invalid_combination:
966 PyErr_SetString(PyExc_ValueError,
967 "invalid combination of buffer, shape and strides");
968 return -1;
969 }
970
971 /*
972 Convert a NumPy-style array to an array using suboffsets to stride in
973 the first dimension. Requirements: ndim > 0.
974
975 Contiguous example
976 ==================
977
978 Input:
979 ------
980 shape = {2, 2, 3};
981 strides = {6, 3, 1};
982 suboffsets = NULL;
983 data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
984 buf = &data[0]
985
986 Output:
987 -------
988 shape = {2, 2, 3};
989 strides = {sizeof(char *), 3, 1};
990 suboffsets = {0, -1, -1};
991 data = {p1, p2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
992 | | ^ ^
993 `---'---' |
994 | |
995 `---------------------'
996 buf = &data[0]
997
998 So, in the example the input resembles the three-dimensional array
999 char v[2][2][3], while the output resembles an array of two pointers
1000 to two-dimensional arrays: char (*v[2])[2][3].
1001
1002
1003 Non-contiguous example:
1004 =======================
1005
1006 Input (with offset and negative strides):
1007 -----------------------------------------
1008 shape = {2, 2, 3};
1009 strides = {-6, 3, -1};
1010 offset = 8
1011 suboffsets = NULL;
1012 data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
1013
1014 Output:
1015 -------
1016 shape = {2, 2, 3};
1017 strides = {-sizeof(char *), 3, -1};
1018 suboffsets = {2, -1, -1};
1019 newdata = {p1, p2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
1020 | | ^ ^ ^ ^
1021 `---'---' | | `- p2+suboffsets[0]
1022 | `-----------|--- p1+suboffsets[0]
1023 `---------------------'
1024 buf = &newdata[1] # striding backwards over the pointers.
1025
1026 suboffsets[0] is the same as the offset that one would specify if
1027 the two {2, 3} subarrays were created directly, hence the name.
1028 */
1029 static int
1030 init_suboffsets(ndbuf_t *ndbuf)
1031 {
1032 Py_buffer *base = &ndbuf->base;
1033 Py_ssize_t start, step;
1034 Py_ssize_t imin, suboffset0;
1035 Py_ssize_t addsize;
1036 Py_ssize_t n;
1037 char *data;
1038
1039 assert(base->ndim > 0);
1040 assert(base->suboffsets == NULL);
1041
1042 /* Allocate new data with additional space for shape[0] pointers. */
1043 addsize = base->shape[0] * (sizeof (char *));
1044
1045 /* Align array start to a multiple of 8. */
1046 addsize = 8 * ((addsize + 7) / 8);
1047
1048 data = PyMem_Malloc(ndbuf->len + addsize);
1049 if (data == NULL) {
1050 PyErr_NoMemory();
1051 return -1;
1052 }
1053
1054 memcpy(data + addsize, ndbuf->data, ndbuf->len);
1055
1056 PyMem_Free(ndbuf->data);
1057 ndbuf->data = data;
1058 ndbuf->len += addsize;
1059 base->buf = ndbuf->data;
1060
1061 /* imin: minimum index of the input array relative to ndbuf->offset.
1062 suboffset0: offset for each sub-array of the output. This is the
1063 same as calculating -imin' for a sub-array of ndim-1. */
1064 imin = suboffset0 = 0;
1065 for (n = 0; n < base->ndim; n++) {
1066 if (base->shape[n] == 0)
1067 break;
1068 if (base->strides[n] <= 0) {
1069 Py_ssize_t x = (base->shape[n]-1) * base->strides[n];
1070 imin += x;
1071 suboffset0 += (n >= 1) ? -x : 0;
1072 }
1073 }
1074
1075 /* Initialize the array of pointers to the sub-arrays. */
1076 start = addsize + ndbuf->offset + imin;
1077 step = base->strides[0] < 0 ? -base->strides[0] : base->strides[0];
1078
1079 for (n = 0; n < base->shape[0]; n++)
1080 ((char **)base->buf)[n] = (char *)base->buf + start + n*step;
1081
1082 /* Initialize suboffsets. */
1083 base->suboffsets = PyMem_Malloc(base->ndim * (sizeof *base->suboffsets));
1084 if (base->suboffsets == NULL) {
1085 PyErr_NoMemory();
1086 return -1;
1087 }
1088 base->suboffsets[0] = suboffset0;
1089 for (n = 1; n < base->ndim; n++)
1090 base->suboffsets[n] = -1;
1091
1092 /* Adjust strides for the first (zeroth) dimension. */
1093 if (base->strides[0] >= 0) {
1094 base->strides[0] = sizeof(char *);
1095 }
1096 else {
1097 /* Striding backwards. */
1098 base->strides[0] = -(Py_ssize_t)sizeof(char *);
1099 if (base->shape[0] > 0)
1100 base->buf = (char *)base->buf + (base->shape[0]-1) * sizeof(char *);
1101 }
1102
1103 ndbuf->flags &= ~(ND_C|ND_FORTRAN);
1104 ndbuf->offset = 0;
1105 return 0;
1106 }
1107
1108 static void
1109 init_len(Py_buffer *base)
1110 {
1111 Py_ssize_t i;
1112
1113 base->len = 1;
1114 for (i = 0; i < base->ndim; i++)
1115 base->len *= base->shape[i];
1116 base->len *= base->itemsize;
1117 }
1118
1119 static int
1120 init_structure(ndbuf_t *ndbuf, PyObject *shape, PyObject *strides,
1121 Py_ssize_t ndim)
1122 {
1123 Py_buffer *base = &ndbuf->base;
1124
1125 base->ndim = (int)ndim;
1126 if (ndim == 0) {
1127 if (ndbuf->flags & ND_PIL) {
1128 PyErr_SetString(PyExc_TypeError,
1129 "ndim = 0 cannot be used in conjunction with ND_PIL");
1130 return -1;
1131 }
1132 ndbuf->flags |= (ND_SCALAR|ND_C|ND_FORTRAN);
1133 return 0;
1134 }
1135
1136 /* shape */
1137 base->shape = seq_as_ssize_array(shape, ndim, 1);
1138 if (base->shape == NULL)
1139 return -1;
1140
1141 /* strides */
1142 if (strides) {
1143 base->strides = seq_as_ssize_array(strides, ndim, 0);
1144 }
1145 else {
1146 base->strides = strides_from_shape(ndbuf, ndbuf->flags);
1147 }
1148 if (base->strides == NULL)
1149 return -1;
1150 if (verify_structure(base->len, base->itemsize, ndbuf->offset,
1151 base->shape, base->strides, ndim) < 0)
1152 return -1;
1153
1154 /* buf */
1155 base->buf = ndbuf->data + ndbuf->offset;
1156
1157 /* len */
1158 init_len(base);
1159
1160 /* ndbuf->flags */
1161 if (PyBuffer_IsContiguous(base, 'C'))
1162 ndbuf->flags |= ND_C;
1163 if (PyBuffer_IsContiguous(base, 'F'))
1164 ndbuf->flags |= ND_FORTRAN;
1165
1166
1167 /* convert numpy array to suboffset representation */
1168 if (ndbuf->flags & ND_PIL) {
1169 /* modifies base->buf, base->strides and base->suboffsets **/
1170 return init_suboffsets(ndbuf);
1171 }
1172
1173 return 0;
1174 }
1175
1176 static ndbuf_t *
1177 init_ndbuf(PyObject *items, PyObject *shape, PyObject *strides,
1178 Py_ssize_t offset, PyObject *format, int flags)
1179 {
1180 ndbuf_t *ndbuf;
1181 Py_ssize_t ndim;
1182 Py_ssize_t nitems;
1183 Py_ssize_t itemsize;
1184
1185 /* ndim = len(shape) */
1186 CHECK_LIST_OR_TUPLE(shape)
1187 ndim = PySequence_Fast_GET_SIZE(shape);
1188 if (ndim > ND_MAX_NDIM) {
1189 PyErr_Format(PyExc_ValueError,
1190 "ndim must not exceed %d", ND_MAX_NDIM);
1191 return NULL;
1192 }
1193
1194 /* len(strides) = len(shape) */
1195 if (strides) {
1196 CHECK_LIST_OR_TUPLE(strides)
1197 if (PySequence_Fast_GET_SIZE(strides) == 0)
1198 strides = NULL;
1199 else if (flags & ND_FORTRAN) {
1200 PyErr_SetString(PyExc_TypeError,
1201 "ND_FORTRAN cannot be used together with strides");
1202 return NULL;
1203 }
1204 else if (PySequence_Fast_GET_SIZE(strides) != ndim) {
1205 PyErr_SetString(PyExc_ValueError,
1206 "len(shape) != len(strides)");
1207 return NULL;
1208 }
1209 }
1210
1211 /* itemsize */
1212 itemsize = get_itemsize(format);
1213 if (itemsize <= 0) {
1214 if (itemsize == 0) {
1215 PyErr_SetString(PyExc_ValueError,
1216 "itemsize must not be zero");
1217 }
1218 return NULL;
1219 }
1220
1221 /* convert scalar to list */
1222 if (ndim == 0) {
1223 items = Py_BuildValue("(O)", items);
1224 if (items == NULL)
1225 return NULL;
1226 }
1227 else {
1228 CHECK_LIST_OR_TUPLE(items)
1229 Py_INCREF(items);
1230 }
1231
1232 /* number of items */
1233 nitems = PySequence_Fast_GET_SIZE(items);
1234 if (nitems == 0) {
1235 PyErr_SetString(PyExc_ValueError,
1236 "initializer list or tuple must not be empty");
1237 Py_DECREF(items);
1238 return NULL;
1239 }
1240
1241 ndbuf = ndbuf_new(nitems, itemsize, offset, flags);
1242 if (ndbuf == NULL) {
1243 Py_DECREF(items);
1244 return NULL;
1245 }
1246
1247
1248 if (init_simple(ndbuf, items, format, itemsize) < 0)
1249 goto error;
1250 if (init_structure(ndbuf, shape, strides, ndim) < 0)
1251 goto error;
1252
1253 Py_DECREF(items);
1254 return ndbuf;
1255
1256 error:
1257 Py_DECREF(items);
1258 ndbuf_free(ndbuf);
1259 return NULL;
1260 }
1261
1262 /* initialize and push a new base onto the linked list */
1263 static int
1264 ndarray_push_base(NDArrayObject *nd, PyObject *items,
1265 PyObject *shape, PyObject *strides,
1266 Py_ssize_t offset, PyObject *format, int flags)
1267 {
1268 ndbuf_t *ndbuf;
1269
1270 ndbuf = init_ndbuf(items, shape, strides, offset, format, flags);
1271 if (ndbuf == NULL)
1272 return -1;
1273
1274 ndbuf_push(nd, ndbuf);
1275 return 0;
1276 }
1277
1278 #define PyBUF_UNUSED 0x10000
1279 static int
1280 ndarray_init(PyObject *self, PyObject *args, PyObject *kwds)
1281 {
1282 NDArrayObject *nd = (NDArrayObject *)self;
1283 static char *kwlist[] = {
1284 "obj", "shape", "strides", "offset", "format", "flags", "getbuf", NULL
1285 };
1286 PyObject *v = NULL; /* initializer: scalar, list, tuple or base object */
1287 PyObject *shape = NULL; /* size of each dimension */
1288 PyObject *strides = NULL; /* number of bytes to the next elt in each dim */
1289 Py_ssize_t offset = 0; /* buffer offset */
1290 PyObject *format = simple_format; /* struct module specifier: "B" */
1291 int flags = ND_DEFAULT; /* base buffer and ndarray flags */
1292
1293 int getbuf = PyBUF_UNUSED; /* re-exporter: getbuffer request flags */
1294
1295
1296 if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OOnOii", kwlist,
1297 &v, &shape, &strides, &offset, &format, &flags, &getbuf))
1298 return -1;
1299
1300 /* NDArrayObject is re-exporter */
1301 if (PyObject_CheckBuffer(v) && shape == NULL) {
1302 if (strides || offset || format != simple_format ||
1303 !(flags == ND_DEFAULT || flags == ND_REDIRECT)) {
1304 PyErr_SetString(PyExc_TypeError,
1305 "construction from exporter object only takes 'obj', 'getbuf' "
1306 "and 'flags' arguments");
1307 return -1;
1308 }
1309
1310 getbuf = (getbuf == PyBUF_UNUSED) ? PyBUF_FULL_RO : getbuf;
1311
1312 if (ndarray_init_staticbuf(v, nd, getbuf) < 0)
1313 return -1;
1314
1315 init_flags(nd->head);
1316 nd->head->flags |= flags;
1317
1318 return 0;
1319 }
1320
1321 /* NDArrayObject is the original base object. */
1322 if (getbuf != PyBUF_UNUSED) {
1323 PyErr_SetString(PyExc_TypeError,
1324 "getbuf argument only valid for construction from exporter "
1325 "object");
1326 return -1;
1327 }
1328 if (shape == NULL) {
1329 PyErr_SetString(PyExc_TypeError,
1330 "shape is a required argument when constructing from "
1331 "list, tuple or scalar");
1332 return -1;
1333 }
1334
1335 if (flags & ND_VAREXPORT) {
1336 nd->flags |= ND_VAREXPORT;
1337 flags &= ~ND_VAREXPORT;
1338 }
1339
1340 /* Initialize and push the first base buffer onto the linked list. */
1341 return ndarray_push_base(nd, v, shape, strides, offset, format, flags);
1342 }
1343
1344 /* Push an additional base onto the linked list. */
1345 static PyObject *
1346 ndarray_push(PyObject *self, PyObject *args, PyObject *kwds)
1347 {
1348 NDArrayObject *nd = (NDArrayObject *)self;
1349 static char *kwlist[] = {
1350 "items", "shape", "strides", "offset", "format", "flags", NULL
1351 };
1352 PyObject *items = NULL; /* initializer: scalar, list or tuple */
1353 PyObject *shape = NULL; /* size of each dimension */
1354 PyObject *strides = NULL; /* number of bytes to the next elt in each dim */
1355 PyObject *format = simple_format; /* struct module specifier: "B" */
1356 Py_ssize_t offset = 0; /* buffer offset */
1357 int flags = ND_DEFAULT; /* base buffer flags */
1358
1359 if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|OnOi", kwlist,
1360 &items, &shape, &strides, &offset, &format, &flags))
1361 return NULL;
1362
1363 if (flags & ND_VAREXPORT) {
1364 PyErr_SetString(PyExc_ValueError,
1365 "ND_VAREXPORT flag can only be used during object creation");
1366 return NULL;
1367 }
1368 if (ND_IS_CONSUMER(nd)) {
1369 PyErr_SetString(PyExc_BufferError,
1370 "structure of re-exporting object is immutable");
1371 return NULL;
1372 }
1373 if (!(nd->flags&ND_VAREXPORT) && nd->head->exports > 0) {
1374 PyErr_Format(PyExc_BufferError,
1375 "cannot change structure: %zd exported buffer%s",
1376 nd->head->exports, nd->head->exports==1 ? "" : "s");
1377 return NULL;
1378 }
1379
1380 if (ndarray_push_base(nd, items, shape, strides,
1381 offset, format, flags) < 0)
1382 return NULL;
1383 Py_RETURN_NONE;
1384 }
1385
1386 /* Pop a base from the linked list (if possible). */
1387 static PyObject *
1388 ndarray_pop(PyObject *self, PyObject *dummy)
1389 {
1390 NDArrayObject *nd = (NDArrayObject *)self;
1391 if (ND_IS_CONSUMER(nd)) {
1392 PyErr_SetString(PyExc_BufferError,
1393 "structure of re-exporting object is immutable");
1394 return NULL;
1395 }
1396 if (nd->head->exports > 0) {
1397 PyErr_Format(PyExc_BufferError,
1398 "cannot change structure: %zd exported buffer%s",
1399 nd->head->exports, nd->head->exports==1 ? "" : "s");
1400 return NULL;
1401 }
1402 if (nd->head->next == NULL) {
1403 PyErr_SetString(PyExc_BufferError,
1404 "list only has a single base");
1405 return NULL;
1406 }
1407
1408 ndbuf_pop(nd);
1409 Py_RETURN_NONE;
1410 }
1411
1412 /**************************************************************************/
1413 /* getbuffer */
1414 /**************************************************************************/
1415
1416 static int
1417 ndarray_getbuf(NDArrayObject *self, Py_buffer *view, int flags)
1418 {
1419 ndbuf_t *ndbuf = self->head;
1420 Py_buffer *base = &ndbuf->base;
1421 int baseflags = ndbuf->flags;
1422
1423 /* redirect mode */
1424 if (base->obj != NULL && (baseflags&ND_REDIRECT)) {
1425 return PyObject_GetBuffer(base->obj, view, flags);
1426 }
1427
1428 /* start with complete information */
1429 *view = *base;
1430 view->obj = NULL;
1431
1432 /* reconstruct format */
1433 if (view->format == NULL)
1434 view->format = "B";
1435
1436 if (base->ndim != 0 &&
1437 ((REQ_SHAPE(flags) && base->shape == NULL) ||
1438 (REQ_STRIDES(flags) && base->strides == NULL))) {
1439 /* The ndarray is a re-exporter that has been created without full
1440 information for testing purposes. In this particular case the
1441 ndarray is not a PEP-3118 compliant buffer provider. */
1442 PyErr_SetString(PyExc_BufferError,
1443 "re-exporter does not provide format, shape or strides");
1444 return -1;
1445 }
1446
1447 if (baseflags & ND_GETBUF_FAIL) {
1448 PyErr_SetString(PyExc_BufferError,
1449 "ND_GETBUF_FAIL: forced test exception");
1450 if (baseflags & ND_GETBUF_UNDEFINED)
1451 view->obj = (PyObject *)0x1; /* wrong but permitted in <= 3.2 */
1452 return -1;
1453 }
1454
1455 if (REQ_WRITABLE(flags) && base->readonly) {
1456 PyErr_SetString(PyExc_BufferError,
1457 "ndarray is not writable");
1458 return -1;
1459 }
1460 if (!REQ_FORMAT(flags)) {
1461 /* NULL indicates that the buffer's data type has been cast to 'B'.
1462 view->itemsize is the _previous_ itemsize. If shape is present,
1463 the equality product(shape) * itemsize = len still holds at this
1464 point. The equality calcsize(format) = itemsize does _not_ hold
1465 from here on! */
1466 view->format = NULL;
1467 }
1468
1469 if (REQ_C_CONTIGUOUS(flags) && !ND_C_CONTIGUOUS(baseflags)) {
1470 PyErr_SetString(PyExc_BufferError,
1471 "ndarray is not C-contiguous");
1472 return -1;
1473 }
1474 if (REQ_F_CONTIGUOUS(flags) && !ND_FORTRAN_CONTIGUOUS(baseflags)) {
1475 PyErr_SetString(PyExc_BufferError,
1476 "ndarray is not Fortran contiguous");
1477 return -1;
1478 }
1479 if (REQ_ANY_CONTIGUOUS(flags) && !ND_ANY_CONTIGUOUS(baseflags)) {
1480 PyErr_SetString(PyExc_BufferError,
1481 "ndarray is not contiguous");
1482 return -1;
1483 }
1484 if (!REQ_INDIRECT(flags) && (baseflags & ND_PIL)) {
1485 PyErr_SetString(PyExc_BufferError,
1486 "ndarray cannot be represented without suboffsets");
1487 return -1;
1488 }
1489 if (!REQ_STRIDES(flags)) {
1490 if (!ND_C_CONTIGUOUS(baseflags)) {
1491 PyErr_SetString(PyExc_BufferError,
1492 "ndarray is not C-contiguous");
1493 return -1;
1494 }
1495 view->strides = NULL;
1496 }
1497 if (!REQ_SHAPE(flags)) {
1498 /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous,
1499 so base->buf = ndbuf->data. */
1500 if (view->format != NULL) {
1501 /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
1502 not make sense. */
1503 PyErr_Format(PyExc_BufferError,
1504 "ndarray: cannot cast to unsigned bytes if the format flag "
1505 "is present");
1506 return -1;
1507 }
1508 /* product(shape) * itemsize = len and calcsize(format) = itemsize
1509 do _not_ hold from here on! */
1510 view->ndim = 1;
1511 view->shape = NULL;
1512 }
1513
1514 /* Ascertain that the new buffer has the same contiguity as the exporter */
1515 if (ND_C_CONTIGUOUS(baseflags) != PyBuffer_IsContiguous(view, 'C') ||
1516 /* skip cast to 1-d */
1517 (view->format != NULL && view->shape != NULL &&
1518 ND_FORTRAN_CONTIGUOUS(baseflags) != PyBuffer_IsContiguous(view, 'F')) ||
1519 /* cast to 1-d */
1520 (view->format == NULL && view->shape == NULL &&
1521 !PyBuffer_IsContiguous(view, 'F'))) {
1522 PyErr_SetString(PyExc_BufferError,
1523 "ndarray: contiguity mismatch in getbuf()");
1524 return -1;
1525 }
1526
1527 view->obj = (PyObject *)self;
1528 Py_INCREF(view->obj);
1529 self->head->exports++;
1530
1531 return 0;
1532 }
1533
1534 static void
1535 ndarray_releasebuf(NDArrayObject *self, Py_buffer *view)
1536 {
1537 if (!ND_IS_CONSUMER(self)) {
1538 ndbuf_t *ndbuf = view->internal;
1539 if (--ndbuf->exports == 0 && ndbuf != self->head)
1540 ndbuf_delete(self, ndbuf);
1541 }
1542 }
1543
1544 static PyBufferProcs ndarray_as_buffer = {
1545 (getbufferproc)ndarray_getbuf, /* bf_getbuffer */
1546 (releasebufferproc)ndarray_releasebuf /* bf_releasebuffer */
1547 };
1548
1549
1550 /**************************************************************************/
1551 /* indexing/slicing */
1552 /**************************************************************************/
1553
1554 static char *
1555 ptr_from_index(Py_buffer *base, Py_ssize_t index)
1556 {
1557 char *ptr;
1558 Py_ssize_t nitems; /* items in the first dimension */
1559
1560 if (base->shape)
1561 nitems = base->shape[0];
1562 else {
1563 assert(base->ndim == 1 && SIMPLE_FORMAT(base->format));
1564 nitems = base->len;
1565 }
1566
1567 if (index < 0) {
1568 index += nitems;
1569 }
1570 if (index < 0 || index >= nitems) {
1571 PyErr_SetString(PyExc_IndexError, "index out of bounds");
1572 return NULL;
1573 }
1574
1575 ptr = (char *)base->buf;
1576
1577 if (base->strides == NULL)
1578 ptr += base->itemsize * index;
1579 else
1580 ptr += base->strides[0] * index;
1581
1582 ptr = ADJUST_PTR(ptr, base->suboffsets);
1583
1584 return ptr;
1585 }
1586
1587 static PyObject *
1588 ndarray_item(NDArrayObject *self, Py_ssize_t index)
1589 {
1590 ndbuf_t *ndbuf = self->head;
1591 Py_buffer *base = &ndbuf->base;
1592 char *ptr;
1593
1594 if (base->ndim == 0) {
1595 PyErr_SetString(PyExc_TypeError, "invalid indexing of scalar");
1596 return NULL;
1597 }
1598
1599 ptr = ptr_from_index(base, index);
1600 if (ptr == NULL)
1601 return NULL;
1602
1603 if (base->ndim == 1) {
1604 return unpack_single(ptr, base->format, base->itemsize);
1605 }
1606 else {
1607 NDArrayObject *nd;
1608 Py_buffer *subview;
1609
1610 nd = (NDArrayObject *)ndarray_new(&NDArray_Type, NULL, NULL);
1611 if (nd == NULL)
1612 return NULL;
1613
1614 if (ndarray_init_staticbuf((PyObject *)self, nd, PyBUF_FULL_RO) < 0) {
1615 Py_DECREF(nd);
1616 return NULL;
1617 }
1618
1619 subview = &nd->staticbuf.base;
1620
1621 subview->buf = ptr;
1622 subview->len /= subview->shape[0];
1623
1624 subview->ndim--;
1625 subview->shape++;
1626 if (subview->strides) subview->strides++;
1627 if (subview->suboffsets) subview->suboffsets++;
1628
1629 init_flags(&nd->staticbuf);
1630
1631 return (PyObject *)nd;
1632 }
1633 }
1634
1635 /*
1636 For each dimension, we get valid (start, stop, step, slicelength) quadruples
1637 from PySlice_GetIndicesEx().
1638
1639 Slicing NumPy arrays
1640 ====================
1641
1642 A pointer to an element in a NumPy array is defined by:
1643
1644 ptr = (char *)buf + indices[0] * strides[0] +
1645 ... +
1646 indices[ndim-1] * strides[ndim-1]
1647
1648 Adjust buf:
1649 -----------
1650 Adding start[n] for each dimension effectively adds the constant:
1651
1652 c = start[0] * strides[0] + ... + start[ndim-1] * strides[ndim-1]
1653
1654 Therefore init_slice() adds all start[n] directly to buf.
1655
1656 Adjust shape:
1657 -------------
1658 Obviously shape[n] = slicelength[n]
1659
1660 Adjust strides:
1661 ---------------
1662 In the original array, the next element in a dimension is reached
1663 by adding strides[n] to the pointer. In the sliced array, elements
1664 may be skipped, so the next element is reached by adding:
1665
1666 strides[n] * step[n]
1667
1668 Slicing PIL arrays
1669 ==================
1670
1671 Layout:
1672 -------
1673 In the first (zeroth) dimension, PIL arrays have an array of pointers
1674 to sub-arrays of ndim-1. Striding in the first dimension is done by
1675 taking the nth pointer, dereferencing it and then adding a suboffset
1676 to it. The arrays pointed to can best be seen as regular NumPy
1677 arrays.
1678
1679 Adjust buf:
1680 -----------
1681 In the original array, buf points to a location (usually the start)
1682 in the array of pointers. For the sliced array, start[0] can be
1683 added to buf in the same manner as for NumPy arrays.
1684
1685 Adjust suboffsets:
1686 ------------------
1687 Due to the dereferencing step in the addressing scheme, it is not
1688 possible to adjust buf for higher dimensions. Recall that the
1689 sub-arrays pointed to are regular NumPy arrays, so for each of
1690 those arrays adding start[n] effectively adds the constant:
1691
1692 c = start[1] * strides[1] + ... + start[ndim-1] * strides[ndim-1]
1693
1694 This constant is added to suboffsets[0]. suboffsets[0] in turn is
1695 added to each pointer right after dereferencing.
1696
1697 Adjust shape and strides:
1698 -------------------------
1699 Shape and strides are not influenced by the dereferencing step, so
1700 they are adjusted in the same manner as for NumPy arrays.
1701
1702 Multiple levels of suboffsets
1703 =============================
1704
1705 For a construct like an array of pointers to array of pointers to
1706 sub-arrays of ndim-2:
1707
1708 suboffsets[0] = start[1] * strides[1]
1709 suboffsets[1] = start[2] * strides[2] + ...
1710 */
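/* Worked example for the NumPy case (assumed values): shape = {10},
   strides = {2}, itemsize = 2, key = slice(1, 8, 3). PySlice_AdjustIndices()
   yields start = 1, stop = 8, step = 3, slicelength = 3, so init_slice()
   adjusts

       buf       += 1 * 2
       shape[0]   = 3
       strides[0] = 2 * 3

   which selects the original elements 1, 4 and 7. */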
1711 static int
1712 init_slice(Py_buffer *base, PyObject *key, int dim)
1713 {
1714 Py_ssize_t start, stop, step, slicelength;
1715
1716 if (PySlice_Unpack(key, &start, &stop, &step) < 0) {
1717 return -1;
1718 }
1719 slicelength = PySlice_AdjustIndices(base->shape[dim], &start, &stop, step);
1720
1721
1722 if (base->suboffsets == NULL || dim == 0) {
1723 adjust_buf:
1724 base->buf = (char *)base->buf + base->strides[dim] * start;
1725 }
1726 else {
1727 Py_ssize_t n = dim-1;
1728 while (n >= 0 && base->suboffsets[n] < 0)
1729 n--;
1730 if (n < 0)
1731 goto adjust_buf; /* all suboffsets are negative */
1732 base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
1733 }
1734 base->shape[dim] = slicelength;
1735 base->strides[dim] = base->strides[dim] * step;
1736
1737 return 0;
1738 }
1739
1740 static int
1741 copy_structure(Py_buffer *base)
1742 {
1743 Py_ssize_t *shape = NULL, *strides = NULL, *suboffsets = NULL;
1744 Py_ssize_t i;
1745
1746 shape = PyMem_Malloc(base->ndim * (sizeof *shape));
1747 strides = PyMem_Malloc(base->ndim * (sizeof *strides));
1748 if (shape == NULL || strides == NULL)
1749 goto err_nomem;
1750
1751 suboffsets = NULL;
1752 if (base->suboffsets) {
1753 suboffsets = PyMem_Malloc(base->ndim * (sizeof *suboffsets));
1754 if (suboffsets == NULL)
1755 goto err_nomem;
1756 }
1757
1758 for (i = 0; i < base->ndim; i++) {
1759 shape[i] = base->shape[i];
1760 strides[i] = base->strides[i];
1761 if (suboffsets)
1762 suboffsets[i] = base->suboffsets[i];
1763 }
1764
1765 base->shape = shape;
1766 base->strides = strides;
1767 base->suboffsets = suboffsets;
1768
1769 return 0;
1770
1771 err_nomem:
1772 PyErr_NoMemory();
1773 PyMem_XFree(shape);
1774 PyMem_XFree(strides);
1775 PyMem_XFree(suboffsets);
1776 return -1;
1777 }
1778
1779 static PyObject *
1780 ndarray_subscript(NDArrayObject *self, PyObject *key)
1781 {
1782 NDArrayObject *nd;
1783 ndbuf_t *ndbuf;
1784 Py_buffer *base = &self->head->base;
1785
1786 if (base->ndim == 0) {
1787 if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
1788 return unpack_single(base->buf, base->format, base->itemsize);
1789 }
1790 else if (key == Py_Ellipsis) {
1791 Py_INCREF(self);
1792 return (PyObject *)self;
1793 }
1794 else {
1795 PyErr_SetString(PyExc_TypeError, "invalid indexing of scalar");
1796 return NULL;
1797 }
1798 }
1799 if (PyIndex_Check(key)) {
1800 Py_ssize_t index = PyLong_AsSsize_t(key);
1801 if (index == -1 && PyErr_Occurred())
1802 return NULL;
1803 return ndarray_item(self, index);
1804 }
1805
1806 nd = (NDArrayObject *)ndarray_new(&NDArray_Type, NULL, NULL);
1807 if (nd == NULL)
1808 return NULL;
1809
1810 /* new ndarray is a consumer */
1811 if (ndarray_init_staticbuf((PyObject *)self, nd, PyBUF_FULL_RO) < 0) {
1812 Py_DECREF(nd);
1813 return NULL;
1814 }
1815
1816 /* copy shape, strides and suboffsets */
1817 ndbuf = nd->head;
1818 base = &ndbuf->base;
1819 if (copy_structure(base) < 0) {
1820 Py_DECREF(nd);
1821 return NULL;
1822 }
1823 ndbuf->flags |= ND_OWN_ARRAYS;
1824
1825 if (PySlice_Check(key)) {
1826 /* one-dimensional slice */
1827 if (init_slice(base, key, 0) < 0)
1828 goto err_occurred;
1829 }
1830 else if (PyTuple_Check(key)) {
1831 /* multi-dimensional slice */
1832 PyObject *tuple = key;
1833 Py_ssize_t i, n;
1834
1835 n = PyTuple_GET_SIZE(tuple);
1836 for (i = 0; i < n; i++) {
1837 key = PyTuple_GET_ITEM(tuple, i);
1838 if (!PySlice_Check(key))
1839 goto type_error;
1840 if (init_slice(base, key, (int)i) < 0)
1841 goto err_occurred;
1842 }
1843 }
1844 else {
1845 goto type_error;
1846 }
1847
1848 init_len(base);
1849 init_flags(ndbuf);
1850
1851 return (PyObject *)nd;
1852
1853
1854 type_error:
1855 PyErr_Format(PyExc_TypeError,
1856 "cannot index memory using \"%.200s\"",
1857 Py_TYPE(key)->tp_name);
1858 err_occurred:
1859 Py_DECREF(nd);
1860 return NULL;
1861 }
1862
1863
1864 static int
1865 ndarray_ass_subscript(NDArrayObject *self, PyObject *key, PyObject *value)
1866 {
1867 NDArrayObject *nd;
1868 Py_buffer *dest = &self->head->base;
1869 Py_buffer src;
1870 char *ptr;
1871 Py_ssize_t index;
1872 int ret = -1;
1873
1874 if (dest->readonly) {
1875 PyErr_SetString(PyExc_TypeError, "ndarray is not writable");
1876 return -1;
1877 }
1878 if (value == NULL) {
1879 PyErr_SetString(PyExc_TypeError, "ndarray data cannot be deleted");
1880 return -1;
1881 }
1882 if (dest->ndim == 0) {
1883 if (key == Py_Ellipsis ||
1884 (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0)) {
1885 ptr = (char *)dest->buf;
1886 return pack_single(ptr, value, dest->format, dest->itemsize);
1887 }
1888 else {
1889 PyErr_SetString(PyExc_TypeError, "invalid indexing of scalar");
1890 return -1;
1891 }
1892 }
1893 if (dest->ndim == 1 && PyIndex_Check(key)) {
1894 /* rvalue must be a single item */
1895 index = PyLong_AsSsize_t(key);
1896 if (index == -1 && PyErr_Occurred())
1897 return -1;
1898 else {
1899 ptr = ptr_from_index(dest, index);
1900 if (ptr == NULL)
1901 return -1;
1902 }
1903 return pack_single(ptr, value, dest->format, dest->itemsize);
1904 }
1905
1906 /* rvalue must be an exporter */
1907 if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) == -1)
1908 return -1;
1909
1910 nd = (NDArrayObject *)ndarray_subscript(self, key);
1911 if (nd != NULL) {
1912 dest = &nd->head->base;
1913 ret = copy_buffer(dest, &src);
1914 Py_DECREF(nd);
1915 }
1916
1917 PyBuffer_Release(&src);
1918 return ret;
1919 }
1920
1921 static PyObject *
1922 slice_indices(PyObject *self, PyObject *args)
1923 {
1924 PyObject *ret, *key, *tmp;
1925 Py_ssize_t s[4]; /* start, stop, step, slicelength */
1926 Py_ssize_t i, len;
1927
1928 if (!PyArg_ParseTuple(args, "On", &key, &len)) {
1929 return NULL;
1930 }
1931 if (!PySlice_Check(key)) {
1932 PyErr_SetString(PyExc_TypeError,
1933 "first argument must be a slice object");
1934 return NULL;
1935 }
1936 if (PySlice_Unpack(key, &s[0], &s[1], &s[2]) < 0) {
1937 return NULL;
1938 }
1939 s[3] = PySlice_AdjustIndices(len, &s[0], &s[1], s[2]);
1940
1941 ret = PyTuple_New(4);
1942 if (ret == NULL)
1943 return NULL;
1944
1945 for (i = 0; i < 4; i++) {
1946 tmp = PyLong_FromSsize_t(s[i]);
1947 if (tmp == NULL)
1948 goto error;
1949 PyTuple_SET_ITEM(ret, i, tmp);
1950 }
1951
1952 return ret;
1953
1954 error:
1955 Py_DECREF(ret);
1956 return NULL;
1957 }
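/* Illustrative (hedged) expectation for the helper above, with the module
   imported as _testbuffer:

       _testbuffer.slice_indices(slice(0, 10, 2), 5) == (0, 5, 2, 3)

   i.e. (start, stop, step, slicelength) after PySlice_Unpack() and
   PySlice_AdjustIndices() have clipped the slice to a length-5 sequence. */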
1958
1959
1960 static PyMappingMethods ndarray_as_mapping = {
1961 NULL, /* mp_length */
1962 (binaryfunc)ndarray_subscript, /* mp_subscript */
1963 (objobjargproc)ndarray_ass_subscript /* mp_ass_subscript */
1964 };
1965
1966 static PySequenceMethods ndarray_as_sequence = {
1967 0, /* sq_length */
1968 0, /* sq_concat */
1969 0, /* sq_repeat */
1970 (ssizeargfunc)ndarray_item, /* sq_item */
1971 };
1972
1973
1974 /**************************************************************************/
1975 /* getters */
1976 /**************************************************************************/
1977
1978 static PyObject *
1979 ssize_array_as_tuple(Py_ssize_t *array, Py_ssize_t len)
1980 {
1981 PyObject *tuple, *x;
1982 Py_ssize_t i;
1983
1984 if (array == NULL)
1985 return PyTuple_New(0);
1986
1987 tuple = PyTuple_New(len);
1988 if (tuple == NULL)
1989 return NULL;
1990
1991 for (i = 0; i < len; i++) {
1992 x = PyLong_FromSsize_t(array[i]);
1993 if (x == NULL) {
1994 Py_DECREF(tuple);
1995 return NULL;
1996 }
1997 PyTuple_SET_ITEM(tuple, i, x);
1998 }
1999
2000 return tuple;
2001 }
2002
2003 static PyObject *
2004 ndarray_get_flags(NDArrayObject *self, void *closure)
2005 {
2006 return PyLong_FromLong(self->head->flags);
2007 }
2008
2009 static PyObject *
2010 ndarray_get_offset(NDArrayObject *self, void *closure)
2011 {
2012 ndbuf_t *ndbuf = self->head;
2013 return PyLong_FromSsize_t(ndbuf->offset);
2014 }
2015
2016 static PyObject *
2017 ndarray_get_obj(NDArrayObject *self, void *closure)
2018 {
2019 Py_buffer *base = &self->head->base;
2020
2021 if (base->obj == NULL) {
2022 Py_RETURN_NONE;
2023 }
2024 Py_INCREF(base->obj);
2025 return base->obj;
2026 }
2027
2028 static PyObject *
2029 ndarray_get_nbytes(NDArrayObject *self, void *closure)
2030 {
2031 Py_buffer *base = &self->head->base;
2032 return PyLong_FromSsize_t(base->len);
2033 }
2034
2035 static PyObject *
2036 ndarray_get_readonly(NDArrayObject *self, void *closure)
2037 {
2038 Py_buffer *base = &self->head->base;
2039 return PyBool_FromLong(base->readonly);
2040 }
2041
2042 static PyObject *
2043 ndarray_get_itemsize(NDArrayObject *self, void *closure)
2044 {
2045 Py_buffer *base = &self->head->base;
2046 return PyLong_FromSsize_t(base->itemsize);
2047 }
2048
2049 static PyObject *
2050 ndarray_get_format(NDArrayObject *self, void *closure)
2051 {
2052 Py_buffer *base = &self->head->base;
2053 const char *fmt = base->format ? base->format : "";
2054 return PyUnicode_FromString(fmt);
2055 }
2056
2057 static PyObject *
2058 ndarray_get_ndim(NDArrayObject *self, void *closure)
2059 {
2060 Py_buffer *base = &self->head->base;
2061 return PyLong_FromSsize_t(base->ndim);
2062 }
2063
2064 static PyObject *
2065 ndarray_get_shape(NDArrayObject *self, void *closure)
2066 {
2067 Py_buffer *base = &self->head->base;
2068 return ssize_array_as_tuple(base->shape, base->ndim);
2069 }
2070
2071 static PyObject *
2072 ndarray_get_strides(NDArrayObject *self, void *closure)
2073 {
2074 Py_buffer *base = &self->head->base;
2075 return ssize_array_as_tuple(base->strides, base->ndim);
2076 }
2077
2078 static PyObject *
2079 ndarray_get_suboffsets(NDArrayObject *self, void *closure)
2080 {
2081 Py_buffer *base = &self->head->base;
2082 return ssize_array_as_tuple(base->suboffsets, base->ndim);
2083 }
2084
2085 static PyObject *
2086 ndarray_c_contig(PyObject *self, PyObject *dummy)
2087 {
2088 NDArrayObject *nd = (NDArrayObject *)self;
2089 int ret = PyBuffer_IsContiguous(&nd->head->base, 'C');
2090
2091 if (ret != ND_C_CONTIGUOUS(nd->head->flags)) {
2092 PyErr_SetString(PyExc_RuntimeError,
2093 "results from PyBuffer_IsContiguous() and flags differ");
2094 return NULL;
2095 }
2096 return PyBool_FromLong(ret);
2097 }
2098
2099 static PyObject *
2100 ndarray_fortran_contig(PyObject *self, PyObject *dummy)
2101 {
2102 NDArrayObject *nd = (NDArrayObject *)self;
2103 int ret = PyBuffer_IsContiguous(&nd->head->base, 'F');
2104
2105 if (ret != ND_FORTRAN_CONTIGUOUS(nd->head->flags)) {
2106 PyErr_SetString(PyExc_RuntimeError,
2107 "results from PyBuffer_IsContiguous() and flags differ");
2108 return NULL;
2109 }
2110 return PyBool_FromLong(ret);
2111 }
2112
2113 static PyObject *
2114 ndarray_contig(PyObject *self, PyObject *dummy)
2115 {
2116 NDArrayObject *nd = (NDArrayObject *)self;
2117 int ret = PyBuffer_IsContiguous(&nd->head->base, 'A');
2118
2119 if (ret != ND_ANY_CONTIGUOUS(nd->head->flags)) {
2120 PyErr_SetString(PyExc_RuntimeError,
2121 "results from PyBuffer_IsContiguous() and flags differ");
2122 return NULL;
2123 }
2124 return PyBool_FromLong(ret);
2125 }
2126
2127
2128 static PyGetSetDef ndarray_getset [] =
2129 {
2130 /* ndbuf */
2131 { "flags", (getter)ndarray_get_flags, NULL, NULL, NULL},
2132 { "offset", (getter)ndarray_get_offset, NULL, NULL, NULL},
2133 /* ndbuf.base */
2134 { "obj", (getter)ndarray_get_obj, NULL, NULL, NULL},
2135 { "nbytes", (getter)ndarray_get_nbytes, NULL, NULL, NULL},
2136 { "readonly", (getter)ndarray_get_readonly, NULL, NULL, NULL},
2137 { "itemsize", (getter)ndarray_get_itemsize, NULL, NULL, NULL},
2138 { "format", (getter)ndarray_get_format, NULL, NULL, NULL},
2139 { "ndim", (getter)ndarray_get_ndim, NULL, NULL, NULL},
2140 { "shape", (getter)ndarray_get_shape, NULL, NULL, NULL},
2141 { "strides", (getter)ndarray_get_strides, NULL, NULL, NULL},
2142 { "suboffsets", (getter)ndarray_get_suboffsets, NULL, NULL, NULL},
2143 { "c_contiguous", (getter)ndarray_c_contig, NULL, NULL, NULL},
2144 { "f_contiguous", (getter)ndarray_fortran_contig, NULL, NULL, NULL},
2145 { "contiguous", (getter)ndarray_contig, NULL, NULL, NULL},
2146 {NULL}
2147 };
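/* The getters above expose the ndbuf bookkeeping (flags, offset) and the
   raw Py_buffer fields of the current base buffer as read-only attributes.
   c_contiguous, f_contiguous and contiguous additionally cross-check
   PyBuffer_IsContiguous() against the internal ND_* flags and raise
   RuntimeError if the two disagree. */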
2148
2149 static PyObject *
2150 ndarray_tolist(PyObject *self, PyObject *dummy)
2151 {
2152 return ndarray_as_list((NDArrayObject *)self);
2153 }
2154
2155 static PyObject *
2156 ndarray_tobytes(PyObject *self, PyObject *dummy)
2157 {
2158 ndbuf_t *ndbuf = ((NDArrayObject *)self)->head;
2159 Py_buffer *src = &ndbuf->base;
2160 Py_buffer dest;
2161 PyObject *ret = NULL;
2162 char *mem;
2163
2164 if (ND_C_CONTIGUOUS(ndbuf->flags))
2165 return PyBytes_FromStringAndSize(src->buf, src->len);
2166
2167 assert(src->shape != NULL);
2168 assert(src->strides != NULL);
2169 assert(src->ndim > 0);
2170
2171 mem = PyMem_Malloc(src->len);
2172 if (mem == NULL) {
2173 PyErr_NoMemory();
2174 return NULL;
2175 }
2176
2177 dest = *src;
2178 dest.buf = mem;
2179 dest.suboffsets = NULL;
2180 dest.strides = strides_from_shape(ndbuf, 0);
2181 if (dest.strides == NULL)
2182 goto out;
2183 if (copy_buffer(&dest, src) < 0)
2184 goto out;
2185
2186 ret = PyBytes_FromStringAndSize(mem, src->len);
2187
2188 out:
2189 PyMem_XFree(dest.strides);
2190 PyMem_Free(mem);
2191 return ret;
2192 }
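/* For a C-contiguous base buffer tobytes() is a direct copy of the raw
   memory.  Otherwise the data is repacked through copy_buffer() into a
   scratch buffer with freshly computed strides (strides_from_shape() with
   flags 0, i.e. C order), so the returned bytes are always in C order. */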
2193
2194 /* add redundant (negative) suboffsets for testing */
2195 static PyObject *
2196 ndarray_add_suboffsets(PyObject *self, PyObject *dummy)
2197 {
2198 NDArrayObject *nd = (NDArrayObject *)self;
2199 Py_buffer *base = &nd->head->base;
2200 Py_ssize_t i;
2201
2202 if (base->suboffsets != NULL) {
2203 PyErr_SetString(PyExc_TypeError,
2204 "cannot add suboffsets to PIL-style array");
2205 return NULL;
2206 }
2207 if (base->strides == NULL) {
2208 PyErr_SetString(PyExc_TypeError,
2209 "cannot add suboffsets to array without strides");
2210 return NULL;
2211 }
2212
2213 base->suboffsets = PyMem_Malloc(base->ndim * (sizeof *base->suboffsets));
2214 if (base->suboffsets == NULL) {
2215 PyErr_NoMemory();
2216 return NULL;
2217 }
2218
2219 for (i = 0; i < base->ndim; i++)
2220 base->suboffsets[i] = -1;
2221
2222 nd->head->flags &= ~(ND_C|ND_FORTRAN);
2223
2224 Py_RETURN_NONE;
2225 }
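/* All suboffsets are set to -1, which in PEP 3118 means "no dereference"
   in that dimension: the memory layout is unchanged, but consumers are now
   forced down the suboffsets code path.  The C/Fortran flags are cleared
   because a buffer carrying a suboffsets array no longer reports itself
   as contiguous. */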
2226
2227 /* Test PyMemoryView_FromBuffer(): return a memoryview from a static buffer.
2228 Obviously this is fragile and only one such view may be active at any
2229 time. Never use anything like this in real code! */
2230 static char *infobuf = NULL;
2231 static PyObject *
2232 ndarray_memoryview_from_buffer(PyObject *self, PyObject *dummy)
2233 {
2234 const NDArrayObject *nd = (NDArrayObject *)self;
2235 const Py_buffer *view = &nd->head->base;
2236 const ndbuf_t *ndbuf;
2237 static char format[ND_MAX_NDIM+1];
2238 static Py_ssize_t shape[ND_MAX_NDIM];
2239 static Py_ssize_t strides[ND_MAX_NDIM];
2240 static Py_ssize_t suboffsets[ND_MAX_NDIM];
2241 static Py_buffer info;
2242 char *p;
2243
2244 if (!ND_IS_CONSUMER(nd))
2245 ndbuf = nd->head; /* self is ndarray/original exporter */
2246 else if (NDArray_Check(view->obj) && !ND_IS_CONSUMER(view->obj))
2247 /* self is ndarray and consumer from ndarray/original exporter */
2248 ndbuf = ((NDArrayObject *)view->obj)->head;
2249 else {
2250 PyErr_SetString(PyExc_TypeError,
2251 "memoryview_from_buffer(): ndarray must be original exporter or "
2252 "consumer from ndarray/original exporter");
2253 return NULL;
2254 }
2255
2256 info = *view;
2257 p = PyMem_Realloc(infobuf, ndbuf->len);
2258 if (p == NULL) {
2259 PyMem_Free(infobuf);
2260 PyErr_NoMemory();
2261 infobuf = NULL;
2262 return NULL;
2263 }
2264 else {
2265 infobuf = p;
2266 }
2267 /* copy the complete raw data */
2268 memcpy(infobuf, ndbuf->data, ndbuf->len);
2269 info.buf = infobuf + ((char *)view->buf - ndbuf->data);
2270
2271 if (view->format) {
2272 if (strlen(view->format) > ND_MAX_NDIM) {
2273 PyErr_Format(PyExc_TypeError,
2274 "memoryview_from_buffer: format is limited to %d characters",
2275 ND_MAX_NDIM);
2276 return NULL;
2277 }
2278 strcpy(format, view->format);
2279 info.format = format;
2280 }
2281 if (view->ndim > ND_MAX_NDIM) {
2282 PyErr_Format(PyExc_TypeError,
2283 "memoryview_from_buffer: ndim is limited to %d", ND_MAX_NDIM);
2284 return NULL;
2285 }
2286 if (view->shape) {
2287 memcpy(shape, view->shape, view->ndim * sizeof(Py_ssize_t));
2288 info.shape = shape;
2289 }
2290 if (view->strides) {
2291 memcpy(strides, view->strides, view->ndim * sizeof(Py_ssize_t));
2292 info.strides = strides;
2293 }
2294 if (view->suboffsets) {
2295 memcpy(suboffsets, view->suboffsets, view->ndim * sizeof(Py_ssize_t));
2296 info.suboffsets = suboffsets;
2297 }
2298
2299 return PyMemoryView_FromBuffer(&info);
2300 }
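/* Everything handed to PyMemoryView_FromBuffer() above (info, format,
   shape, strides, suboffsets) lives in static storage and infobuf is a
   single global allocation, which is why only one such memoryview can be
   alive at a time (see the warning above the function). */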
2301
2302 /* Get a single item from bufobj at the location specified by seq.
2303 seq is a list or tuple of indices. The purpose of this function
2304 is to check other functions against PyBuffer_GetPointer(). */
2305 static PyObject *
2306 get_pointer(PyObject *self, PyObject *args)
2307 {
2308 PyObject *ret = NULL, *bufobj, *seq;
2309 Py_buffer view;
2310 Py_ssize_t indices[ND_MAX_NDIM];
2311 Py_ssize_t i;
2312 void *ptr;
2313
2314 if (!PyArg_ParseTuple(args, "OO", &bufobj, &seq)) {
2315 return NULL;
2316 }
2317
2318 CHECK_LIST_OR_TUPLE(seq);
2319 if (PyObject_GetBuffer(bufobj, &view, PyBUF_FULL_RO) < 0)
2320 return NULL;
2321
2322 if (view.ndim > ND_MAX_NDIM) {
2323 PyErr_Format(PyExc_ValueError,
2324 "get_pointer(): ndim > %d", ND_MAX_NDIM);
2325 goto out;
2326 }
2327 if (PySequence_Fast_GET_SIZE(seq) != view.ndim) {
2328 PyErr_SetString(PyExc_ValueError,
2329 "get_pointer(): len(indices) != ndim");
2330 goto out;
2331 }
2332
2333 for (i = 0; i < view.ndim; i++) {
2334 PyObject *x = PySequence_Fast_GET_ITEM(seq, i);
2335 indices[i] = PyLong_AsSsize_t(x);
2336 if (PyErr_Occurred())
2337 goto out;
2338 if (indices[i] < 0 || indices[i] >= view.shape[i]) {
2339 PyErr_Format(PyExc_ValueError,
2340 "get_pointer(): invalid index %zd at position %zd",
2341 indices[i], i);
2342 goto out;
2343 }
2344 }
2345
2346 ptr = PyBuffer_GetPointer(&view, indices);
2347 ret = unpack_single(ptr, view.format, view.itemsize);
2348
2349 out:
2350 PyBuffer_Release(&view);
2351 return ret;
2352 }
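/* Illustrative (hedged) use from Python; the ndarray constructor keywords
   are assumed from this module's test suite:

       nd = ndarray(list(range(6)), shape=[2, 3])
       get_pointer(nd, (1, 2))   # -> 5, located via PyBuffer_GetPointer()
*/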
2353
2354 static PyObject *
2355 get_sizeof_void_p(PyObject *self, PyObject *Py_UNUSED(ignored))
2356 {
2357 return PyLong_FromSize_t(sizeof(void *));
2358 }
2359
2360 static char
2361 get_ascii_order(PyObject *order)
2362 {
2363 PyObject *ascii_order;
2364 char ord;
2365
2366 if (!PyUnicode_Check(order)) {
2367 PyErr_SetString(PyExc_TypeError,
2368 "order must be a string");
2369 return CHAR_MAX;
2370 }
2371
2372 ascii_order = PyUnicode_AsASCIIString(order);
2373 if (ascii_order == NULL) {
2374 return CHAR_MAX;
2375 }
2376
2377 ord = PyBytes_AS_STRING(ascii_order)[0];
2378 Py_DECREF(ascii_order);
2379
2380 if (ord != 'C' && ord != 'F' && ord != 'A') {
2381 PyErr_SetString(PyExc_ValueError,
2382 "invalid order, must be C, F or A");
2383 return CHAR_MAX;
2384 }
2385
2386 return ord;
2387 }
2388
2389 /* Get a contiguous memoryview. */
2390 static PyObject *
2391 get_contiguous(PyObject *self, PyObject *args)
2392 {
2393 PyObject *obj;
2394 PyObject *buffertype;
2395 PyObject *order;
2396 long type;
2397 char ord;
2398
2399 if (!PyArg_ParseTuple(args, "OOO", &obj, &buffertype, &order)) {
2400 return NULL;
2401 }
2402
2403 if (!PyLong_Check(buffertype)) {
2404 PyErr_SetString(PyExc_TypeError,
2405 "buffertype must be PyBUF_READ or PyBUF_WRITE");
2406 return NULL;
2407 }
2408
2409 type = PyLong_AsLong(buffertype);
2410 if (type == -1 && PyErr_Occurred()) {
2411 return NULL;
2412 }
2413 if (type != PyBUF_READ && type != PyBUF_WRITE) {
2414 PyErr_SetString(PyExc_ValueError,
2415 "invalid buffer type");
2416 return NULL;
2417 }
2418
2419 ord = get_ascii_order(order);
2420 if (ord == CHAR_MAX)
2421 return NULL;
2422
2423 return PyMemoryView_GetContiguous(obj, (int)type, ord);
2424 }
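/* Thin wrapper around PyMemoryView_GetContiguous().  buffertype must be
   PyBUF_READ or PyBUF_WRITE (exposed as module constants below) and order
   one of 'C', 'F' or 'A'.  A hedged, illustrative call:

       get_contiguous(b"abc", PyBUF_READ, 'C')   # -> read-only memoryview
*/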
2425
2426 /* PyBuffer_ToContiguous() */
2427 static PyObject *
2428 py_buffer_to_contiguous(PyObject *self, PyObject *args)
2429 {
2430 PyObject *obj;
2431 PyObject *order;
2432 PyObject *ret = NULL;
2433 int flags;
2434 char ord;
2435 Py_buffer view;
2436 char *buf = NULL;
2437
2438 if (!PyArg_ParseTuple(args, "OOi", &obj, &order, &flags)) {
2439 return NULL;
2440 }
2441
2442 if (PyObject_GetBuffer(obj, &view, flags) < 0) {
2443 return NULL;
2444 }
2445
2446 ord = get_ascii_order(order);
2447 if (ord == CHAR_MAX) {
2448 goto out;
2449 }
2450
2451 buf = PyMem_Malloc(view.len);
2452 if (buf == NULL) {
2453 PyErr_NoMemory();
2454 goto out;
2455 }
2456
2457 if (PyBuffer_ToContiguous(buf, &view, view.len, ord) < 0) {
2458 goto out;
2459 }
2460
2461 ret = PyBytes_FromStringAndSize(buf, view.len);
2462
2463 out:
2464 PyBuffer_Release(&view);
2465 PyMem_XFree(buf);
2466 return ret;
2467 }
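/* The flags argument is passed straight to PyObject_GetBuffer(); the data
   is then linearized into a scratch buffer in the requested order via
   PyBuffer_ToContiguous() and returned as a bytes object.  The view is
   released on every exit path. */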
2468
2469 static int
2470 fmtcmp(const char *fmt1, const char *fmt2)
2471 {
2472 if (fmt1 == NULL) {
2473 return fmt2 == NULL || strcmp(fmt2, "B") == 0;
2474 }
2475 if (fmt2 == NULL) {
2476 return fmt1 == NULL || strcmp(fmt1, "B") == 0;
2477 }
2478 return strcmp(fmt1, fmt2) == 0;
2479 }
2480
2481 static int
2482 arraycmp(const Py_ssize_t *a1, const Py_ssize_t *a2, const Py_ssize_t *shape,
2483 Py_ssize_t ndim)
2484 {
2485 Py_ssize_t i;
2486
2487
2488 for (i = 0; i < ndim; i++) {
2489 if (shape && shape[i] <= 1) {
2490 /* strides can differ if the dimension is less than 2 */
2491 continue;
2492 }
2493 if (a1[i] != a2[i]) {
2494 return 0;
2495 }
2496 }
2497
2498 return 1;
2499 }
2500
2501 /* Compare two contiguous buffers for physical equality. */
2502 static PyObject *
2503 cmp_contig(PyObject *self, PyObject *args)
2504 {
2505 PyObject *b1, *b2; /* buffer objects */
2506 Py_buffer v1, v2;
2507 PyObject *ret;
2508 int equal = 0;
2509
2510 if (!PyArg_ParseTuple(args, "OO", &b1, &b2)) {
2511 return NULL;
2512 }
2513
2514 if (PyObject_GetBuffer(b1, &v1, PyBUF_FULL_RO) < 0) {
2515 PyErr_SetString(PyExc_TypeError,
2516 "cmp_contig: first argument does not implement the buffer "
2517 "protocol");
2518 return NULL;
2519 }
2520 if (PyObject_GetBuffer(b2, &v2, PyBUF_FULL_RO) < 0) {
2521 PyErr_SetString(PyExc_TypeError,
2522 "cmp_contig: second argument does not implement the buffer "
2523 "protocol");
2524 PyBuffer_Release(&v1);
2525 return NULL;
2526 }
2527
2528 if (!(PyBuffer_IsContiguous(&v1, 'C')&&PyBuffer_IsContiguous(&v2, 'C')) &&
2529 !(PyBuffer_IsContiguous(&v1, 'F')&&PyBuffer_IsContiguous(&v2, 'F'))) {
2530 goto result;
2531 }
2532
2533 /* readonly may differ if created from non-contiguous */
2534 if (v1.len != v2.len ||
2535 v1.itemsize != v2.itemsize ||
2536 v1.ndim != v2.ndim ||
2537 !fmtcmp(v1.format, v2.format) ||
2538 !!v1.shape != !!v2.shape ||
2539 !!v1.strides != !!v2.strides ||
2540 !!v1.suboffsets != !!v2.suboffsets) {
2541 goto result;
2542 }
2543
2544 if ((v1.shape && !arraycmp(v1.shape, v2.shape, NULL, v1.ndim)) ||
2545 (v1.strides && !arraycmp(v1.strides, v2.strides, v1.shape, v1.ndim)) ||
2546 (v1.suboffsets && !arraycmp(v1.suboffsets, v2.suboffsets, NULL,
2547 v1.ndim))) {
2548 goto result;
2549 }
2550
2551 if (memcmp((char *)v1.buf, (char *)v2.buf, v1.len) != 0) {
2552 goto result;
2553 }
2554
2555 equal = 1;
2556
2557 result:
2558 PyBuffer_Release(&v1);
2559 PyBuffer_Release(&v2);
2560
2561 ret = equal ? Py_True : Py_False;
2562 Py_INCREF(ret);
2563 return ret;
2564 }
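/* "Physically equal" above means: both views are C-contiguous or both are
   Fortran-contiguous, len/itemsize/ndim/format and the shape, strides and
   suboffsets arrays match (strides are ignored for dimensions of length
   <= 1, see arraycmp()), and the raw bytes compare equal.  readonly is
   deliberately not compared. */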
2565
2566 static PyObject *
2567 is_contiguous(PyObject *self, PyObject *args)
2568 {
2569 PyObject *obj;
2570 PyObject *order;
2571 PyObject *ret = NULL;
2572 Py_buffer view, *base;
2573 char ord;
2574
2575 if (!PyArg_ParseTuple(args, "OO", &obj, &order)) {
2576 return NULL;
2577 }
2578
2579 ord = get_ascii_order(order);
2580 if (ord == CHAR_MAX) {
2581 return NULL;
2582 }
2583
2584 if (NDArray_Check(obj)) {
2585 /* Skip the buffer protocol to check simple etc. buffers directly. */
2586 base = &((NDArrayObject *)obj)->head->base;
2587 ret = PyBuffer_IsContiguous(base, ord) ? Py_True : Py_False;
2588 }
2589 else {
2590 if (PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO) < 0) {
2591 PyErr_SetString(PyExc_TypeError,
2592 "is_contiguous: object does not implement the buffer "
2593 "protocol");
2594 return NULL;
2595 }
2596 ret = PyBuffer_IsContiguous(&view, ord) ? Py_True : Py_False;
2597 PyBuffer_Release(&view);
2598 }
2599
2600 Py_INCREF(ret);
2601 return ret;
2602 }
2603
2604 static Py_hash_t
2605 ndarray_hash(PyObject *self)
2606 {
2607 const NDArrayObject *nd = (NDArrayObject *)self;
2608 const Py_buffer *view = &nd->head->base;
2609 PyObject *bytes;
2610 Py_hash_t hash;
2611
2612 if (!view->readonly) {
2613 PyErr_SetString(PyExc_ValueError,
2614 "cannot hash writable ndarray object");
2615 return -1;
2616 }
2617 if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
2618 return -1;
2619 }
2620
2621 bytes = ndarray_tobytes(self, NULL);
2622 if (bytes == NULL) {
2623 return -1;
2624 }
2625
2626 hash = PyObject_Hash(bytes);
2627 Py_DECREF(bytes);
2628 return hash;
2629 }
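/* Hashing is only defined for read-only ndarrays whose exporting object
   is itself hashable; the hash is simply the hash of the tobytes()
   snapshot, much like hashing a read-only memoryview. */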
2630
2631
2632 static PyMethodDef ndarray_methods [] =
2633 {
2634 { "tolist", ndarray_tolist, METH_NOARGS, NULL },
2635 { "tobytes", ndarray_tobytes, METH_NOARGS, NULL },
2636 { "push", (PyCFunction)(void(*)(void))ndarray_push, METH_VARARGS|METH_KEYWORDS, NULL },
2637 { "pop", ndarray_pop, METH_NOARGS, NULL },
2638 { "add_suboffsets", ndarray_add_suboffsets, METH_NOARGS, NULL },
2639 { "memoryview_from_buffer", ndarray_memoryview_from_buffer, METH_NOARGS, NULL },
2640 {NULL}
2641 };
2642
2643 static PyTypeObject NDArray_Type = {
2644 PyVarObject_HEAD_INIT(NULL, 0)
2645 "ndarray", /* Name of this type */
2646 sizeof(NDArrayObject), /* Basic object size */
2647 0, /* Item size for varobject */
2648 (destructor)ndarray_dealloc, /* tp_dealloc */
2649 0, /* tp_vectorcall_offset */
2650 0, /* tp_getattr */
2651 0, /* tp_setattr */
2652 0, /* tp_as_async */
2653 0, /* tp_repr */
2654 0, /* tp_as_number */
2655 &ndarray_as_sequence, /* tp_as_sequence */
2656 &ndarray_as_mapping, /* tp_as_mapping */
2657 (hashfunc)ndarray_hash, /* tp_hash */
2658 0, /* tp_call */
2659 0, /* tp_str */
2660 PyObject_GenericGetAttr, /* tp_getattro */
2661 0, /* tp_setattro */
2662 &ndarray_as_buffer, /* tp_as_buffer */
2663 Py_TPFLAGS_DEFAULT, /* tp_flags */
2664 0, /* tp_doc */
2665 0, /* tp_traverse */
2666 0, /* tp_clear */
2667 0, /* tp_richcompare */
2668 0, /* tp_weaklistoffset */
2669 0, /* tp_iter */
2670 0, /* tp_iternext */
2671 ndarray_methods, /* tp_methods */
2672 0, /* tp_members */
2673 ndarray_getset, /* tp_getset */
2674 0, /* tp_base */
2675 0, /* tp_dict */
2676 0, /* tp_descr_get */
2677 0, /* tp_descr_set */
2678 0, /* tp_dictoffset */
2679 ndarray_init, /* tp_init */
2680 0, /* tp_alloc */
2681 ndarray_new, /* tp_new */
2682 };
2683
2684 /**************************************************************************/
2685 /* StaticArray Object */
2686 /**************************************************************************/
2687
2688 static PyTypeObject StaticArray_Type;
2689
2690 typedef struct {
2691 PyObject_HEAD
2692 int legacy_mode; /* if true, use the view.obj==NULL hack */
2693 } StaticArrayObject;
2694
2695 static char static_mem[12] = {0,1,2,3,4,5,6,7,8,9,10,11};
2696 static Py_ssize_t static_shape[1] = {12};
2697 static Py_ssize_t static_strides[1] = {1};
2698 static Py_buffer static_buffer = {
2699 static_mem, /* buf */
2700 NULL, /* obj */
2701 12, /* len */
2702 1, /* itemsize */
2703 1, /* readonly */
2704 1, /* ndim */
2705 "B", /* format */
2706 static_shape, /* shape */
2707 static_strides, /* strides */
2708 NULL, /* suboffsets */
2709 NULL /* internal */
2710 };
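/* A fixed 12-byte, read-only, one-dimensional 'B' buffer over static_mem.
   Every staticarray export hands out a copy of this same Py_buffer. */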
2711
2712 static PyObject *
2713 staticarray_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
2714 {
2715 return (PyObject *)PyObject_New(StaticArrayObject, &StaticArray_Type);
2716 }
2717
2718 static int
2719 staticarray_init(PyObject *self, PyObject *args, PyObject *kwds)
2720 {
2721 StaticArrayObject *a = (StaticArrayObject *)self;
2722 static char *kwlist[] = {
2723 "legacy_mode", NULL
2724 };
2725 PyObject *legacy_mode = Py_False;
2726
2727 if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O", kwlist, &legacy_mode))
2728 return -1;
2729
2730 a->legacy_mode = (legacy_mode != Py_False);
2731 return 0;
2732 }
2733
2734 static void
2735 staticarray_dealloc(StaticArrayObject *self)
2736 {
2737 PyObject_Del(self);
2738 }
2739
2740 /* Return a buffer for a PyBUF_FULL_RO request. Flags are not checked,
2741 which makes this object a non-compliant exporter! */
2742 static int
2743 staticarray_getbuf(StaticArrayObject *self, Py_buffer *view, int flags)
2744 {
2745 *view = static_buffer;
2746
2747 if (self->legacy_mode) {
2748 view->obj = NULL; /* Don't use this in new code. */
2749 }
2750 else {
2751 view->obj = (PyObject *)self;
2752 Py_INCREF(view->obj);
2753 }
2754
2755 return 0;
2756 }
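/* In legacy mode the buffer is returned with view->obj == NULL (the old
   convention warned about above); otherwise a new reference to the
   exporter is stored in view->obj as required by PEP 3118. */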
2757
2758 static PyBufferProcs staticarray_as_buffer = {
2759 (getbufferproc)staticarray_getbuf, /* bf_getbuffer */
2760 NULL, /* bf_releasebuffer */
2761 };
2762
2763 static PyTypeObject StaticArray_Type = {
2764 PyVarObject_HEAD_INIT(NULL, 0)
2765 "staticarray", /* Name of this type */
2766 sizeof(StaticArrayObject), /* Basic object size */
2767 0, /* Item size for varobject */
2768 (destructor)staticarray_dealloc, /* tp_dealloc */
2769 0, /* tp_vectorcall_offset */
2770 0, /* tp_getattr */
2771 0, /* tp_setattr */
2772 0, /* tp_as_async */
2773 0, /* tp_repr */
2774 0, /* tp_as_number */
2775 0, /* tp_as_sequence */
2776 0, /* tp_as_mapping */
2777 0, /* tp_hash */
2778 0, /* tp_call */
2779 0, /* tp_str */
2780 0, /* tp_getattro */
2781 0, /* tp_setattro */
2782 &staticarray_as_buffer, /* tp_as_buffer */
2783 Py_TPFLAGS_DEFAULT, /* tp_flags */
2784 0, /* tp_doc */
2785 0, /* tp_traverse */
2786 0, /* tp_clear */
2787 0, /* tp_richcompare */
2788 0, /* tp_weaklistoffset */
2789 0, /* tp_iter */
2790 0, /* tp_iternext */
2791 0, /* tp_methods */
2792 0, /* tp_members */
2793 0, /* tp_getset */
2794 0, /* tp_base */
2795 0, /* tp_dict */
2796 0, /* tp_descr_get */
2797 0, /* tp_descr_set */
2798 0, /* tp_dictoffset */
2799 staticarray_init, /* tp_init */
2800 0, /* tp_alloc */
2801 staticarray_new, /* tp_new */
2802 };
2803
2804
2805 static struct PyMethodDef _testbuffer_functions[] = {
2806 {"slice_indices", slice_indices, METH_VARARGS, NULL},
2807 {"get_pointer", get_pointer, METH_VARARGS, NULL},
2808 {"get_sizeof_void_p", get_sizeof_void_p, METH_NOARGS, NULL},
2809 {"get_contiguous", get_contiguous, METH_VARARGS, NULL},
2810 {"py_buffer_to_contiguous", py_buffer_to_contiguous, METH_VARARGS, NULL},
2811 {"is_contiguous", is_contiguous, METH_VARARGS, NULL},
2812 {"cmp_contig", cmp_contig, METH_VARARGS, NULL},
2813 {NULL, NULL}
2814 };
2815
2816 static struct PyModuleDef _testbuffermodule = {
2817 PyModuleDef_HEAD_INIT,
2818 "_testbuffer",
2819 NULL,
2820 -1,
2821 _testbuffer_functions,
2822 NULL,
2823 NULL,
2824 NULL,
2825 NULL
2826 };
2827
2828
2829 PyMODINIT_FUNC
2830 PyInit__testbuffer(void)
2831 {
2832 PyObject *m;
2833
2834 m = PyModule_Create(&_testbuffermodule);
2835 if (m == NULL)
2836 return NULL;
2837
2838 Py_SET_TYPE(&NDArray_Type, &PyType_Type);
2839 Py_INCREF(&NDArray_Type);
2840 PyModule_AddObject(m, "ndarray", (PyObject *)&NDArray_Type);
2841
2842 Py_SET_TYPE(&StaticArray_Type, &PyType_Type);
2843 Py_INCREF(&StaticArray_Type);
2844 PyModule_AddObject(m, "staticarray", (PyObject *)&StaticArray_Type);
2845
2846 structmodule = PyImport_ImportModule("struct");
2847 if (structmodule == NULL)
2848 return NULL;
2849
2850 Struct = PyObject_GetAttrString(structmodule, "Struct");
2851 calcsize = PyObject_GetAttrString(structmodule, "calcsize");
2852 if (Struct == NULL || calcsize == NULL)
2853 return NULL;
2854
2855 simple_format = PyUnicode_FromString(simple_fmt);
2856 if (simple_format == NULL)
2857 return NULL;
2858
2859 PyModule_AddIntMacro(m, ND_MAX_NDIM);
2860 PyModule_AddIntMacro(m, ND_VAREXPORT);
2861 PyModule_AddIntMacro(m, ND_WRITABLE);
2862 PyModule_AddIntMacro(m, ND_FORTRAN);
2863 PyModule_AddIntMacro(m, ND_SCALAR);
2864 PyModule_AddIntMacro(m, ND_PIL);
2865 PyModule_AddIntMacro(m, ND_GETBUF_FAIL);
2866 PyModule_AddIntMacro(m, ND_GETBUF_UNDEFINED);
2867 PyModule_AddIntMacro(m, ND_REDIRECT);
2868
2869 PyModule_AddIntMacro(m, PyBUF_SIMPLE);
2870 PyModule_AddIntMacro(m, PyBUF_WRITABLE);
2871 PyModule_AddIntMacro(m, PyBUF_FORMAT);
2872 PyModule_AddIntMacro(m, PyBUF_ND);
2873 PyModule_AddIntMacro(m, PyBUF_STRIDES);
2874 PyModule_AddIntMacro(m, PyBUF_INDIRECT);
2875 PyModule_AddIntMacro(m, PyBUF_C_CONTIGUOUS);
2876 PyModule_AddIntMacro(m, PyBUF_F_CONTIGUOUS);
2877 PyModule_AddIntMacro(m, PyBUF_ANY_CONTIGUOUS);
2878 PyModule_AddIntMacro(m, PyBUF_FULL);
2879 PyModule_AddIntMacro(m, PyBUF_FULL_RO);
2880 PyModule_AddIntMacro(m, PyBUF_RECORDS);
2881 PyModule_AddIntMacro(m, PyBUF_RECORDS_RO);
2882 PyModule_AddIntMacro(m, PyBUF_STRIDED);
2883 PyModule_AddIntMacro(m, PyBUF_STRIDED_RO);
2884 PyModule_AddIntMacro(m, PyBUF_CONTIG);
2885 PyModule_AddIntMacro(m, PyBUF_CONTIG_RO);
2886
2887 PyModule_AddIntMacro(m, PyBUF_READ);
2888 PyModule_AddIntMacro(m, PyBUF_WRITE);
2889
2890 return m;
2891 }
2892