1 /*
2 * Memoryview object implementation
3 * --------------------------------
4 *
5 * This implementation is a complete rewrite contributed by Stefan Krah in
6 * Python 3.3. Substantial credit goes to Antoine Pitrou (who had already
7 * fortified and rewritten the previous implementation) and Nick Coghlan
8 * (who came up with the idea of the ManagedBuffer) for analyzing the complex
9 * ownership rules.
10 *
11 */
12
13 #include "Python.h"
14 #include "pycore_abstract.h" // _PyIndex_Check()
15 #include "pycore_memoryobject.h" // _PyManagedBuffer_Type
16 #include "pycore_object.h" // _PyObject_GC_UNTRACK()
17 #include "pycore_strhex.h" // _Py_strhex_with_sep()
18 #include <stddef.h> // offsetof()
19
20 /*[clinic input]
21 class memoryview "PyMemoryViewObject *" "&PyMemoryView_Type"
22 [clinic start generated code]*/
23 /*[clinic end generated code: output=da39a3ee5e6b4b0d input=e2e49d2192835219]*/
24
25 #include "clinic/memoryobject.c.h"
26
27 /****************************************************************************/
28 /* ManagedBuffer Object */
29 /****************************************************************************/
30
31 /*
32 ManagedBuffer Object:
33 ---------------------
34
35 The purpose of this object is to facilitate the handling of chained
36 memoryviews that have the same underlying exporting object. PEP-3118
37 allows the underlying object to change while a view is exported. This
38 could lead to unexpected results when constructing a new memoryview
39 from an existing memoryview.
40
41 Rather than repeatedly redirecting buffer requests to the original base
42 object, all chained memoryviews use a single buffer snapshot. This
43 snapshot is generated by the constructor _PyManagedBuffer_FromObject().
44
45 Ownership rules:
46 ----------------
47
48 The master buffer inside a managed buffer is filled in by the original
49 base object. shape, strides, suboffsets and format are read-only for
50 all consumers.
51
52 A memoryview's buffer is a private copy of the exporter's buffer. shape,
53 strides and suboffsets belong to the memoryview and are thus writable.
54
55 If a memoryview itself exports several buffers via memory_getbuf(), all
56 buffer copies share shape, strides and suboffsets. In this case, the
57 arrays are NOT writable.
58
59 Reference count assumptions:
60 ----------------------------
61
62 The 'obj' member of a Py_buffer must either be NULL or refer to the
63 exporting base object. In the Python codebase, all getbufferprocs
64 return a new reference to view.obj (example: bytes_buffer_getbuffer()).
65
66 PyBuffer_Release() decrements view.obj (if non-NULL), so the
67 releasebufferprocs must NOT decrement view.obj.
68 */
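/* Illustrative sketch (not part of the implementation; the helper name is
   hypothetical): constructing a memoryview from another memoryview registers
   the new view with the existing managed buffer, so the exporter's
   getbufferproc runs only once per chain of views. */
static int
example_shared_managed_buffer(PyObject *exporter)
{
    PyObject *outer = PyMemoryView_FromObject(exporter);  /* snapshots the buffer */
    if (outer == NULL)
        return -1;
    PyObject *inner = PyMemoryView_FromObject(outer);     /* reuses outer's mbuf */
    if (inner == NULL) {
        Py_DECREF(outer);
        return -1;
    }
    /* Both views reference the same _PyManagedBufferObject. */
    assert(((PyMemoryViewObject *)outer)->mbuf ==
           ((PyMemoryViewObject *)inner)->mbuf);
    Py_DECREF(inner);
    Py_DECREF(outer);
    return 0;
}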
69
70
71 static inline _PyManagedBufferObject *
72 mbuf_alloc(void)
73 {
74 _PyManagedBufferObject *mbuf;
75
76 mbuf = (_PyManagedBufferObject *)
77 PyObject_GC_New(_PyManagedBufferObject, &_PyManagedBuffer_Type);
78 if (mbuf == NULL)
79 return NULL;
80 mbuf->flags = 0;
81 mbuf->exports = 0;
82 mbuf->master.obj = NULL;
83 _PyObject_GC_TRACK(mbuf);
84
85 return mbuf;
86 }
87
88 static PyObject *
89 _PyManagedBuffer_FromObject(PyObject *base, int flags)
90 {
91 _PyManagedBufferObject *mbuf;
92
93 mbuf = mbuf_alloc();
94 if (mbuf == NULL)
95 return NULL;
96
97 if (PyObject_GetBuffer(base, &mbuf->master, flags) < 0) {
98 mbuf->master.obj = NULL;
99 Py_DECREF(mbuf);
100 return NULL;
101 }
102
103 return (PyObject *)mbuf;
104 }
105
106 static void
107 mbuf_release(_PyManagedBufferObject *self)
108 {
109 if (self->flags&_Py_MANAGED_BUFFER_RELEASED)
110 return;
111
112 self->flags |= _Py_MANAGED_BUFFER_RELEASED;
113
114 /* PyBuffer_Release() decrements master->obj and sets it to NULL. */
115 _PyObject_GC_UNTRACK(self);
116 PyBuffer_Release(&self->master);
117 }
118
119 static void
120 mbuf_dealloc(PyObject *_self)
121 {
122 _PyManagedBufferObject *self = (_PyManagedBufferObject *)_self;
123 assert(self->exports == 0);
124 mbuf_release(self);
125 if (self->flags&_Py_MANAGED_BUFFER_FREE_FORMAT)
126 PyMem_Free(self->master.format);
127 PyObject_GC_Del(self);
128 }
129
130 static int
131 mbuf_traverse(PyObject *_self, visitproc visit, void *arg)
132 {
133 _PyManagedBufferObject *self = (_PyManagedBufferObject *)_self;
134 Py_VISIT(self->master.obj);
135 return 0;
136 }
137
138 static int
139 mbuf_clear(PyObject *_self)
140 {
141 _PyManagedBufferObject *self = (_PyManagedBufferObject *)_self;
142 assert(self->exports >= 0);
143 mbuf_release(self);
144 return 0;
145 }
146
147 PyTypeObject _PyManagedBuffer_Type = {
148 PyVarObject_HEAD_INIT(&PyType_Type, 0)
149 "managedbuffer",
150 sizeof(_PyManagedBufferObject),
151 0,
152 mbuf_dealloc, /* tp_dealloc */
153 0, /* tp_vectorcall_offset */
154 0, /* tp_getattr */
155 0, /* tp_setattr */
156 0, /* tp_as_async */
157 0, /* tp_repr */
158 0, /* tp_as_number */
159 0, /* tp_as_sequence */
160 0, /* tp_as_mapping */
161 0, /* tp_hash */
162 0, /* tp_call */
163 0, /* tp_str */
164 PyObject_GenericGetAttr, /* tp_getattro */
165 0, /* tp_setattro */
166 0, /* tp_as_buffer */
167 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
168 0, /* tp_doc */
169 mbuf_traverse, /* tp_traverse */
170 mbuf_clear /* tp_clear */
171 };
172
173
174 /****************************************************************************/
175 /* MemoryView Object */
176 /****************************************************************************/
177
178 /* In the process of breaking reference cycles mbuf_release() can be
179 called before memory_release(). */
180 #define BASE_INACCESSIBLE(mv) \
181 (((PyMemoryViewObject *)mv)->flags&_Py_MEMORYVIEW_RELEASED || \
182 ((PyMemoryViewObject *)mv)->mbuf->flags&_Py_MANAGED_BUFFER_RELEASED)
183
184 #define CHECK_RELEASED(mv) \
185 if (BASE_INACCESSIBLE(mv)) { \
186 PyErr_SetString(PyExc_ValueError, \
187 "operation forbidden on released memoryview object"); \
188 return NULL; \
189 }
190
191 #define CHECK_RELEASED_INT(mv) \
192 if (BASE_INACCESSIBLE(mv)) { \
193 PyErr_SetString(PyExc_ValueError, \
194 "operation forbidden on released memoryview object"); \
195 return -1; \
196 }
197
198 #define CHECK_RESTRICTED(mv) \
199 if (((PyMemoryViewObject *)(mv))->flags & _Py_MEMORYVIEW_RESTRICTED) { \
200 PyErr_SetString(PyExc_ValueError, \
201 "cannot create new view on restricted memoryview"); \
202 return NULL; \
203 }
204
205 #define CHECK_RESTRICTED_INT(mv) \
206 if (((PyMemoryViewObject *)(mv))->flags & _Py_MEMORYVIEW_RESTRICTED) { \
207 PyErr_SetString(PyExc_ValueError, \
208 "cannot create new view on restricted memoryview"); \
209 return -1; \
210 }
211
212 /* See gh-92888. These macros signal that we need to check the memoryview
213 again due to possible reads after the buffer has been freed. */
214 #define CHECK_RELEASED_AGAIN(mv) CHECK_RELEASED(mv)
215 #define CHECK_RELEASED_INT_AGAIN(mv) CHECK_RELEASED_INT(mv)
216
217 #define CHECK_LIST_OR_TUPLE(v) \
218 if (!PyList_Check(v) && !PyTuple_Check(v)) { \
219 PyErr_SetString(PyExc_TypeError, \
220 #v " must be a list or a tuple"); \
221 return NULL; \
222 }
223
224 #define VIEW_ADDR(mv) (&((PyMemoryViewObject *)mv)->view)
225
226 /* Check for the presence of suboffsets in the first dimension. */
227 #define HAVE_PTR(suboffsets, dim) (suboffsets && suboffsets[dim] >= 0)
228 /* Adjust ptr if suboffsets are present. */
229 #define ADJUST_PTR(ptr, suboffsets, dim) \
230 (HAVE_PTR(suboffsets, dim) ? *((char**)ptr) + suboffsets[dim] : ptr)
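/* Worked example (illustrative): for a PIL-style array with
   suboffsets = {16} in dimension 0, the memory at ptr holds a pointer rather
   than the data itself.  ADJUST_PTR(ptr, suboffsets, 0) therefore loads that
   pointer (*(char **)ptr) and advances it by 16 bytes; if suboffsets is NULL
   or suboffsets[0] < 0, ptr is returned unchanged. */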
231
232 /* Memoryview buffer properties */
233 #define MV_C_CONTIGUOUS(flags) (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C))
234 #define MV_F_CONTIGUOUS(flags) \
235 (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_FORTRAN))
236 #define MV_ANY_CONTIGUOUS(flags) \
237 (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN))
238
239 /* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
240 #define MV_CONTIGUOUS_NDIM1(view) \
241 ((view)->shape[0] == 1 || (view)->strides[0] == (view)->itemsize)
242
243 /* getbuffer() requests */
244 #define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
245 #define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
246 #define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
247 #define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
248 #define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
249 #define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
250 #define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
251 #define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)
252
253
254 /**************************************************************************/
255 /* Copy memoryview buffers */
256 /**************************************************************************/
257
258 /* The functions in this section take a source and a destination buffer
259 with the same logical structure: format, itemsize, ndim and shape
260 are identical, with ndim > 0.
261
262 NOTE: All buffers are assumed to have PyBUF_FULL information, which
263 is the case for memoryviews! */
264
265
266 /* Assumptions: ndim >= 1. The macro tests for a corner case that should
267 perhaps be explicitly forbidden in the PEP. */
268 #define HAVE_SUBOFFSETS_IN_LAST_DIM(view) \
269 (view->suboffsets && view->suboffsets[view->ndim-1] >= 0)
270
271 static inline int
272 last_dim_is_contiguous(const Py_buffer *dest, const Py_buffer *src)
273 {
274 assert(dest->ndim > 0 && src->ndim > 0);
275 return (!HAVE_SUBOFFSETS_IN_LAST_DIM(dest) &&
276 !HAVE_SUBOFFSETS_IN_LAST_DIM(src) &&
277 dest->strides[dest->ndim-1] == dest->itemsize &&
278 src->strides[src->ndim-1] == src->itemsize);
279 }
280
281 /* This is not a general function for determining format equivalence.
282 It is used in copy_single() and copy_buffer() to weed out non-matching
283 formats. Skipping the '@' character is specifically used in slice
284 assignments, where the lvalue is already known to have a single character
285 format. This is a performance hack that could be rewritten (if properly
286 benchmarked). */
287 static inline int
288 equiv_format(const Py_buffer *dest, const Py_buffer *src)
289 {
290 const char *dfmt, *sfmt;
291
292 assert(dest->format && src->format);
293 dfmt = dest->format[0] == '@' ? dest->format+1 : dest->format;
294 sfmt = src->format[0] == '@' ? src->format+1 : src->format;
295
296 if (strcmp(dfmt, sfmt) != 0 ||
297 dest->itemsize != src->itemsize) {
298 return 0;
299 }
300
301 return 1;
302 }
303
304 /* Two shapes are equivalent if they are either equal or identical up
305 to a zero element at the same position. For example, in NumPy arrays
306 the shapes [1, 0, 5] and [1, 0, 7] are equivalent. */
307 static inline int
308 equiv_shape(const Py_buffer *dest, const Py_buffer *src)
309 {
310 int i;
311
312 if (dest->ndim != src->ndim)
313 return 0;
314
315 for (i = 0; i < dest->ndim; i++) {
316 if (dest->shape[i] != src->shape[i])
317 return 0;
318 if (dest->shape[i] == 0)
319 break;
320 }
321
322 return 1;
323 }
324
325 /* Check that the logical structure of the destination and source buffers
326 is identical. */
327 static int
328 equiv_structure(const Py_buffer *dest, const Py_buffer *src)
329 {
330 if (!equiv_format(dest, src) ||
331 !equiv_shape(dest, src)) {
332 PyErr_SetString(PyExc_ValueError,
333 "memoryview assignment: lvalue and rvalue have different "
334 "structures");
335 return 0;
336 }
337
338 return 1;
339 }
340
341 /* Base case for recursive multi-dimensional copying. Contiguous arrays are
342 copied with very little overhead. Assumptions: ndim == 1, mem == NULL or
343 sizeof(mem) == shape[0] * itemsize. */
344 static void
345 copy_base(const Py_ssize_t *shape, Py_ssize_t itemsize,
346 char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
347 char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
348 char *mem)
349 {
350 if (mem == NULL) { /* contiguous */
351 Py_ssize_t size = shape[0] * itemsize;
352 if (dptr + size < sptr || sptr + size < dptr)
353 memcpy(dptr, sptr, size); /* no overlapping */
354 else
355 memmove(dptr, sptr, size);
356 }
357 else {
358 char *p;
359 Py_ssize_t i;
360 for (i=0, p=mem; i < shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
361 char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
362 memcpy(p, xsptr, itemsize);
363 }
364 for (i=0, p=mem; i < shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
365 char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
366 memcpy(xdptr, p, itemsize);
367 }
368 }
369
370 }
371
372 /* Recursively copy a source buffer to a destination buffer. The two buffers
373 have the same ndim, shape and itemsize. */
374 static void
375 copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
376 char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
377 char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
378 char *mem)
379 {
380 Py_ssize_t i;
381
382 assert(ndim >= 1);
383
384 if (ndim == 1) {
385 copy_base(shape, itemsize,
386 dptr, dstrides, dsuboffsets,
387 sptr, sstrides, ssuboffsets,
388 mem);
389 return;
390 }
391
392 for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
393 char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
394 char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
395
396 copy_rec(shape+1, ndim-1, itemsize,
397 xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
398 xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
399 mem);
400 }
401 }
402
403 /* Faster copying of one-dimensional arrays. */
404 static int
405 copy_single(PyMemoryViewObject *self, const Py_buffer *dest, const Py_buffer *src)
406 {
407 CHECK_RELEASED_INT_AGAIN(self);
408 char *mem = NULL;
409
410 assert(dest->ndim == 1);
411
412 if (!equiv_structure(dest, src))
413 return -1;
414
415 if (!last_dim_is_contiguous(dest, src)) {
416 mem = PyMem_Malloc(dest->shape[0] * dest->itemsize);
417 if (mem == NULL) {
418 PyErr_NoMemory();
419 return -1;
420 }
421 }
422
423 copy_base(dest->shape, dest->itemsize,
424 dest->buf, dest->strides, dest->suboffsets,
425 src->buf, src->strides, src->suboffsets,
426 mem);
427
428 if (mem)
429 PyMem_Free(mem);
430
431 return 0;
432 }
433
434 /* Recursively copy src to dest. Both buffers must have the same basic
435 structure. Copying is atomic, the function never fails with a partial
436 copy. */
437 static int
438 copy_buffer(const Py_buffer *dest, const Py_buffer *src)
439 {
440 char *mem = NULL;
441
442 assert(dest->ndim > 0);
443
444 if (!equiv_structure(dest, src))
445 return -1;
446
447 if (!last_dim_is_contiguous(dest, src)) {
448 mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
449 if (mem == NULL) {
450 PyErr_NoMemory();
451 return -1;
452 }
453 }
454
455 copy_rec(dest->shape, dest->ndim, dest->itemsize,
456 dest->buf, dest->strides, dest->suboffsets,
457 src->buf, src->strides, src->suboffsets,
458 mem);
459
460 if (mem)
461 PyMem_Free(mem);
462
463 return 0;
464 }
465
466 /* Initialize strides for a C-contiguous array. */
467 static inline void
468 init_strides_from_shape(Py_buffer *view)
469 {
470 Py_ssize_t i;
471
472 assert(view->ndim > 0);
473
474 view->strides[view->ndim-1] = view->itemsize;
475 for (i = view->ndim-2; i >= 0; i--)
476 view->strides[i] = view->strides[i+1] * view->shape[i+1];
477 }
478
479 /* Initialize strides for a Fortran-contiguous array. */
480 static inline void
481 init_fortran_strides_from_shape(Py_buffer *view)
482 {
483 Py_ssize_t i;
484
485 assert(view->ndim > 0);
486
487 view->strides[0] = view->itemsize;
488 for (i = 1; i < view->ndim; i++)
489 view->strides[i] = view->strides[i-1] * view->shape[i-1];
490 }
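/* Worked example (illustrative): for shape = {2, 3, 4} and itemsize = 8,
   init_strides_from_shape() yields strides = {96, 32, 8} (row-major), while
   init_fortran_strides_from_shape() yields strides = {8, 16, 48}
   (column-major).  Either way, product(shape) * itemsize == 192 bytes. */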
491
492 /* Copy src to a contiguous representation. order is one of 'C', 'F' (Fortran)
493 or 'A' (Any). Assumptions: src has PyBUF_FULL information, src->ndim >= 1,
494 len(mem) == src->len. */
495 static int
496 buffer_to_contiguous(char *mem, const Py_buffer *src, char order)
497 {
498 Py_buffer dest;
499 Py_ssize_t *strides;
500 int ret;
501
502 assert(src->ndim >= 1);
503 assert(src->shape != NULL);
504 assert(src->strides != NULL);
505
506 strides = PyMem_Malloc(src->ndim * (sizeof *src->strides));
507 if (strides == NULL) {
508 PyErr_NoMemory();
509 return -1;
510 }
511
512 /* initialize dest */
513 dest = *src;
514 dest.buf = mem;
515 /* shape is constant and shared: the logical representation of the
516 array is unaltered. */
517
518 /* The physical representation determined by strides (and possibly
519 suboffsets) may change. */
520 dest.strides = strides;
521 if (order == 'C' || order == 'A') {
522 init_strides_from_shape(&dest);
523 }
524 else {
525 init_fortran_strides_from_shape(&dest);
526 }
527
528 dest.suboffsets = NULL;
529
530 ret = copy_buffer(&dest, src);
531
532 PyMem_Free(strides);
533 return ret;
534 }
535
536
537 /****************************************************************************/
538 /* Constructors */
539 /****************************************************************************/
540
541 /* Initialize values that are shared with the managed buffer. */
542 static inline void
543 init_shared_values(Py_buffer *dest, const Py_buffer *src)
544 {
545 dest->obj = src->obj;
546 dest->buf = src->buf;
547 dest->len = src->len;
548 dest->itemsize = src->itemsize;
549 dest->readonly = src->readonly;
550 dest->format = src->format ? src->format : "B";
551 dest->internal = src->internal;
552 }
553
554 /* Copy shape and strides. Reconstruct missing values. */
555 static void
556 init_shape_strides(Py_buffer *dest, const Py_buffer *src)
557 {
558 Py_ssize_t i;
559
560 if (src->ndim == 0) {
561 dest->shape = NULL;
562 dest->strides = NULL;
563 return;
564 }
565 if (src->ndim == 1) {
566 dest->shape[0] = src->shape ? src->shape[0] : src->len / src->itemsize;
567 dest->strides[0] = src->strides ? src->strides[0] : src->itemsize;
568 return;
569 }
570
571 for (i = 0; i < src->ndim; i++)
572 dest->shape[i] = src->shape[i];
573 if (src->strides) {
574 for (i = 0; i < src->ndim; i++)
575 dest->strides[i] = src->strides[i];
576 }
577 else {
578 init_strides_from_shape(dest);
579 }
580 }
581
582 static inline void
583 init_suboffsets(Py_buffer *dest, const Py_buffer *src)
584 {
585 Py_ssize_t i;
586
587 if (src->suboffsets == NULL) {
588 dest->suboffsets = NULL;
589 return;
590 }
591 for (i = 0; i < src->ndim; i++)
592 dest->suboffsets[i] = src->suboffsets[i];
593 }
594
595 /* len = product(shape) * itemsize */
596 static inline void
597 init_len(Py_buffer *view)
598 {
599 Py_ssize_t i, len;
600
601 len = 1;
602 for (i = 0; i < view->ndim; i++)
603 len *= view->shape[i];
604 len *= view->itemsize;
605
606 view->len = len;
607 }
608
609 /* Initialize memoryview buffer properties. */
610 static void
611 init_flags(PyMemoryViewObject *mv)
612 {
613 const Py_buffer *view = &mv->view;
614 int flags = 0;
615
616 switch (view->ndim) {
617 case 0:
618 flags |= (_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|
619 _Py_MEMORYVIEW_FORTRAN);
620 break;
621 case 1:
622 if (MV_CONTIGUOUS_NDIM1(view))
623 flags |= (_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
624 break;
625 default:
626 if (PyBuffer_IsContiguous(view, 'C'))
627 flags |= _Py_MEMORYVIEW_C;
628 if (PyBuffer_IsContiguous(view, 'F'))
629 flags |= _Py_MEMORYVIEW_FORTRAN;
630 break;
631 }
632
633 if (view->suboffsets) {
634 flags |= _Py_MEMORYVIEW_PIL;
635 flags &= ~(_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
636 }
637
638 mv->flags = flags;
639 }
640
641 /* Allocate a new memoryview and perform basic initialization. New memoryviews
642 are exclusively created through the mbuf_add functions. */
643 static inline PyMemoryViewObject *
644 memory_alloc(int ndim)
645 {
646 PyMemoryViewObject *mv;
647
648 mv = (PyMemoryViewObject *)
649 PyObject_GC_NewVar(PyMemoryViewObject, &PyMemoryView_Type, 3*ndim);
650 if (mv == NULL)
651 return NULL;
652
653 mv->mbuf = NULL;
654 mv->hash = -1;
655 mv->flags = 0;
656 mv->exports = 0;
657 mv->view.ndim = ndim;
658 mv->view.shape = mv->ob_array;
659 mv->view.strides = mv->ob_array + ndim;
660 mv->view.suboffsets = mv->ob_array + 2 * ndim;
661 mv->weakreflist = NULL;
662
663 _PyObject_GC_TRACK(mv);
664 return mv;
665 }
666
667 /*
668 Return a new memoryview that is registered with mbuf. If src is NULL,
669 use mbuf->master as the underlying buffer. Otherwise, use src.
670
671 The new memoryview has full buffer information: shape and strides
672 are always present, suboffsets as needed. Arrays are copied to
673 the memoryview's ob_array field.
674 */
675 static PyObject *
676 mbuf_add_view(_PyManagedBufferObject *mbuf, const Py_buffer *src)
677 {
678 PyMemoryViewObject *mv;
679 Py_buffer *dest;
680
681 if (src == NULL)
682 src = &mbuf->master;
683
684 if (src->ndim > PyBUF_MAX_NDIM) {
685 PyErr_SetString(PyExc_ValueError,
686 "memoryview: number of dimensions must not exceed "
687 Py_STRINGIFY(PyBUF_MAX_NDIM));
688 return NULL;
689 }
690
691 mv = memory_alloc(src->ndim);
692 if (mv == NULL)
693 return NULL;
694
695 dest = &mv->view;
696 init_shared_values(dest, src);
697 init_shape_strides(dest, src);
698 init_suboffsets(dest, src);
699 init_flags(mv);
700
701 mv->mbuf = (_PyManagedBufferObject*)Py_NewRef(mbuf);
702 mbuf->exports++;
703
704 return (PyObject *)mv;
705 }
706
707 /* Register an incomplete view: shape, strides, suboffsets and flags still
708 need to be initialized. Use 'ndim' instead of src->ndim to determine the
709 size of the memoryview's ob_array.
710
711 Assumption: ndim <= PyBUF_MAX_NDIM. */
712 static PyObject *
713 mbuf_add_incomplete_view(_PyManagedBufferObject *mbuf, const Py_buffer *src,
714 int ndim)
715 {
716 PyMemoryViewObject *mv;
717 Py_buffer *dest;
718
719 if (src == NULL)
720 src = &mbuf->master;
721
722 assert(ndim <= PyBUF_MAX_NDIM);
723
724 mv = memory_alloc(ndim);
725 if (mv == NULL)
726 return NULL;
727
728 dest = &mv->view;
729 init_shared_values(dest, src);
730
731 mv->mbuf = (_PyManagedBufferObject*)Py_NewRef(mbuf);
732 mbuf->exports++;
733
734 return (PyObject *)mv;
735 }
736
737 /* Expose a raw memory area as a view of contiguous bytes. flags can be
738 PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes).
739 The memoryview has complete buffer information. */
740 PyObject *
741 PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags)
742 {
743 _PyManagedBufferObject *mbuf;
744 PyObject *mv;
745 int readonly;
746
747 assert(mem != NULL);
748 assert(flags == PyBUF_READ || flags == PyBUF_WRITE);
749
750 mbuf = mbuf_alloc();
751 if (mbuf == NULL)
752 return NULL;
753
754 readonly = (flags == PyBUF_WRITE) ? 0 : 1;
755 (void)PyBuffer_FillInfo(&mbuf->master, NULL, mem, size, readonly,
756 PyBUF_FULL_RO);
757
758 mv = mbuf_add_view(mbuf, NULL);
759 Py_DECREF(mbuf);
760
761 return mv;
762 }
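/* Usage sketch (illustrative, not referenced by the implementation; the
   helper name is hypothetical): wrap a raw C buffer in a writable,
   one-dimensional memoryview with format 'B'.  The caller must keep 'mem'
   alive for the lifetime of the returned view. */
static PyObject *
example_view_from_raw_memory(char *mem, Py_ssize_t size)
{
    return PyMemoryView_FromMemory(mem, size, PyBUF_WRITE);
}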
763
764 /* Create a memoryview from a given Py_buffer. For simple byte views,
765 PyMemoryView_FromMemory() should be used instead.
766 This function is the only entry point that can create a master buffer
767 without full information. Because of this fact init_shape_strides()
768 must be able to reconstruct missing values. */
769 PyObject *
770 PyMemoryView_FromBuffer(const Py_buffer *info)
771 {
772 _PyManagedBufferObject *mbuf;
773 PyObject *mv;
774
775 if (info->buf == NULL) {
776 PyErr_SetString(PyExc_ValueError,
777 "PyMemoryView_FromBuffer(): info->buf must not be NULL");
778 return NULL;
779 }
780
781 mbuf = mbuf_alloc();
782 if (mbuf == NULL)
783 return NULL;
784
785 /* info->obj is either NULL or a borrowed reference. This reference
786 should not be decremented in PyBuffer_Release(). */
787 mbuf->master = *info;
788 mbuf->master.obj = NULL;
789
790 mv = mbuf_add_view(mbuf, NULL);
791 Py_DECREF(mbuf);
792
793 return mv;
794 }
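/* Usage sketch (illustrative, hypothetical helper): fill a Py_buffer by hand
   with PyBuffer_FillInfo() and expose it through PyMemoryView_FromBuffer().
   Since info.obj stays NULL, the caller remains responsible for keeping the
   memory alive while the view exists. */
static PyObject *
example_view_from_filled_buffer(char *mem, Py_ssize_t size)
{
    Py_buffer info;
    if (PyBuffer_FillInfo(&info, NULL, mem, size, 1 /* readonly */,
                          PyBUF_FULL_RO) < 0) {
        return NULL;
    }
    return PyMemoryView_FromBuffer(&info);
}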
795
796 /* Create a memoryview from an object that implements the buffer protocol,
797 using the given flags.
798 If the object is a memoryview, the new memoryview must be registered
799 with the same managed buffer. Otherwise, a new managed buffer is created. */
800 static PyObject *
801 PyMemoryView_FromObjectAndFlags(PyObject *v, int flags)
802 {
803 _PyManagedBufferObject *mbuf;
804
805 if (PyMemoryView_Check(v)) {
806 PyMemoryViewObject *mv = (PyMemoryViewObject *)v;
807 CHECK_RELEASED(mv);
808 CHECK_RESTRICTED(mv);
809 return mbuf_add_view(mv->mbuf, &mv->view);
810 }
811 else if (PyObject_CheckBuffer(v)) {
812 PyObject *ret;
813 mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(v, flags);
814 if (mbuf == NULL)
815 return NULL;
816 ret = mbuf_add_view(mbuf, NULL);
817 Py_DECREF(mbuf);
818 return ret;
819 }
820
821 PyErr_Format(PyExc_TypeError,
822 "memoryview: a bytes-like object is required, not '%.200s'",
823 Py_TYPE(v)->tp_name);
824 return NULL;
825 }
826
827 /* Create a memoryview from an object that implements the buffer protocol,
828 using the given flags.
829 If the object is a memoryview, the new memoryview must be registered
830 with the same managed buffer. Otherwise, a new managed buffer is created. */
831 PyObject *
832 _PyMemoryView_FromBufferProc(PyObject *v, int flags, getbufferproc bufferproc)
833 {
834 _PyManagedBufferObject *mbuf = mbuf_alloc();
835 if (mbuf == NULL)
836 return NULL;
837
838 int res = bufferproc(v, &mbuf->master, flags);
839 if (res < 0) {
840 mbuf->master.obj = NULL;
841 Py_DECREF(mbuf);
842 return NULL;
843 }
844
845 PyObject *ret = mbuf_add_view(mbuf, NULL);
846 Py_DECREF(mbuf);
847 return ret;
848 }
849
850 /* Create a memoryview from an object that implements the buffer protocol.
851 If the object is a memoryview, the new memoryview must be registered
852 with the same managed buffer. Otherwise, a new managed buffer is created. */
853 PyObject *
854 PyMemoryView_FromObject(PyObject *v)
855 {
856 return PyMemoryView_FromObjectAndFlags(v, PyBUF_FULL_RO);
857 }
858
859 /* Copy the format string from a base object that might vanish. */
860 static int
861 mbuf_copy_format(_PyManagedBufferObject *mbuf, const char *fmt)
862 {
863 if (fmt != NULL) {
864 char *cp = PyMem_Malloc(strlen(fmt)+1);
865 if (cp == NULL) {
866 PyErr_NoMemory();
867 return -1;
868 }
869 mbuf->master.format = strcpy(cp, fmt);
870 mbuf->flags |= _Py_MANAGED_BUFFER_FREE_FORMAT;
871 }
872
873 return 0;
874 }
875
876 /*
877 Return a memoryview that is based on a contiguous copy of src.
878 Assumptions: src has PyBUF_FULL_RO information, src->ndim > 0.
879
880 Ownership rules:
881 1) As usual, the returned memoryview has a private copy
882 of src->shape, src->strides and src->suboffsets.
883 2) src->format is copied to the master buffer and released
884 in mbuf_dealloc(). The releasebufferproc of the bytes
885 object is NULL, so it does not matter that mbuf_release()
886 passes the altered format pointer to PyBuffer_Release().
887 */
888 static PyObject *
889 memory_from_contiguous_copy(const Py_buffer *src, char order)
890 {
891 _PyManagedBufferObject *mbuf;
892 PyMemoryViewObject *mv;
893 PyObject *bytes;
894 Py_buffer *dest;
895 int i;
896
897 assert(src->ndim > 0);
898 assert(src->shape != NULL);
899
900 bytes = PyBytes_FromStringAndSize(NULL, src->len);
901 if (bytes == NULL)
902 return NULL;
903
904 mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(bytes, PyBUF_FULL_RO);
905 Py_DECREF(bytes);
906 if (mbuf == NULL)
907 return NULL;
908
909 if (mbuf_copy_format(mbuf, src->format) < 0) {
910 Py_DECREF(mbuf);
911 return NULL;
912 }
913
914 mv = (PyMemoryViewObject *)mbuf_add_incomplete_view(mbuf, NULL, src->ndim);
915 Py_DECREF(mbuf);
916 if (mv == NULL)
917 return NULL;
918
919 dest = &mv->view;
920
921 /* shared values are initialized correctly except for itemsize */
922 dest->itemsize = src->itemsize;
923
924 /* shape and strides */
925 for (i = 0; i < src->ndim; i++) {
926 dest->shape[i] = src->shape[i];
927 }
928 if (order == 'C' || order == 'A') {
929 init_strides_from_shape(dest);
930 }
931 else {
932 init_fortran_strides_from_shape(dest);
933 }
934 /* suboffsets */
935 dest->suboffsets = NULL;
936
937 /* flags */
938 init_flags(mv);
939
940 if (copy_buffer(dest, src) < 0) {
941 Py_DECREF(mv);
942 return NULL;
943 }
944
945 return (PyObject *)mv;
946 }
947
948 /*
949 Return a new memoryview object based on a contiguous exporter with
950 buffertype={PyBUF_READ, PyBUF_WRITE} and order={'C', 'F'ortran, or 'A'ny}.
951 The logical structure of the input and output buffers is the same
952 (i.e. tolist(input) == tolist(output)), but the physical layout in
953 memory can be explicitly chosen.
954
955 As usual, if buffertype=PyBUF_WRITE, the exporter's buffer must be writable,
956 otherwise it may be writable or read-only.
957
958 If the exporter is already contiguous with the desired target order,
959 the memoryview will be directly based on the exporter.
960
961 Otherwise, if the buffertype is PyBUF_READ, the memoryview will be
962 based on a new bytes object. If order={'C', 'A'ny}, use 'C' order,
963 'F'ortran order otherwise.
964 */
965 PyObject *
966 PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char order)
967 {
968 PyMemoryViewObject *mv;
969 PyObject *ret;
970 Py_buffer *view;
971
972 assert(buffertype == PyBUF_READ || buffertype == PyBUF_WRITE);
973 assert(order == 'C' || order == 'F' || order == 'A');
974
975 mv = (PyMemoryViewObject *)PyMemoryView_FromObject(obj);
976 if (mv == NULL)
977 return NULL;
978
979 view = &mv->view;
980 if (buffertype == PyBUF_WRITE && view->readonly) {
981 PyErr_SetString(PyExc_BufferError,
982 "underlying buffer is not writable");
983 Py_DECREF(mv);
984 return NULL;
985 }
986
987 if (PyBuffer_IsContiguous(view, order))
988 return (PyObject *)mv;
989
990 if (buffertype == PyBUF_WRITE) {
991 PyErr_SetString(PyExc_BufferError,
992 "writable contiguous buffer requested "
993 "for a non-contiguous object.");
994 Py_DECREF(mv);
995 return NULL;
996 }
997
998 ret = memory_from_contiguous_copy(view, order);
999 Py_DECREF(mv);
1000 return ret;
1001 }
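/* Usage sketch (illustrative, hypothetical helper): request a read-only,
   C-contiguous view of an arbitrary exporter.  If 'obj' is already
   C-contiguous, the view is based directly on it; otherwise it is based on
   a contiguous bytes copy made by memory_from_contiguous_copy(). */
static PyObject *
example_c_contiguous_view(PyObject *obj)
{
    return PyMemoryView_GetContiguous(obj, PyBUF_READ, 'C');
}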
1002
1003
1004 /*[clinic input]
1005 @classmethod
1006 memoryview.__new__
1007
1008 object: object
1009
1010 Create a new memoryview object which references the given object.
1011 [clinic start generated code]*/
1012
1013 static PyObject *
1014 memoryview_impl(PyTypeObject *type, PyObject *object)
1015 /*[clinic end generated code: output=7de78e184ed66db8 input=f04429eb0bdf8c6e]*/
1016 {
1017 return PyMemoryView_FromObject(object);
1018 }
1019
1020
1021 /*[clinic input]
1022 @classmethod
1023 memoryview._from_flags
1024
1025 object: object
1026 flags: int
1027
1028 Create a new memoryview object which references the given object.
1029 [clinic start generated code]*/
1030
1031 static PyObject *
1032 memoryview__from_flags_impl(PyTypeObject *type, PyObject *object, int flags)
1033 /*[clinic end generated code: output=bf71f9906c266ee2 input=f5f82fd0e744356b]*/
1034 {
1035 return PyMemoryView_FromObjectAndFlags(object, flags);
1036 }
1037
1038
1039 /****************************************************************************/
1040 /* Previously in abstract.c */
1041 /****************************************************************************/
1042
1043 typedef struct {
1044 Py_buffer view;
1045 Py_ssize_t array[1];
1046 } Py_buffer_full;
1047
1048 int
1049 PyBuffer_ToContiguous(void *buf, const Py_buffer *src, Py_ssize_t len, char order)
1050 {
1051 Py_buffer_full *fb = NULL;
1052 int ret;
1053
1054 assert(order == 'C' || order == 'F' || order == 'A');
1055
1056 if (len != src->len) {
1057 PyErr_SetString(PyExc_ValueError,
1058 "PyBuffer_ToContiguous: len != view->len");
1059 return -1;
1060 }
1061
1062 if (PyBuffer_IsContiguous(src, order)) {
1063 memcpy((char *)buf, src->buf, len);
1064 return 0;
1065 }
1066
1067 /* buffer_to_contiguous() assumes PyBUF_FULL */
1068 fb = PyMem_Malloc(sizeof *fb + 3 * src->ndim * (sizeof *fb->array));
1069 if (fb == NULL) {
1070 PyErr_NoMemory();
1071 return -1;
1072 }
1073 fb->view.ndim = src->ndim;
1074 fb->view.shape = fb->array;
1075 fb->view.strides = fb->array + src->ndim;
1076 fb->view.suboffsets = fb->array + 2 * src->ndim;
1077
1078 init_shared_values(&fb->view, src);
1079 init_shape_strides(&fb->view, src);
1080 init_suboffsets(&fb->view, src);
1081
1082 src = &fb->view;
1083
1084 ret = buffer_to_contiguous(buf, src, order);
1085 PyMem_Free(fb);
1086 return ret;
1087 }
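/* Usage sketch (illustrative, hypothetical helper): flatten an exported
   buffer into a caller-provided array of view->len bytes, in C order. */
static int
example_copy_to_c_order(const Py_buffer *view, char *out)
{
    /* 'out' must provide room for view->len bytes. */
    return PyBuffer_ToContiguous(out, view, view->len, 'C');
}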
1088
1089
1090 /****************************************************************************/
1091 /* Release/GC management */
1092 /****************************************************************************/
1093
1094 /* Inform the managed buffer that this particular memoryview will not access
1095 the underlying buffer again. If no other memoryviews are registered with
1096 the managed buffer, the underlying buffer is released instantly and
1097 marked as inaccessible for both the memoryview and the managed buffer. */
1098 static void
1099 _memory_release(PyMemoryViewObject *self)
1100 {
1101 assert(self->exports == 0);
1102 if (self->flags & _Py_MEMORYVIEW_RELEASED)
1103 return;
1104
1105 self->flags |= _Py_MEMORYVIEW_RELEASED;
1106 assert(self->mbuf->exports > 0);
1107 if (--self->mbuf->exports == 0) {
1108 mbuf_release(self->mbuf);
1109 }
1110 }
1111
1112 /*[clinic input]
1113 memoryview.release
1114
1115 Release the underlying buffer exposed by the memoryview object.
1116 [clinic start generated code]*/
1117
1118 static PyObject *
1119 memoryview_release_impl(PyMemoryViewObject *self)
1120 /*[clinic end generated code: output=d0b7e3ba95b7fcb9 input=bc71d1d51f4a52f0]*/
1121 {
1122 if (self->exports == 0) {
1123 _memory_release(self);
1124 Py_RETURN_NONE;
1125 }
1126
1127 if (self->exports > 0) {
1128 PyErr_Format(PyExc_BufferError,
1129 "memoryview has %zd exported buffer%s", self->exports,
1130 self->exports==1 ? "" : "s");
1131 return NULL;
1132 }
1133
1134 PyErr_SetString(PyExc_SystemError,
1135 "memoryview: negative export count");
1136 return NULL;
1137 }
1138
1139 static void
1140 memory_dealloc(PyObject *_self)
1141 {
1142 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
1143 assert(self->exports == 0);
1144 _PyObject_GC_UNTRACK(self);
1145 _memory_release(self);
1146 Py_CLEAR(self->mbuf);
1147 if (self->weakreflist != NULL)
1148 PyObject_ClearWeakRefs((PyObject *) self);
1149 PyObject_GC_Del(self);
1150 }
1151
1152 static int
1153 memory_traverse(PyObject *_self, visitproc visit, void *arg)
1154 {
1155 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
1156 Py_VISIT(self->mbuf);
1157 return 0;
1158 }
1159
1160 static int
1161 memory_clear(PyObject *_self)
1162 {
1163 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
1164 if (self->exports == 0) {
1165 _memory_release(self);
1166 Py_CLEAR(self->mbuf);
1167 }
1168 return 0;
1169 }
1170
1171 static PyObject *
1172 memory_enter(PyObject *self, PyObject *args)
1173 {
1174 CHECK_RELEASED(self);
1175 return Py_NewRef(self);
1176 }
1177
1178 static PyObject *
1179 memory_exit(PyObject *self, PyObject *args)
1180 {
1181 return memoryview_release_impl((PyMemoryViewObject *)self);
1182 }
1183
1184
1185 /****************************************************************************/
1186 /* Casting format and shape */
1187 /****************************************************************************/
1188
1189 #define IS_BYTE_FORMAT(f) (f == 'b' || f == 'B' || f == 'c')
1190
1191 static inline Py_ssize_t
1192 get_native_fmtchar(char *result, const char *fmt)
1193 {
1194 Py_ssize_t size = -1;
1195
1196 if (fmt[0] == '@') fmt++;
1197
1198 switch (fmt[0]) {
1199 case 'c': case 'b': case 'B': size = sizeof(char); break;
1200 case 'h': case 'H': size = sizeof(short); break;
1201 case 'i': case 'I': size = sizeof(int); break;
1202 case 'l': case 'L': size = sizeof(long); break;
1203 case 'q': case 'Q': size = sizeof(long long); break;
1204 case 'n': case 'N': size = sizeof(Py_ssize_t); break;
1205 case 'f': size = sizeof(float); break;
1206 case 'd': size = sizeof(double); break;
1207 case 'e': size = sizeof(float) / 2; break;
1208 case '?': size = sizeof(_Bool); break;
1209 case 'P': size = sizeof(void *); break;
1210 }
1211
1212 if (size > 0 && fmt[1] == '\0') {
1213 *result = fmt[0];
1214 return size;
1215 }
1216
1217 return -1;
1218 }
1219
1220 static inline const char *
1221 get_native_fmtstr(const char *fmt)
1222 {
1223 int at = 0;
1224
1225 if (fmt[0] == '@') {
1226 at = 1;
1227 fmt++;
1228 }
1229 if (fmt[0] == '\0' || fmt[1] != '\0') {
1230 return NULL;
1231 }
1232
1233 #define RETURN(s) do { return at ? "@" s : s; } while (0)
1234
1235 switch (fmt[0]) {
1236 case 'c': RETURN("c");
1237 case 'b': RETURN("b");
1238 case 'B': RETURN("B");
1239 case 'h': RETURN("h");
1240 case 'H': RETURN("H");
1241 case 'i': RETURN("i");
1242 case 'I': RETURN("I");
1243 case 'l': RETURN("l");
1244 case 'L': RETURN("L");
1245 case 'q': RETURN("q");
1246 case 'Q': RETURN("Q");
1247 case 'n': RETURN("n");
1248 case 'N': RETURN("N");
1249 case 'f': RETURN("f");
1250 case 'd': RETURN("d");
1251 case 'e': RETURN("e");
1252 case '?': RETURN("?");
1253 case 'P': RETURN("P");
1254 }
1255
1256 return NULL;
1257 }
1258
1259
1260 /* Cast a memoryview's data type to 'format'. The input array must be
1261 C-contiguous. At least one of input-format, output-format must have
1262 byte size. The output array is 1-D, with the same byte length as the
1263 input array. Thus, view->len must be a multiple of the new itemsize. */
1264 static int
1265 cast_to_1D(PyMemoryViewObject *mv, PyObject *format)
1266 {
1267 Py_buffer *view = &mv->view;
1268 PyObject *asciifmt;
1269 char srcchar, destchar;
1270 Py_ssize_t itemsize;
1271 int ret = -1;
1272
1273 assert(view->ndim >= 1);
1274 assert(Py_SIZE(mv) == 3*view->ndim);
1275 assert(view->shape == mv->ob_array);
1276 assert(view->strides == mv->ob_array + view->ndim);
1277 assert(view->suboffsets == mv->ob_array + 2*view->ndim);
1278
1279 asciifmt = PyUnicode_AsASCIIString(format);
1280 if (asciifmt == NULL)
1281 return ret;
1282
1283 itemsize = get_native_fmtchar(&destchar, PyBytes_AS_STRING(asciifmt));
1284 if (itemsize < 0) {
1285 PyErr_SetString(PyExc_ValueError,
1286 "memoryview: destination format must be a native single "
1287 "character format prefixed with an optional '@'");
1288 goto out;
1289 }
1290
1291 if ((get_native_fmtchar(&srcchar, view->format) < 0 ||
1292 !IS_BYTE_FORMAT(srcchar)) && !IS_BYTE_FORMAT(destchar)) {
1293 PyErr_SetString(PyExc_TypeError,
1294 "memoryview: cannot cast between two non-byte formats");
1295 goto out;
1296 }
1297 if (view->len % itemsize) {
1298 PyErr_SetString(PyExc_TypeError,
1299 "memoryview: length is not a multiple of itemsize");
1300 goto out;
1301 }
1302
1303 view->format = (char *)get_native_fmtstr(PyBytes_AS_STRING(asciifmt));
1304 if (view->format == NULL) {
1305 /* NOT_REACHED: get_native_fmtchar() already validates the format. */
1306 PyErr_SetString(PyExc_RuntimeError,
1307 "memoryview: internal error");
1308 goto out;
1309 }
1310 view->itemsize = itemsize;
1311
1312 view->ndim = 1;
1313 view->shape[0] = view->len / view->itemsize;
1314 view->strides[0] = view->itemsize;
1315 view->suboffsets = NULL;
1316
1317 init_flags(mv);
1318
1319 ret = 0;
1320
1321 out:
1322 Py_DECREF(asciifmt);
1323 return ret;
1324 }
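/* Worked example (illustrative): casting a C-contiguous 12-byte view with
   format 'B' to format 'i' (itemsize 4 on most platforms) produces a 1-D
   view with shape[0] == 3 and strides[0] == 4, while view->len stays 12.
   A length that is not a multiple of the new itemsize fails with the
   TypeError raised above. */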
1325
1326 /* The memoryview must have space for 3*len(seq) elements. */
1327 static Py_ssize_t
1328 copy_shape(Py_ssize_t *shape, const PyObject *seq, Py_ssize_t ndim,
1329 Py_ssize_t itemsize)
1330 {
1331 Py_ssize_t x, i;
1332 Py_ssize_t len = itemsize;
1333
1334 for (i = 0; i < ndim; i++) {
1335 PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
1336 if (!PyLong_Check(tmp)) {
1337 PyErr_SetString(PyExc_TypeError,
1338 "memoryview.cast(): elements of shape must be integers");
1339 return -1;
1340 }
1341 x = PyLong_AsSsize_t(tmp);
1342 if (x == -1 && PyErr_Occurred()) {
1343 return -1;
1344 }
1345 if (x <= 0) {
1346 /* In general elements of shape may be 0, but not for casting. */
1347 PyErr_Format(PyExc_ValueError,
1348 "memoryview.cast(): elements of shape must be integers > 0");
1349 return -1;
1350 }
1351 if (x > PY_SSIZE_T_MAX / len) {
1352 PyErr_Format(PyExc_ValueError,
1353 "memoryview.cast(): product(shape) > SSIZE_MAX");
1354 return -1;
1355 }
1356 len *= x;
1357 shape[i] = x;
1358 }
1359
1360 return len;
1361 }
1362
1363 /* Cast a 1-D array to a new shape. The result array will be C-contiguous.
1364 If the result array does not have exactly the same byte length as the
1365 input array, raise ValueError. */
1366 static int
1367 cast_to_ND(PyMemoryViewObject *mv, const PyObject *shape, int ndim)
1368 {
1369 Py_buffer *view = &mv->view;
1370 Py_ssize_t len;
1371
1372 assert(view->ndim == 1); /* ndim from cast_to_1D() */
1373 assert(Py_SIZE(mv) == 3*(ndim==0?1:ndim)); /* ndim of result array */
1374 assert(view->shape == mv->ob_array);
1375 assert(view->strides == mv->ob_array + (ndim==0?1:ndim));
1376 assert(view->suboffsets == NULL);
1377
1378 view->ndim = ndim;
1379 if (view->ndim == 0) {
1380 view->shape = NULL;
1381 view->strides = NULL;
1382 len = view->itemsize;
1383 }
1384 else {
1385 len = copy_shape(view->shape, shape, ndim, view->itemsize);
1386 if (len < 0)
1387 return -1;
1388 init_strides_from_shape(view);
1389 }
1390
1391 if (view->len != len) {
1392 PyErr_SetString(PyExc_TypeError,
1393 "memoryview: product(shape) * itemsize != buffer size");
1394 return -1;
1395 }
1396
1397 init_flags(mv);
1398
1399 return 0;
1400 }
1401
1402 static int
1403 zero_in_shape(PyMemoryViewObject *mv)
1404 {
1405 Py_buffer *view = &mv->view;
1406 Py_ssize_t i;
1407
1408 for (i = 0; i < view->ndim; i++)
1409 if (view->shape[i] == 0)
1410 return 1;
1411
1412 return 0;
1413 }
1414
1415 /*
1416 Cast a copy of 'self' to a different view. The input view must
1417 be C-contiguous. The function always casts the input view to a
1418 1-D output according to 'format'. At least one of input-format,
1419 output-format must have byte size.
1420
1421 If 'shape' is given, the 1-D view from the previous step will
1422 be cast to a C-contiguous view with new shape and strides.
1423
1424 All casts must result in views that will have the exact byte
1425 size of the original input. Otherwise, an error is raised.
1426 */
1427 /*[clinic input]
1428 memoryview.cast
1429
1430 format: unicode
1431 shape: object = NULL
1432
1433 Cast a memoryview to a new format or shape.
1434 [clinic start generated code]*/
1435
1436 static PyObject *
1437 memoryview_cast_impl(PyMemoryViewObject *self, PyObject *format,
1438 PyObject *shape)
1439 /*[clinic end generated code: output=bae520b3a389cbab input=138936cc9041b1a3]*/
1440 {
1441 PyMemoryViewObject *mv = NULL;
1442 Py_ssize_t ndim = 1;
1443
1444 CHECK_RELEASED(self);
1445 CHECK_RESTRICTED(self);
1446
1447 if (!MV_C_CONTIGUOUS(self->flags)) {
1448 PyErr_SetString(PyExc_TypeError,
1449 "memoryview: casts are restricted to C-contiguous views");
1450 return NULL;
1451 }
1452 if ((shape || self->view.ndim != 1) && zero_in_shape(self)) {
1453 PyErr_SetString(PyExc_TypeError,
1454 "memoryview: cannot cast view with zeros in shape or strides");
1455 return NULL;
1456 }
1457 if (shape) {
1458 CHECK_LIST_OR_TUPLE(shape)
1459 ndim = PySequence_Fast_GET_SIZE(shape);
1460 if (ndim > PyBUF_MAX_NDIM) {
1461 PyErr_SetString(PyExc_ValueError,
1462 "memoryview: number of dimensions must not exceed "
1463 Py_STRINGIFY(PyBUF_MAX_NDIM));
1464 return NULL;
1465 }
1466 if (self->view.ndim != 1 && ndim != 1) {
1467 PyErr_SetString(PyExc_TypeError,
1468 "memoryview: cast must be 1D -> ND or ND -> 1D");
1469 return NULL;
1470 }
1471 }
1472
1473 mv = (PyMemoryViewObject *)
1474 mbuf_add_incomplete_view(self->mbuf, &self->view, ndim==0 ? 1 : (int)ndim);
1475 if (mv == NULL)
1476 return NULL;
1477
1478 if (cast_to_1D(mv, format) < 0)
1479 goto error;
1480 if (shape && cast_to_ND(mv, shape, (int)ndim) < 0)
1481 goto error;
1482
1483 return (PyObject *)mv;
1484
1485 error:
1486 Py_DECREF(mv);
1487 return NULL;
1488 }
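/* Usage sketch (illustrative, hypothetical helper): invoke memoryview.cast()
   from C.  This is equivalent to view.cast("B") at the Python level and
   reinterprets a C-contiguous view as unsigned bytes. */
static PyObject *
example_cast_to_unsigned_bytes(PyObject *view)
{
    return PyObject_CallMethod(view, "cast", "s", "B");
}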
1489
1490 /*[clinic input]
1491 memoryview.toreadonly
1492
1493 Return a readonly version of the memoryview.
1494 [clinic start generated code]*/
1495
1496 static PyObject *
1497 memoryview_toreadonly_impl(PyMemoryViewObject *self)
1498 /*[clinic end generated code: output=2c7e056f04c99e62 input=dc06d20f19ba236f]*/
1499 {
1500 CHECK_RELEASED(self);
1501 CHECK_RESTRICTED(self);
1502 /* Even if self is already readonly, we still need to create a new
1503 * object for .release() to work correctly.
1504 */
1505 self = (PyMemoryViewObject *) mbuf_add_view(self->mbuf, &self->view);
1506 if (self != NULL) {
1507 self->view.readonly = 1;
1508 };
1509 return (PyObject *) self;
1510 }
1511
1512
1513 /**************************************************************************/
1514 /* getbuffer */
1515 /**************************************************************************/
1516
1517 static int
1518 memory_getbuf(PyObject *_self, Py_buffer *view, int flags)
1519 {
1520 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
1521 Py_buffer *base = &self->view;
1522 int baseflags = self->flags;
1523
1524 CHECK_RELEASED_INT(self);
1525 CHECK_RESTRICTED_INT(self);
1526
1527 /* start with complete information */
1528 *view = *base;
1529 view->obj = NULL;
1530
1531 if (REQ_WRITABLE(flags) && base->readonly) {
1532 PyErr_SetString(PyExc_BufferError,
1533 "memoryview: underlying buffer is not writable");
1534 return -1;
1535 }
1536 if (!REQ_FORMAT(flags)) {
1537 /* NULL indicates that the buffer's data type has been cast to 'B'.
1538 view->itemsize is the _previous_ itemsize. If shape is present,
1539 the equality product(shape) * itemsize = len still holds at this
1540 point. The equality calcsize(format) = itemsize does _not_ hold
1541 from here on! */
1542 view->format = NULL;
1543 }
1544
1545 if (REQ_C_CONTIGUOUS(flags) && !MV_C_CONTIGUOUS(baseflags)) {
1546 PyErr_SetString(PyExc_BufferError,
1547 "memoryview: underlying buffer is not C-contiguous");
1548 return -1;
1549 }
1550 if (REQ_F_CONTIGUOUS(flags) && !MV_F_CONTIGUOUS(baseflags)) {
1551 PyErr_SetString(PyExc_BufferError,
1552 "memoryview: underlying buffer is not Fortran contiguous");
1553 return -1;
1554 }
1555 if (REQ_ANY_CONTIGUOUS(flags) && !MV_ANY_CONTIGUOUS(baseflags)) {
1556 PyErr_SetString(PyExc_BufferError,
1557 "memoryview: underlying buffer is not contiguous");
1558 return -1;
1559 }
1560 if (!REQ_INDIRECT(flags) && (baseflags & _Py_MEMORYVIEW_PIL)) {
1561 PyErr_SetString(PyExc_BufferError,
1562 "memoryview: underlying buffer requires suboffsets");
1563 return -1;
1564 }
1565 if (!REQ_STRIDES(flags)) {
1566 if (!MV_C_CONTIGUOUS(baseflags)) {
1567 PyErr_SetString(PyExc_BufferError,
1568 "memoryview: underlying buffer is not C-contiguous");
1569 return -1;
1570 }
1571 view->strides = NULL;
1572 }
1573 if (!REQ_SHAPE(flags)) {
1574 /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous,
1575 so base->buf = ndbuf->data. */
1576 if (view->format != NULL) {
1577 /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
1578 not make sense. */
1579 PyErr_Format(PyExc_BufferError,
1580 "memoryview: cannot cast to unsigned bytes if the format flag "
1581 "is present");
1582 return -1;
1583 }
1584 /* product(shape) * itemsize = len and calcsize(format) = itemsize
1585 do _not_ hold from here on! */
1586 view->ndim = 1;
1587 view->shape = NULL;
1588 }
1589
1590
1591 view->obj = Py_NewRef(self);
1592 self->exports++;
1593
1594 return 0;
1595 }
1596
1597 static void
1598 memory_releasebuf(PyObject *_self, Py_buffer *view)
1599 {
1600 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
1601 self->exports--;
1602 return;
1603 /* PyBuffer_Release() decrements view->obj after this function returns. */
1604 }
1605
1606 /* Buffer methods */
1607 static PyBufferProcs memory_as_buffer = {
1608 memory_getbuf, /* bf_getbuffer */
1609 memory_releasebuf, /* bf_releasebuffer */
1610 };
1611
1612
1613 /****************************************************************************/
1614 /* Optimized pack/unpack for all native format specifiers */
1615 /****************************************************************************/
1616
1617 /*
1618 Fix exceptions:
1619 1) Include format string in the error message.
1620 2) OverflowError -> ValueError.
1621 3) The error message from PyNumber_Index() is not ideal.
1622 */
1623 static int
1624 type_error_int(const char *fmt)
1625 {
1626 PyErr_Format(PyExc_TypeError,
1627 "memoryview: invalid type for format '%s'", fmt);
1628 return -1;
1629 }
1630
1631 static int
1632 value_error_int(const char *fmt)
1633 {
1634 PyErr_Format(PyExc_ValueError,
1635 "memoryview: invalid value for format '%s'", fmt);
1636 return -1;
1637 }
1638
1639 static int
1640 fix_error_int(const char *fmt)
1641 {
1642 assert(PyErr_Occurred());
1643 if (PyErr_ExceptionMatches(PyExc_TypeError)) {
1644 PyErr_Clear();
1645 return type_error_int(fmt);
1646 }
1647 else if (PyErr_ExceptionMatches(PyExc_OverflowError) ||
1648 PyErr_ExceptionMatches(PyExc_ValueError)) {
1649 PyErr_Clear();
1650 return value_error_int(fmt);
1651 }
1652
1653 return -1;
1654 }
1655
1656 /* Accept integer objects or objects with an __index__() method. */
1657 static long
1658 pylong_as_ld(PyObject *item)
1659 {
1660 PyObject *tmp;
1661 long ld;
1662
1663 tmp = _PyNumber_Index(item);
1664 if (tmp == NULL)
1665 return -1;
1666
1667 ld = PyLong_AsLong(tmp);
1668 Py_DECREF(tmp);
1669 return ld;
1670 }
1671
1672 static unsigned long
1673 pylong_as_lu(PyObject *item)
1674 {
1675 PyObject *tmp;
1676 unsigned long lu;
1677
1678 tmp = _PyNumber_Index(item);
1679 if (tmp == NULL)
1680 return (unsigned long)-1;
1681
1682 lu = PyLong_AsUnsignedLong(tmp);
1683 Py_DECREF(tmp);
1684 return lu;
1685 }
1686
1687 static long long
1688 pylong_as_lld(PyObject *item)
1689 {
1690 PyObject *tmp;
1691 long long lld;
1692
1693 tmp = _PyNumber_Index(item);
1694 if (tmp == NULL)
1695 return -1;
1696
1697 lld = PyLong_AsLongLong(tmp);
1698 Py_DECREF(tmp);
1699 return lld;
1700 }
1701
1702 static unsigned long long
1703 pylong_as_llu(PyObject *item)
1704 {
1705 PyObject *tmp;
1706 unsigned long long llu;
1707
1708 tmp = _PyNumber_Index(item);
1709 if (tmp == NULL)
1710 return (unsigned long long)-1;
1711
1712 llu = PyLong_AsUnsignedLongLong(tmp);
1713 Py_DECREF(tmp);
1714 return llu;
1715 }
1716
1717 static Py_ssize_t
1718 pylong_as_zd(PyObject *item)
1719 {
1720 PyObject *tmp;
1721 Py_ssize_t zd;
1722
1723 tmp = _PyNumber_Index(item);
1724 if (tmp == NULL)
1725 return -1;
1726
1727 zd = PyLong_AsSsize_t(tmp);
1728 Py_DECREF(tmp);
1729 return zd;
1730 }
1731
1732 static size_t
1733 pylong_as_zu(PyObject *item)
1734 {
1735 PyObject *tmp;
1736 size_t zu;
1737
1738 tmp = _PyNumber_Index(item);
1739 if (tmp == NULL)
1740 return (size_t)-1;
1741
1742 zu = PyLong_AsSize_t(tmp);
1743 Py_DECREF(tmp);
1744 return zu;
1745 }
1746
1747 /* Timings with the ndarray from _testbuffer.c indicate that using the
1748 struct module is around 15x slower than the two functions below. */
1749
1750 #define UNPACK_SINGLE(dest, ptr, type) \
1751 do { \
1752 type x; \
1753 memcpy((char *)&x, ptr, sizeof x); \
1754 dest = x; \
1755 } while (0)
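/* Illustrative expansion: UNPACK_SINGLE(ld, ptr, short) copies sizeof(short)
   bytes from ptr into a local 'short x' with memcpy() and then assigns x to
   ld.  Routing the load through memcpy() keeps it well-defined even when ptr
   is not suitably aligned for the target type. */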
1756
1757 /* Unpack a single item. 'fmt' can be any native format character in struct
1758 module syntax. This function is very sensitive to small changes. With this
1759 layout gcc automatically generates a fast jump table. */
1760 static inline PyObject *
1761 unpack_single(PyMemoryViewObject *self, const char *ptr, const char *fmt)
1762 {
1763 unsigned long long llu;
1764 unsigned long lu;
1765 size_t zu;
1766 long long lld;
1767 long ld;
1768 Py_ssize_t zd;
1769 double d;
1770 unsigned char uc;
1771 void *p;
1772
1773 CHECK_RELEASED_AGAIN(self);
1774
1775 #if PY_LITTLE_ENDIAN
1776 int endian = 1;
1777 #else
1778 int endian = 0;
1779 #endif
1780
1781 switch (fmt[0]) {
1782
1783 /* signed integers and fast path for 'B' */
1784 case 'B': uc = *((const unsigned char *)ptr); goto convert_uc;
1785 case 'b': ld = *((const signed char *)ptr); goto convert_ld;
1786 case 'h': UNPACK_SINGLE(ld, ptr, short); goto convert_ld;
1787 case 'i': UNPACK_SINGLE(ld, ptr, int); goto convert_ld;
1788 case 'l': UNPACK_SINGLE(ld, ptr, long); goto convert_ld;
1789
1790 /* boolean */
1791 case '?': UNPACK_SINGLE(ld, ptr, _Bool); goto convert_bool;
1792
1793 /* unsigned integers */
1794 case 'H': UNPACK_SINGLE(lu, ptr, unsigned short); goto convert_lu;
1795 case 'I': UNPACK_SINGLE(lu, ptr, unsigned int); goto convert_lu;
1796 case 'L': UNPACK_SINGLE(lu, ptr, unsigned long); goto convert_lu;
1797
1798 /* native 64-bit */
1799 case 'q': UNPACK_SINGLE(lld, ptr, long long); goto convert_lld;
1800 case 'Q': UNPACK_SINGLE(llu, ptr, unsigned long long); goto convert_llu;
1801
1802 /* ssize_t and size_t */
1803 case 'n': UNPACK_SINGLE(zd, ptr, Py_ssize_t); goto convert_zd;
1804 case 'N': UNPACK_SINGLE(zu, ptr, size_t); goto convert_zu;
1805
1806 /* floats */
1807 case 'f': UNPACK_SINGLE(d, ptr, float); goto convert_double;
1808 case 'd': UNPACK_SINGLE(d, ptr, double); goto convert_double;
1809 case 'e': d = PyFloat_Unpack2(ptr, endian); goto convert_double;
1810
1811 /* bytes object */
1812 case 'c': goto convert_bytes;
1813
1814 /* pointer */
1815 case 'P': UNPACK_SINGLE(p, ptr, void *); goto convert_pointer;
1816
1817 /* default */
1818 default: goto err_format;
1819 }
1820
1821 convert_uc:
1822 /* PyLong_FromUnsignedLong() is slower */
1823 return PyLong_FromLong(uc);
1824 convert_ld:
1825 return PyLong_FromLong(ld);
1826 convert_lu:
1827 return PyLong_FromUnsignedLong(lu);
1828 convert_lld:
1829 return PyLong_FromLongLong(lld);
1830 convert_llu:
1831 return PyLong_FromUnsignedLongLong(llu);
1832 convert_zd:
1833 return PyLong_FromSsize_t(zd);
1834 convert_zu:
1835 return PyLong_FromSize_t(zu);
1836 convert_double:
1837 return PyFloat_FromDouble(d);
1838 convert_bool:
1839 return PyBool_FromLong(ld);
1840 convert_bytes:
1841 return PyBytes_FromStringAndSize(ptr, 1);
1842 convert_pointer:
1843 return PyLong_FromVoidPtr(p);
1844 err_format:
1845 PyErr_Format(PyExc_NotImplementedError,
1846 "memoryview: format %s not supported", fmt);
1847 return NULL;
1848 }
1849
1850 #define PACK_SINGLE(ptr, src, type) \
1851 do { \
1852 type x; \
1853 x = (type)src; \
1854 memcpy(ptr, (char *)&x, sizeof x); \
1855 } while (0)
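/* PACK_SINGLE() is the inverse of UNPACK_SINGLE() above: the value is
   first converted to 'type' and then copied byte-wise to 'ptr', again
   without any alignment requirement on 'ptr'. */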
1856
1857 /* Pack a single item. 'fmt' can be any native format character in
1858 struct module syntax. */
1859 static int
1860 pack_single(PyMemoryViewObject *self, char *ptr, PyObject *item, const char *fmt)
1861 {
1862 unsigned long long llu;
1863 unsigned long lu;
1864 size_t zu;
1865 long long lld;
1866 long ld;
1867 Py_ssize_t zd;
1868 double d;
1869 void *p;
1870
1871 #if PY_LITTLE_ENDIAN
1872 int endian = 1;
1873 #else
1874 int endian = 0;
1875 #endif
1876 switch (fmt[0]) {
1877 /* signed integers */
1878 case 'b': case 'h': case 'i': case 'l':
1879 ld = pylong_as_ld(item);
1880 if (ld == -1 && PyErr_Occurred())
1881 goto err_occurred;
1882 CHECK_RELEASED_INT_AGAIN(self);
1883 switch (fmt[0]) {
1884 case 'b':
1885 if (ld < SCHAR_MIN || ld > SCHAR_MAX) goto err_range;
1886 *((signed char *)ptr) = (signed char)ld; break;
1887 case 'h':
1888 if (ld < SHRT_MIN || ld > SHRT_MAX) goto err_range;
1889 PACK_SINGLE(ptr, ld, short); break;
1890 case 'i':
1891 if (ld < INT_MIN || ld > INT_MAX) goto err_range;
1892 PACK_SINGLE(ptr, ld, int); break;
1893 default: /* 'l' */
1894 PACK_SINGLE(ptr, ld, long); break;
1895 }
1896 break;
1897
1898 /* unsigned integers */
1899 case 'B': case 'H': case 'I': case 'L':
1900 lu = pylong_as_lu(item);
1901 if (lu == (unsigned long)-1 && PyErr_Occurred())
1902 goto err_occurred;
1903 CHECK_RELEASED_INT_AGAIN(self);
1904 switch (fmt[0]) {
1905 case 'B':
1906 if (lu > UCHAR_MAX) goto err_range;
1907 *((unsigned char *)ptr) = (unsigned char)lu; break;
1908 case 'H':
1909 if (lu > USHRT_MAX) goto err_range;
1910 PACK_SINGLE(ptr, lu, unsigned short); break;
1911 case 'I':
1912 if (lu > UINT_MAX) goto err_range;
1913 PACK_SINGLE(ptr, lu, unsigned int); break;
1914 default: /* 'L' */
1915 PACK_SINGLE(ptr, lu, unsigned long); break;
1916 }
1917 break;
1918
1919 /* native 64-bit */
1920 case 'q':
1921 lld = pylong_as_lld(item);
1922 if (lld == -1 && PyErr_Occurred())
1923 goto err_occurred;
1924 CHECK_RELEASED_INT_AGAIN(self);
1925 PACK_SINGLE(ptr, lld, long long);
1926 break;
1927 case 'Q':
1928 llu = pylong_as_llu(item);
1929 if (llu == (unsigned long long)-1 && PyErr_Occurred())
1930 goto err_occurred;
1931 CHECK_RELEASED_INT_AGAIN(self);
1932 PACK_SINGLE(ptr, llu, unsigned long long);
1933 break;
1934
1935 /* ssize_t and size_t */
1936 case 'n':
1937 zd = pylong_as_zd(item);
1938 if (zd == -1 && PyErr_Occurred())
1939 goto err_occurred;
1940 CHECK_RELEASED_INT_AGAIN(self);
1941 PACK_SINGLE(ptr, zd, Py_ssize_t);
1942 break;
1943 case 'N':
1944 zu = pylong_as_zu(item);
1945 if (zu == (size_t)-1 && PyErr_Occurred())
1946 goto err_occurred;
1947 CHECK_RELEASED_INT_AGAIN(self);
1948 PACK_SINGLE(ptr, zu, size_t);
1949 break;
1950
1951 /* floats */
1952 case 'f': case 'd': case 'e':
1953 d = PyFloat_AsDouble(item);
1954 if (d == -1.0 && PyErr_Occurred())
1955 goto err_occurred;
1956 CHECK_RELEASED_INT_AGAIN(self);
1957 if (fmt[0] == 'f') {
1958 PACK_SINGLE(ptr, d, float);
1959 }
1960 else if (fmt[0] == 'd') {
1961 PACK_SINGLE(ptr, d, double);
1962 }
1963 else {
1964 if (PyFloat_Pack2(d, ptr, endian) < 0) {
1965 goto err_occurred;
1966 }
1967 }
1968 break;
1969
1970 /* bool */
1971 case '?':
1972 ld = PyObject_IsTrue(item);
1973 if (ld < 0)
1974 return -1; /* preserve original error */
1975 CHECK_RELEASED_INT_AGAIN(self);
1976 PACK_SINGLE(ptr, ld, _Bool);
1977 break;
1978
1979 /* bytes object */
1980 case 'c':
1981 if (!PyBytes_Check(item))
1982 return type_error_int(fmt);
1983 if (PyBytes_GET_SIZE(item) != 1)
1984 return value_error_int(fmt);
1985 *ptr = PyBytes_AS_STRING(item)[0];
1986 break;
1987
1988 /* pointer */
1989 case 'P':
1990 p = PyLong_AsVoidPtr(item);
1991 if (p == NULL && PyErr_Occurred())
1992 goto err_occurred;
1993 CHECK_RELEASED_INT_AGAIN(self);
1994 PACK_SINGLE(ptr, p, void *);
1995 break;
1996
1997 /* default */
1998 default: goto err_format;
1999 }
2000
2001 return 0;
2002
2003 err_occurred:
2004 return fix_error_int(fmt);
2005 err_range:
2006 return value_error_int(fmt);
2007 err_format:
2008 PyErr_Format(PyExc_NotImplementedError,
2009 "memoryview: format %s not supported", fmt);
2010 return -1;
2011 }
2012
2013
2014 /****************************************************************************/
2015 /* unpack using the struct module */
2016 /****************************************************************************/
2017
2018 /* For reasonable performance it is necessary to cache all objects required
2019 for unpacking. An unpacker can handle the format passed to unpack_from().
2020 Invariant: All pointer fields of the struct should either be NULL or valid
2021 pointers. */
2022 struct unpacker {
2023 PyObject *unpack_from; /* Struct.unpack_from(format) */
2024 PyObject *mview; /* cached memoryview */
2025 char *item; /* buffer for mview */
2026 Py_ssize_t itemsize; /* len(item) */
2027 };
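/* Illustrative life cycle of an unpacker (see the functions below):

       struct unpacker *x = struct_get_unpacker(view->format, view->itemsize);
       if (x == NULL)
           ...                                      // error already set
       PyObject *v = struct_unpack_single(ptr, x);  // unpack one item at ptr
       ...
       unpacker_free(x);

   struct_unpack_single() copies the raw item into x->item and calls the
   cached Struct.unpack_from() on the cached memoryview x->mview. */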
2028
2029 static struct unpacker *
2030 unpacker_new(void)
2031 {
2032 struct unpacker *x = PyMem_Malloc(sizeof *x);
2033
2034 if (x == NULL) {
2035 PyErr_NoMemory();
2036 return NULL;
2037 }
2038
2039 x->unpack_from = NULL;
2040 x->mview = NULL;
2041 x->item = NULL;
2042 x->itemsize = 0;
2043
2044 return x;
2045 }
2046
2047 static void
2048 unpacker_free(struct unpacker *x)
2049 {
2050 if (x) {
2051 Py_XDECREF(x->unpack_from);
2052 Py_XDECREF(x->mview);
2053 PyMem_Free(x->item);
2054 PyMem_Free(x);
2055 }
2056 }
2057
2058 /* Return a new unpacker for the given format. */
2059 static struct unpacker *
2060 struct_get_unpacker(const char *fmt, Py_ssize_t itemsize)
2061 {
2062 PyObject *Struct = NULL; /* XXX cache it in globals? */
2063 PyObject *structobj = NULL;
2064 PyObject *format = NULL;
2065 struct unpacker *x = NULL;
2066
2067 Struct = _PyImport_GetModuleAttrString("struct", "Struct");
2068 if (Struct == NULL)
2069 return NULL;
2070
2071 x = unpacker_new();
2072 if (x == NULL)
2073 goto error;
2074
2075 format = PyBytes_FromString(fmt);
2076 if (format == NULL)
2077 goto error;
2078
2079 structobj = PyObject_CallOneArg(Struct, format);
2080 if (structobj == NULL)
2081 goto error;
2082
2083 x->unpack_from = PyObject_GetAttrString(structobj, "unpack_from");
2084 if (x->unpack_from == NULL)
2085 goto error;
2086
2087 x->item = PyMem_Malloc(itemsize);
2088 if (x->item == NULL) {
2089 PyErr_NoMemory();
2090 goto error;
2091 }
2092 x->itemsize = itemsize;
2093
2094 x->mview = PyMemoryView_FromMemory(x->item, itemsize, PyBUF_WRITE);
2095 if (x->mview == NULL)
2096 goto error;
2097
2098
2099 out:
2100 Py_XDECREF(Struct);
2101 Py_XDECREF(format);
2102 Py_XDECREF(structobj);
2103 return x;
2104
2105 error:
2106 unpacker_free(x);
2107 x = NULL;
2108 goto out;
2109 }
2110
2111 /* unpack a single item */
2112 static PyObject *
2113 struct_unpack_single(const char *ptr, struct unpacker *x)
2114 {
2115 PyObject *v;
2116
2117 memcpy(x->item, ptr, x->itemsize);
2118 v = PyObject_CallOneArg(x->unpack_from, x->mview);
2119 if (v == NULL)
2120 return NULL;
2121
2122 if (PyTuple_GET_SIZE(v) == 1) {
2123 PyObject *res = Py_NewRef(PyTuple_GET_ITEM(v, 0));
2124 Py_DECREF(v);
2125 return res;
2126 }
2127
2128 return v;
2129 }
2130
2131
2132 /****************************************************************************/
2133 /* Representations */
2134 /****************************************************************************/
2135
2136 /* allow explicit form of native format */
2137 static inline const char *
2138 adjust_fmt(const Py_buffer *view)
2139 {
2140 const char *fmt;
2141
2142 fmt = (view->format[0] == '@') ? view->format+1 : view->format;
2143 if (fmt[0] && fmt[1] == '\0')
2144 return fmt;
2145
2146 PyErr_Format(PyExc_NotImplementedError,
2147 "memoryview: unsupported format %s", view->format);
2148 return NULL;
2149 }
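/* Examples: "B" and "@B" are both accepted and normalized to "B", while
   multi-character formats such as "2i" or "ii" (and the empty string)
   are rejected with NotImplementedError. */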
2150
2151 /* Base case for multi-dimensional unpacking. Assumption: ndim == 1. */
2152 static PyObject *
2153 tolist_base(PyMemoryViewObject *self, const char *ptr, const Py_ssize_t *shape,
2154 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2155 const char *fmt)
2156 {
2157 PyObject *lst, *item;
2158 Py_ssize_t i;
2159
2160 lst = PyList_New(shape[0]);
2161 if (lst == NULL)
2162 return NULL;
2163
2164 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2165 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2166 item = unpack_single(self, xptr, fmt);
2167 if (item == NULL) {
2168 Py_DECREF(lst);
2169 return NULL;
2170 }
2171 PyList_SET_ITEM(lst, i, item);
2172 }
2173
2174 return lst;
2175 }
2176
2177 /* Unpack a multi-dimensional array into a nested list.
2178 Assumption: ndim >= 1. */
2179 static PyObject *
2180 tolist_rec(PyMemoryViewObject *self, const char *ptr, Py_ssize_t ndim, const Py_ssize_t *shape,
2181 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2182 const char *fmt)
2183 {
2184 PyObject *lst, *item;
2185 Py_ssize_t i;
2186
2187 assert(ndim >= 1);
2188 assert(shape != NULL);
2189 assert(strides != NULL);
2190
2191 if (ndim == 1)
2192 return tolist_base(self, ptr, shape, strides, suboffsets, fmt);
2193
2194 lst = PyList_New(shape[0]);
2195 if (lst == NULL)
2196 return NULL;
2197
2198 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2199 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2200 item = tolist_rec(self, xptr, ndim-1, shape+1,
2201 strides+1, suboffsets ? suboffsets+1 : NULL,
2202 fmt);
2203 if (item == NULL) {
2204 Py_DECREF(lst);
2205 return NULL;
2206 }
2207 PyList_SET_ITEM(lst, i, item);
2208 }
2209
2210 return lst;
2211 }
2212
2213 /* Return a list representation of the memoryview. Currently only buffers
2214 with native format strings are supported. */
2215 /*[clinic input]
2216 memoryview.tolist
2217
2218 Return the data in the buffer as a list of elements.
2219 [clinic start generated code]*/
2220
2221 static PyObject *
2222 memoryview_tolist_impl(PyMemoryViewObject *self)
2223 /*[clinic end generated code: output=a6cda89214fd5a1b input=21e7d0c1860b211a]*/
2224 {
2225 const Py_buffer *view = &self->view;
2226 const char *fmt;
2227
2228 CHECK_RELEASED(self);
2229
2230 fmt = adjust_fmt(view);
2231 if (fmt == NULL)
2232 return NULL;
2233 if (view->ndim == 0) {
2234 return unpack_single(self, view->buf, fmt);
2235 }
2236 else if (view->ndim == 1) {
2237 return tolist_base(self, view->buf, view->shape,
2238 view->strides, view->suboffsets,
2239 fmt);
2240 }
2241 else {
2242 return tolist_rec(self, view->buf, view->ndim, view->shape,
2243 view->strides, view->suboffsets,
2244 fmt);
2245 }
2246 }
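/* Python-level example (for illustration):

       >>> memoryview(b'abc').tolist()
       [97, 98, 99]

   A 0-dimensional view unpacks to a single element instead of a list. */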
2247
2248 /*[clinic input]
2249 memoryview.tobytes
2250
2251 order: str(accept={str, NoneType}, c_default="NULL") = 'C'
2252
2253 Return the data in the buffer as a byte string.
2254
2255 Order can be {'C', 'F', 'A'}. When order is 'C' or 'F', the data of the
2256 original array is converted to C or Fortran order. For contiguous views,
2257 'A' returns an exact copy of the physical memory. In particular, in-memory
2258 Fortran order is preserved. For non-contiguous views, the data is converted
2259 to C first. order=None is the same as order='C'.
2260 [clinic start generated code]*/
2261
2262 static PyObject *
2263 memoryview_tobytes_impl(PyMemoryViewObject *self, const char *order)
2264 /*[clinic end generated code: output=1288b62560a32a23 input=0efa3ddaeda573a8]*/
2265 {
2266 Py_buffer *src = VIEW_ADDR(self);
2267 char ord = 'C';
2268 PyObject *bytes;
2269
2270 CHECK_RELEASED(self);
2271
2272 if (order) {
2273 if (strcmp(order, "F") == 0) {
2274 ord = 'F';
2275 }
2276 else if (strcmp(order, "A") == 0) {
2277 ord = 'A';
2278 }
2279 else if (strcmp(order, "C") != 0) {
2280 PyErr_SetString(PyExc_ValueError,
2281 "order must be 'C', 'F' or 'A'");
2282 return NULL;
2283 }
2284 }
2285
2286 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2287 if (bytes == NULL)
2288 return NULL;
2289
2290 if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, ord) < 0) {
2291 Py_DECREF(bytes);
2292 return NULL;
2293 }
2294
2295 return bytes;
2296 }
2297
2298 /*[clinic input]
2299 memoryview.hex
2300
2301 sep: object = NULL
2302 An optional single character or byte to separate hex bytes.
2303 bytes_per_sep: int = 1
2304 How many bytes between separators. Positive values count from the
2305 right, negative values count from the left.
2306
2307 Return the data in the buffer as a str of hexadecimal numbers.
2308
2309 Example:
2310 >>> value = memoryview(b'\xb9\x01\xef')
2311 >>> value.hex()
2312 'b901ef'
2313 >>> value.hex(':')
2314 'b9:01:ef'
2315 >>> value.hex(':', 2)
2316 'b9:01ef'
2317 >>> value.hex(':', -2)
2318 'b901:ef'
2319 [clinic start generated code]*/
2320
2321 static PyObject *
2322 memoryview_hex_impl(PyMemoryViewObject *self, PyObject *sep,
2323 int bytes_per_sep)
2324 /*[clinic end generated code: output=430ca760f94f3ca7 input=539f6a3a5fb56946]*/
2325 {
2326 Py_buffer *src = VIEW_ADDR(self);
2327 PyObject *bytes;
2328 PyObject *ret;
2329
2330 CHECK_RELEASED(self);
2331
2332 if (MV_C_CONTIGUOUS(self->flags)) {
2333 return _Py_strhex_with_sep(src->buf, src->len, sep, bytes_per_sep);
2334 }
2335
2336 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2337 if (bytes == NULL)
2338 return NULL;
2339
2340 if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, 'C') < 0) {
2341 Py_DECREF(bytes);
2342 return NULL;
2343 }
2344
2345 ret = _Py_strhex_with_sep(
2346 PyBytes_AS_STRING(bytes), PyBytes_GET_SIZE(bytes),
2347 sep, bytes_per_sep);
2348 Py_DECREF(bytes);
2349
2350 return ret;
2351 }
2352
2353 static PyObject *
2354 memory_repr(PyObject *_self)
2355 {
2356 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
2357 if (self->flags & _Py_MEMORYVIEW_RELEASED)
2358 return PyUnicode_FromFormat("<released memory at %p>", self);
2359 else
2360 return PyUnicode_FromFormat("<memory at %p>", self);
2361 }
2362
2363
2364 /**************************************************************************/
2365 /* Indexing and slicing */
2366 /**************************************************************************/
2367
2368 static char *
2369 lookup_dimension(const Py_buffer *view, char *ptr, int dim, Py_ssize_t index)
2370 {
2371 Py_ssize_t nitems; /* items in the given dimension */
2372
2373 assert(view->shape);
2374 assert(view->strides);
2375
2376 nitems = view->shape[dim];
2377 if (index < 0) {
2378 index += nitems;
2379 }
2380 if (index < 0 || index >= nitems) {
2381 PyErr_Format(PyExc_IndexError,
2382 "index out of bounds on dimension %d", dim + 1);
2383 return NULL;
2384 }
2385
2386 ptr += view->strides[dim] * index;
2387
2388 ptr = ADJUST_PTR(ptr, view->suboffsets, dim);
2389
2390 return ptr;
2391 }
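/* Example: for a dimension of length 5, index -1 maps to 4, while
   index 5 or -6 raises IndexError. */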
2392
2393 /* Get the pointer to the item at index. */
2394 static char *
2395 ptr_from_index(const Py_buffer *view, Py_ssize_t index)
2396 {
2397 char *ptr = (char *)view->buf;
2398 return lookup_dimension(view, ptr, 0, index);
2399 }
2400
2401 /* Get the pointer to the item at tuple. */
2402 static char *
2403 ptr_from_tuple(const Py_buffer *view, PyObject *tup)
2404 {
2405 char *ptr = (char *)view->buf;
2406 Py_ssize_t dim, nindices = PyTuple_GET_SIZE(tup);
2407
2408 if (nindices > view->ndim) {
2409 PyErr_Format(PyExc_TypeError,
2410 "cannot index %zd-dimension view with %zd-element tuple",
2411 view->ndim, nindices);
2412 return NULL;
2413 }
2414
2415 for (dim = 0; dim < nindices; dim++) {
2416 Py_ssize_t index;
2417 index = PyNumber_AsSsize_t(PyTuple_GET_ITEM(tup, dim),
2418 PyExc_IndexError);
2419 if (index == -1 && PyErr_Occurred())
2420 return NULL;
2421 ptr = lookup_dimension(view, ptr, (int)dim, index);
2422 if (ptr == NULL)
2423 return NULL;
2424 }
2425 return ptr;
2426 }
2427
2428 /* Return the item at index. In a one-dimensional view, this is an object
2429 with the type specified by view->format. Otherwise, the item is a sub-view.
2430 The function is used in memory_subscript() and memory_as_sequence. */
2431 static PyObject *
2432 memory_item(PyObject *_self, Py_ssize_t index)
2433 {
2434 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
2435 Py_buffer *view = &(self->view);
2436 const char *fmt;
2437
2438 CHECK_RELEASED(self);
2439
2440 fmt = adjust_fmt(view);
2441 if (fmt == NULL)
2442 return NULL;
2443
2444 if (view->ndim == 0) {
2445 PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
2446 return NULL;
2447 }
2448 if (view->ndim == 1) {
2449 char *ptr = ptr_from_index(view, index);
2450 if (ptr == NULL)
2451 return NULL;
2452 return unpack_single(self, ptr, fmt);
2453 }
2454
2455 PyErr_SetString(PyExc_NotImplementedError,
2456 "multi-dimensional sub-views are not implemented");
2457 return NULL;
2458 }
2459
2460 /* Return the item at position *key* (a tuple of indices). */
2461 static PyObject *
2462 memory_item_multi(PyMemoryViewObject *self, PyObject *tup)
2463 {
2464 Py_buffer *view = &(self->view);
2465 const char *fmt;
2466 Py_ssize_t nindices = PyTuple_GET_SIZE(tup);
2467 char *ptr;
2468
2469 CHECK_RELEASED(self);
2470
2471 fmt = adjust_fmt(view);
2472 if (fmt == NULL)
2473 return NULL;
2474
2475 if (nindices < view->ndim) {
2476 PyErr_SetString(PyExc_NotImplementedError,
2477 "sub-views are not implemented");
2478 return NULL;
2479 }
2480 ptr = ptr_from_tuple(view, tup);
2481 if (ptr == NULL)
2482 return NULL;
2483 return unpack_single(self, ptr, fmt);
2484 }
2485
2486 static inline int
2487 init_slice(Py_buffer *base, PyObject *key, int dim)
2488 {
2489 Py_ssize_t start, stop, step, slicelength;
2490
2491 if (PySlice_Unpack(key, &start, &stop, &step) < 0) {
2492 return -1;
2493 }
2494 slicelength = PySlice_AdjustIndices(base->shape[dim], &start, &stop, step);
2495
2496
2497 if (base->suboffsets == NULL || dim == 0) {
2498 adjust_buf:
2499 base->buf = (char *)base->buf + base->strides[dim] * start;
2500 }
2501 else {
2502 Py_ssize_t n = dim-1;
2503 while (n >= 0 && base->suboffsets[n] < 0)
2504 n--;
2505 if (n < 0)
2506 goto adjust_buf; /* all suboffsets are negative */
2507 base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
2508 }
2509 base->shape[dim] = slicelength;
2510 base->strides[dim] = base->strides[dim] * step;
2511
2512 return 0;
2513 }
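/* Worked example (illustrative): slicing a one-dimensional view of 10
   items with itemsize 4 and strides[0] == 4 by key 2:8:3 yields

       start = 2, stop = 8, step = 3, slicelength = 2
       buf       += 2 * 4        (now points at item 2)
       shape[0]   = 2
       strides[0] = 3 * 4 = 12   (skip two items between elements)

   so the resulting view exposes items 2 and 5 of the original buffer. */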
2514
2515 static int
2516 is_multislice(PyObject *key)
2517 {
2518 Py_ssize_t size, i;
2519
2520 if (!PyTuple_Check(key))
2521 return 0;
2522 size = PyTuple_GET_SIZE(key);
2523 if (size == 0)
2524 return 0;
2525
2526 for (i = 0; i < size; i++) {
2527 PyObject *x = PyTuple_GET_ITEM(key, i);
2528 if (!PySlice_Check(x))
2529 return 0;
2530 }
2531 return 1;
2532 }
2533
2534 static Py_ssize_t
2535 is_multiindex(PyObject *key)
2536 {
2537 Py_ssize_t size, i;
2538
2539 if (!PyTuple_Check(key))
2540 return 0;
2541 size = PyTuple_GET_SIZE(key);
2542 for (i = 0; i < size; i++) {
2543 PyObject *x = PyTuple_GET_ITEM(key, i);
2544 if (!_PyIndex_Check(x)) {
2545 return 0;
2546 }
2547 }
2548 return 1;
2549 }
2550
2551 /* mv[obj] returns an object holding the data for one element if obj
2552 fully indexes the memoryview or another memoryview object if it
2553 does not.
2554
2555 0-d memoryview objects can be referenced using mv[...] or mv[()]
2556 but not with anything else. */
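/* Python-level examples (for illustration):

       >>> m = memoryview(b'abc')
       >>> m[1]              # fully indexed: returns an element
       98
       >>> m[1:].tobytes()   # slice: returns another memoryview
       b'bc'

   Tuples of integers index multi-dimensional views; tuples of slices
   are rejected with NotImplementedError (see below). */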
2557 static PyObject *
2558 memory_subscript(PyObject *_self, PyObject *key)
2559 {
2560 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
2561 Py_buffer *view;
2562 view = &(self->view);
2563
2564 CHECK_RELEASED(self);
2565
2566 if (view->ndim == 0) {
2567 if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
2568 const char *fmt = adjust_fmt(view);
2569 if (fmt == NULL)
2570 return NULL;
2571 return unpack_single(self, view->buf, fmt);
2572 }
2573 else if (key == Py_Ellipsis) {
2574 return Py_NewRef(self);
2575 }
2576 else {
2577 PyErr_SetString(PyExc_TypeError,
2578 "invalid indexing of 0-dim memory");
2579 return NULL;
2580 }
2581 }
2582
2583 if (_PyIndex_Check(key)) {
2584 Py_ssize_t index;
2585 index = PyNumber_AsSsize_t(key, PyExc_IndexError);
2586 if (index == -1 && PyErr_Occurred())
2587 return NULL;
2588 return memory_item((PyObject *)self, index);
2589 }
2590 else if (PySlice_Check(key)) {
2591 CHECK_RESTRICTED(self);
2592 PyMemoryViewObject *sliced;
2593
2594 sliced = (PyMemoryViewObject *)mbuf_add_view(self->mbuf, view);
2595 if (sliced == NULL)
2596 return NULL;
2597
2598 if (init_slice(&sliced->view, key, 0) < 0) {
2599 Py_DECREF(sliced);
2600 return NULL;
2601 }
2602 init_len(&sliced->view);
2603 init_flags(sliced);
2604
2605 return (PyObject *)sliced;
2606 }
2607 else if (is_multiindex(key)) {
2608 return memory_item_multi(self, key);
2609 }
2610 else if (is_multislice(key)) {
2611 PyErr_SetString(PyExc_NotImplementedError,
2612 "multi-dimensional slicing is not implemented");
2613 return NULL;
2614 }
2615
2616 PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
2617 return NULL;
2618 }
2619
2620 static int
2621 memory_ass_sub(PyObject *_self, PyObject *key, PyObject *value)
2622 {
2623 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
2624 Py_buffer *view = &(self->view);
2625 Py_buffer src;
2626 const char *fmt;
2627 char *ptr;
2628
2629 CHECK_RELEASED_INT(self);
2630
2631 fmt = adjust_fmt(view);
2632 if (fmt == NULL)
2633 return -1;
2634
2635 if (view->readonly) {
2636 PyErr_SetString(PyExc_TypeError, "cannot modify read-only memory");
2637 return -1;
2638 }
2639 if (value == NULL) {
2640 PyErr_SetString(PyExc_TypeError, "cannot delete memory");
2641 return -1;
2642 }
2643 if (view->ndim == 0) {
2644 if (key == Py_Ellipsis ||
2645 (PyTuple_Check(key) && PyTuple_GET_SIZE(key)==0)) {
2646 ptr = (char *)view->buf;
2647 return pack_single(self, ptr, value, fmt);
2648 }
2649 else {
2650 PyErr_SetString(PyExc_TypeError,
2651 "invalid indexing of 0-dim memory");
2652 return -1;
2653 }
2654 }
2655
2656 if (_PyIndex_Check(key)) {
2657 Py_ssize_t index;
2658 if (1 < view->ndim) {
2659 PyErr_SetString(PyExc_NotImplementedError,
2660 "sub-views are not implemented");
2661 return -1;
2662 }
2663 index = PyNumber_AsSsize_t(key, PyExc_IndexError);
2664 if (index == -1 && PyErr_Occurred())
2665 return -1;
2666 ptr = ptr_from_index(view, index);
2667 if (ptr == NULL)
2668 return -1;
2669 return pack_single(self, ptr, value, fmt);
2670 }
2671 /* one-dimensional: fast path */
2672 if (PySlice_Check(key) && view->ndim == 1) {
2673 Py_buffer dest; /* sliced view */
2674 Py_ssize_t arrays[3];
2675 int ret = -1;
2676
2677 /* rvalue must be an exporter */
2678 if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) < 0)
2679 return ret;
2680
2681 dest = *view;
2682 dest.shape = &arrays[0]; dest.shape[0] = view->shape[0];
2683 dest.strides = &arrays[1]; dest.strides[0] = view->strides[0];
2684 if (view->suboffsets) {
2685 dest.suboffsets = &arrays[2]; dest.suboffsets[0] = view->suboffsets[0];
2686 }
2687
2688 if (init_slice(&dest, key, 0) < 0)
2689 goto end_block;
2690 dest.len = dest.shape[0] * dest.itemsize;
2691
2692 ret = copy_single(self, &dest, &src);
2693
2694 end_block:
2695 PyBuffer_Release(&src);
2696 return ret;
2697 }
2698 if (is_multiindex(key)) {
2699 char *ptr;
2700 if (PyTuple_GET_SIZE(key) < view->ndim) {
2701 PyErr_SetString(PyExc_NotImplementedError,
2702 "sub-views are not implemented");
2703 return -1;
2704 }
2705 ptr = ptr_from_tuple(view, key);
2706 if (ptr == NULL)
2707 return -1;
2708 return pack_single(self, ptr, value, fmt);
2709 }
2710 if (PySlice_Check(key) || is_multislice(key)) {
2711 /* Call memory_subscript() to produce a sliced lvalue, then copy
2712 rvalue into lvalue. This is already implemented in _testbuffer.c. */
2713 PyErr_SetString(PyExc_NotImplementedError,
2714 "memoryview slice assignments are currently restricted "
2715 "to ndim = 1");
2716 return -1;
2717 }
2718
2719 PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
2720 return -1;
2721 }
2722
2723 static Py_ssize_t
2724 memory_length(PyObject *_self)
2725 {
2726 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
2727 CHECK_RELEASED_INT(self);
2728 if (self->view.ndim == 0) {
2729 PyErr_SetString(PyExc_TypeError, "0-dim memory has no length");
2730 return -1;
2731 }
2732 return self->view.shape[0];
2733 }
2734
2735 /* As mapping */
2736 static PyMappingMethods memory_as_mapping = {
2737 memory_length, /* mp_length */
2738 memory_subscript, /* mp_subscript */
2739 memory_ass_sub, /* mp_ass_subscript */
2740 };
2741
2742 /* As sequence */
2743 static PySequenceMethods memory_as_sequence = {
2744 memory_length, /* sq_length */
2745 0, /* sq_concat */
2746 0, /* sq_repeat */
2747 memory_item, /* sq_item */
2748 };
2749
2750
2751 /**************************************************************************/
2752 /* Comparisons */
2753 /**************************************************************************/
2754
2755 #define MV_COMPARE_EX -1 /* exception */
2756 #define MV_COMPARE_NOT_IMPL -2 /* not implemented */
2757
2758 /* Translate a StructError to "not equal". Preserve other exceptions. */
2759 static int
2760 fix_struct_error_int(void)
2761 {
2762 assert(PyErr_Occurred());
2763 /* XXX Cannot get at StructError directly? */
2764 if (PyErr_ExceptionMatches(PyExc_ImportError) ||
2765 PyErr_ExceptionMatches(PyExc_MemoryError)) {
2766 return MV_COMPARE_EX;
2767 }
2768 /* StructError: invalid or unknown format -> not equal */
2769 PyErr_Clear();
2770 return 0;
2771 }
2772
2773 /* Unpack and compare single items of p and q using the struct module. */
2774 static int
2775 struct_unpack_cmp(const char *p, const char *q,
2776 struct unpacker *unpack_p, struct unpacker *unpack_q)
2777 {
2778 PyObject *v, *w;
2779 int ret;
2780
2781 /* At this point any exception from the struct module should not be
2782 StructError, since both formats have been accepted already. */
2783 v = struct_unpack_single(p, unpack_p);
2784 if (v == NULL)
2785 return MV_COMPARE_EX;
2786
2787 w = struct_unpack_single(q, unpack_q);
2788 if (w == NULL) {
2789 Py_DECREF(v);
2790 return MV_COMPARE_EX;
2791 }
2792
2793 /* MV_COMPARE_EX == -1: exceptions are preserved */
2794 ret = PyObject_RichCompareBool(v, w, Py_EQ);
2795 Py_DECREF(v);
2796 Py_DECREF(w);
2797
2798 return ret;
2799 }
2800
2801 /* Unpack and compare single items of p and q. If both p and q have the same
2802 single element native format, the comparison uses a fast path (gcc creates
2803 a jump table and converts memcpy into simple assignments on x86/x64).
2804
2805 Otherwise, the comparison is delegated to the struct module, which is
2806 30-60x slower. */
2807 #define CMP_SINGLE(p, q, type) \
2808 do { \
2809 type x; \
2810 type y; \
2811 memcpy((char *)&x, p, sizeof x); \
2812 memcpy((char *)&y, q, sizeof y); \
2813 equal = (x == y); \
2814 } while (0)
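/* Note that CMP_SINGLE() compares with the C equality operator rather
   than memcmp(): for the floating point formats two NaNs never compare
   equal, even if their bit patterns are identical (see also the comment
   in memory_richcompare() on why memcmp() cannot be used). */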
2815
2816 static inline int
2817 unpack_cmp(const char *p, const char *q, char fmt,
2818 struct unpacker *unpack_p, struct unpacker *unpack_q)
2819 {
2820 int equal;
2821
2822 switch (fmt) {
2823
2824 /* signed integers and fast path for 'B' */
2825 case 'B': return *((const unsigned char *)p) == *((const unsigned char *)q);
2826 case 'b': return *((const signed char *)p) == *((const signed char *)q);
2827 case 'h': CMP_SINGLE(p, q, short); return equal;
2828 case 'i': CMP_SINGLE(p, q, int); return equal;
2829 case 'l': CMP_SINGLE(p, q, long); return equal;
2830
2831 /* boolean */
2832 case '?': CMP_SINGLE(p, q, _Bool); return equal;
2833
2834 /* unsigned integers */
2835 case 'H': CMP_SINGLE(p, q, unsigned short); return equal;
2836 case 'I': CMP_SINGLE(p, q, unsigned int); return equal;
2837 case 'L': CMP_SINGLE(p, q, unsigned long); return equal;
2838
2839 /* native 64-bit */
2840 case 'q': CMP_SINGLE(p, q, long long); return equal;
2841 case 'Q': CMP_SINGLE(p, q, unsigned long long); return equal;
2842
2843 /* ssize_t and size_t */
2844 case 'n': CMP_SINGLE(p, q, Py_ssize_t); return equal;
2845 case 'N': CMP_SINGLE(p, q, size_t); return equal;
2846
2847 /* floats */
2848 /* XXX DBL_EPSILON? */
2849 case 'f': CMP_SINGLE(p, q, float); return equal;
2850 case 'd': CMP_SINGLE(p, q, double); return equal;
2851 case 'e': {
2852 #if PY_LITTLE_ENDIAN
2853 int endian = 1;
2854 #else
2855 int endian = 0;
2856 #endif
2857 /* Note: PyFloat_Unpack2 should never fail */
2858 double u = PyFloat_Unpack2(p, endian);
2859 double v = PyFloat_Unpack2(q, endian);
2860 return (u == v);
2861 }
2862
2863 /* bytes object */
2864 case 'c': return *p == *q;
2865
2866 /* pointer */
2867 case 'P': CMP_SINGLE(p, q, void *); return equal;
2868
2869 /* use the struct module */
2870 case '_':
2871 assert(unpack_p);
2872 assert(unpack_q);
2873 return struct_unpack_cmp(p, q, unpack_p, unpack_q);
2874 }
2875
2876 /* NOT REACHED */
2877 PyErr_SetString(PyExc_RuntimeError,
2878 "memoryview: internal error in richcompare");
2879 return MV_COMPARE_EX;
2880 }
2881
2882 /* Base case for recursive array comparisons. Assumption: ndim == 1. */
2883 static int
2884 cmp_base(const char *p, const char *q, const Py_ssize_t *shape,
2885 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2886 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2887 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2888 {
2889 Py_ssize_t i;
2890 int equal;
2891
2892 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2893 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2894 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2895 equal = unpack_cmp(xp, xq, fmt, unpack_p, unpack_q);
2896 if (equal <= 0)
2897 return equal;
2898 }
2899
2900 return 1;
2901 }
2902
2903 /* Recursively compare two multi-dimensional arrays that have the same
2904 logical structure. Assumption: ndim >= 1. */
2905 static int
2906 cmp_rec(const char *p, const char *q,
2907 Py_ssize_t ndim, const Py_ssize_t *shape,
2908 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2909 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2910 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2911 {
2912 Py_ssize_t i;
2913 int equal;
2914
2915 assert(ndim >= 1);
2916 assert(shape != NULL);
2917 assert(pstrides != NULL);
2918 assert(qstrides != NULL);
2919
2920 if (ndim == 1) {
2921 return cmp_base(p, q, shape,
2922 pstrides, psuboffsets,
2923 qstrides, qsuboffsets,
2924 fmt, unpack_p, unpack_q);
2925 }
2926
2927 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2928 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2929 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2930 equal = cmp_rec(xp, xq, ndim-1, shape+1,
2931 pstrides+1, psuboffsets ? psuboffsets+1 : NULL,
2932 qstrides+1, qsuboffsets ? qsuboffsets+1 : NULL,
2933 fmt, unpack_p, unpack_q);
2934 if (equal <= 0)
2935 return equal;
2936 }
2937
2938 return 1;
2939 }
2940
2941 static PyObject *
2942 memory_richcompare(PyObject *v, PyObject *w, int op)
2943 {
2944 PyObject *res;
2945 Py_buffer wbuf, *vv;
2946 Py_buffer *ww = NULL;
2947 struct unpacker *unpack_v = NULL;
2948 struct unpacker *unpack_w = NULL;
2949 char vfmt, wfmt;
2950 int equal = MV_COMPARE_NOT_IMPL;
2951
2952 if (op != Py_EQ && op != Py_NE)
2953 goto result; /* Py_NotImplemented */
2954
2955 assert(PyMemoryView_Check(v));
2956 if (BASE_INACCESSIBLE(v)) {
2957 equal = (v == w);
2958 goto result;
2959 }
2960 vv = VIEW_ADDR(v);
2961
2962 if (PyMemoryView_Check(w)) {
2963 if (BASE_INACCESSIBLE(w)) {
2964 equal = (v == w);
2965 goto result;
2966 }
2967 ww = VIEW_ADDR(w);
2968 }
2969 else {
2970 if (PyObject_GetBuffer(w, &wbuf, PyBUF_FULL_RO) < 0) {
2971 PyErr_Clear();
2972 goto result; /* Py_NotImplemented */
2973 }
2974 ww = &wbuf;
2975 }
2976
2977 if (!equiv_shape(vv, ww)) {
2978 PyErr_Clear();
2979 equal = 0;
2980 goto result;
2981 }
2982
2983 /* Use fast unpacking for identical primitive C type formats. */
2984 if (get_native_fmtchar(&vfmt, vv->format) < 0)
2985 vfmt = '_';
2986 if (get_native_fmtchar(&wfmt, ww->format) < 0)
2987 wfmt = '_';
2988 if (vfmt == '_' || wfmt == '_' || vfmt != wfmt) {
2989 /* Use struct module unpacking. NOTE: Even for equal format strings,
2990 memcmp() cannot be used for item comparison since it would give
2991 incorrect results in the case of NaNs or uninitialized padding
2992 bytes. */
2993 vfmt = '_';
2994 unpack_v = struct_get_unpacker(vv->format, vv->itemsize);
2995 if (unpack_v == NULL) {
2996 equal = fix_struct_error_int();
2997 goto result;
2998 }
2999 unpack_w = struct_get_unpacker(ww->format, ww->itemsize);
3000 if (unpack_w == NULL) {
3001 equal = fix_struct_error_int();
3002 goto result;
3003 }
3004 }
3005
3006 if (vv->ndim == 0) {
3007 equal = unpack_cmp(vv->buf, ww->buf,
3008 vfmt, unpack_v, unpack_w);
3009 }
3010 else if (vv->ndim == 1) {
3011 equal = cmp_base(vv->buf, ww->buf, vv->shape,
3012 vv->strides, vv->suboffsets,
3013 ww->strides, ww->suboffsets,
3014 vfmt, unpack_v, unpack_w);
3015 }
3016 else {
3017 equal = cmp_rec(vv->buf, ww->buf, vv->ndim, vv->shape,
3018 vv->strides, vv->suboffsets,
3019 ww->strides, ww->suboffsets,
3020 vfmt, unpack_v, unpack_w);
3021 }
3022
3023 result:
3024 if (equal < 0) {
3025 if (equal == MV_COMPARE_NOT_IMPL)
3026 res = Py_NotImplemented;
3027 else /* exception */
3028 res = NULL;
3029 }
3030 else if ((equal && op == Py_EQ) || (!equal && op == Py_NE))
3031 res = Py_True;
3032 else
3033 res = Py_False;
3034
3035 if (ww == &wbuf)
3036 PyBuffer_Release(ww);
3037
3038 unpacker_free(unpack_v);
3039 unpacker_free(unpack_w);
3040
3041 return Py_XNewRef(res);
3042 }
3043
3044 /**************************************************************************/
3045 /* Hash */
3046 /**************************************************************************/
3047
3048 static Py_hash_t
3049 memory_hash(PyObject *_self)
3050 {
3051 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3052 if (self->hash == -1) {
3053 Py_buffer *view = &self->view;
3054 char *mem = view->buf;
3055 Py_ssize_t ret;
3056 char fmt;
3057
3058 CHECK_RELEASED_INT(self);
3059
3060 if (!view->readonly) {
3061 PyErr_SetString(PyExc_ValueError,
3062 "cannot hash writable memoryview object");
3063 return -1;
3064 }
3065 ret = get_native_fmtchar(&fmt, view->format);
3066 if (ret < 0 || !IS_BYTE_FORMAT(fmt)) {
3067 PyErr_SetString(PyExc_ValueError,
3068 "memoryview: hashing is restricted to formats 'B', 'b' or 'c'");
3069 return -1;
3070 }
3071 if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
3072 /* Keep the original error message */
3073 return -1;
3074 }
3075
3076 if (!MV_C_CONTIGUOUS(self->flags)) {
3077 mem = PyMem_Malloc(view->len);
3078 if (mem == NULL) {
3079 PyErr_NoMemory();
3080 return -1;
3081 }
3082 if (buffer_to_contiguous(mem, view, 'C') < 0) {
3083 PyMem_Free(mem);
3084 return -1;
3085 }
3086 }
3087
3088 /* Can't fail */
3089 self->hash = _Py_HashBytes(mem, view->len);
3090
3091 if (mem != view->buf)
3092 PyMem_Free(mem);
3093 }
3094
3095 return self->hash;
3096 }
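/* Illustrative Python-level invariant for hashable views:

       >>> v = memoryview(b'abc')
       >>> hash(v) == hash(b'abc') == hash(v.tobytes())
       True

   Writable views and views with non-byte formats raise ValueError,
   as enforced above. */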
3097
3098
3099 /**************************************************************************/
3100 /* getters */
3101 /**************************************************************************/
3102
3103 static PyObject *
3104 _IntTupleFromSsizet(int len, Py_ssize_t *vals)
3105 {
3106 int i;
3107 PyObject *o;
3108 PyObject *intTuple;
3109
3110 if (vals == NULL)
3111 return PyTuple_New(0);
3112
3113 intTuple = PyTuple_New(len);
3114 if (!intTuple)
3115 return NULL;
3116 for (i=0; i<len; i++) {
3117 o = PyLong_FromSsize_t(vals[i]);
3118 if (!o) {
3119 Py_DECREF(intTuple);
3120 return NULL;
3121 }
3122 PyTuple_SET_ITEM(intTuple, i, o);
3123 }
3124 return intTuple;
3125 }
3126
3127 static PyObject *
3128 memory_obj_get(PyObject *_self, void *Py_UNUSED(ignored))
3129 {
3130 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3131 Py_buffer *view = &self->view;
3132
3133 CHECK_RELEASED(self);
3134 if (view->obj == NULL) {
3135 Py_RETURN_NONE;
3136 }
3137 return Py_NewRef(view->obj);
3138 }
3139
3140 static PyObject *
3141 memory_nbytes_get(PyObject *_self, void *Py_UNUSED(ignored))
3142 {
3143 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3144 CHECK_RELEASED(self);
3145 return PyLong_FromSsize_t(self->view.len);
3146 }
3147
3148 static PyObject *
3149 memory_format_get(PyObject *_self, void *Py_UNUSED(ignored))
3150 {
3151 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3152 CHECK_RELEASED(self);
3153 return PyUnicode_FromString(self->view.format);
3154 }
3155
3156 static PyObject *
3157 memory_itemsize_get(PyObject *_self, void *Py_UNUSED(ignored))
3158 {
3159 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3160 CHECK_RELEASED(self);
3161 return PyLong_FromSsize_t(self->view.itemsize);
3162 }
3163
3164 static PyObject *
3165 memory_shape_get(PyObject *_self, void *Py_UNUSED(ignored))
3166 {
3167 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3168 CHECK_RELEASED(self);
3169 return _IntTupleFromSsizet(self->view.ndim, self->view.shape);
3170 }
3171
3172 static PyObject *
3173 memory_strides_get(PyObject *_self, void *Py_UNUSED(ignored))
3174 {
3175 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3176 CHECK_RELEASED(self);
3177 return _IntTupleFromSsizet(self->view.ndim, self->view.strides);
3178 }
3179
3180 static PyObject *
3181 memory_suboffsets_get(PyObject *_self, void *Py_UNUSED(ignored))
3182 {
3183 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3184 CHECK_RELEASED(self);
3185 return _IntTupleFromSsizet(self->view.ndim, self->view.suboffsets);
3186 }
3187
3188 static PyObject *
3189 memory_readonly_get(PyObject *_self, void *Py_UNUSED(ignored))
3190 {
3191 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3192 CHECK_RELEASED(self);
3193 return PyBool_FromLong(self->view.readonly);
3194 }
3195
3196 static PyObject *
3197 memory_ndim_get(PyObject *_self, void *Py_UNUSED(ignored))
3198 {
3199 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3200 CHECK_RELEASED(self);
3201 return PyLong_FromLong(self->view.ndim);
3202 }
3203
3204 static PyObject *
3205 memory_c_contiguous(PyObject *_self, void *Py_UNUSED(ignored))
3206 {
3207 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3208 CHECK_RELEASED(self);
3209 return PyBool_FromLong(MV_C_CONTIGUOUS(self->flags));
3210 }
3211
3212 static PyObject *
3213 memory_f_contiguous(PyObject *_self, void *Py_UNUSED(ignored))
3214 {
3215 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3216 CHECK_RELEASED(self);
3217 return PyBool_FromLong(MV_F_CONTIGUOUS(self->flags));
3218 }
3219
3220 static PyObject *
3221 memory_contiguous(PyObject *_self, void *Py_UNUSED(ignored))
3222 {
3223 PyMemoryViewObject *self = (PyMemoryViewObject *)_self;
3224 CHECK_RELEASED(self);
3225 return PyBool_FromLong(MV_ANY_CONTIGUOUS(self->flags));
3226 }
3227
3228 PyDoc_STRVAR(memory_obj_doc,
3229 "The underlying object of the memoryview.");
3230 PyDoc_STRVAR(memory_nbytes_doc,
3231 "The amount of space in bytes that the array would use in\n"
3232 " a contiguous representation.");
3233 PyDoc_STRVAR(memory_readonly_doc,
3234 "A bool indicating whether the memory is read only.");
3235 PyDoc_STRVAR(memory_itemsize_doc,
3236 "The size in bytes of each element of the memoryview.");
3237 PyDoc_STRVAR(memory_format_doc,
3238 "A string containing the format (in struct module style)\n"
3239 " for each element in the view.");
3240 PyDoc_STRVAR(memory_ndim_doc,
3241 "An integer indicating how many dimensions of a multi-dimensional\n"
3242 " array the memory represents.");
3243 PyDoc_STRVAR(memory_shape_doc,
3244 "A tuple of ndim integers giving the shape of the memory\n"
3245 " as an N-dimensional array.");
3246 PyDoc_STRVAR(memory_strides_doc,
3247 "A tuple of ndim integers giving the size in bytes to access\n"
3248 " each element for each dimension of the array.");
3249 PyDoc_STRVAR(memory_suboffsets_doc,
3250 "A tuple of integers used internally for PIL-style arrays.");
3251 PyDoc_STRVAR(memory_c_contiguous_doc,
3252 "A bool indicating whether the memory is C contiguous.");
3253 PyDoc_STRVAR(memory_f_contiguous_doc,
3254 "A bool indicating whether the memory is Fortran contiguous.");
3255 PyDoc_STRVAR(memory_contiguous_doc,
3256 "A bool indicating whether the memory is contiguous.");
3257 PyDoc_STRVAR(memory_exit_doc,
3258 "__exit__($self, /, *exc_info)\n--\n\n"
3259 "Release the underlying buffer exposed by the memoryview object.");
3260
3261
3262 static PyGetSetDef memory_getsetlist[] = {
3263 {"obj", memory_obj_get, NULL, memory_obj_doc},
3264 {"nbytes", memory_nbytes_get, NULL, memory_nbytes_doc},
3265 {"readonly", memory_readonly_get, NULL, memory_readonly_doc},
3266 {"itemsize", memory_itemsize_get, NULL, memory_itemsize_doc},
3267 {"format", memory_format_get, NULL, memory_format_doc},
3268 {"ndim", memory_ndim_get, NULL, memory_ndim_doc},
3269 {"shape", memory_shape_get, NULL, memory_shape_doc},
3270 {"strides", memory_strides_get, NULL, memory_strides_doc},
3271 {"suboffsets", memory_suboffsets_get, NULL, memory_suboffsets_doc},
3272 {"c_contiguous", memory_c_contiguous, NULL, memory_c_contiguous_doc},
3273 {"f_contiguous", memory_f_contiguous, NULL, memory_f_contiguous_doc},
3274 {"contiguous", memory_contiguous, NULL, memory_contiguous_doc},
3275 {NULL, NULL, NULL, NULL},
3276 };
3277
3278
3279 static PyMethodDef memory_methods[] = {
3280 MEMORYVIEW_RELEASE_METHODDEF
3281 MEMORYVIEW_TOBYTES_METHODDEF
3282 MEMORYVIEW_HEX_METHODDEF
3283 MEMORYVIEW_TOLIST_METHODDEF
3284 MEMORYVIEW_CAST_METHODDEF
3285 MEMORYVIEW_TOREADONLY_METHODDEF
3286 MEMORYVIEW__FROM_FLAGS_METHODDEF
3287 {"__enter__", memory_enter, METH_NOARGS, NULL},
3288 {"__exit__", memory_exit, METH_VARARGS, memory_exit_doc},
3289 {NULL, NULL}
3290 };
3291
3292 /**************************************************************************/
3293 /* Memoryview Iterator */
3294 /**************************************************************************/
3295
3296 PyTypeObject _PyMemoryIter_Type;
3297
3298 typedef struct {
3299 PyObject_HEAD
3300 Py_ssize_t it_index;
3301 PyMemoryViewObject *it_seq; // Set to NULL when iterator is exhausted
3302 Py_ssize_t it_length;
3303 const char *it_fmt;
3304 } memoryiterobject;
3305
3306 static void
3307 memoryiter_dealloc(PyObject *self)
3308 {
3309 memoryiterobject *it = (memoryiterobject *)self;
3310 _PyObject_GC_UNTRACK(it);
3311 Py_XDECREF(it->it_seq);
3312 PyObject_GC_Del(it);
3313 }
3314
3315 static int
3316 memoryiter_traverse(PyObject *self, visitproc visit, void *arg)
3317 {
3318 memoryiterobject *it = (memoryiterobject *)self;
3319 Py_VISIT(it->it_seq);
3320 return 0;
3321 }
3322
3323 static PyObject *
3324 memoryiter_next(PyObject *self)
3325 {
3326 memoryiterobject *it = (memoryiterobject *)self;
3327 PyMemoryViewObject *seq;
3328 seq = it->it_seq;
3329 if (seq == NULL) {
3330 return NULL;
3331 }
3332
3333 if (it->it_index < it->it_length) {
3334 CHECK_RELEASED(seq);
3335 Py_buffer *view = &(seq->view);
3336 char *ptr = (char *)seq->view.buf;
3337
3338 ptr += view->strides[0] * it->it_index++;
3339 ptr = ADJUST_PTR(ptr, view->suboffsets, 0);
3340 if (ptr == NULL) {
3341 return NULL;
3342 }
3343 return unpack_single(seq, ptr, it->it_fmt);
3344 }
3345
3346 it->it_seq = NULL;
3347 Py_DECREF(seq);
3348 return NULL;
3349 }
3350
3351 static PyObject *
3352 memory_iter(PyObject *seq)
3353 {
3354 if (!PyMemoryView_Check(seq)) {
3355 PyErr_BadInternalCall();
3356 return NULL;
3357 }
3358 CHECK_RELEASED(seq);
3359 PyMemoryViewObject *obj = (PyMemoryViewObject *)seq;
3360 int ndims = obj->view.ndim;
3361 if (ndims == 0) {
3362 PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
3363 return NULL;
3364 }
3365 if (ndims != 1) {
3366 PyErr_SetString(PyExc_NotImplementedError,
3367 "multi-dimensional sub-views are not implemented");
3368 return NULL;
3369 }
3370
3371 const char *fmt = adjust_fmt(&obj->view);
3372 if (fmt == NULL) {
3373 return NULL;
3374 }
3375
3376 memoryiterobject *it;
3377 it = PyObject_GC_New(memoryiterobject, &_PyMemoryIter_Type);
3378 if (it == NULL) {
3379 return NULL;
3380 }
3381 it->it_fmt = fmt;
3382 it->it_length = memory_length((PyObject *)obj);
3383 it->it_index = 0;
3384 it->it_seq = (PyMemoryViewObject*)Py_NewRef(obj);
3385 _PyObject_GC_TRACK(it);
3386 return (PyObject *)it;
3387 }
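/* Python-level example (for illustration): iterating a one-dimensional
   view yields the same elements as indexing it:

       >>> list(memoryview(b'ab'))
       [97, 98]

   0-dim and multi-dimensional views are rejected above before the
   iterator is created. */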
3388
3389 PyTypeObject _PyMemoryIter_Type = {
3390 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3391 .tp_name = "memory_iterator",
3392 .tp_basicsize = sizeof(memoryiterobject),
3393 // methods
3394 .tp_dealloc = memoryiter_dealloc,
3395 .tp_getattro = PyObject_GenericGetAttr,
3396 .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
3397 .tp_traverse = memoryiter_traverse,
3398 .tp_iter = PyObject_SelfIter,
3399 .tp_iternext = memoryiter_next,
3400 };
3401
3402 PyTypeObject PyMemoryView_Type = {
3403 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3404 "memoryview", /* tp_name */
3405 offsetof(PyMemoryViewObject, ob_array), /* tp_basicsize */
3406 sizeof(Py_ssize_t), /* tp_itemsize */
3407 memory_dealloc, /* tp_dealloc */
3408 0, /* tp_vectorcall_offset */
3409 0, /* tp_getattr */
3410 0, /* tp_setattr */
3411 0, /* tp_as_async */
3412 memory_repr, /* tp_repr */
3413 0, /* tp_as_number */
3414 &memory_as_sequence, /* tp_as_sequence */
3415 &memory_as_mapping, /* tp_as_mapping */
3416 memory_hash, /* tp_hash */
3417 0, /* tp_call */
3418 0, /* tp_str */
3419 PyObject_GenericGetAttr, /* tp_getattro */
3420 0, /* tp_setattro */
3421 &memory_as_buffer, /* tp_as_buffer */
3422 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
3423 Py_TPFLAGS_SEQUENCE, /* tp_flags */
3424 memoryview__doc__, /* tp_doc */
3425 memory_traverse, /* tp_traverse */
3426 memory_clear, /* tp_clear */
3427 memory_richcompare, /* tp_richcompare */
3428 offsetof(PyMemoryViewObject, weakreflist),/* tp_weaklistoffset */
3429 memory_iter, /* tp_iter */
3430 0, /* tp_iternext */
3431 memory_methods, /* tp_methods */
3432 0, /* tp_members */
3433 memory_getsetlist, /* tp_getset */
3434 0, /* tp_base */
3435 0, /* tp_dict */
3436 0, /* tp_descr_get */
3437 0, /* tp_descr_set */
3438 0, /* tp_dictoffset */
3439 0, /* tp_init */
3440 0, /* tp_alloc */
3441 memoryview, /* tp_new */
3442 };
3443