1 /* Memoryview object implementation */
2
3 #include "Python.h"
4 #include "pycore_object.h"
5 #include "pycore_pymem.h"
6 #include "pycore_pystate.h"
7 #include "pystrhex.h"
8 #include <stddef.h>
9
10 /*[clinic input]
11 class memoryview "PyMemoryViewObject *" "&PyMemoryView_Type"
12 [clinic start generated code]*/
13 /*[clinic end generated code: output=da39a3ee5e6b4b0d input=e2e49d2192835219]*/
14
15 #include "clinic/memoryobject.c.h"
16
17 /****************************************************************************/
18 /* ManagedBuffer Object */
19 /****************************************************************************/
20
21 /*
22 ManagedBuffer Object:
23 ---------------------
24
25 The purpose of this object is to facilitate the handling of chained
26 memoryviews that have the same underlying exporting object. PEP-3118
27 allows the underlying object to change while a view is exported. This
28 could lead to unexpected results when constructing a new memoryview
29 from an existing memoryview.
30
31 Rather than repeatedly redirecting buffer requests to the original base
32 object, all chained memoryviews use a single buffer snapshot. This
33 snapshot is generated by the constructor _PyManagedBuffer_FromObject().
34
35 Ownership rules:
36 ----------------
37
38 The master buffer inside a managed buffer is filled in by the original
39 base object. shape, strides, suboffsets and format are read-only for
40 all consumers.
41
42 A memoryview's buffer is a private copy of the exporter's buffer. shape,
43 strides and suboffsets belong to the memoryview and are thus writable.
44
45 If a memoryview itself exports several buffers via memory_getbuf(), all
46 buffer copies share shape, strides and suboffsets. In this case, the
47 arrays are NOT writable.
48
49 Reference count assumptions:
50 ----------------------------
51
52 The 'obj' member of a Py_buffer must either be NULL or refer to the
53 exporting base object. In the Python codebase, all getbufferprocs
54 return a new reference to view.obj (example: bytes_buffer_getbuffer()).
55
56 PyBuffer_Release() decrements view.obj (if non-NULL), so the
57 releasebufferprocs must NOT decrement view.obj.
58 */
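/*
   Illustration (informal, Python level): chaining views does not re-request
   the buffer from the exporter.

       b  = bytearray(b'spam')
       m1 = memoryview(b)    # snapshot taken via _PyManagedBuffer_FromObject()
       m2 = memoryview(m1)   # m2 registers with m1's managed buffer; no new
                             # getbuffer request is sent to b

   m1 and m2 share one master buffer; releasing both drops the single export
   held on b.
*/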
59
60
61 #define CHECK_MBUF_RELEASED(mbuf) \
62 if (((_PyManagedBufferObject *)mbuf)->flags&_Py_MANAGED_BUFFER_RELEASED) { \
63 PyErr_SetString(PyExc_ValueError, \
64 "operation forbidden on released memoryview object"); \
65 return NULL; \
66 }
67
68
69 static inline _PyManagedBufferObject *
70 mbuf_alloc(void)
71 {
72 _PyManagedBufferObject *mbuf;
73
74 mbuf = (_PyManagedBufferObject *)
75 PyObject_GC_New(_PyManagedBufferObject, &_PyManagedBuffer_Type);
76 if (mbuf == NULL)
77 return NULL;
78 mbuf->flags = 0;
79 mbuf->exports = 0;
80 mbuf->master.obj = NULL;
81 _PyObject_GC_TRACK(mbuf);
82
83 return mbuf;
84 }
85
86 static PyObject *
87 _PyManagedBuffer_FromObject(PyObject *base)
88 {
89 _PyManagedBufferObject *mbuf;
90
91 mbuf = mbuf_alloc();
92 if (mbuf == NULL)
93 return NULL;
94
95 if (PyObject_GetBuffer(base, &mbuf->master, PyBUF_FULL_RO) < 0) {
96 mbuf->master.obj = NULL;
97 Py_DECREF(mbuf);
98 return NULL;
99 }
100
101 return (PyObject *)mbuf;
102 }
103
104 static void
105 mbuf_release(_PyManagedBufferObject *self)
106 {
107 if (self->flags&_Py_MANAGED_BUFFER_RELEASED)
108 return;
109
110 /* NOTE: at this point self->exports can still be > 0 if this function
111 is called from mbuf_clear() to break up a reference cycle. */
112 self->flags |= _Py_MANAGED_BUFFER_RELEASED;
113
114 /* PyBuffer_Release() decrements master->obj and sets it to NULL. */
115 _PyObject_GC_UNTRACK(self);
116 PyBuffer_Release(&self->master);
117 }
118
119 static void
120 mbuf_dealloc(_PyManagedBufferObject *self)
121 {
122 assert(self->exports == 0);
123 mbuf_release(self);
124 if (self->flags&_Py_MANAGED_BUFFER_FREE_FORMAT)
125 PyMem_Free(self->master.format);
126 PyObject_GC_Del(self);
127 }
128
129 static int
130 mbuf_traverse(_PyManagedBufferObject *self, visitproc visit, void *arg)
131 {
132 Py_VISIT(self->master.obj);
133 return 0;
134 }
135
136 static int
137 mbuf_clear(_PyManagedBufferObject *self)
138 {
139 assert(self->exports >= 0);
140 mbuf_release(self);
141 return 0;
142 }
143
144 PyTypeObject _PyManagedBuffer_Type = {
145 PyVarObject_HEAD_INIT(&PyType_Type, 0)
146 "managedbuffer",
147 sizeof(_PyManagedBufferObject),
148 0,
149 (destructor)mbuf_dealloc, /* tp_dealloc */
150 0, /* tp_vectorcall_offset */
151 0, /* tp_getattr */
152 0, /* tp_setattr */
153 0, /* tp_as_async */
154 0, /* tp_repr */
155 0, /* tp_as_number */
156 0, /* tp_as_sequence */
157 0, /* tp_as_mapping */
158 0, /* tp_hash */
159 0, /* tp_call */
160 0, /* tp_str */
161 PyObject_GenericGetAttr, /* tp_getattro */
162 0, /* tp_setattro */
163 0, /* tp_as_buffer */
164 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
165 0, /* tp_doc */
166 (traverseproc)mbuf_traverse, /* tp_traverse */
167 (inquiry)mbuf_clear /* tp_clear */
168 };
169
170
171 /****************************************************************************/
172 /* MemoryView Object */
173 /****************************************************************************/
174
175 /* In the process of breaking reference cycles mbuf_release() can be
176 called before memory_release(). */
177 #define BASE_INACCESSIBLE(mv) \
178 (((PyMemoryViewObject *)mv)->flags&_Py_MEMORYVIEW_RELEASED || \
179 ((PyMemoryViewObject *)mv)->mbuf->flags&_Py_MANAGED_BUFFER_RELEASED)
180
181 #define CHECK_RELEASED(mv) \
182 if (BASE_INACCESSIBLE(mv)) { \
183 PyErr_SetString(PyExc_ValueError, \
184 "operation forbidden on released memoryview object"); \
185 return NULL; \
186 }
187
188 #define CHECK_RELEASED_INT(mv) \
189 if (BASE_INACCESSIBLE(mv)) { \
190 PyErr_SetString(PyExc_ValueError, \
191 "operation forbidden on released memoryview object"); \
192 return -1; \
193 }
194
195 #define CHECK_LIST_OR_TUPLE(v) \
196 if (!PyList_Check(v) && !PyTuple_Check(v)) { \
197 PyErr_SetString(PyExc_TypeError, \
198 #v " must be a list or a tuple"); \
199 return NULL; \
200 }
201
202 #define VIEW_ADDR(mv) (&((PyMemoryViewObject *)mv)->view)
203
204 /* Check for the presence of suboffsets in the first dimension. */
205 #define HAVE_PTR(suboffsets, dim) (suboffsets && suboffsets[dim] >= 0)
206 /* Adjust ptr if suboffsets are present. */
207 #define ADJUST_PTR(ptr, suboffsets, dim) \
208 (HAVE_PTR(suboffsets, dim) ? *((char**)ptr) + suboffsets[dim] : ptr)
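/*
   Worked example (illustrative): for a PIL-style array whose first dimension
   stores pointers, suboffsets[0] >= 0 and an item pointer is obtained by
   dereferencing first, then adding the suboffset:

       char *base = ptr + i * strides[0];
       char *item = *((char **)base) + suboffsets[0];  /* == ADJUST_PTR(base, suboffsets, 0) */

   If suboffsets is NULL or the entry is negative, the pointer is used as-is.
*/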
209
210 /* Memoryview buffer properties */
211 #define MV_C_CONTIGUOUS(flags) (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C))
212 #define MV_F_CONTIGUOUS(flags) \
213 (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_FORTRAN))
214 #define MV_ANY_CONTIGUOUS(flags) \
215 (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN))
216
217 /* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
218 #define MV_CONTIGUOUS_NDIM1(view) \
219 ((view)->shape[0] == 1 || (view)->strides[0] == (view)->itemsize)
220
221 /* getbuffer() requests */
222 #define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
223 #define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
224 #define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
225 #define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
226 #define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
227 #define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
228 #define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
229 #define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)
230
231
232 PyDoc_STRVAR(memory_doc,
233 "memoryview(object)\n--\n\
234 \n\
235 Create a new memoryview object which references the given object.");
236
237
238 /**************************************************************************/
239 /* Copy memoryview buffers */
240 /**************************************************************************/
241
242 /* The functions in this section take a source and a destination buffer
243 with the same logical structure: format, itemsize, ndim and shape
244 are identical, with ndim > 0.
245
246 NOTE: All buffers are assumed to have PyBUF_FULL information, which
247 is the case for memoryviews! */
248
249
250 /* Assumptions: ndim >= 1. The macro tests for a corner case that should
251 perhaps be explicitly forbidden in the PEP. */
252 #define HAVE_SUBOFFSETS_IN_LAST_DIM(view) \
253 (view->suboffsets && view->suboffsets[dest->ndim-1] >= 0)
254
255 static inline int
256 last_dim_is_contiguous(const Py_buffer *dest, const Py_buffer *src)
257 {
258 assert(dest->ndim > 0 && src->ndim > 0);
259 return (!HAVE_SUBOFFSETS_IN_LAST_DIM(dest) &&
260 !HAVE_SUBOFFSETS_IN_LAST_DIM(src) &&
261 dest->strides[dest->ndim-1] == dest->itemsize &&
262 src->strides[src->ndim-1] == src->itemsize);
263 }
264
265 /* This is not a general function for determining format equivalence.
266 It is used in copy_single() and copy_buffer() to weed out non-matching
267 formats. Skipping the '@' character is specifically used in slice
268 assignments, where the lvalue is already known to have a single character
269 format. This is a performance hack that could be rewritten (if properly
270 benchmarked). */
271 static inline int
272 equiv_format(const Py_buffer *dest, const Py_buffer *src)
273 {
274 const char *dfmt, *sfmt;
275
276 assert(dest->format && src->format);
277 dfmt = dest->format[0] == '@' ? dest->format+1 : dest->format;
278 sfmt = src->format[0] == '@' ? src->format+1 : src->format;
279
280 if (strcmp(dfmt, sfmt) != 0 ||
281 dest->itemsize != src->itemsize) {
282 return 0;
283 }
284
285 return 1;
286 }
287
288 /* Two shapes are equivalent if they are either equal or identical up
289 to a zero element at the same position. For example, in NumPy arrays
290 the shapes [1, 0, 5] and [1, 0, 7] are equivalent. */
291 static inline int
292 equiv_shape(const Py_buffer *dest, const Py_buffer *src)
293 {
294 int i;
295
296 if (dest->ndim != src->ndim)
297 return 0;
298
299 for (i = 0; i < dest->ndim; i++) {
300 if (dest->shape[i] != src->shape[i])
301 return 0;
302 if (dest->shape[i] == 0)
303 break;
304 }
305
306 return 1;
307 }
308
309 /* Check that the logical structure of the destination and source buffers
310 is identical. */
311 static int
312 equiv_structure(const Py_buffer *dest, const Py_buffer *src)
313 {
314 if (!equiv_format(dest, src) ||
315 !equiv_shape(dest, src)) {
316 PyErr_SetString(PyExc_ValueError,
317 "memoryview assignment: lvalue and rvalue have different "
318 "structures");
319 return 0;
320 }
321
322 return 1;
323 }
324
325 /* Base case for recursive multi-dimensional copying. Contiguous arrays are
326 copied with very little overhead. Assumptions: ndim == 1, mem == NULL or
327 sizeof(mem) == shape[0] * itemsize. */
328 static void
329 copy_base(const Py_ssize_t *shape, Py_ssize_t itemsize,
330 char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
331 char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
332 char *mem)
333 {
334 if (mem == NULL) { /* contiguous */
335 Py_ssize_t size = shape[0] * itemsize;
336 if (dptr + size < sptr || sptr + size < dptr)
337 memcpy(dptr, sptr, size); /* no overlapping */
338 else
339 memmove(dptr, sptr, size);
340 }
341 else {
342 char *p;
343 Py_ssize_t i;
344 for (i=0, p=mem; i < shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
345 char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
346 memcpy(p, xsptr, itemsize);
347 }
348 for (i=0, p=mem; i < shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
349 char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
350 memcpy(xdptr, p, itemsize);
351 }
352 }
353
354 }
355
356 /* Recursively copy a source buffer to a destination buffer. The two buffers
357 have the same ndim, shape and itemsize. */
358 static void
359 copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
360 char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
361 char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
362 char *mem)
363 {
364 Py_ssize_t i;
365
366 assert(ndim >= 1);
367
368 if (ndim == 1) {
369 copy_base(shape, itemsize,
370 dptr, dstrides, dsuboffsets,
371 sptr, sstrides, ssuboffsets,
372 mem);
373 return;
374 }
375
376 for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
377 char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
378 char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
379
380 copy_rec(shape+1, ndim-1, itemsize,
381 xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
382 xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
383 mem);
384 }
385 }
386
387 /* Faster copying of one-dimensional arrays. */
388 static int
389 copy_single(Py_buffer *dest, Py_buffer *src)
390 {
391 char *mem = NULL;
392
393 assert(dest->ndim == 1);
394
395 if (!equiv_structure(dest, src))
396 return -1;
397
398 if (!last_dim_is_contiguous(dest, src)) {
399 mem = PyMem_Malloc(dest->shape[0] * dest->itemsize);
400 if (mem == NULL) {
401 PyErr_NoMemory();
402 return -1;
403 }
404 }
405
406 copy_base(dest->shape, dest->itemsize,
407 dest->buf, dest->strides, dest->suboffsets,
408 src->buf, src->strides, src->suboffsets,
409 mem);
410
411 if (mem)
412 PyMem_Free(mem);
413
414 return 0;
415 }
416
417 /* Recursively copy src to dest. Both buffers must have the same basic
418 structure. Copying is atomic: the function never fails with a partial
419 copy. */
420 static int
421 copy_buffer(Py_buffer *dest, Py_buffer *src)
422 {
423 char *mem = NULL;
424
425 assert(dest->ndim > 0);
426
427 if (!equiv_structure(dest, src))
428 return -1;
429
430 if (!last_dim_is_contiguous(dest, src)) {
431 mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
432 if (mem == NULL) {
433 PyErr_NoMemory();
434 return -1;
435 }
436 }
437
438 copy_rec(dest->shape, dest->ndim, dest->itemsize,
439 dest->buf, dest->strides, dest->suboffsets,
440 src->buf, src->strides, src->suboffsets,
441 mem);
442
443 if (mem)
444 PyMem_Free(mem);
445
446 return 0;
447 }
448
449 /* Initialize strides for a C-contiguous array. */
450 static inline void
451 init_strides_from_shape(Py_buffer *view)
452 {
453 Py_ssize_t i;
454
455 assert(view->ndim > 0);
456
457 view->strides[view->ndim-1] = view->itemsize;
458 for (i = view->ndim-2; i >= 0; i--)
459 view->strides[i] = view->strides[i+1] * view->shape[i+1];
460 }
461
462 /* Initialize strides for a Fortran-contiguous array. */
463 static inline void
464 init_fortran_strides_from_shape(Py_buffer *view)
465 {
466 Py_ssize_t i;
467
468 assert(view->ndim > 0);
469
470 view->strides[0] = view->itemsize;
471 for (i = 1; i < view->ndim; i++)
472 view->strides[i] = view->strides[i-1] * view->shape[i-1];
473 }
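/*
   Worked example (illustrative): for shape = {2, 3, 4} and itemsize = 8,
   init_strides_from_shape() produces C-contiguous strides {96, 32, 8},
   while init_fortran_strides_from_shape() produces Fortran strides
   {8, 16, 48}. Either way the buffer spans product(shape) * itemsize
   == 192 bytes.
*/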
474
475 /* Copy src to a contiguous representation. order is one of 'C', 'F' (Fortran)
476 or 'A' (Any). Assumptions: src has PyBUF_FULL information, src->ndim >= 1,
477 len(mem) == src->len. */
478 static int
479 buffer_to_contiguous(char *mem, Py_buffer *src, char order)
480 {
481 Py_buffer dest;
482 Py_ssize_t *strides;
483 int ret;
484
485 assert(src->ndim >= 1);
486 assert(src->shape != NULL);
487 assert(src->strides != NULL);
488
489 strides = PyMem_Malloc(src->ndim * (sizeof *src->strides));
490 if (strides == NULL) {
491 PyErr_NoMemory();
492 return -1;
493 }
494
495 /* initialize dest */
496 dest = *src;
497 dest.buf = mem;
498 /* shape is constant and shared: the logical representation of the
499 array is unaltered. */
500
501 /* The physical representation determined by strides (and possibly
502 suboffsets) may change. */
503 dest.strides = strides;
504 if (order == 'C' || order == 'A') {
505 init_strides_from_shape(&dest);
506 }
507 else {
508 init_fortran_strides_from_shape(&dest);
509 }
510
511 dest.suboffsets = NULL;
512
513 ret = copy_buffer(&dest, src);
514
515 PyMem_Free(strides);
516 return ret;
517 }
518
519
520 /****************************************************************************/
521 /* Constructors */
522 /****************************************************************************/
523
524 /* Initialize values that are shared with the managed buffer. */
525 static inline void
526 init_shared_values(Py_buffer *dest, const Py_buffer *src)
527 {
528 dest->obj = src->obj;
529 dest->buf = src->buf;
530 dest->len = src->len;
531 dest->itemsize = src->itemsize;
532 dest->readonly = src->readonly;
533 dest->format = src->format ? src->format : "B";
534 dest->internal = src->internal;
535 }
536
537 /* Copy shape and strides. Reconstruct missing values. */
538 static void
539 init_shape_strides(Py_buffer *dest, const Py_buffer *src)
540 {
541 Py_ssize_t i;
542
543 if (src->ndim == 0) {
544 dest->shape = NULL;
545 dest->strides = NULL;
546 return;
547 }
548 if (src->ndim == 1) {
549 dest->shape[0] = src->shape ? src->shape[0] : src->len / src->itemsize;
550 dest->strides[0] = src->strides ? src->strides[0] : src->itemsize;
551 return;
552 }
553
554 for (i = 0; i < src->ndim; i++)
555 dest->shape[i] = src->shape[i];
556 if (src->strides) {
557 for (i = 0; i < src->ndim; i++)
558 dest->strides[i] = src->strides[i];
559 }
560 else {
561 init_strides_from_shape(dest);
562 }
563 }
564
565 static inline void
566 init_suboffsets(Py_buffer *dest, const Py_buffer *src)
567 {
568 Py_ssize_t i;
569
570 if (src->suboffsets == NULL) {
571 dest->suboffsets = NULL;
572 return;
573 }
574 for (i = 0; i < src->ndim; i++)
575 dest->suboffsets[i] = src->suboffsets[i];
576 }
577
578 /* len = product(shape) * itemsize */
579 static inline void
580 init_len(Py_buffer *view)
581 {
582 Py_ssize_t i, len;
583
584 len = 1;
585 for (i = 0; i < view->ndim; i++)
586 len *= view->shape[i];
587 len *= view->itemsize;
588
589 view->len = len;
590 }
591
592 /* Initialize memoryview buffer properties. */
593 static void
594 init_flags(PyMemoryViewObject *mv)
595 {
596 const Py_buffer *view = &mv->view;
597 int flags = 0;
598
599 switch (view->ndim) {
600 case 0:
601 flags |= (_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|
602 _Py_MEMORYVIEW_FORTRAN);
603 break;
604 case 1:
605 if (MV_CONTIGUOUS_NDIM1(view))
606 flags |= (_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
607 break;
608 default:
609 if (PyBuffer_IsContiguous(view, 'C'))
610 flags |= _Py_MEMORYVIEW_C;
611 if (PyBuffer_IsContiguous(view, 'F'))
612 flags |= _Py_MEMORYVIEW_FORTRAN;
613 break;
614 }
615
616 if (view->suboffsets) {
617 flags |= _Py_MEMORYVIEW_PIL;
618 flags &= ~(_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
619 }
620
621 mv->flags = flags;
622 }
623
624 /* Allocate a new memoryview and perform basic initialization. New memoryviews
625 are exclusively created through the mbuf_add functions. */
626 static inline PyMemoryViewObject *
627 memory_alloc(int ndim)
628 {
629 PyMemoryViewObject *mv;
630
631 mv = (PyMemoryViewObject *)
632 PyObject_GC_NewVar(PyMemoryViewObject, &PyMemoryView_Type, 3*ndim);
633 if (mv == NULL)
634 return NULL;
635
636 mv->mbuf = NULL;
637 mv->hash = -1;
638 mv->flags = 0;
639 mv->exports = 0;
640 mv->view.ndim = ndim;
641 mv->view.shape = mv->ob_array;
642 mv->view.strides = mv->ob_array + ndim;
643 mv->view.suboffsets = mv->ob_array + 2 * ndim;
644 mv->weakreflist = NULL;
645
646 _PyObject_GC_TRACK(mv);
647 return mv;
648 }
649
650 /*
651 Return a new memoryview that is registered with mbuf. If src is NULL,
652 use mbuf->master as the underlying buffer. Otherwise, use src.
653
654 The new memoryview has full buffer information: shape and strides
655 are always present, suboffsets as needed. Arrays are copied to
656 the memoryview's ob_array field.
657 */
658 static PyObject *
659 mbuf_add_view(_PyManagedBufferObject *mbuf, const Py_buffer *src)
660 {
661 PyMemoryViewObject *mv;
662 Py_buffer *dest;
663
664 if (src == NULL)
665 src = &mbuf->master;
666
667 if (src->ndim > PyBUF_MAX_NDIM) {
668 PyErr_SetString(PyExc_ValueError,
669 "memoryview: number of dimensions must not exceed "
670 Py_STRINGIFY(PyBUF_MAX_NDIM));
671 return NULL;
672 }
673
674 mv = memory_alloc(src->ndim);
675 if (mv == NULL)
676 return NULL;
677
678 dest = &mv->view;
679 init_shared_values(dest, src);
680 init_shape_strides(dest, src);
681 init_suboffsets(dest, src);
682 init_flags(mv);
683
684 mv->mbuf = mbuf;
685 Py_INCREF(mbuf);
686 mbuf->exports++;
687
688 return (PyObject *)mv;
689 }
690
691 /* Register an incomplete view: shape, strides, suboffsets and flags still
692 need to be initialized. Use 'ndim' instead of src->ndim to determine the
693 size of the memoryview's ob_array.
694
695 Assumption: ndim <= PyBUF_MAX_NDIM. */
696 static PyObject *
697 mbuf_add_incomplete_view(_PyManagedBufferObject *mbuf, const Py_buffer *src,
698 int ndim)
699 {
700 PyMemoryViewObject *mv;
701 Py_buffer *dest;
702
703 if (src == NULL)
704 src = &mbuf->master;
705
706 assert(ndim <= PyBUF_MAX_NDIM);
707
708 mv = memory_alloc(ndim);
709 if (mv == NULL)
710 return NULL;
711
712 dest = &mv->view;
713 init_shared_values(dest, src);
714
715 mv->mbuf = mbuf;
716 Py_INCREF(mbuf);
717 mbuf->exports++;
718
719 return (PyObject *)mv;
720 }
721
722 /* Expose a raw memory area as a view of contiguous bytes. flags can be
723 PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes).
724 The memoryview has complete buffer information. */
725 PyObject *
726 PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags)
727 {
728 _PyManagedBufferObject *mbuf;
729 PyObject *mv;
730 int readonly;
731
732 assert(mem != NULL);
733 assert(flags == PyBUF_READ || flags == PyBUF_WRITE);
734
735 mbuf = mbuf_alloc();
736 if (mbuf == NULL)
737 return NULL;
738
739 readonly = (flags == PyBUF_WRITE) ? 0 : 1;
740 (void)PyBuffer_FillInfo(&mbuf->master, NULL, mem, size, readonly,
741 PyBUF_FULL_RO);
742
743 mv = mbuf_add_view(mbuf, NULL);
744 Py_DECREF(mbuf);
745
746 return mv;
747 }
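/*
   Usage sketch (illustrative, not part of this module): exposing a static C
   buffer as a read-only view. The caller must keep 'data' alive for the
   lifetime of the view, since no exporting object participates:

       static char data[64];
       PyObject *mv = PyMemoryView_FromMemory(data, sizeof(data), PyBUF_READ);
       if (mv == NULL)
           return NULL;
*/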
748
749 /* Create a memoryview from a given Py_buffer. For simple byte views,
750 PyMemoryView_FromMemory() should be used instead.
751 This function is the only entry point that can create a master buffer
752 without full information. Because of this fact init_shape_strides()
753 must be able to reconstruct missing values. */
754 PyObject *
755 PyMemoryView_FromBuffer(Py_buffer *info)
756 {
757 _PyManagedBufferObject *mbuf;
758 PyObject *mv;
759
760 if (info->buf == NULL) {
761 PyErr_SetString(PyExc_ValueError,
762 "PyMemoryView_FromBuffer(): info->buf must not be NULL");
763 return NULL;
764 }
765
766 mbuf = mbuf_alloc();
767 if (mbuf == NULL)
768 return NULL;
769
770 /* info->obj is either NULL or a borrowed reference. This reference
771 should not be decremented in PyBuffer_Release(). */
772 mbuf->master = *info;
773 mbuf->master.obj = NULL;
774
775 mv = mbuf_add_view(mbuf, NULL);
776 Py_DECREF(mbuf);
777
778 return mv;
779 }
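/*
   Usage sketch (illustrative): wrapping a manually filled Py_buffer that
   describes memory owned by the caller ('mem' and 'size' are assumed caller
   variables). Because master.obj is cleared above, releasing the view never
   releases an exporter; the caller keeps ownership of the memory:

       Py_buffer info;
       if (PyBuffer_FillInfo(&info, NULL, mem, size, 1, PyBUF_FULL_RO) < 0)
           return NULL;
       PyObject *mv = PyMemoryView_FromBuffer(&info);
*/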
780
781 /* Create a memoryview from an object that implements the buffer protocol.
782 If the object is a memoryview, the new memoryview must be registered
783 with the same managed buffer. Otherwise, a new managed buffer is created. */
784 PyObject *
785 PyMemoryView_FromObject(PyObject *v)
786 {
787 _PyManagedBufferObject *mbuf;
788
789 if (PyMemoryView_Check(v)) {
790 PyMemoryViewObject *mv = (PyMemoryViewObject *)v;
791 CHECK_RELEASED(mv);
792 return mbuf_add_view(mv->mbuf, &mv->view);
793 }
794 else if (PyObject_CheckBuffer(v)) {
795 PyObject *ret;
796 mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(v);
797 if (mbuf == NULL)
798 return NULL;
799 ret = mbuf_add_view(mbuf, NULL);
800 Py_DECREF(mbuf);
801 return ret;
802 }
803
804 PyErr_Format(PyExc_TypeError,
805 "memoryview: a bytes-like object is required, not '%.200s'",
806 Py_TYPE(v)->tp_name);
807 return NULL;
808 }
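/*
   Usage sketch (illustrative): the C equivalent of memoryview(obj) for any
   buffer exporter:

       PyObject *mv = PyMemoryView_FromObject(obj);
       if (mv == NULL)
           return NULL;
       ... use the view ...
       Py_DECREF(mv);
*/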
809
810 /* Copy the format string from a base object that might vanish. */
811 static int
812 mbuf_copy_format(_PyManagedBufferObject *mbuf, const char *fmt)
813 {
814 if (fmt != NULL) {
815 char *cp = PyMem_Malloc(strlen(fmt)+1);
816 if (cp == NULL) {
817 PyErr_NoMemory();
818 return -1;
819 }
820 mbuf->master.format = strcpy(cp, fmt);
821 mbuf->flags |= _Py_MANAGED_BUFFER_FREE_FORMAT;
822 }
823
824 return 0;
825 }
826
827 /*
828 Return a memoryview that is based on a contiguous copy of src.
829 Assumptions: src has PyBUF_FULL_RO information, src->ndim > 0.
830
831 Ownership rules:
832 1) As usual, the returned memoryview has a private copy
833 of src->shape, src->strides and src->suboffsets.
834 2) src->format is copied to the master buffer and released
835 in mbuf_dealloc(). The releasebufferproc of the bytes
836 object is NULL, so it does not matter that mbuf_release()
837 passes the altered format pointer to PyBuffer_Release().
838 */
839 static PyObject *
840 memory_from_contiguous_copy(Py_buffer *src, char order)
841 {
842 _PyManagedBufferObject *mbuf;
843 PyMemoryViewObject *mv;
844 PyObject *bytes;
845 Py_buffer *dest;
846 int i;
847
848 assert(src->ndim > 0);
849 assert(src->shape != NULL);
850
851 bytes = PyBytes_FromStringAndSize(NULL, src->len);
852 if (bytes == NULL)
853 return NULL;
854
855 mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(bytes);
856 Py_DECREF(bytes);
857 if (mbuf == NULL)
858 return NULL;
859
860 if (mbuf_copy_format(mbuf, src->format) < 0) {
861 Py_DECREF(mbuf);
862 return NULL;
863 }
864
865 mv = (PyMemoryViewObject *)mbuf_add_incomplete_view(mbuf, NULL, src->ndim);
866 Py_DECREF(mbuf);
867 if (mv == NULL)
868 return NULL;
869
870 dest = &mv->view;
871
872 /* shared values are initialized correctly except for itemsize */
873 dest->itemsize = src->itemsize;
874
875 /* shape and strides */
876 for (i = 0; i < src->ndim; i++) {
877 dest->shape[i] = src->shape[i];
878 }
879 if (order == 'C' || order == 'A') {
880 init_strides_from_shape(dest);
881 }
882 else {
883 init_fortran_strides_from_shape(dest);
884 }
885 /* suboffsets */
886 dest->suboffsets = NULL;
887
888 /* flags */
889 init_flags(mv);
890
891 if (copy_buffer(dest, src) < 0) {
892 Py_DECREF(mv);
893 return NULL;
894 }
895
896 return (PyObject *)mv;
897 }
898
899 /*
900 Return a new memoryview object based on a contiguous exporter with
901 buffertype={PyBUF_READ, PyBUF_WRITE} and order={'C', 'F'ortran, or 'A'ny}.
902 The logical structure of the input and output buffers is the same
903 (i.e. tolist(input) == tolist(output)), but the physical layout in
904 memory can be explicitly chosen.
905
906 As usual, if buffertype=PyBUF_WRITE, the exporter's buffer must be writable,
907 otherwise it may be writable or read-only.
908
909 If the exporter is already contiguous with the desired target order,
910 the memoryview will be directly based on the exporter.
911
912 Otherwise, if the buffertype is PyBUF_READ, the memoryview will be
913 based on a new bytes object. If order={'C', 'A'ny}, use 'C' order,
914 'F'ortran order otherwise.
915 */
916 PyObject *
917 PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char order)
918 {
919 PyMemoryViewObject *mv;
920 PyObject *ret;
921 Py_buffer *view;
922
923 assert(buffertype == PyBUF_READ || buffertype == PyBUF_WRITE);
924 assert(order == 'C' || order == 'F' || order == 'A');
925
926 mv = (PyMemoryViewObject *)PyMemoryView_FromObject(obj);
927 if (mv == NULL)
928 return NULL;
929
930 view = &mv->view;
931 if (buffertype == PyBUF_WRITE && view->readonly) {
932 PyErr_SetString(PyExc_BufferError,
933 "underlying buffer is not writable");
934 Py_DECREF(mv);
935 return NULL;
936 }
937
938 if (PyBuffer_IsContiguous(view, order))
939 return (PyObject *)mv;
940
941 if (buffertype == PyBUF_WRITE) {
942 PyErr_SetString(PyExc_BufferError,
943 "writable contiguous buffer requested "
944 "for a non-contiguous object.");
945 Py_DECREF(mv);
946 return NULL;
947 }
948
949 ret = memory_from_contiguous_copy(view, order);
950 Py_DECREF(mv);
951 return ret;
952 }
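/*
   Usage sketch (illustrative): requesting a read-only C-contiguous view.
   If 'obj' already exports C-contiguous memory, the view aliases it;
   otherwise the data is first copied into a private bytes object:

       PyObject *mv = PyMemoryView_GetContiguous(obj, PyBUF_READ, 'C');
       if (mv == NULL)
           return NULL;
*/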
953
954
955 static PyObject *
956 memory_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
957 {
958 PyObject *obj;
959 static char *kwlist[] = {"object", NULL};
960
961 if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:memoryview", kwlist,
962 &obj)) {
963 return NULL;
964 }
965
966 return PyMemoryView_FromObject(obj);
967 }
968
969
970 /****************************************************************************/
971 /* Previously in abstract.c */
972 /****************************************************************************/
973
974 typedef struct {
975 Py_buffer view;
976 Py_ssize_t array[1];
977 } Py_buffer_full;
978
979 int
980 PyBuffer_ToContiguous(void *buf, Py_buffer *src, Py_ssize_t len, char order)
981 {
982 Py_buffer_full *fb = NULL;
983 int ret;
984
985 assert(order == 'C' || order == 'F' || order == 'A');
986
987 if (len != src->len) {
988 PyErr_SetString(PyExc_ValueError,
989 "PyBuffer_ToContiguous: len != view->len");
990 return -1;
991 }
992
993 if (PyBuffer_IsContiguous(src, order)) {
994 memcpy((char *)buf, src->buf, len);
995 return 0;
996 }
997
998 /* buffer_to_contiguous() assumes PyBUF_FULL */
999 fb = PyMem_Malloc(sizeof *fb + 3 * src->ndim * (sizeof *fb->array));
1000 if (fb == NULL) {
1001 PyErr_NoMemory();
1002 return -1;
1003 }
1004 fb->view.ndim = src->ndim;
1005 fb->view.shape = fb->array;
1006 fb->view.strides = fb->array + src->ndim;
1007 fb->view.suboffsets = fb->array + 2 * src->ndim;
1008
1009 init_shared_values(&fb->view, src);
1010 init_shape_strides(&fb->view, src);
1011 init_suboffsets(&fb->view, src);
1012
1013 src = &fb->view;
1014
1015 ret = buffer_to_contiguous(buf, src, order);
1016 PyMem_Free(fb);
1017 return ret;
1018 }
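/*
   Usage sketch (illustrative): flattening a possibly strided source buffer
   into caller-provided contiguous storage of the same total byte length:

       char *tmp = PyMem_Malloc(src->len);
       if (tmp == NULL) {
           PyErr_NoMemory();
           return NULL;
       }
       if (PyBuffer_ToContiguous(tmp, src, src->len, 'C') < 0) {
           PyMem_Free(tmp);
           return NULL;
       }
*/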
1019
1020
1021 /****************************************************************************/
1022 /* Release/GC management */
1023 /****************************************************************************/
1024
1025 /* Inform the managed buffer that this particular memoryview will not access
1026 the underlying buffer again. If no other memoryviews are registered with
1027 the managed buffer, the underlying buffer is released instantly and
1028 marked as inaccessible for both the memoryview and the managed buffer.
1029
1030 This function fails if the memoryview itself has exported buffers. */
1031 static int
1032 _memory_release(PyMemoryViewObject *self)
1033 {
1034 if (self->flags & _Py_MEMORYVIEW_RELEASED)
1035 return 0;
1036
1037 if (self->exports == 0) {
1038 self->flags |= _Py_MEMORYVIEW_RELEASED;
1039 assert(self->mbuf->exports > 0);
1040 if (--self->mbuf->exports == 0)
1041 mbuf_release(self->mbuf);
1042 return 0;
1043 }
1044 if (self->exports > 0) {
1045 PyErr_Format(PyExc_BufferError,
1046 "memoryview has %zd exported buffer%s", self->exports,
1047 self->exports==1 ? "" : "s");
1048 return -1;
1049 }
1050
1051 Py_FatalError("_memory_release(): negative export count");
1052 return -1;
1053 }
1054
1055 static PyObject *
1056 memory_release(PyMemoryViewObject *self, PyObject *noargs)
1057 {
1058 if (_memory_release(self) < 0)
1059 return NULL;
1060 Py_RETURN_NONE;
1061 }
1062
1063 static void
1064 memory_dealloc(PyMemoryViewObject *self)
1065 {
1066 assert(self->exports == 0);
1067 _PyObject_GC_UNTRACK(self);
1068 (void)_memory_release(self);
1069 Py_CLEAR(self->mbuf);
1070 if (self->weakreflist != NULL)
1071 PyObject_ClearWeakRefs((PyObject *) self);
1072 PyObject_GC_Del(self);
1073 }
1074
1075 static int
1076 memory_traverse(PyMemoryViewObject *self, visitproc visit, void *arg)
1077 {
1078 Py_VISIT(self->mbuf);
1079 return 0;
1080 }
1081
1082 static int
1083 memory_clear(PyMemoryViewObject *self)
1084 {
1085 (void)_memory_release(self);
1086 Py_CLEAR(self->mbuf);
1087 return 0;
1088 }
1089
1090 static PyObject *
1091 memory_enter(PyObject *self, PyObject *args)
1092 {
1093 CHECK_RELEASED(self);
1094 Py_INCREF(self);
1095 return self;
1096 }
1097
1098 static PyObject *
1099 memory_exit(PyObject *self, PyObject *args)
1100 {
1101 return memory_release((PyMemoryViewObject *)self, NULL);
1102 }
1103
1104
1105 /****************************************************************************/
1106 /* Casting format and shape */
1107 /****************************************************************************/
1108
1109 #define IS_BYTE_FORMAT(f) (f == 'b' || f == 'B' || f == 'c')
1110
1111 static inline Py_ssize_t
1112 get_native_fmtchar(char *result, const char *fmt)
1113 {
1114 Py_ssize_t size = -1;
1115
1116 if (fmt[0] == '@') fmt++;
1117
1118 switch (fmt[0]) {
1119 case 'c': case 'b': case 'B': size = sizeof(char); break;
1120 case 'h': case 'H': size = sizeof(short); break;
1121 case 'i': case 'I': size = sizeof(int); break;
1122 case 'l': case 'L': size = sizeof(long); break;
1123 case 'q': case 'Q': size = sizeof(long long); break;
1124 case 'n': case 'N': size = sizeof(Py_ssize_t); break;
1125 case 'f': size = sizeof(float); break;
1126 case 'd': size = sizeof(double); break;
1127 case '?': size = sizeof(_Bool); break;
1128 case 'P': size = sizeof(void *); break;
1129 }
1130
1131 if (size > 0 && fmt[1] == '\0') {
1132 *result = fmt[0];
1133 return size;
1134 }
1135
1136 return -1;
1137 }
1138
1139 static inline const char *
1140 get_native_fmtstr(const char *fmt)
1141 {
1142 int at = 0;
1143
1144 if (fmt[0] == '@') {
1145 at = 1;
1146 fmt++;
1147 }
1148 if (fmt[0] == '\0' || fmt[1] != '\0') {
1149 return NULL;
1150 }
1151
1152 #define RETURN(s) do { return at ? "@" s : s; } while (0)
1153
1154 switch (fmt[0]) {
1155 case 'c': RETURN("c");
1156 case 'b': RETURN("b");
1157 case 'B': RETURN("B");
1158 case 'h': RETURN("h");
1159 case 'H': RETURN("H");
1160 case 'i': RETURN("i");
1161 case 'I': RETURN("I");
1162 case 'l': RETURN("l");
1163 case 'L': RETURN("L");
1164 case 'q': RETURN("q");
1165 case 'Q': RETURN("Q");
1166 case 'n': RETURN("n");
1167 case 'N': RETURN("N");
1168 case 'f': RETURN("f");
1169 case 'd': RETURN("d");
1170 case '?': RETURN("?");
1171 case 'P': RETURN("P");
1172 }
1173
1174 return NULL;
1175 }
1176
1177
1178 /* Cast a memoryview's data type to 'format'. The input array must be
1179 C-contiguous. At least one of input-format, output-format must have
1180 byte size. The output array is 1-D, with the same byte length as the
1181 input array. Thus, view->len must be a multiple of the new itemsize. */
1182 static int
1183 cast_to_1D(PyMemoryViewObject *mv, PyObject *format)
1184 {
1185 Py_buffer *view = &mv->view;
1186 PyObject *asciifmt;
1187 char srcchar, destchar;
1188 Py_ssize_t itemsize;
1189 int ret = -1;
1190
1191 assert(view->ndim >= 1);
1192 assert(Py_SIZE(mv) == 3*view->ndim);
1193 assert(view->shape == mv->ob_array);
1194 assert(view->strides == mv->ob_array + view->ndim);
1195 assert(view->suboffsets == mv->ob_array + 2*view->ndim);
1196
1197 asciifmt = PyUnicode_AsASCIIString(format);
1198 if (asciifmt == NULL)
1199 return ret;
1200
1201 itemsize = get_native_fmtchar(&destchar, PyBytes_AS_STRING(asciifmt));
1202 if (itemsize < 0) {
1203 PyErr_SetString(PyExc_ValueError,
1204 "memoryview: destination format must be a native single "
1205 "character format prefixed with an optional '@'");
1206 goto out;
1207 }
1208
1209 if ((get_native_fmtchar(&srcchar, view->format) < 0 ||
1210 !IS_BYTE_FORMAT(srcchar)) && !IS_BYTE_FORMAT(destchar)) {
1211 PyErr_SetString(PyExc_TypeError,
1212 "memoryview: cannot cast between two non-byte formats");
1213 goto out;
1214 }
1215 if (view->len % itemsize) {
1216 PyErr_SetString(PyExc_TypeError,
1217 "memoryview: length is not a multiple of itemsize");
1218 goto out;
1219 }
1220
1221 view->format = (char *)get_native_fmtstr(PyBytes_AS_STRING(asciifmt));
1222 if (view->format == NULL) {
1223 /* NOT_REACHED: get_native_fmtchar() already validates the format. */
1224 PyErr_SetString(PyExc_RuntimeError,
1225 "memoryview: internal error");
1226 goto out;
1227 }
1228 view->itemsize = itemsize;
1229
1230 view->ndim = 1;
1231 view->shape[0] = view->len / view->itemsize;
1232 view->strides[0] = view->itemsize;
1233 view->suboffsets = NULL;
1234
1235 init_flags(mv);
1236
1237 ret = 0;
1238
1239 out:
1240 Py_DECREF(asciifmt);
1241 return ret;
1242 }
1243
1244 /* The memoryview must have space for 3*len(seq) elements. */
1245 static Py_ssize_t
1246 copy_shape(Py_ssize_t *shape, const PyObject *seq, Py_ssize_t ndim,
1247 Py_ssize_t itemsize)
1248 {
1249 Py_ssize_t x, i;
1250 Py_ssize_t len = itemsize;
1251
1252 for (i = 0; i < ndim; i++) {
1253 PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
1254 if (!PyLong_Check(tmp)) {
1255 PyErr_SetString(PyExc_TypeError,
1256 "memoryview.cast(): elements of shape must be integers");
1257 return -1;
1258 }
1259 x = PyLong_AsSsize_t(tmp);
1260 if (x == -1 && PyErr_Occurred()) {
1261 return -1;
1262 }
1263 if (x <= 0) {
1264 /* In general elements of shape may be 0, but not for casting. */
1265 PyErr_Format(PyExc_ValueError,
1266 "memoryview.cast(): elements of shape must be integers > 0");
1267 return -1;
1268 }
1269 if (x > PY_SSIZE_T_MAX / len) {
1270 PyErr_Format(PyExc_ValueError,
1271 "memoryview.cast(): product(shape) > SSIZE_MAX");
1272 return -1;
1273 }
1274 len *= x;
1275 shape[i] = x;
1276 }
1277
1278 return len;
1279 }
1280
1281 /* Cast a 1-D array to a new shape. The result array will be C-contiguous.
1282 If the result array does not have exactly the same byte length as the
1283 input array, raise ValueError. */
1284 static int
1285 cast_to_ND(PyMemoryViewObject *mv, const PyObject *shape, int ndim)
1286 {
1287 Py_buffer *view = &mv->view;
1288 Py_ssize_t len;
1289
1290 assert(view->ndim == 1); /* ndim from cast_to_1D() */
1291 assert(Py_SIZE(mv) == 3*(ndim==0?1:ndim)); /* ndim of result array */
1292 assert(view->shape == mv->ob_array);
1293 assert(view->strides == mv->ob_array + (ndim==0?1:ndim));
1294 assert(view->suboffsets == NULL);
1295
1296 view->ndim = ndim;
1297 if (view->ndim == 0) {
1298 view->shape = NULL;
1299 view->strides = NULL;
1300 len = view->itemsize;
1301 }
1302 else {
1303 len = copy_shape(view->shape, shape, ndim, view->itemsize);
1304 if (len < 0)
1305 return -1;
1306 init_strides_from_shape(view);
1307 }
1308
1309 if (view->len != len) {
1310 PyErr_SetString(PyExc_TypeError,
1311 "memoryview: product(shape) * itemsize != buffer size");
1312 return -1;
1313 }
1314
1315 init_flags(mv);
1316
1317 return 0;
1318 }
1319
1320 static int
1321 zero_in_shape(PyMemoryViewObject *mv)
1322 {
1323 Py_buffer *view = &mv->view;
1324 Py_ssize_t i;
1325
1326 for (i = 0; i < view->ndim; i++)
1327 if (view->shape[i] == 0)
1328 return 1;
1329
1330 return 0;
1331 }
1332
1333 /*
1334 Cast a copy of 'self' to a different view. The input view must
1335 be C-contiguous. The function always casts the input view to a
1336 1-D output according to 'format'. At least one of input-format,
1337 output-format must have byte size.
1338
1339 If 'shape' is given, the 1-D view from the previous step will
1340 be cast to a C-contiguous view with new shape and strides.
1341
1342 All casts must result in views that will have the exact byte
1343 size of the original input. Otherwise, an error is raised.
1344 */
1345 static PyObject *
1346 memory_cast(PyMemoryViewObject *self, PyObject *args, PyObject *kwds)
1347 {
1348 static char *kwlist[] = {"format", "shape", NULL};
1349 PyMemoryViewObject *mv = NULL;
1350 PyObject *shape = NULL;
1351 PyObject *format;
1352 Py_ssize_t ndim = 1;
1353
1354 CHECK_RELEASED(self);
1355
1356 if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwlist,
1357 &format, &shape)) {
1358 return NULL;
1359 }
1360 if (!PyUnicode_Check(format)) {
1361 PyErr_SetString(PyExc_TypeError,
1362 "memoryview: format argument must be a string");
1363 return NULL;
1364 }
1365 if (!MV_C_CONTIGUOUS(self->flags)) {
1366 PyErr_SetString(PyExc_TypeError,
1367 "memoryview: casts are restricted to C-contiguous views");
1368 return NULL;
1369 }
1370 if ((shape || self->view.ndim != 1) && zero_in_shape(self)) {
1371 PyErr_SetString(PyExc_TypeError,
1372 "memoryview: cannot cast view with zeros in shape or strides");
1373 return NULL;
1374 }
1375 if (shape) {
1376 CHECK_LIST_OR_TUPLE(shape)
1377 ndim = PySequence_Fast_GET_SIZE(shape);
1378 if (ndim > PyBUF_MAX_NDIM) {
1379 PyErr_SetString(PyExc_ValueError,
1380 "memoryview: number of dimensions must not exceed "
1381 Py_STRINGIFY(PyBUF_MAX_NDIM));
1382 return NULL;
1383 }
1384 if (self->view.ndim != 1 && ndim != 1) {
1385 PyErr_SetString(PyExc_TypeError,
1386 "memoryview: cast must be 1D -> ND or ND -> 1D");
1387 return NULL;
1388 }
1389 }
1390
1391 mv = (PyMemoryViewObject *)
1392 mbuf_add_incomplete_view(self->mbuf, &self->view, ndim==0 ? 1 : (int)ndim);
1393 if (mv == NULL)
1394 return NULL;
1395
1396 if (cast_to_1D(mv, format) < 0)
1397 goto error;
1398 if (shape && cast_to_ND(mv, shape, (int)ndim) < 0)
1399 goto error;
1400
1401 return (PyObject *)mv;
1402
1403 error:
1404 Py_DECREF(mv);
1405 return NULL;
1406 }
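/*
   Example (informal, Python level):

       m = memoryview(bytearray(12))     # 12 bytes, format 'B'
       v = m.cast('I')                   # 1-D view of 3 items where sizeof(int) == 4
       w = m.cast('B', shape=[3, 4])     # 3x4 C-contiguous byte view

   Every cast preserves the exact byte length of the original buffer.
*/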
1407
1408 static PyObject *
1409 memory_toreadonly(PyMemoryViewObject *self, PyObject *noargs)
1410 {
1411 CHECK_RELEASED(self);
1412 /* Even if self is already readonly, we still need to create a new
1413 * object for .release() to work correctly.
1414 */
1415 self = (PyMemoryViewObject *) mbuf_add_view(self->mbuf, &self->view);
1416 if (self != NULL) {
1417 self->view.readonly = 1;
1418 }
1419 return (PyObject *) self;
1420 }
1421
1422
1423 /**************************************************************************/
1424 /* getbuffer */
1425 /**************************************************************************/
1426
1427 static int
1428 memory_getbuf(PyMemoryViewObject *self, Py_buffer *view, int flags)
1429 {
1430 Py_buffer *base = &self->view;
1431 int baseflags = self->flags;
1432
1433 CHECK_RELEASED_INT(self);
1434
1435 /* start with complete information */
1436 *view = *base;
1437 view->obj = NULL;
1438
1439 if (REQ_WRITABLE(flags) && base->readonly) {
1440 PyErr_SetString(PyExc_BufferError,
1441 "memoryview: underlying buffer is not writable");
1442 return -1;
1443 }
1444 if (!REQ_FORMAT(flags)) {
1445 /* NULL indicates that the buffer's data type has been cast to 'B'.
1446 view->itemsize is the _previous_ itemsize. If shape is present,
1447 the equality product(shape) * itemsize = len still holds at this
1448 point. The equality calcsize(format) = itemsize does _not_ hold
1449 from here on! */
1450 view->format = NULL;
1451 }
1452
1453 if (REQ_C_CONTIGUOUS(flags) && !MV_C_CONTIGUOUS(baseflags)) {
1454 PyErr_SetString(PyExc_BufferError,
1455 "memoryview: underlying buffer is not C-contiguous");
1456 return -1;
1457 }
1458 if (REQ_F_CONTIGUOUS(flags) && !MV_F_CONTIGUOUS(baseflags)) {
1459 PyErr_SetString(PyExc_BufferError,
1460 "memoryview: underlying buffer is not Fortran contiguous");
1461 return -1;
1462 }
1463 if (REQ_ANY_CONTIGUOUS(flags) && !MV_ANY_CONTIGUOUS(baseflags)) {
1464 PyErr_SetString(PyExc_BufferError,
1465 "memoryview: underlying buffer is not contiguous");
1466 return -1;
1467 }
1468 if (!REQ_INDIRECT(flags) && (baseflags & _Py_MEMORYVIEW_PIL)) {
1469 PyErr_SetString(PyExc_BufferError,
1470 "memoryview: underlying buffer requires suboffsets");
1471 return -1;
1472 }
1473 if (!REQ_STRIDES(flags)) {
1474 if (!MV_C_CONTIGUOUS(baseflags)) {
1475 PyErr_SetString(PyExc_BufferError,
1476 "memoryview: underlying buffer is not C-contiguous");
1477 return -1;
1478 }
1479 view->strides = NULL;
1480 }
1481 if (!REQ_SHAPE(flags)) {
1482 /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous,
1483 so base->buf = ndbuf->data. */
1484 if (view->format != NULL) {
1485 /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
1486 not make sense. */
1487 PyErr_Format(PyExc_BufferError,
1488 "memoryview: cannot cast to unsigned bytes if the format flag "
1489 "is present");
1490 return -1;
1491 }
1492 /* product(shape) * itemsize = len and calcsize(format) = itemsize
1493 do _not_ hold from here on! */
1494 view->ndim = 1;
1495 view->shape = NULL;
1496 }
1497
1498
1499 view->obj = (PyObject *)self;
1500 Py_INCREF(view->obj);
1501 self->exports++;
1502
1503 return 0;
1504 }
1505
1506 static void
1507 memory_releasebuf(PyMemoryViewObject *self, Py_buffer *view)
1508 {
1509 self->exports--;
1510 return;
1511 /* PyBuffer_Release() decrements view->obj after this function returns. */
1512 }
1513
1514 /* Buffer methods */
1515 static PyBufferProcs memory_as_buffer = {
1516 (getbufferproc)memory_getbuf, /* bf_getbuffer */
1517 (releasebufferproc)memory_releasebuf, /* bf_releasebuffer */
1518 };
1519
1520
1521 /****************************************************************************/
1522 /* Optimized pack/unpack for all native format specifiers */
1523 /****************************************************************************/
1524
1525 /*
1526 Fix exceptions:
1527 1) Include format string in the error message.
1528 2) OverflowError -> ValueError.
1529 3) The error message from PyNumber_Index() is not ideal.
1530 */
1531 static int
1532 type_error_int(const char *fmt)
1533 {
1534 PyErr_Format(PyExc_TypeError,
1535 "memoryview: invalid type for format '%s'", fmt);
1536 return -1;
1537 }
1538
1539 static int
1540 value_error_int(const char *fmt)
1541 {
1542 PyErr_Format(PyExc_ValueError,
1543 "memoryview: invalid value for format '%s'", fmt);
1544 return -1;
1545 }
1546
1547 static int
1548 fix_error_int(const char *fmt)
1549 {
1550 assert(PyErr_Occurred());
1551 if (PyErr_ExceptionMatches(PyExc_TypeError)) {
1552 PyErr_Clear();
1553 return type_error_int(fmt);
1554 }
1555 else if (PyErr_ExceptionMatches(PyExc_OverflowError) ||
1556 PyErr_ExceptionMatches(PyExc_ValueError)) {
1557 PyErr_Clear();
1558 return value_error_int(fmt);
1559 }
1560
1561 return -1;
1562 }
1563
1564 /* Accept integer objects or objects with an __index__() method. */
1565 static long
1566 pylong_as_ld(PyObject *item)
1567 {
1568 PyObject *tmp;
1569 long ld;
1570
1571 tmp = PyNumber_Index(item);
1572 if (tmp == NULL)
1573 return -1;
1574
1575 ld = PyLong_AsLong(tmp);
1576 Py_DECREF(tmp);
1577 return ld;
1578 }
1579
1580 static unsigned long
1581 pylong_as_lu(PyObject *item)
1582 {
1583 PyObject *tmp;
1584 unsigned long lu;
1585
1586 tmp = PyNumber_Index(item);
1587 if (tmp == NULL)
1588 return (unsigned long)-1;
1589
1590 lu = PyLong_AsUnsignedLong(tmp);
1591 Py_DECREF(tmp);
1592 return lu;
1593 }
1594
1595 static long long
1596 pylong_as_lld(PyObject *item)
1597 {
1598 PyObject *tmp;
1599 long long lld;
1600
1601 tmp = PyNumber_Index(item);
1602 if (tmp == NULL)
1603 return -1;
1604
1605 lld = PyLong_AsLongLong(tmp);
1606 Py_DECREF(tmp);
1607 return lld;
1608 }
1609
1610 static unsigned long long
1611 pylong_as_llu(PyObject *item)
1612 {
1613 PyObject *tmp;
1614 unsigned long long llu;
1615
1616 tmp = PyNumber_Index(item);
1617 if (tmp == NULL)
1618 return (unsigned long long)-1;
1619
1620 llu = PyLong_AsUnsignedLongLong(tmp);
1621 Py_DECREF(tmp);
1622 return llu;
1623 }
1624
1625 static Py_ssize_t
1626 pylong_as_zd(PyObject *item)
1627 {
1628 PyObject *tmp;
1629 Py_ssize_t zd;
1630
1631 tmp = PyNumber_Index(item);
1632 if (tmp == NULL)
1633 return -1;
1634
1635 zd = PyLong_AsSsize_t(tmp);
1636 Py_DECREF(tmp);
1637 return zd;
1638 }
1639
1640 static size_t
1641 pylong_as_zu(PyObject *item)
1642 {
1643 PyObject *tmp;
1644 size_t zu;
1645
1646 tmp = PyNumber_Index(item);
1647 if (tmp == NULL)
1648 return (size_t)-1;
1649
1650 zu = PyLong_AsSize_t(tmp);
1651 Py_DECREF(tmp);
1652 return zu;
1653 }
1654
1655 /* Timings with the ndarray from _testbuffer.c indicate that using the
1656 struct module is around 15x slower than the two functions below. */
1657
1658 #define UNPACK_SINGLE(dest, ptr, type) \
1659 do { \
1660 type x; \
1661 memcpy((char *)&x, ptr, sizeof x); \
1662 dest = x; \
1663 } while (0)
1664
1665 /* Unpack a single item. 'fmt' can be any native format character in struct
1666 module syntax. This function is very sensitive to small changes. With this
1667 layout gcc automatically generates a fast jump table. */
1668 static inline PyObject *
1669 unpack_single(const char *ptr, const char *fmt)
1670 {
1671 unsigned long long llu;
1672 unsigned long lu;
1673 size_t zu;
1674 long long lld;
1675 long ld;
1676 Py_ssize_t zd;
1677 double d;
1678 unsigned char uc;
1679 void *p;
1680
1681 switch (fmt[0]) {
1682
1683 /* signed integers and fast path for 'B' */
1684 case 'B': uc = *((unsigned char *)ptr); goto convert_uc;
1685 case 'b': ld = *((signed char *)ptr); goto convert_ld;
1686 case 'h': UNPACK_SINGLE(ld, ptr, short); goto convert_ld;
1687 case 'i': UNPACK_SINGLE(ld, ptr, int); goto convert_ld;
1688 case 'l': UNPACK_SINGLE(ld, ptr, long); goto convert_ld;
1689
1690 /* boolean */
1691 case '?': UNPACK_SINGLE(ld, ptr, _Bool); goto convert_bool;
1692
1693 /* unsigned integers */
1694 case 'H': UNPACK_SINGLE(lu, ptr, unsigned short); goto convert_lu;
1695 case 'I': UNPACK_SINGLE(lu, ptr, unsigned int); goto convert_lu;
1696 case 'L': UNPACK_SINGLE(lu, ptr, unsigned long); goto convert_lu;
1697
1698 /* native 64-bit */
1699 case 'q': UNPACK_SINGLE(lld, ptr, long long); goto convert_lld;
1700 case 'Q': UNPACK_SINGLE(llu, ptr, unsigned long long); goto convert_llu;
1701
1702 /* ssize_t and size_t */
1703 case 'n': UNPACK_SINGLE(zd, ptr, Py_ssize_t); goto convert_zd;
1704 case 'N': UNPACK_SINGLE(zu, ptr, size_t); goto convert_zu;
1705
1706 /* floats */
1707 case 'f': UNPACK_SINGLE(d, ptr, float); goto convert_double;
1708 case 'd': UNPACK_SINGLE(d, ptr, double); goto convert_double;
1709
1710 /* bytes object */
1711 case 'c': goto convert_bytes;
1712
1713 /* pointer */
1714 case 'P': UNPACK_SINGLE(p, ptr, void *); goto convert_pointer;
1715
1716 /* default */
1717 default: goto err_format;
1718 }
1719
1720 convert_uc:
1721 /* PyLong_FromUnsignedLong() is slower */
1722 return PyLong_FromLong(uc);
1723 convert_ld:
1724 return PyLong_FromLong(ld);
1725 convert_lu:
1726 return PyLong_FromUnsignedLong(lu);
1727 convert_lld:
1728 return PyLong_FromLongLong(lld);
1729 convert_llu:
1730 return PyLong_FromUnsignedLongLong(llu);
1731 convert_zd:
1732 return PyLong_FromSsize_t(zd);
1733 convert_zu:
1734 return PyLong_FromSize_t(zu);
1735 convert_double:
1736 return PyFloat_FromDouble(d);
1737 convert_bool:
1738 return PyBool_FromLong(ld);
1739 convert_bytes:
1740 return PyBytes_FromStringAndSize(ptr, 1);
1741 convert_pointer:
1742 return PyLong_FromVoidPtr(p);
1743 err_format:
1744 PyErr_Format(PyExc_NotImplementedError,
1745 "memoryview: format %s not supported", fmt);
1746 return NULL;
1747 }
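/* Example (illustrative): unpack_single(ptr, "d") copies sizeof(double) bytes
   from ptr via memcpy (so unaligned source data is safe) and returns a Python
   float; unpack_single(ptr, "c") returns a length-1 bytes object. */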
1748
1749 #define PACK_SINGLE(ptr, src, type) \
1750 do { \
1751 type x; \
1752 x = (type)src; \
1753 memcpy(ptr, (char *)&x, sizeof x); \
1754 } while (0)
1755
1756 /* Pack a single item. 'fmt' can be any native format character in
1757 struct module syntax. */
1758 static int
1759 pack_single(char *ptr, PyObject *item, const char *fmt)
1760 {
1761 unsigned long long llu;
1762 unsigned long lu;
1763 size_t zu;
1764 long long lld;
1765 long ld;
1766 Py_ssize_t zd;
1767 double d;
1768 void *p;
1769
1770 switch (fmt[0]) {
1771 /* signed integers */
1772 case 'b': case 'h': case 'i': case 'l':
1773 ld = pylong_as_ld(item);
1774 if (ld == -1 && PyErr_Occurred())
1775 goto err_occurred;
1776 switch (fmt[0]) {
1777 case 'b':
1778 if (ld < SCHAR_MIN || ld > SCHAR_MAX) goto err_range;
1779 *((signed char *)ptr) = (signed char)ld; break;
1780 case 'h':
1781 if (ld < SHRT_MIN || ld > SHRT_MAX) goto err_range;
1782 PACK_SINGLE(ptr, ld, short); break;
1783 case 'i':
1784 if (ld < INT_MIN || ld > INT_MAX) goto err_range;
1785 PACK_SINGLE(ptr, ld, int); break;
1786 default: /* 'l' */
1787 PACK_SINGLE(ptr, ld, long); break;
1788 }
1789 break;
1790
1791 /* unsigned integers */
1792 case 'B': case 'H': case 'I': case 'L':
1793 lu = pylong_as_lu(item);
1794 if (lu == (unsigned long)-1 && PyErr_Occurred())
1795 goto err_occurred;
1796 switch (fmt[0]) {
1797 case 'B':
1798 if (lu > UCHAR_MAX) goto err_range;
1799 *((unsigned char *)ptr) = (unsigned char)lu; break;
1800 case 'H':
1801 if (lu > USHRT_MAX) goto err_range;
1802 PACK_SINGLE(ptr, lu, unsigned short); break;
1803 case 'I':
1804 if (lu > UINT_MAX) goto err_range;
1805 PACK_SINGLE(ptr, lu, unsigned int); break;
1806 default: /* 'L' */
1807 PACK_SINGLE(ptr, lu, unsigned long); break;
1808 }
1809 break;
1810
1811 /* native 64-bit */
1812 case 'q':
1813 lld = pylong_as_lld(item);
1814 if (lld == -1 && PyErr_Occurred())
1815 goto err_occurred;
1816 PACK_SINGLE(ptr, lld, long long);
1817 break;
1818 case 'Q':
1819 llu = pylong_as_llu(item);
1820 if (llu == (unsigned long long)-1 && PyErr_Occurred())
1821 goto err_occurred;
1822 PACK_SINGLE(ptr, llu, unsigned long long);
1823 break;
1824
1825 /* ssize_t and size_t */
1826 case 'n':
1827 zd = pylong_as_zd(item);
1828 if (zd == -1 && PyErr_Occurred())
1829 goto err_occurred;
1830 PACK_SINGLE(ptr, zd, Py_ssize_t);
1831 break;
1832 case 'N':
1833 zu = pylong_as_zu(item);
1834 if (zu == (size_t)-1 && PyErr_Occurred())
1835 goto err_occurred;
1836 PACK_SINGLE(ptr, zu, size_t);
1837 break;
1838
1839 /* floats */
1840 case 'f': case 'd':
1841 d = PyFloat_AsDouble(item);
1842 if (d == -1.0 && PyErr_Occurred())
1843 goto err_occurred;
1844 if (fmt[0] == 'f') {
1845 PACK_SINGLE(ptr, d, float);
1846 }
1847 else {
1848 PACK_SINGLE(ptr, d, double);
1849 }
1850 break;
1851
1852 /* bool */
1853 case '?':
1854 ld = PyObject_IsTrue(item);
1855 if (ld < 0)
1856 return -1; /* preserve original error */
1857 PACK_SINGLE(ptr, ld, _Bool);
1858 break;
1859
1860 /* bytes object */
1861 case 'c':
1862 if (!PyBytes_Check(item))
1863 return type_error_int(fmt);
1864 if (PyBytes_GET_SIZE(item) != 1)
1865 return value_error_int(fmt);
1866 *ptr = PyBytes_AS_STRING(item)[0];
1867 break;
1868
1869 /* pointer */
1870 case 'P':
1871 p = PyLong_AsVoidPtr(item);
1872 if (p == NULL && PyErr_Occurred())
1873 goto err_occurred;
1874 PACK_SINGLE(ptr, p, void *);
1875 break;
1876
1877 /* default */
1878 default: goto err_format;
1879 }
1880
1881 return 0;
1882
1883 err_occurred:
1884 return fix_error_int(fmt);
1885 err_range:
1886 return value_error_int(fmt);
1887 err_format:
1888 PyErr_Format(PyExc_NotImplementedError,
1889 "memoryview: format %s not supported", fmt);
1890 return -1;
1891 }
1892
1893
1894 /****************************************************************************/
1895 /* unpack using the struct module */
1896 /****************************************************************************/
1897
1898 /* For reasonable performance it is necessary to cache all objects required
1899 for unpacking. An unpacker can handle the format passed to unpack_from().
1900 Invariant: All pointer fields of the struct should either be NULL or valid
1901 pointers. */
1902 struct unpacker {
1903 PyObject *unpack_from; /* Struct.unpack_from(format) */
1904 PyObject *mview; /* cached memoryview */
1905 char *item; /* buffer for mview */
1906 Py_ssize_t itemsize; /* len(item) */
1907 };
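/* Rough Python-level equivalent of the cached unpacker (struct unpacker above);
   illustrative sketch only, the names in the comments refer to the C fields:

       >>> import struct
       >>> s = struct.Struct('@l')            # struct_get_unpacker(fmt, ...)
       >>> item = bytearray(s.size)           # x->item scratch buffer
       >>> mview = memoryview(item)           # x->mview
       >>> struct.pack_into('@l', item, 0, 1024)
       >>> s.unpack_from(mview)               # struct_unpack_single()
       (1024,)
*/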
1908
1909 static struct unpacker *
1910 unpacker_new(void)
1911 {
1912 struct unpacker *x = PyMem_Malloc(sizeof *x);
1913
1914 if (x == NULL) {
1915 PyErr_NoMemory();
1916 return NULL;
1917 }
1918
1919 x->unpack_from = NULL;
1920 x->mview = NULL;
1921 x->item = NULL;
1922 x->itemsize = 0;
1923
1924 return x;
1925 }
1926
1927 static void
1928 unpacker_free(struct unpacker *x)
1929 {
1930 if (x) {
1931 Py_XDECREF(x->unpack_from);
1932 Py_XDECREF(x->mview);
1933 PyMem_Free(x->item);
1934 PyMem_Free(x);
1935 }
1936 }
1937
1938 /* Return a new unpacker for the given format. */
1939 static struct unpacker *
1940 struct_get_unpacker(const char *fmt, Py_ssize_t itemsize)
1941 {
1942 PyObject *structmodule; /* XXX cache these two */
1943 PyObject *Struct = NULL; /* XXX in globals? */
1944 PyObject *structobj = NULL;
1945 PyObject *format = NULL;
1946 struct unpacker *x = NULL;
1947
1948 structmodule = PyImport_ImportModule("struct");
1949 if (structmodule == NULL)
1950 return NULL;
1951
1952 Struct = PyObject_GetAttrString(structmodule, "Struct");
1953 Py_DECREF(structmodule);
1954 if (Struct == NULL)
1955 return NULL;
1956
1957 x = unpacker_new();
1958 if (x == NULL)
1959 goto error;
1960
1961 format = PyBytes_FromString(fmt);
1962 if (format == NULL)
1963 goto error;
1964
1965 structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL);
1966 if (structobj == NULL)
1967 goto error;
1968
1969 x->unpack_from = PyObject_GetAttrString(structobj, "unpack_from");
1970 if (x->unpack_from == NULL)
1971 goto error;
1972
1973 x->item = PyMem_Malloc(itemsize);
1974 if (x->item == NULL) {
1975 PyErr_NoMemory();
1976 goto error;
1977 }
1978 x->itemsize = itemsize;
1979
1980 x->mview = PyMemoryView_FromMemory(x->item, itemsize, PyBUF_WRITE);
1981 if (x->mview == NULL)
1982 goto error;
1983
1984
1985 out:
1986 Py_XDECREF(Struct);
1987 Py_XDECREF(format);
1988 Py_XDECREF(structobj);
1989 return x;
1990
1991 error:
1992 unpacker_free(x);
1993 x = NULL;
1994 goto out;
1995 }
1996
1997 /* unpack a single item */
1998 static PyObject *
1999 struct_unpack_single(const char *ptr, struct unpacker *x)
2000 {
2001 PyObject *v;
2002
2003 memcpy(x->item, ptr, x->itemsize);
2004 v = PyObject_CallFunctionObjArgs(x->unpack_from, x->mview, NULL);
2005 if (v == NULL)
2006 return NULL;
2007
2008 if (PyTuple_GET_SIZE(v) == 1) {
2009 PyObject *tmp = PyTuple_GET_ITEM(v, 0);
2010 Py_INCREF(tmp);
2011 Py_DECREF(v);
2012 return tmp;
2013 }
2014
2015 return v;
2016 }
2017
2018
2019 /****************************************************************************/
2020 /* Representations */
2021 /****************************************************************************/
2022
2023 /* allow explicit form of native format */
2024 static inline const char *
2025 adjust_fmt(const Py_buffer *view)
2026 {
2027 const char *fmt;
2028
2029 fmt = (view->format[0] == '@') ? view->format+1 : view->format;
2030 if (fmt[0] && fmt[1] == '\0')
2031 return fmt;
2032
2033 PyErr_Format(PyExc_NotImplementedError,
2034 "memoryview: unsupported format %s", view->format);
2035 return NULL;
2036 }
2037
2038 /* Base case for multi-dimensional unpacking. Assumption: ndim == 1. */
2039 static PyObject *
2040 tolist_base(const char *ptr, const Py_ssize_t *shape,
2041 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2042 const char *fmt)
2043 {
2044 PyObject *lst, *item;
2045 Py_ssize_t i;
2046
2047 lst = PyList_New(shape[0]);
2048 if (lst == NULL)
2049 return NULL;
2050
2051 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2052 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2053 item = unpack_single(xptr, fmt);
2054 if (item == NULL) {
2055 Py_DECREF(lst);
2056 return NULL;
2057 }
2058 PyList_SET_ITEM(lst, i, item);
2059 }
2060
2061 return lst;
2062 }
2063
2064 /* Unpack a multi-dimensional array into a nested list.
2065 Assumption: ndim >= 1. */
2066 static PyObject *
2067 tolist_rec(const char *ptr, Py_ssize_t ndim, const Py_ssize_t *shape,
2068 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2069 const char *fmt)
2070 {
2071 PyObject *lst, *item;
2072 Py_ssize_t i;
2073
2074 assert(ndim >= 1);
2075 assert(shape != NULL);
2076 assert(strides != NULL);
2077
2078 if (ndim == 1)
2079 return tolist_base(ptr, shape, strides, suboffsets, fmt);
2080
2081 lst = PyList_New(shape[0]);
2082 if (lst == NULL)
2083 return NULL;
2084
2085 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2086 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2087 item = tolist_rec(xptr, ndim-1, shape+1,
2088 strides+1, suboffsets ? suboffsets+1 : NULL,
2089 fmt);
2090 if (item == NULL) {
2091 Py_DECREF(lst);
2092 return NULL;
2093 }
2094 PyList_SET_ITEM(lst, i, item);
2095 }
2096
2097 return lst;
2098 }
2099
2100 /* Return a list representation of the memoryview. Currently only buffers
2101 with native format strings are supported. */
2102 static PyObject *
2103 memory_tolist(PyMemoryViewObject *mv, PyObject *noargs)
2104 {
2105 const Py_buffer *view = &(mv->view);
2106 const char *fmt;
2107
2108 CHECK_RELEASED(mv);
2109
2110 fmt = adjust_fmt(view);
2111 if (fmt == NULL)
2112 return NULL;
2113 if (view->ndim == 0) {
2114 return unpack_single(view->buf, fmt);
2115 }
2116 else if (view->ndim == 1) {
2117 return tolist_base(view->buf, view->shape,
2118 view->strides, view->suboffsets,
2119 fmt);
2120 }
2121 else {
2122 return tolist_rec(view->buf, view->ndim, view->shape,
2123 view->strides, view->suboffsets,
2124 fmt);
2125 }
2126 }
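/* Illustrative behaviour of tolist() for 1-D and multi-dimensional views
   (Python session, shown for exposition only):

       >>> memoryview(b'ab').tolist()
       [97, 98]
       >>> memoryview(bytes(range(6))).cast('B', (2, 3)).tolist()
       [[0, 1, 2], [3, 4, 5]]
*/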
2127
2128 static PyObject *
2129 memory_tobytes(PyMemoryViewObject *self, PyObject *args, PyObject *kwds)
2130 {
2131 static char *kwlist[] = {"order", NULL};
2132 Py_buffer *src = VIEW_ADDR(self);
2133 char *order = NULL;
2134 char ord = 'C';
2135 PyObject *bytes;
2136
2137 CHECK_RELEASED(self);
2138
2139 if (!PyArg_ParseTupleAndKeywords(args, kwds, "|z", kwlist, &order)) {
2140 return NULL;
2141 }
2142
2143 if (order) {
2144 if (strcmp(order, "F") == 0) {
2145 ord = 'F';
2146 }
2147 else if (strcmp(order, "A") == 0) {
2148 ord = 'A';
2149 }
2150 else if (strcmp(order, "C") != 0) {
2151 PyErr_SetString(PyExc_ValueError,
2152 "order must be 'C', 'F' or 'A'");
2153 return NULL;
2154 }
2155 }
2156
2157 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2158 if (bytes == NULL)
2159 return NULL;
2160
2161 if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, ord) < 0) {
2162 Py_DECREF(bytes);
2163 return NULL;
2164 }
2165
2166 return bytes;
2167 }
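/* Illustrative behaviour of tobytes(order=...) for a C-contiguous 2x3 view
   (Python session, shown for exposition only):

       >>> m = memoryview(bytes(range(6))).cast('B', (2, 3))
       >>> m.tobytes('C') == bytes(range(6))
       True
       >>> m.tobytes('F')
       b'\x00\x03\x01\x04\x02\x05'
*/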
2168
2169 /*[clinic input]
2170 memoryview.hex
2171
2172 sep: object = NULL
2173 An optional single character or byte to separate hex bytes.
2174 bytes_per_sep: int = 1
2175 How many bytes between separators. Positive values count from the
2176 right, negative values count from the left.
2177
2178 Return the data in the buffer as a str of hexadecimal numbers.
2179
2180 Example:
2181 >>> value = memoryview(b'\xb9\x01\xef')
2182 >>> value.hex()
2183 'b901ef'
2184 >>> value.hex(':')
2185 'b9:01:ef'
2186 >>> value.hex(':', 2)
2187 'b9:01ef'
2188 >>> value.hex(':', -2)
2189 'b901:ef'
2190 [clinic start generated code]*/
2191
2192 static PyObject *
2193 memoryview_hex_impl(PyMemoryViewObject *self, PyObject *sep,
2194 int bytes_per_sep)
2195 /*[clinic end generated code: output=430ca760f94f3ca7 input=539f6a3a5fb56946]*/
2196 {
2197 Py_buffer *src = VIEW_ADDR(self);
2198 PyObject *bytes;
2199 PyObject *ret;
2200
2201 CHECK_RELEASED(self);
2202
2203 if (MV_C_CONTIGUOUS(self->flags)) {
2204 return _Py_strhex_with_sep(src->buf, src->len, sep, bytes_per_sep);
2205 }
2206
2207 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2208 if (bytes == NULL)
2209 return NULL;
2210
2211 if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, 'C') < 0) {
2212 Py_DECREF(bytes);
2213 return NULL;
2214 }
2215
2216 ret = _Py_strhex_with_sep(
2217 PyBytes_AS_STRING(bytes), PyBytes_GET_SIZE(bytes),
2218 sep, bytes_per_sep);
2219 Py_DECREF(bytes);
2220
2221 return ret;
2222 }
2223
2224 static PyObject *
2225 memory_repr(PyMemoryViewObject *self)
2226 {
2227 if (self->flags & _Py_MEMORYVIEW_RELEASED)
2228 return PyUnicode_FromFormat("<released memory at %p>", self);
2229 else
2230 return PyUnicode_FromFormat("<memory at %p>", self);
2231 }
2232
2233
2234 /**************************************************************************/
2235 /* Indexing and slicing */
2236 /**************************************************************************/
2237
2238 static char *
2239 lookup_dimension(Py_buffer *view, char *ptr, int dim, Py_ssize_t index)
2240 {
2241 Py_ssize_t nitems; /* items in the given dimension */
2242
2243 assert(view->shape);
2244 assert(view->strides);
2245
2246 nitems = view->shape[dim];
2247 if (index < 0) {
2248 index += nitems;
2249 }
2250 if (index < 0 || index >= nitems) {
2251 PyErr_Format(PyExc_IndexError,
2252 "index out of bounds on dimension %d", dim + 1);
2253 return NULL;
2254 }
2255
2256 ptr += view->strides[dim] * index;
2257
2258 ptr = ADJUST_PTR(ptr, view->suboffsets, dim);
2259
2260 return ptr;
2261 }
2262
2263 /* Get the pointer to the item at index. */
2264 static char *
2265 ptr_from_index(Py_buffer *view, Py_ssize_t index)
2266 {
2267 char *ptr = (char *)view->buf;
2268 return lookup_dimension(view, ptr, 0, index);
2269 }
2270
2271 /* Get the pointer to the item at tuple. */
2272 static char *
2273 ptr_from_tuple(Py_buffer *view, PyObject *tup)
2274 {
2275 char *ptr = (char *)view->buf;
2276 Py_ssize_t dim, nindices = PyTuple_GET_SIZE(tup);
2277
2278 if (nindices > view->ndim) {
2279 PyErr_Format(PyExc_TypeError,
2280 "cannot index %zd-dimension view with %zd-element tuple",
2281 view->ndim, nindices);
2282 return NULL;
2283 }
2284
2285 for (dim = 0; dim < nindices; dim++) {
2286 Py_ssize_t index;
2287 index = PyNumber_AsSsize_t(PyTuple_GET_ITEM(tup, dim),
2288 PyExc_IndexError);
2289 if (index == -1 && PyErr_Occurred())
2290 return NULL;
2291 ptr = lookup_dimension(view, ptr, (int)dim, index);
2292 if (ptr == NULL)
2293 return NULL;
2294 }
2295 return ptr;
2296 }
2297
2298 /* Return the item at index. In a one-dimensional view, this is an object
2299 with the type specified by view->format. Otherwise, the item is a sub-view.
2300 The function is used in memory_subscript() and memory_as_sequence. */
2301 static PyObject *
2302 memory_item(PyMemoryViewObject *self, Py_ssize_t index)
2303 {
2304 Py_buffer *view = &(self->view);
2305 const char *fmt;
2306
2307 CHECK_RELEASED(self);
2308
2309 fmt = adjust_fmt(view);
2310 if (fmt == NULL)
2311 return NULL;
2312
2313 if (view->ndim == 0) {
2314 PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
2315 return NULL;
2316 }
2317 if (view->ndim == 1) {
2318 char *ptr = ptr_from_index(view, index);
2319 if (ptr == NULL)
2320 return NULL;
2321 return unpack_single(ptr, fmt);
2322 }
2323
2324 PyErr_SetString(PyExc_NotImplementedError,
2325 "multi-dimensional sub-views are not implemented");
2326 return NULL;
2327 }
2328
2329 /* Return the item at position *key* (a tuple of indices). */
2330 static PyObject *
2331 memory_item_multi(PyMemoryViewObject *self, PyObject *tup)
2332 {
2333 Py_buffer *view = &(self->view);
2334 const char *fmt;
2335 Py_ssize_t nindices = PyTuple_GET_SIZE(tup);
2336 char *ptr;
2337
2338 CHECK_RELEASED(self);
2339
2340 fmt = adjust_fmt(view);
2341 if (fmt == NULL)
2342 return NULL;
2343
2344 if (nindices < view->ndim) {
2345 PyErr_SetString(PyExc_NotImplementedError,
2346 "sub-views are not implemented");
2347 return NULL;
2348 }
2349 ptr = ptr_from_tuple(view, tup);
2350 if (ptr == NULL)
2351 return NULL;
2352 return unpack_single(ptr, fmt);
2353 }
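/* Illustrative indexing behaviour on a 2x3 view (Python session, shown for
   exposition only):

       >>> m = memoryview(bytes(range(6))).cast('B', (2, 3))
       >>> m[1, 2]
       5
       >>> m[1]
       Traceback (most recent call last):
         ...
       NotImplementedError: multi-dimensional sub-views are not implemented
*/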
2354
2355 static inline int
2356 init_slice(Py_buffer *base, PyObject *key, int dim)
2357 {
2358 Py_ssize_t start, stop, step, slicelength;
2359
2360 if (PySlice_Unpack(key, &start, &stop, &step) < 0) {
2361 return -1;
2362 }
2363 slicelength = PySlice_AdjustIndices(base->shape[dim], &start, &stop, step);
2364
2365
2366 if (base->suboffsets == NULL || dim == 0) {
2367 adjust_buf:
2368 base->buf = (char *)base->buf + base->strides[dim] * start;
2369 }
2370 else {
2371 Py_ssize_t n = dim-1;
2372 while (n >= 0 && base->suboffsets[n] < 0)
2373 n--;
2374 if (n < 0)
2375 goto adjust_buf; /* all suboffsets are negative */
2376 base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
2377 }
2378 base->shape[dim] = slicelength;
2379 base->strides[dim] = base->strides[dim] * step;
2380
2381 return 0;
2382 }
2383
2384 static int
2385 is_multislice(PyObject *key)
2386 {
2387 Py_ssize_t size, i;
2388
2389 if (!PyTuple_Check(key))
2390 return 0;
2391 size = PyTuple_GET_SIZE(key);
2392 if (size == 0)
2393 return 0;
2394
2395 for (i = 0; i < size; i++) {
2396 PyObject *x = PyTuple_GET_ITEM(key, i);
2397 if (!PySlice_Check(x))
2398 return 0;
2399 }
2400 return 1;
2401 }
2402
2403 static Py_ssize_t
2404 is_multiindex(PyObject *key)
2405 {
2406 Py_ssize_t size, i;
2407
2408 if (!PyTuple_Check(key))
2409 return 0;
2410 size = PyTuple_GET_SIZE(key);
2411 for (i = 0; i < size; i++) {
2412 PyObject *x = PyTuple_GET_ITEM(key, i);
2413 if (!PyIndex_Check(x))
2414 return 0;
2415 }
2416 return 1;
2417 }
2418
2419 /* mv[obj] returns an object holding the data for one element if obj
2420 fully indexes the memoryview or another memoryview object if it
2421 does not.
2422
2423 0-d memoryview objects can be referenced using mv[...] or mv[()]
2424 but not with anything else. */
2425 static PyObject *
2426 memory_subscript(PyMemoryViewObject *self, PyObject *key)
2427 {
2428 Py_buffer *view;
2429 view = &(self->view);
2430
2431 CHECK_RELEASED(self);
2432
2433 if (view->ndim == 0) {
2434 if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
2435 const char *fmt = adjust_fmt(view);
2436 if (fmt == NULL)
2437 return NULL;
2438 return unpack_single(view->buf, fmt);
2439 }
2440 else if (key == Py_Ellipsis) {
2441 Py_INCREF(self);
2442 return (PyObject *)self;
2443 }
2444 else {
2445 PyErr_SetString(PyExc_TypeError,
2446 "invalid indexing of 0-dim memory");
2447 return NULL;
2448 }
2449 }
2450
2451 if (PyIndex_Check(key)) {
2452 Py_ssize_t index;
2453 index = PyNumber_AsSsize_t(key, PyExc_IndexError);
2454 if (index == -1 && PyErr_Occurred())
2455 return NULL;
2456 return memory_item(self, index);
2457 }
2458 else if (PySlice_Check(key)) {
2459 PyMemoryViewObject *sliced;
2460
2461 sliced = (PyMemoryViewObject *)mbuf_add_view(self->mbuf, view);
2462 if (sliced == NULL)
2463 return NULL;
2464
2465 if (init_slice(&sliced->view, key, 0) < 0) {
2466 Py_DECREF(sliced);
2467 return NULL;
2468 }
2469 init_len(&sliced->view);
2470 init_flags(sliced);
2471
2472 return (PyObject *)sliced;
2473 }
2474 else if (is_multiindex(key)) {
2475 return memory_item_multi(self, key);
2476 }
2477 else if (is_multislice(key)) {
2478 PyErr_SetString(PyExc_NotImplementedError,
2479 "multi-dimensional slicing is not implemented");
2480 return NULL;
2481 }
2482
2483 PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
2484 return NULL;
2485 }
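/* Illustrative subscript behaviour for a one-dimensional view (Python session,
   shown for exposition only): an integer index unpacks one element, a slice
   returns another memoryview over the same buffer.

       >>> m = memoryview(b'abcdef')
       >>> m[1]
       98
       >>> bytes(m[1:4])
       b'bcd'
       >>> bytes(m[::2])
       b'ace'
*/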
2486
2487 static int
2488 memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
2489 {
2490 Py_buffer *view = &(self->view);
2491 Py_buffer src;
2492 const char *fmt;
2493 char *ptr;
2494
2495 CHECK_RELEASED_INT(self);
2496
2497 fmt = adjust_fmt(view);
2498 if (fmt == NULL)
2499 return -1;
2500
2501 if (view->readonly) {
2502 PyErr_SetString(PyExc_TypeError, "cannot modify read-only memory");
2503 return -1;
2504 }
2505 if (value == NULL) {
2506 PyErr_SetString(PyExc_TypeError, "cannot delete memory");
2507 return -1;
2508 }
2509 if (view->ndim == 0) {
2510 if (key == Py_Ellipsis ||
2511 (PyTuple_Check(key) && PyTuple_GET_SIZE(key)==0)) {
2512 ptr = (char *)view->buf;
2513 return pack_single(ptr, value, fmt);
2514 }
2515 else {
2516 PyErr_SetString(PyExc_TypeError,
2517 "invalid indexing of 0-dim memory");
2518 return -1;
2519 }
2520 }
2521
2522 if (PyIndex_Check(key)) {
2523 Py_ssize_t index;
2524 if (1 < view->ndim) {
2525 PyErr_SetString(PyExc_NotImplementedError,
2526 "sub-views are not implemented");
2527 return -1;
2528 }
2529 index = PyNumber_AsSsize_t(key, PyExc_IndexError);
2530 if (index == -1 && PyErr_Occurred())
2531 return -1;
2532 ptr = ptr_from_index(view, index);
2533 if (ptr == NULL)
2534 return -1;
2535 return pack_single(ptr, value, fmt);
2536 }
2537 /* one-dimensional: fast path */
2538 if (PySlice_Check(key) && view->ndim == 1) {
2539 Py_buffer dest; /* sliced view */
2540 Py_ssize_t arrays[3];
2541 int ret = -1;
2542
2543 /* rvalue must be an exporter */
2544 if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) < 0)
2545 return ret;
2546
2547 dest = *view;
2548 dest.shape = &arrays[0]; dest.shape[0] = view->shape[0];
2549 dest.strides = &arrays[1]; dest.strides[0] = view->strides[0];
2550 if (view->suboffsets) {
2551 dest.suboffsets = &arrays[2]; dest.suboffsets[0] = view->suboffsets[0];
2552 }
2553
2554 if (init_slice(&dest, key, 0) < 0)
2555 goto end_block;
2556 dest.len = dest.shape[0] * dest.itemsize;
2557
2558 ret = copy_single(&dest, &src);
2559
2560 end_block:
2561 PyBuffer_Release(&src);
2562 return ret;
2563 }
2564 if (is_multiindex(key)) {
2565 char *ptr;
2566 if (PyTuple_GET_SIZE(key) < view->ndim) {
2567 PyErr_SetString(PyExc_NotImplementedError,
2568 "sub-views are not implemented");
2569 return -1;
2570 }
2571 ptr = ptr_from_tuple(view, key);
2572 if (ptr == NULL)
2573 return -1;
2574 return pack_single(ptr, value, fmt);
2575 }
2576 if (PySlice_Check(key) || is_multislice(key)) {
2577 /* Call memory_subscript() to produce a sliced lvalue, then copy
2578 rvalue into lvalue. This is already implemented in _testbuffer.c. */
2579 PyErr_SetString(PyExc_NotImplementedError,
2580 "memoryview slice assignments are currently restricted "
2581 "to ndim = 1");
2582 return -1;
2583 }
2584
2585 PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
2586 return -1;
2587 }
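/* Illustrative assignment behaviour (Python session, shown for exposition
   only): single items are packed via pack_single(), 1-D slices are copied
   from an exporting rvalue of matching structure.

       >>> buf = bytearray(b'abcdef')
       >>> m = memoryview(buf)
       >>> m[0] = ord('z')
       >>> m[1:3] = b'xy'
       >>> buf
       bytearray(b'zxydef')
*/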
2588
2589 static Py_ssize_t
2590 memory_length(PyMemoryViewObject *self)
2591 {
2592 CHECK_RELEASED_INT(self);
2593 return self->view.ndim == 0 ? 1 : self->view.shape[0];
2594 }
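/* len() reports the first dimension (or 1 for 0-d views), e.g. (illustrative
   Python session):

       >>> len(memoryview(b'abcdef'))
       6
       >>> len(memoryview(bytes(range(6))).cast('B', (2, 3)))
       2
*/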
2595
2596 /* As mapping */
2597 static PyMappingMethods memory_as_mapping = {
2598 (lenfunc)memory_length, /* mp_length */
2599 (binaryfunc)memory_subscript, /* mp_subscript */
2600 (objobjargproc)memory_ass_sub, /* mp_ass_subscript */
2601 };
2602
2603 /* As sequence */
2604 static PySequenceMethods memory_as_sequence = {
2605 (lenfunc)memory_length, /* sq_length */
2606 0, /* sq_concat */
2607 0, /* sq_repeat */
2608 (ssizeargfunc)memory_item, /* sq_item */
2609 };
2610
2611
2612 /**************************************************************************/
2613 /* Comparisons */
2614 /**************************************************************************/
2615
2616 #define MV_COMPARE_EX -1 /* exception */
2617 #define MV_COMPARE_NOT_IMPL -2 /* not implemented */
2618
2619 /* Translate a StructError to "not equal". Preserve other exceptions. */
2620 static int
2621 fix_struct_error_int(void)
2622 {
2623 assert(PyErr_Occurred());
2624 /* XXX Cannot get at StructError directly? */
2625 if (PyErr_ExceptionMatches(PyExc_ImportError) ||
2626 PyErr_ExceptionMatches(PyExc_MemoryError)) {
2627 return MV_COMPARE_EX;
2628 }
2629 /* StructError: invalid or unknown format -> not equal */
2630 PyErr_Clear();
2631 return 0;
2632 }
2633
2634 /* Unpack and compare single items of p and q using the struct module. */
2635 static int
2636 struct_unpack_cmp(const char *p, const char *q,
2637 struct unpacker *unpack_p, struct unpacker *unpack_q)
2638 {
2639 PyObject *v, *w;
2640 int ret;
2641
2642 /* At this point any exception from the struct module should not be
2643 StructError, since both formats have been accepted already. */
2644 v = struct_unpack_single(p, unpack_p);
2645 if (v == NULL)
2646 return MV_COMPARE_EX;
2647
2648 w = struct_unpack_single(q, unpack_q);
2649 if (w == NULL) {
2650 Py_DECREF(v);
2651 return MV_COMPARE_EX;
2652 }
2653
2654 /* MV_COMPARE_EX == -1: exceptions are preserved */
2655 ret = PyObject_RichCompareBool(v, w, Py_EQ);
2656 Py_DECREF(v);
2657 Py_DECREF(w);
2658
2659 return ret;
2660 }
2661
2662 /* Unpack and compare single items of p and q. If both p and q have the same
2663 single element native format, the comparison uses a fast path (gcc creates
2664 a jump table and converts memcpy into simple assignments on x86/x64).
2665
2666 Otherwise, the comparison is delegated to the struct module, which is
2667 30-60x slower. */
2668 #define CMP_SINGLE(p, q, type) \
2669 do { \
2670 type x; \
2671 type y; \
2672 memcpy((char *)&x, p, sizeof x); \
2673 memcpy((char *)&y, q, sizeof y); \
2674 equal = (x == y); \
2675 } while (0)
2676
2677 static inline int
2678 unpack_cmp(const char *p, const char *q, char fmt,
2679 struct unpacker *unpack_p, struct unpacker *unpack_q)
2680 {
2681 int equal;
2682
2683 switch (fmt) {
2684
2685 /* signed integers and fast path for 'B' */
2686 case 'B': return *((unsigned char *)p) == *((unsigned char *)q);
2687 case 'b': return *((signed char *)p) == *((signed char *)q);
2688 case 'h': CMP_SINGLE(p, q, short); return equal;
2689 case 'i': CMP_SINGLE(p, q, int); return equal;
2690 case 'l': CMP_SINGLE(p, q, long); return equal;
2691
2692 /* boolean */
2693 case '?': CMP_SINGLE(p, q, _Bool); return equal;
2694
2695 /* unsigned integers */
2696 case 'H': CMP_SINGLE(p, q, unsigned short); return equal;
2697 case 'I': CMP_SINGLE(p, q, unsigned int); return equal;
2698 case 'L': CMP_SINGLE(p, q, unsigned long); return equal;
2699
2700 /* native 64-bit */
2701 case 'q': CMP_SINGLE(p, q, long long); return equal;
2702 case 'Q': CMP_SINGLE(p, q, unsigned long long); return equal;
2703
2704 /* ssize_t and size_t */
2705 case 'n': CMP_SINGLE(p, q, Py_ssize_t); return equal;
2706 case 'N': CMP_SINGLE(p, q, size_t); return equal;
2707
2708 /* floats */
2709 /* XXX DBL_EPSILON? */
2710 case 'f': CMP_SINGLE(p, q, float); return equal;
2711 case 'd': CMP_SINGLE(p, q, double); return equal;
2712
2713 /* bytes object */
2714 case 'c': return *p == *q;
2715
2716 /* pointer */
2717 case 'P': CMP_SINGLE(p, q, void *); return equal;
2718
2719 /* use the struct module */
2720 case '_':
2721 assert(unpack_p);
2722 assert(unpack_q);
2723 return struct_unpack_cmp(p, q, unpack_p, unpack_q);
2724 }
2725
2726 /* NOT REACHED */
2727 PyErr_SetString(PyExc_RuntimeError,
2728 "memoryview: internal error in richcompare");
2729 return MV_COMPARE_EX;
2730 }
2731
2732 /* Base case for recursive array comparisons. Assumption: ndim == 1. */
2733 static int
2734 cmp_base(const char *p, const char *q, const Py_ssize_t *shape,
2735 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2736 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2737 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2738 {
2739 Py_ssize_t i;
2740 int equal;
2741
2742 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2743 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2744 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2745 equal = unpack_cmp(xp, xq, fmt, unpack_p, unpack_q);
2746 if (equal <= 0)
2747 return equal;
2748 }
2749
2750 return 1;
2751 }
2752
2753 /* Recursively compare two multi-dimensional arrays that have the same
2754 logical structure. Assumption: ndim >= 1. */
2755 static int
2756 cmp_rec(const char *p, const char *q,
2757 Py_ssize_t ndim, const Py_ssize_t *shape,
2758 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2759 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2760 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2761 {
2762 Py_ssize_t i;
2763 int equal;
2764
2765 assert(ndim >= 1);
2766 assert(shape != NULL);
2767 assert(pstrides != NULL);
2768 assert(qstrides != NULL);
2769
2770 if (ndim == 1) {
2771 return cmp_base(p, q, shape,
2772 pstrides, psuboffsets,
2773 qstrides, qsuboffsets,
2774 fmt, unpack_p, unpack_q);
2775 }
2776
2777 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2778 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2779 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2780 equal = cmp_rec(xp, xq, ndim-1, shape+1,
2781 pstrides+1, psuboffsets ? psuboffsets+1 : NULL,
2782 qstrides+1, qsuboffsets ? qsuboffsets+1 : NULL,
2783 fmt, unpack_p, unpack_q);
2784 if (equal <= 0)
2785 return equal;
2786 }
2787
2788 return 1;
2789 }
2790
2791 static PyObject *
2792 memory_richcompare(PyObject *v, PyObject *w, int op)
2793 {
2794 PyObject *res;
2795 Py_buffer wbuf, *vv;
2796 Py_buffer *ww = NULL;
2797 struct unpacker *unpack_v = NULL;
2798 struct unpacker *unpack_w = NULL;
2799 char vfmt, wfmt;
2800 int equal = MV_COMPARE_NOT_IMPL;
2801
2802 if (op != Py_EQ && op != Py_NE)
2803 goto result; /* Py_NotImplemented */
2804
2805 assert(PyMemoryView_Check(v));
2806 if (BASE_INACCESSIBLE(v)) {
2807 equal = (v == w);
2808 goto result;
2809 }
2810 vv = VIEW_ADDR(v);
2811
2812 if (PyMemoryView_Check(w)) {
2813 if (BASE_INACCESSIBLE(w)) {
2814 equal = (v == w);
2815 goto result;
2816 }
2817 ww = VIEW_ADDR(w);
2818 }
2819 else {
2820 if (PyObject_GetBuffer(w, &wbuf, PyBUF_FULL_RO) < 0) {
2821 PyErr_Clear();
2822 goto result; /* Py_NotImplemented */
2823 }
2824 ww = &wbuf;
2825 }
2826
2827 if (!equiv_shape(vv, ww)) {
2828 PyErr_Clear();
2829 equal = 0;
2830 goto result;
2831 }
2832
2833 /* Use fast unpacking for identical primitive C type formats. */
2834 if (get_native_fmtchar(&vfmt, vv->format) < 0)
2835 vfmt = '_';
2836 if (get_native_fmtchar(&wfmt, ww->format) < 0)
2837 wfmt = '_';
2838 if (vfmt == '_' || wfmt == '_' || vfmt != wfmt) {
2839 /* Use struct module unpacking. NOTE: Even for equal format strings,
2840 memcmp() cannot be used for item comparison since it would give
2841 incorrect results in the case of NaNs or uninitialized padding
2842 bytes. */
2843 vfmt = '_';
2844 unpack_v = struct_get_unpacker(vv->format, vv->itemsize);
2845 if (unpack_v == NULL) {
2846 equal = fix_struct_error_int();
2847 goto result;
2848 }
2849 unpack_w = struct_get_unpacker(ww->format, ww->itemsize);
2850 if (unpack_w == NULL) {
2851 equal = fix_struct_error_int();
2852 goto result;
2853 }
2854 }
2855
2856 if (vv->ndim == 0) {
2857 equal = unpack_cmp(vv->buf, ww->buf,
2858 vfmt, unpack_v, unpack_w);
2859 }
2860 else if (vv->ndim == 1) {
2861 equal = cmp_base(vv->buf, ww->buf, vv->shape,
2862 vv->strides, vv->suboffsets,
2863 ww->strides, ww->suboffsets,
2864 vfmt, unpack_v, unpack_w);
2865 }
2866 else {
2867 equal = cmp_rec(vv->buf, ww->buf, vv->ndim, vv->shape,
2868 vv->strides, vv->suboffsets,
2869 ww->strides, ww->suboffsets,
2870 vfmt, unpack_v, unpack_w);
2871 }
2872
2873 result:
2874 if (equal < 0) {
2875 if (equal == MV_COMPARE_NOT_IMPL)
2876 res = Py_NotImplemented;
2877 else /* exception */
2878 res = NULL;
2879 }
2880 else if ((equal && op == Py_EQ) || (!equal && op == Py_NE))
2881 res = Py_True;
2882 else
2883 res = Py_False;
2884
2885 if (ww == &wbuf)
2886 PyBuffer_Release(ww);
2887
2888 unpacker_free(unpack_v);
2889 unpacker_free(unpack_w);
2890
2891 Py_XINCREF(res);
2892 return res;
2893 }
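/* Illustrative comparison semantics (Python session, shown for exposition
   only): equality is element-wise and format-aware, so views with different
   but compatible formats can still compare equal.

       >>> import array
       >>> memoryview(array.array('i', [1, 2])) == array.array('l', [1, 2])
       True
       >>> memoryview(b'ab') == b'ab'
       True
       >>> memoryview(b'ab') == b'ac'
       False
*/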
2894
2895 /**************************************************************************/
2896 /* Hash */
2897 /**************************************************************************/
2898
2899 static Py_hash_t
2900 memory_hash(PyMemoryViewObject *self)
2901 {
2902 if (self->hash == -1) {
2903 Py_buffer *view = &self->view;
2904 char *mem = view->buf;
2905 Py_ssize_t ret;
2906 char fmt;
2907
2908 CHECK_RELEASED_INT(self);
2909
2910 if (!view->readonly) {
2911 PyErr_SetString(PyExc_ValueError,
2912 "cannot hash writable memoryview object");
2913 return -1;
2914 }
2915 ret = get_native_fmtchar(&fmt, view->format);
2916 if (ret < 0 || !IS_BYTE_FORMAT(fmt)) {
2917 PyErr_SetString(PyExc_ValueError,
2918 "memoryview: hashing is restricted to formats 'B', 'b' or 'c'");
2919 return -1;
2920 }
2921 if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
2922 /* Keep the original error message */
2923 return -1;
2924 }
2925
2926 if (!MV_C_CONTIGUOUS(self->flags)) {
2927 mem = PyMem_Malloc(view->len);
2928 if (mem == NULL) {
2929 PyErr_NoMemory();
2930 return -1;
2931 }
2932 if (buffer_to_contiguous(mem, view, 'C') < 0) {
2933 PyMem_Free(mem);
2934 return -1;
2935 }
2936 }
2937
2938 /* Can't fail */
2939 self->hash = _Py_HashBytes(mem, view->len);
2940
2941 if (mem != view->buf)
2942 PyMem_Free(mem);
2943 }
2944
2945 return self->hash;
2946 }
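/* Illustrative hashing behaviour (Python session, shown for exposition only):
   read-only views with byte formats hash like the equivalent bytes object,
   writable views refuse to hash.

       >>> hash(memoryview(b'abc')) == hash(b'abc')
       True
       >>> hash(memoryview(bytearray(b'abc')))
       Traceback (most recent call last):
         ...
       ValueError: cannot hash writable memoryview object
*/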
2947
2948
2949 /**************************************************************************/
2950 /* getters */
2951 /**************************************************************************/
2952
2953 static PyObject *
2954 _IntTupleFromSsizet(int len, Py_ssize_t *vals)
2955 {
2956 int i;
2957 PyObject *o;
2958 PyObject *intTuple;
2959
2960 if (vals == NULL)
2961 return PyTuple_New(0);
2962
2963 intTuple = PyTuple_New(len);
2964 if (!intTuple)
2965 return NULL;
2966 for (i=0; i<len; i++) {
2967 o = PyLong_FromSsize_t(vals[i]);
2968 if (!o) {
2969 Py_DECREF(intTuple);
2970 return NULL;
2971 }
2972 PyTuple_SET_ITEM(intTuple, i, o);
2973 }
2974 return intTuple;
2975 }
2976
2977 static PyObject *
2978 memory_obj_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
2979 {
2980 Py_buffer *view = &self->view;
2981
2982 CHECK_RELEASED(self);
2983 if (view->obj == NULL) {
2984 Py_RETURN_NONE;
2985 }
2986 Py_INCREF(view->obj);
2987 return view->obj;
2988 }
2989
2990 static PyObject *
2991 memory_nbytes_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
2992 {
2993 CHECK_RELEASED(self);
2994 return PyLong_FromSsize_t(self->view.len);
2995 }
2996
2997 static PyObject *
2998 memory_format_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
2999 {
3000 CHECK_RELEASED(self);
3001 return PyUnicode_FromString(self->view.format);
3002 }
3003
3004 static PyObject *
3005 memory_itemsize_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3006 {
3007 CHECK_RELEASED(self);
3008 return PyLong_FromSsize_t(self->view.itemsize);
3009 }
3010
3011 static PyObject *
3012 memory_shape_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3013 {
3014 CHECK_RELEASED(self);
3015 return _IntTupleFromSsizet(self->view.ndim, self->view.shape);
3016 }
3017
3018 static PyObject *
3019 memory_strides_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3020 {
3021 CHECK_RELEASED(self);
3022 return _IntTupleFromSsizet(self->view.ndim, self->view.strides);
3023 }
3024
3025 static PyObject *
3026 memory_suboffsets_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3027 {
3028 CHECK_RELEASED(self);
3029 return _IntTupleFromSsizet(self->view.ndim, self->view.suboffsets);
3030 }
3031
3032 static PyObject *
3033 memory_readonly_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3034 {
3035 CHECK_RELEASED(self);
3036 return PyBool_FromLong(self->view.readonly);
3037 }
3038
3039 static PyObject *
3040 memory_ndim_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3041 {
3042 CHECK_RELEASED(self);
3043 return PyLong_FromLong(self->view.ndim);
3044 }
3045
3046 static PyObject *
3047 memory_c_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3048 {
3049 CHECK_RELEASED(self);
3050 return PyBool_FromLong(MV_C_CONTIGUOUS(self->flags));
3051 }
3052
3053 static PyObject *
3054 memory_f_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3055 {
3056 CHECK_RELEASED(self);
3057 return PyBool_FromLong(MV_F_CONTIGUOUS(self->flags));
3058 }
3059
3060 static PyObject *
3061 memory_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3062 {
3063 CHECK_RELEASED(self);
3064 return PyBool_FromLong(MV_ANY_CONTIGUOUS(self->flags));
3065 }
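/* Illustrative values of the getters above for a C-contiguous 2x3 byte view
   (Python session, shown for exposition only):

       >>> m = memoryview(bytes(range(6))).cast('B', (2, 3))
       >>> m.ndim, m.shape, m.strides, m.itemsize, m.nbytes
       (2, (2, 3), (3, 1), 1, 6)
       >>> m.c_contiguous, m.f_contiguous, m.contiguous
       (True, False, True)
*/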
3066
3067 PyDoc_STRVAR(memory_obj_doc,
3068 "The underlying object of the memoryview.");
3069 PyDoc_STRVAR(memory_nbytes_doc,
3070 "The amount of space in bytes that the array would use in\n"
3071 " a contiguous representation.");
3072 PyDoc_STRVAR(memory_readonly_doc,
3073 "A bool indicating whether the memory is read only.");
3074 PyDoc_STRVAR(memory_itemsize_doc,
3075 "The size in bytes of each element of the memoryview.");
3076 PyDoc_STRVAR(memory_format_doc,
3077 "A string containing the format (in struct module style)\n"
3078 " for each element in the view.");
3079 PyDoc_STRVAR(memory_ndim_doc,
3080 "An integer indicating how many dimensions of a multi-dimensional\n"
3081 " array the memory represents.");
3082 PyDoc_STRVAR(memory_shape_doc,
3083 "A tuple of ndim integers giving the shape of the memory\n"
3084 " as an N-dimensional array.");
3085 PyDoc_STRVAR(memory_strides_doc,
3086 "A tuple of ndim integers giving the size in bytes to access\n"
3087 " each element for each dimension of the array.");
3088 PyDoc_STRVAR(memory_suboffsets_doc,
3089 "A tuple of integers used internally for PIL-style arrays.");
3090 PyDoc_STRVAR(memory_c_contiguous_doc,
3091 "A bool indicating whether the memory is C contiguous.");
3092 PyDoc_STRVAR(memory_f_contiguous_doc,
3093 "A bool indicating whether the memory is Fortran contiguous.");
3094 PyDoc_STRVAR(memory_contiguous_doc,
3095 "A bool indicating whether the memory is contiguous.");
3096
3097
3098 static PyGetSetDef memory_getsetlist[] = {
3099 {"obj", (getter)memory_obj_get, NULL, memory_obj_doc},
3100 {"nbytes", (getter)memory_nbytes_get, NULL, memory_nbytes_doc},
3101 {"readonly", (getter)memory_readonly_get, NULL, memory_readonly_doc},
3102 {"itemsize", (getter)memory_itemsize_get, NULL, memory_itemsize_doc},
3103 {"format", (getter)memory_format_get, NULL, memory_format_doc},
3104 {"ndim", (getter)memory_ndim_get, NULL, memory_ndim_doc},
3105 {"shape", (getter)memory_shape_get, NULL, memory_shape_doc},
3106 {"strides", (getter)memory_strides_get, NULL, memory_strides_doc},
3107 {"suboffsets", (getter)memory_suboffsets_get, NULL, memory_suboffsets_doc},
3108 {"c_contiguous", (getter)memory_c_contiguous, NULL, memory_c_contiguous_doc},
3109 {"f_contiguous", (getter)memory_f_contiguous, NULL, memory_f_contiguous_doc},
3110 {"contiguous", (getter)memory_contiguous, NULL, memory_contiguous_doc},
3111 {NULL, NULL, NULL, NULL},
3112 };
3113
3114 PyDoc_STRVAR(memory_release_doc,
3115 "release($self, /)\n--\n\
3116 \n\
3117 Release the underlying buffer exposed by the memoryview object.");
3118 PyDoc_STRVAR(memory_tobytes_doc,
3119 "tobytes($self, /, order=None)\n--\n\
3120 \n\
3121 Return the data in the buffer as a byte string. Order can be {'C', 'F', 'A'}.\n\
3122 When order is 'C' or 'F', the data of the original array is converted to C or\n\
3123 Fortran order. For contiguous views, 'A' returns an exact copy of the physical\n\
3124 memory. In particular, in-memory Fortran order is preserved. For non-contiguous\n\
3125 views, the data is converted to C first. order=None is the same as order='C'.");
3126 PyDoc_STRVAR(memory_tolist_doc,
3127 "tolist($self, /)\n--\n\
3128 \n\
3129 Return the data in the buffer as a list of elements.");
3130 PyDoc_STRVAR(memory_cast_doc,
3131 "cast($self, /, format, *, shape)\n--\n\
3132 \n\
3133 Cast a memoryview to a new format or shape.");
3134 PyDoc_STRVAR(memory_toreadonly_doc,
3135 "toreadonly($self, /)\n--\n\
3136 \n\
3137 Return a readonly version of the memoryview.");
3138
3139 static PyMethodDef memory_methods[] = {
3140 {"release", (PyCFunction)memory_release, METH_NOARGS, memory_release_doc},
3141 {"tobytes", (PyCFunction)(void(*)(void))memory_tobytes, METH_VARARGS|METH_KEYWORDS, memory_tobytes_doc},
3142 MEMORYVIEW_HEX_METHODDEF
3143 {"tolist", (PyCFunction)memory_tolist, METH_NOARGS, memory_tolist_doc},
3144 {"cast", (PyCFunction)(void(*)(void))memory_cast, METH_VARARGS|METH_KEYWORDS, memory_cast_doc},
3145 {"toreadonly", (PyCFunction)memory_toreadonly, METH_NOARGS, memory_toreadonly_doc},
3146 {"__enter__", memory_enter, METH_NOARGS, NULL},
3147 {"__exit__", memory_exit, METH_VARARGS, NULL},
3148 {NULL, NULL}
3149 };
3150
3151
3152 PyTypeObject PyMemoryView_Type = {
3153 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3154 "memoryview", /* tp_name */
3155 offsetof(PyMemoryViewObject, ob_array), /* tp_basicsize */
3156 sizeof(Py_ssize_t), /* tp_itemsize */
3157 (destructor)memory_dealloc, /* tp_dealloc */
3158 0, /* tp_vectorcall_offset */
3159 0, /* tp_getattr */
3160 0, /* tp_setattr */
3161 0, /* tp_as_async */
3162 (reprfunc)memory_repr, /* tp_repr */
3163 0, /* tp_as_number */
3164 &memory_as_sequence, /* tp_as_sequence */
3165 &memory_as_mapping, /* tp_as_mapping */
3166 (hashfunc)memory_hash, /* tp_hash */
3167 0, /* tp_call */
3168 0, /* tp_str */
3169 PyObject_GenericGetAttr, /* tp_getattro */
3170 0, /* tp_setattro */
3171 &memory_as_buffer, /* tp_as_buffer */
3172 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
3173 memory_doc, /* tp_doc */
3174 (traverseproc)memory_traverse, /* tp_traverse */
3175 (inquiry)memory_clear, /* tp_clear */
3176 memory_richcompare, /* tp_richcompare */
3177 offsetof(PyMemoryViewObject, weakreflist),/* tp_weaklistoffset */
3178 0, /* tp_iter */
3179 0, /* tp_iternext */
3180 memory_methods, /* tp_methods */
3181 0, /* tp_members */
3182 memory_getsetlist, /* tp_getset */
3183 0, /* tp_base */
3184 0, /* tp_dict */
3185 0, /* tp_descr_get */
3186 0, /* tp_descr_set */
3187 0, /* tp_dictoffset */
3188 0, /* tp_init */
3189 0, /* tp_alloc */
3190 memory_new, /* tp_new */
3191 };
3192