1 /* List object implementation */
2
3 #include "Python.h"
4 #include "pycore_object.h"
5 #include "pycore_pystate.h"
6 #include "pycore_tupleobject.h"
7 #include "pycore_accu.h"
8
9 #ifdef STDC_HEADERS
10 #include <stddef.h>
11 #else
12 #include <sys/types.h> /* For size_t */
13 #endif
14
15 /*[clinic input]
16 class list "PyListObject *" "&PyList_Type"
17 [clinic start generated code]*/
18 /*[clinic end generated code: output=da39a3ee5e6b4b0d input=f9b222678f9f71e0]*/
19
20 #include "clinic/listobject.c.h"
21
22 /* Ensure ob_item has room for at least newsize elements, and set
23 * ob_size to newsize. If newsize > ob_size on entry, the content
24 * of the new slots at exit is undefined heap trash; it's the caller's
25 * responsibility to overwrite them with sane values.
26 * The number of allocated elements may grow, shrink, or stay the same.
27 * Failure is impossible if newsize <= self.allocated on entry, although
28 * that partly relies on an assumption that the system realloc() never
29 * fails when passed a number of bytes <= the number of bytes last
30 * allocated (the C standard doesn't guarantee this, but it's hard to
31 * imagine a realloc implementation where it wouldn't be true).
32 * Note that self->ob_item may change, even if newsize is less
33 * than ob_size on entry.
34 */
35 static int
36 list_resize(PyListObject *self, Py_ssize_t newsize)
37 {
38 PyObject **items;
39 size_t new_allocated, num_allocated_bytes;
40 Py_ssize_t allocated = self->allocated;
41
42 /* Bypass realloc() when a previous overallocation is large enough
43 to accommodate the newsize. If the newsize falls lower than half
44 the allocated size, then proceed with the realloc() to shrink the list.
45 */
46 if (allocated >= newsize && newsize >= (allocated >> 1)) {
47 assert(self->ob_item != NULL || newsize == 0);
48 Py_SIZE(self) = newsize;
49 return 0;
50 }
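/* Illustrative note (added, not in the original source): with allocated == 16,
 * any newsize from 8 through 16 takes this fast path and keeps the same buffer;
 * only shrinking below 8 (or growing past 16) falls through to realloc(). */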
51
52 /* This over-allocates proportional to the list size, making room
53 * for additional growth. The over-allocation is mild, but is
54 * enough to give linear-time amortized behavior over a long
55 * sequence of appends() in the presence of a poorly-performing
56 * system realloc().
57 * The growth pattern is: 0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ...
58 * Note: new_allocated won't overflow because the largest possible value
59 * is PY_SSIZE_T_MAX * (9 / 8) + 6 which always fits in a size_t.
60 */
61 new_allocated = (size_t)newsize + (newsize >> 3) + (newsize < 9 ? 3 : 6);
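/* Worked example (illustrative): appending the 9th element to a full list of
 * 8 gives newsize == 9, so new_allocated = 9 + (9 >> 3) + 6 = 16, matching
 * the 8 -> 16 step in the growth pattern above. */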
62 if (new_allocated > (size_t)PY_SSIZE_T_MAX / sizeof(PyObject *)) {
63 PyErr_NoMemory();
64 return -1;
65 }
66
67 if (newsize == 0)
68 new_allocated = 0;
69 num_allocated_bytes = new_allocated * sizeof(PyObject *);
70 items = (PyObject **)PyMem_Realloc(self->ob_item, num_allocated_bytes);
71 if (items == NULL) {
72 PyErr_NoMemory();
73 return -1;
74 }
75 self->ob_item = items;
76 Py_SIZE(self) = newsize;
77 self->allocated = new_allocated;
78 return 0;
79 }
80
81 static int
82 list_preallocate_exact(PyListObject *self, Py_ssize_t size)
83 {
84 assert(self->ob_item == NULL);
85 assert(size > 0);
86
87 PyObject **items = PyMem_New(PyObject*, size);
88 if (items == NULL) {
89 PyErr_NoMemory();
90 return -1;
91 }
92 self->ob_item = items;
93 self->allocated = size;
94 return 0;
95 }
96
97 /* Debug statistic to compare allocations with reuse through the free list */
98 #undef SHOW_ALLOC_COUNT
99 #ifdef SHOW_ALLOC_COUNT
100 static size_t count_alloc = 0;
101 static size_t count_reuse = 0;
102
103 static void
104 show_alloc(void)
105 {
106 PyInterpreterState *interp = _PyInterpreterState_Get();
107 if (!interp->config.show_alloc_count) {
108 return;
109 }
110
111 fprintf(stderr, "List allocations: %" PY_FORMAT_SIZE_T "d\n",
112 count_alloc);
113 fprintf(stderr, "List reuse through freelist: %" PY_FORMAT_SIZE_T
114 "d\n", count_reuse);
115 fprintf(stderr, "%.2f%% reuse rate\n\n",
116 (100.0*count_reuse/(count_alloc+count_reuse)));
117 }
118 #endif
119
120 /* Empty list reuse scheme to save calls to malloc and free */
121 #ifndef PyList_MAXFREELIST
122 #define PyList_MAXFREELIST 80
123 #endif
124 static PyListObject *free_list[PyList_MAXFREELIST];
125 static int numfree = 0;
126
127 int
128 PyList_ClearFreeList(void)
129 {
130 PyListObject *op;
131 int ret = numfree;
132 while (numfree) {
133 op = free_list[--numfree];
134 assert(PyList_CheckExact(op));
135 PyObject_GC_Del(op);
136 }
137 return ret;
138 }
139
140 void
141 PyList_Fini(void)
142 {
143 PyList_ClearFreeList();
144 }
145
146 /* Print summary info about the state of the optimized allocator */
147 void
148 _PyList_DebugMallocStats(FILE *out)
149 {
150 _PyDebugAllocatorStats(out,
151 "free PyListObject",
152 numfree, sizeof(PyListObject));
153 }
154
155 PyObject *
156 PyList_New(Py_ssize_t size)
157 {
158 PyListObject *op;
159 #ifdef SHOW_ALLOC_COUNT
160 static int initialized = 0;
161 if (!initialized) {
162 Py_AtExit(show_alloc);
163 initialized = 1;
164 }
165 #endif
166
167 if (size < 0) {
168 PyErr_BadInternalCall();
169 return NULL;
170 }
171 if (numfree) {
172 numfree--;
173 op = free_list[numfree];
174 _Py_NewReference((PyObject *)op);
175 #ifdef SHOW_ALLOC_COUNT
176 count_reuse++;
177 #endif
178 } else {
179 op = PyObject_GC_New(PyListObject, &PyList_Type);
180 if (op == NULL)
181 return NULL;
182 #ifdef SHOW_ALLOC_COUNT
183 count_alloc++;
184 #endif
185 }
186 if (size <= 0)
187 op->ob_item = NULL;
188 else {
189 op->ob_item = (PyObject **) PyMem_Calloc(size, sizeof(PyObject *));
190 if (op->ob_item == NULL) {
191 Py_DECREF(op);
192 return PyErr_NoMemory();
193 }
194 }
195 Py_SIZE(op) = size;
196 op->allocated = size;
197 _PyObject_GC_TRACK(op);
198 return (PyObject *) op;
199 }
200
201 static PyObject *
202 list_new_prealloc(Py_ssize_t size)
203 {
204 PyListObject *op = (PyListObject *) PyList_New(0);
205 if (size == 0 || op == NULL) {
206 return (PyObject *) op;
207 }
208 assert(op->ob_item == NULL);
209 op->ob_item = PyMem_New(PyObject *, size);
210 if (op->ob_item == NULL) {
211 Py_DECREF(op);
212 return PyErr_NoMemory();
213 }
214 op->allocated = size;
215 return (PyObject *) op;
216 }
217
218 Py_ssize_t
219 PyList_Size(PyObject *op)
220 {
221 if (!PyList_Check(op)) {
222 PyErr_BadInternalCall();
223 return -1;
224 }
225 else
226 return Py_SIZE(op);
227 }
228
229 static inline int
230 valid_index(Py_ssize_t i, Py_ssize_t limit)
231 {
232 /* The cast to size_t lets us use just a single comparison
233 to check whether i is in the range: 0 <= i < limit.
234
235 See: Section 14.2 "Bounds Checking" in the Agner Fog
236 optimization manual found at:
237 https://www.agner.org/optimize/optimizing_cpp.pdf
238 */
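/* Illustration (added note): a negative i wraps to a huge size_t value under
 the cast, so this single unsigned comparison also rejects i < 0. */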
239 return (size_t) i < (size_t) limit;
240 }
241
242 static PyObject *indexerr = NULL;
243
244 PyObject *
245 PyList_GetItem(PyObject *op, Py_ssize_t i)
246 {
247 if (!PyList_Check(op)) {
248 PyErr_BadInternalCall();
249 return NULL;
250 }
251 if (!valid_index(i, Py_SIZE(op))) {
252 if (indexerr == NULL) {
253 indexerr = PyUnicode_FromString(
254 "list index out of range");
255 if (indexerr == NULL)
256 return NULL;
257 }
258 PyErr_SetObject(PyExc_IndexError, indexerr);
259 return NULL;
260 }
261 return ((PyListObject *)op) -> ob_item[i];
262 }
263
264 int
265 PyList_SetItem(PyObject *op, Py_ssize_t i,
266 PyObject *newitem)
267 {
268 PyObject **p;
269 if (!PyList_Check(op)) {
270 Py_XDECREF(newitem);
271 PyErr_BadInternalCall();
272 return -1;
273 }
274 if (!valid_index(i, Py_SIZE(op))) {
275 Py_XDECREF(newitem);
276 PyErr_SetString(PyExc_IndexError,
277 "list assignment index out of range");
278 return -1;
279 }
280 p = ((PyListObject *)op) -> ob_item + i;
281 Py_XSETREF(*p, newitem);
282 return 0;
283 }
284
285 static int
286 ins1(PyListObject *self, Py_ssize_t where, PyObject *v)
287 {
288 Py_ssize_t i, n = Py_SIZE(self);
289 PyObject **items;
290 if (v == NULL) {
291 PyErr_BadInternalCall();
292 return -1;
293 }
294 if (n == PY_SSIZE_T_MAX) {
295 PyErr_SetString(PyExc_OverflowError,
296 "cannot add more objects to list");
297 return -1;
298 }
299
300 if (list_resize(self, n+1) < 0)
301 return -1;
302
303 if (where < 0) {
304 where += n;
305 if (where < 0)
306 where = 0;
307 }
308 if (where > n)
309 where = n;
310 items = self->ob_item;
311 for (i = n; --i >= where; )
312 items[i+1] = items[i];
313 Py_INCREF(v);
314 items[where] = v;
315 return 0;
316 }
317
318 int
319 PyList_Insert(PyObject *op, Py_ssize_t where, PyObject *newitem)
320 {
321 if (!PyList_Check(op)) {
322 PyErr_BadInternalCall();
323 return -1;
324 }
325 return ins1((PyListObject *)op, where, newitem);
326 }
327
328 static int
329 app1(PyListObject *self, PyObject *v)
330 {
331 Py_ssize_t n = PyList_GET_SIZE(self);
332
333 assert (v != NULL);
334 if (n == PY_SSIZE_T_MAX) {
335 PyErr_SetString(PyExc_OverflowError,
336 "cannot add more objects to list");
337 return -1;
338 }
339
340 if (list_resize(self, n+1) < 0)
341 return -1;
342
343 Py_INCREF(v);
344 PyList_SET_ITEM(self, n, v);
345 return 0;
346 }
347
348 int
349 PyList_Append(PyObject *op, PyObject *newitem)
350 {
351 if (PyList_Check(op) && (newitem != NULL))
352 return app1((PyListObject *)op, newitem);
353 PyErr_BadInternalCall();
354 return -1;
355 }
356
357 /* Methods */
358
359 static void
360 list_dealloc(PyListObject *op)
361 {
362 Py_ssize_t i;
363 PyObject_GC_UnTrack(op);
364 Py_TRASHCAN_BEGIN(op, list_dealloc)
365 if (op->ob_item != NULL) {
366 /* Do it backwards, for Christian Tismer.
367 There's a simple test case where somehow this reduces
368 thrashing when a *very* large list is created and
369 immediately deleted. */
370 i = Py_SIZE(op);
371 while (--i >= 0) {
372 Py_XDECREF(op->ob_item[i]);
373 }
374 PyMem_FREE(op->ob_item);
375 }
376 if (numfree < PyList_MAXFREELIST && PyList_CheckExact(op))
377 free_list[numfree++] = op;
378 else
379 Py_TYPE(op)->tp_free((PyObject *)op);
380 Py_TRASHCAN_END
381 }
382
383 static PyObject *
384 list_repr(PyListObject *v)
385 {
386 Py_ssize_t i;
387 PyObject *s;
388 _PyUnicodeWriter writer;
389
390 if (Py_SIZE(v) == 0) {
391 return PyUnicode_FromString("[]");
392 }
393
394 i = Py_ReprEnter((PyObject*)v);
395 if (i != 0) {
396 return i > 0 ? PyUnicode_FromString("[...]") : NULL;
397 }
398
399 _PyUnicodeWriter_Init(&writer);
400 writer.overallocate = 1;
401 /* "[" + "1" + ", 2" * (len - 1) + "]" */
402 writer.min_length = 1 + 1 + (2 + 1) * (Py_SIZE(v) - 1) + 1;
403
404 if (_PyUnicodeWriter_WriteChar(&writer, '[') < 0)
405 goto error;
406
407 /* Do repr() on each element. Note that this may mutate the list,
408 so must refetch the list size on each iteration. */
409 for (i = 0; i < Py_SIZE(v); ++i) {
410 if (i > 0) {
411 if (_PyUnicodeWriter_WriteASCIIString(&writer, ", ", 2) < 0)
412 goto error;
413 }
414
415 s = PyObject_Repr(v->ob_item[i]);
416 if (s == NULL)
417 goto error;
418
419 if (_PyUnicodeWriter_WriteStr(&writer, s) < 0) {
420 Py_DECREF(s);
421 goto error;
422 }
423 Py_DECREF(s);
424 }
425
426 writer.overallocate = 0;
427 if (_PyUnicodeWriter_WriteChar(&writer, ']') < 0)
428 goto error;
429
430 Py_ReprLeave((PyObject *)v);
431 return _PyUnicodeWriter_Finish(&writer);
432
433 error:
434 _PyUnicodeWriter_Dealloc(&writer);
435 Py_ReprLeave((PyObject *)v);
436 return NULL;
437 }
438
439 static Py_ssize_t
440 list_length(PyListObject *a)
441 {
442 return Py_SIZE(a);
443 }
444
445 static int
446 list_contains(PyListObject *a, PyObject *el)
447 {
448 Py_ssize_t i;
449 int cmp;
450
451 for (i = 0, cmp = 0 ; cmp == 0 && i < Py_SIZE(a); ++i)
452 cmp = PyObject_RichCompareBool(el, PyList_GET_ITEM(a, i),
453 Py_EQ);
454 return cmp;
455 }
456
457 static PyObject *
458 list_item(PyListObject *a, Py_ssize_t i)
459 {
460 if (!valid_index(i, Py_SIZE(a))) {
461 if (indexerr == NULL) {
462 indexerr = PyUnicode_FromString(
463 "list index out of range");
464 if (indexerr == NULL)
465 return NULL;
466 }
467 PyErr_SetObject(PyExc_IndexError, indexerr);
468 return NULL;
469 }
470 Py_INCREF(a->ob_item[i]);
471 return a->ob_item[i];
472 }
473
474 static PyObject *
475 list_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
476 {
477 PyListObject *np;
478 PyObject **src, **dest;
479 Py_ssize_t i, len;
480 len = ihigh - ilow;
481 np = (PyListObject *) list_new_prealloc(len);
482 if (np == NULL)
483 return NULL;
484
485 src = a->ob_item + ilow;
486 dest = np->ob_item;
487 for (i = 0; i < len; i++) {
488 PyObject *v = src[i];
489 Py_INCREF(v);
490 dest[i] = v;
491 }
492 Py_SIZE(np) = len;
493 return (PyObject *)np;
494 }
495
496 PyObject *
497 PyList_GetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
498 {
499 if (!PyList_Check(a)) {
500 PyErr_BadInternalCall();
501 return NULL;
502 }
503 if (ilow < 0) {
504 ilow = 0;
505 }
506 else if (ilow > Py_SIZE(a)) {
507 ilow = Py_SIZE(a);
508 }
509 if (ihigh < ilow) {
510 ihigh = ilow;
511 }
512 else if (ihigh > Py_SIZE(a)) {
513 ihigh = Py_SIZE(a);
514 }
515 return list_slice((PyListObject *)a, ilow, ihigh);
516 }
517
518 static PyObject *
519 list_concat(PyListObject *a, PyObject *bb)
520 {
521 Py_ssize_t size;
522 Py_ssize_t i;
523 PyObject **src, **dest;
524 PyListObject *np;
525 if (!PyList_Check(bb)) {
526 PyErr_Format(PyExc_TypeError,
527 "can only concatenate list (not \"%.200s\") to list",
528 bb->ob_type->tp_name);
529 return NULL;
530 }
531 #define b ((PyListObject *)bb)
532 if (Py_SIZE(a) > PY_SSIZE_T_MAX - Py_SIZE(b))
533 return PyErr_NoMemory();
534 size = Py_SIZE(a) + Py_SIZE(b);
535 np = (PyListObject *) list_new_prealloc(size);
536 if (np == NULL) {
537 return NULL;
538 }
539 src = a->ob_item;
540 dest = np->ob_item;
541 for (i = 0; i < Py_SIZE(a); i++) {
542 PyObject *v = src[i];
543 Py_INCREF(v);
544 dest[i] = v;
545 }
546 src = b->ob_item;
547 dest = np->ob_item + Py_SIZE(a);
548 for (i = 0; i < Py_SIZE(b); i++) {
549 PyObject *v = src[i];
550 Py_INCREF(v);
551 dest[i] = v;
552 }
553 Py_SIZE(np) = size;
554 return (PyObject *)np;
555 #undef b
556 }
557
558 static PyObject *
559 list_repeat(PyListObject *a, Py_ssize_t n)
560 {
561 Py_ssize_t i, j;
562 Py_ssize_t size;
563 PyListObject *np;
564 PyObject **p, **items;
565 PyObject *elem;
566 if (n < 0)
567 n = 0;
568 if (n > 0 && Py_SIZE(a) > PY_SSIZE_T_MAX / n)
569 return PyErr_NoMemory();
570 size = Py_SIZE(a) * n;
571 if (size == 0)
572 return PyList_New(0);
573 np = (PyListObject *) list_new_prealloc(size);
574 if (np == NULL)
575 return NULL;
576
577 if (Py_SIZE(a) == 1) {
578 items = np->ob_item;
579 elem = a->ob_item[0];
580 for (i = 0; i < n; i++) {
581 items[i] = elem;
582 Py_INCREF(elem);
583 }
584 }
585 else {
586 p = np->ob_item;
587 items = a->ob_item;
588 for (i = 0; i < n; i++) {
589 for (j = 0; j < Py_SIZE(a); j++) {
590 *p = items[j];
591 Py_INCREF(*p);
592 p++;
593 }
594 }
595 }
596 Py_SIZE(np) = size;
597 return (PyObject *) np;
598 }
599
600 static int
601 _list_clear(PyListObject *a)
602 {
603 Py_ssize_t i;
604 PyObject **item = a->ob_item;
605 if (item != NULL) {
606 /* Because XDECREF can recursively invoke operations on
607 this list, we make it empty first. */
608 i = Py_SIZE(a);
609 Py_SIZE(a) = 0;
610 a->ob_item = NULL;
611 a->allocated = 0;
612 while (--i >= 0) {
613 Py_XDECREF(item[i]);
614 }
615 PyMem_FREE(item);
616 }
617 /* Never fails; the return value can be ignored.
618 Note that there is no guarantee that the list is actually empty
619 at this point, because XDECREF may have populated it again! */
620 return 0;
621 }
622
623 /* a[ilow:ihigh] = v if v != NULL.
624 * del a[ilow:ihigh] if v == NULL.
625 *
626 * Special speed gimmick: when v is NULL and ihigh - ilow <= 8, it's
627 * guaranteed the call cannot fail.
628 */
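/* Usage sketch (illustrative, assuming the list has at least 5 elements):
 * PyList_SetSlice(list, 2, 5, NULL) routes here with v == NULL and deletes
 * list[2:5]; since ihigh - ilow == 3 <= 8, that call cannot fail. */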
629 static int
630 list_ass_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
631 {
632 /* Because [X]DECREF can recursively invoke list operations on
633 this list, we must postpone all [X]DECREF activity until
634 after the list is back in its canonical shape. Therefore
635 we must allocate an additional array, 'recycle', into which
636 we temporarily copy the items that are deleted from the
637 list. :-( */
638 PyObject *recycle_on_stack[8];
639 PyObject **recycle = recycle_on_stack; /* will allocate more if needed */
640 PyObject **item;
641 PyObject **vitem = NULL;
642 PyObject *v_as_SF = NULL; /* PySequence_Fast(v) */
643 Py_ssize_t n; /* # of elements in replacement list */
644 Py_ssize_t norig; /* # of elements in list getting replaced */
645 Py_ssize_t d; /* Change in size */
646 Py_ssize_t k;
647 size_t s;
648 int result = -1; /* guilty until proved innocent */
649 #define b ((PyListObject *)v)
650 if (v == NULL)
651 n = 0;
652 else {
653 if (a == b) {
654 /* Special case "a[i:j] = a" -- copy b first */
655 v = list_slice(b, 0, Py_SIZE(b));
656 if (v == NULL)
657 return result;
658 result = list_ass_slice(a, ilow, ihigh, v);
659 Py_DECREF(v);
660 return result;
661 }
662 v_as_SF = PySequence_Fast(v, "can only assign an iterable");
663 if(v_as_SF == NULL)
664 goto Error;
665 n = PySequence_Fast_GET_SIZE(v_as_SF);
666 vitem = PySequence_Fast_ITEMS(v_as_SF);
667 }
668 if (ilow < 0)
669 ilow = 0;
670 else if (ilow > Py_SIZE(a))
671 ilow = Py_SIZE(a);
672
673 if (ihigh < ilow)
674 ihigh = ilow;
675 else if (ihigh > Py_SIZE(a))
676 ihigh = Py_SIZE(a);
677
678 norig = ihigh - ilow;
679 assert(norig >= 0);
680 d = n - norig;
681 if (Py_SIZE(a) + d == 0) {
682 Py_XDECREF(v_as_SF);
683 return _list_clear(a);
684 }
685 item = a->ob_item;
686 /* recycle the items that we are about to remove */
687 s = norig * sizeof(PyObject *);
688 /* If norig == 0, item might be NULL, in which case we may not memcpy from it. */
689 if (s) {
690 if (s > sizeof(recycle_on_stack)) {
691 recycle = (PyObject **)PyMem_MALLOC(s);
692 if (recycle == NULL) {
693 PyErr_NoMemory();
694 goto Error;
695 }
696 }
697 memcpy(recycle, &item[ilow], s);
698 }
699
700 if (d < 0) { /* Delete -d items */
701 Py_ssize_t tail;
702 tail = (Py_SIZE(a) - ihigh) * sizeof(PyObject *);
703 memmove(&item[ihigh+d], &item[ihigh], tail);
704 if (list_resize(a, Py_SIZE(a) + d) < 0) {
705 memmove(&item[ihigh], &item[ihigh+d], tail);
706 memcpy(&item[ilow], recycle, s);
707 goto Error;
708 }
709 item = a->ob_item;
710 }
711 else if (d > 0) { /* Insert d items */
712 k = Py_SIZE(a);
713 if (list_resize(a, k+d) < 0)
714 goto Error;
715 item = a->ob_item;
716 memmove(&item[ihigh+d], &item[ihigh],
717 (k - ihigh)*sizeof(PyObject *));
718 }
719 for (k = 0; k < n; k++, ilow++) {
720 PyObject *w = vitem[k];
721 Py_XINCREF(w);
722 item[ilow] = w;
723 }
724 for (k = norig - 1; k >= 0; --k)
725 Py_XDECREF(recycle[k]);
726 result = 0;
727 Error:
728 if (recycle != recycle_on_stack)
729 PyMem_FREE(recycle);
730 Py_XDECREF(v_as_SF);
731 return result;
732 #undef b
733 }
734
735 int
736 PyList_SetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
737 {
738 if (!PyList_Check(a)) {
739 PyErr_BadInternalCall();
740 return -1;
741 }
742 return list_ass_slice((PyListObject *)a, ilow, ihigh, v);
743 }
744
745 static PyObject *
746 list_inplace_repeat(PyListObject *self, Py_ssize_t n)
747 {
748 PyObject **items;
749 Py_ssize_t size, i, j, p;
750
751
752 size = PyList_GET_SIZE(self);
753 if (size == 0 || n == 1) {
754 Py_INCREF(self);
755 return (PyObject *)self;
756 }
757
758 if (n < 1) {
759 (void)_list_clear(self);
760 Py_INCREF(self);
761 return (PyObject *)self;
762 }
763
764 if (size > PY_SSIZE_T_MAX / n) {
765 return PyErr_NoMemory();
766 }
767
768 if (list_resize(self, size*n) < 0)
769 return NULL;
770
771 p = size;
772 items = self->ob_item;
773 for (i = 1; i < n; i++) { /* Start counting at 1, not 0 */
774 for (j = 0; j < size; j++) {
775 PyObject *o = items[j];
776 Py_INCREF(o);
777 items[p++] = o;
778 }
779 }
780 Py_INCREF(self);
781 return (PyObject *)self;
782 }
783
784 static int
785 list_ass_item(PyListObject *a, Py_ssize_t i, PyObject *v)
786 {
787 if (!valid_index(i, Py_SIZE(a))) {
788 PyErr_SetString(PyExc_IndexError,
789 "list assignment index out of range");
790 return -1;
791 }
792 if (v == NULL)
793 return list_ass_slice(a, i, i+1, v);
794 Py_INCREF(v);
795 Py_SETREF(a->ob_item[i], v);
796 return 0;
797 }
798
799 /*[clinic input]
800 list.insert
801
802 index: Py_ssize_t
803 object: object
804 /
805
806 Insert object before index.
807 [clinic start generated code]*/
808
809 static PyObject *
810 list_insert_impl(PyListObject *self, Py_ssize_t index, PyObject *object)
811 /*[clinic end generated code: output=7f35e32f60c8cb78 input=858514cf894c7eab]*/
812 {
813 if (ins1(self, index, object) == 0)
814 Py_RETURN_NONE;
815 return NULL;
816 }
817
818 /*[clinic input]
819 list.clear
820
821 Remove all items from list.
822 [clinic start generated code]*/
823
824 static PyObject *
825 list_clear_impl(PyListObject *self)
826 /*[clinic end generated code: output=67a1896c01f74362 input=ca3c1646856742f6]*/
827 {
828 _list_clear(self);
829 Py_RETURN_NONE;
830 }
831
832 /*[clinic input]
833 list.copy
834
835 Return a shallow copy of the list.
836 [clinic start generated code]*/
837
838 static PyObject *
839 list_copy_impl(PyListObject *self)
840 /*[clinic end generated code: output=ec6b72d6209d418e input=6453ab159e84771f]*/
841 {
842 return list_slice(self, 0, Py_SIZE(self));
843 }
844
845 /*[clinic input]
846 list.append
847
848 object: object
849 /
850
851 Append object to the end of the list.
852 [clinic start generated code]*/
853
854 static PyObject *
855 list_append(PyListObject *self, PyObject *object)
856 /*[clinic end generated code: output=7c096003a29c0eae input=43a3fe48a7066e91]*/
857 {
858 if (app1(self, object) == 0)
859 Py_RETURN_NONE;
860 return NULL;
861 }
862
863 /*[clinic input]
864 list.extend
865
866 iterable: object
867 /
868
869 Extend list by appending elements from the iterable.
870 [clinic start generated code]*/
871
872 static PyObject *
873 list_extend(PyListObject *self, PyObject *iterable)
874 /*[clinic end generated code: output=630fb3bca0c8e789 input=9ec5ba3a81be3a4d]*/
875 {
876 PyObject *it; /* iter(v) */
877 Py_ssize_t m; /* size of self */
878 Py_ssize_t n; /* guess for size of iterable */
879 Py_ssize_t mn; /* m + n */
880 Py_ssize_t i;
881 PyObject *(*iternext)(PyObject *);
882
883 /* Special cases:
884 1) lists and tuples which can use PySequence_Fast ops
885 2) extending self to self requires making a copy first
886 */
887 if (PyList_CheckExact(iterable) || PyTuple_CheckExact(iterable) ||
888 (PyObject *)self == iterable) {
889 PyObject **src, **dest;
890 iterable = PySequence_Fast(iterable, "argument must be iterable");
891 if (!iterable)
892 return NULL;
893 n = PySequence_Fast_GET_SIZE(iterable);
894 if (n == 0) {
895 /* short circuit when iterable is empty */
896 Py_DECREF(iterable);
897 Py_RETURN_NONE;
898 }
899 m = Py_SIZE(self);
900 /* It should not be possible to allocate a list large enough to cause
901 an overflow on any relevant platform */
902 assert(m < PY_SSIZE_T_MAX - n);
903 if (list_resize(self, m + n) < 0) {
904 Py_DECREF(iterable);
905 return NULL;
906 }
907 /* note that we may still have self == iterable here for the
908 * situation a.extend(a), but the following code works
909 * in that case too. Just make sure to resize self
910 * before calling PySequence_Fast_ITEMS.
911 */
912 /* populate the end of self with iterable's items */
913 src = PySequence_Fast_ITEMS(iterable);
914 dest = self->ob_item + m;
915 for (i = 0; i < n; i++) {
916 PyObject *o = src[i];
917 Py_INCREF(o);
918 dest[i] = o;
919 }
920 Py_DECREF(iterable);
921 Py_RETURN_NONE;
922 }
923
924 it = PyObject_GetIter(iterable);
925 if (it == NULL)
926 return NULL;
927 iternext = *it->ob_type->tp_iternext;
928
929 /* Guess a result list size. */
930 n = PyObject_LengthHint(iterable, 8);
931 if (n < 0) {
932 Py_DECREF(it);
933 return NULL;
934 }
935 m = Py_SIZE(self);
936 if (m > PY_SSIZE_T_MAX - n) {
937 /* m + n overflowed; on the chance that n lied, and there really
938 * is enough room, ignore it. If n was telling the truth, we'll
939 * eventually run out of memory during the loop.
940 */
941 }
942 else {
943 mn = m + n;
944 /* Make room. */
945 if (list_resize(self, mn) < 0)
946 goto error;
947 /* Make the list sane again. */
948 Py_SIZE(self) = m;
949 }
950
951 /* Run iterator to exhaustion. */
952 for (;;) {
953 PyObject *item = iternext(it);
954 if (item == NULL) {
955 if (PyErr_Occurred()) {
956 if (PyErr_ExceptionMatches(PyExc_StopIteration))
957 PyErr_Clear();
958 else
959 goto error;
960 }
961 break;
962 }
963 if (Py_SIZE(self) < self->allocated) {
964 /* steals ref */
965 PyList_SET_ITEM(self, Py_SIZE(self), item);
966 ++Py_SIZE(self);
967 }
968 else {
969 int status = app1(self, item);
970 Py_DECREF(item); /* append creates a new ref */
971 if (status < 0)
972 goto error;
973 }
974 }
975
976 /* Cut back result list if initial guess was too large. */
977 if (Py_SIZE(self) < self->allocated) {
978 if (list_resize(self, Py_SIZE(self)) < 0)
979 goto error;
980 }
981
982 Py_DECREF(it);
983 Py_RETURN_NONE;
984
985 error:
986 Py_DECREF(it);
987 return NULL;
988 }
989
990 PyObject *
991 _PyList_Extend(PyListObject *self, PyObject *iterable)
992 {
993 return list_extend(self, iterable);
994 }
995
996 static PyObject *
997 list_inplace_concat(PyListObject *self, PyObject *other)
998 {
999 PyObject *result;
1000
1001 result = list_extend(self, other);
1002 if (result == NULL)
1003 return result;
1004 Py_DECREF(result);
1005 Py_INCREF(self);
1006 return (PyObject *)self;
1007 }
1008
1009 /*[clinic input]
1010 list.pop
1011
1012 index: Py_ssize_t = -1
1013 /
1014
1015 Remove and return item at index (default last).
1016
1017 Raises IndexError if list is empty or index is out of range.
1018 [clinic start generated code]*/
1019
1020 static PyObject *
1021 list_pop_impl(PyListObject *self, Py_ssize_t index)
1022 /*[clinic end generated code: output=6bd69dcb3f17eca8 input=b83675976f329e6f]*/
1023 {
1024 PyObject *v;
1025 int status;
1026
1027 if (Py_SIZE(self) == 0) {
1028 /* Special-case most common failure cause */
1029 PyErr_SetString(PyExc_IndexError, "pop from empty list");
1030 return NULL;
1031 }
1032 if (index < 0)
1033 index += Py_SIZE(self);
1034 if (!valid_index(index, Py_SIZE(self))) {
1035 PyErr_SetString(PyExc_IndexError, "pop index out of range");
1036 return NULL;
1037 }
1038 v = self->ob_item[index];
1039 if (index == Py_SIZE(self) - 1) {
1040 status = list_resize(self, Py_SIZE(self) - 1);
1041 if (status >= 0)
1042 return v; /* and v now owns the reference the list had */
1043 else
1044 return NULL;
1045 }
1046 Py_INCREF(v);
1047 status = list_ass_slice(self, index, index+1, (PyObject *)NULL);
1048 if (status < 0) {
1049 Py_DECREF(v);
1050 return NULL;
1051 }
1052 return v;
1053 }
1054
1055 /* Reverse a slice of a list in place, from lo up to (exclusive) hi. */
1056 static void
1057 reverse_slice(PyObject **lo, PyObject **hi)
1058 {
1059 assert(lo && hi);
1060
1061 --hi;
1062 while (lo < hi) {
1063 PyObject *t = *lo;
1064 *lo = *hi;
1065 *hi = t;
1066 ++lo;
1067 --hi;
1068 }
1069 }
1070
1071 /* Lots of code for an adaptive, stable, natural mergesort. There are many
1072 * pieces to this algorithm; read listsort.txt for overviews and details.
1073 */
1074
1075 /* A sortslice contains a pointer to an array of keys and a pointer to
1076 * an array of corresponding values. In other words, keys[i]
1077 * corresponds with values[i]. If values == NULL, then the keys are
1078 * also the values.
1079 *
1080 * Several convenience routines are provided here, so that keys and
1081 * values are always moved in sync.
1082 */
1083
1084 typedef struct {
1085 PyObject **keys;
1086 PyObject **values;
1087 } sortslice;
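/* Added note: when list.sort() is given a key function, keys[i] holds the
 * computed key for values[i] and values[i] the original item; with no key
 * function, values is NULL and the keys are the items themselves, as the
 * comment above says. */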
1088
1089 Py_LOCAL_INLINE(void)
1090 sortslice_copy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j)
1091 {
1092 s1->keys[i] = s2->keys[j];
1093 if (s1->values != NULL)
1094 s1->values[i] = s2->values[j];
1095 }
1096
1097 Py_LOCAL_INLINE(void)
1098 sortslice_copy_incr(sortslice *dst, sortslice *src)
1099 {
1100 *dst->keys++ = *src->keys++;
1101 if (dst->values != NULL)
1102 *dst->values++ = *src->values++;
1103 }
1104
1105 Py_LOCAL_INLINE(void)
1106 sortslice_copy_decr(sortslice *dst, sortslice *src)
1107 {
1108 *dst->keys-- = *src->keys--;
1109 if (dst->values != NULL)
1110 *dst->values-- = *src->values--;
1111 }
1112
1113
1114 Py_LOCAL_INLINE(void)
1115 sortslice_memcpy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
1116 Py_ssize_t n)
1117 {
1118 memcpy(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
1119 if (s1->values != NULL)
1120 memcpy(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
1121 }
1122
1123 Py_LOCAL_INLINE(void)
1124 sortslice_memmove(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
1125 Py_ssize_t n)
1126 {
1127 memmove(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
1128 if (s1->values != NULL)
1129 memmove(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
1130 }
1131
1132 Py_LOCAL_INLINE(void)
1133 sortslice_advance(sortslice *slice, Py_ssize_t n)
1134 {
1135 slice->keys += n;
1136 if (slice->values != NULL)
1137 slice->values += n;
1138 }
1139
1140 /* Comparison function: ms->key_compare, which is set at run-time in
1141 * listsort_impl to optimize for various special cases.
1142 * Returns -1 on error, 1 if x < y, 0 if x >= y.
1143 */
1144
1145 #define ISLT(X, Y) (*(ms->key_compare))(X, Y, ms)
1146
1147 /* Compare X to Y via "<". Goto "fail" if the comparison raises an
1148 error. Else "k" is set to true iff X<Y, and an "if (k)" block is
1149 started. It makes more sense in context <wink>. X and Y are PyObject*s.
1150 */
1151 #define IFLT(X, Y) if ((k = ISLT(X, Y)) < 0) goto fail; \
1152 if (k)
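/* Typical use (illustrative), matching the shape in binarysort() below:
 *     IFLT(pivot, *p)
 *         r = p;
 *     else
 *         l = p+1;
 * i.e. the macro expands to an error check plus the opening of an "if". */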
1153
1154 /* The maximum number of entries in a MergeState's pending-runs stack.
1155 * This is enough to sort arrays of size up to about
1156 * 32 * phi ** MAX_MERGE_PENDING
1157 * where phi ~= 1.618. 85 is ridiculously large enough, good for an array
1158 * with 2**64 elements.
1159 */
1160 #define MAX_MERGE_PENDING 85
1161
1162 /* When we get into galloping mode, we stay there until both runs win less
1163 * often than MIN_GALLOP consecutive times. See listsort.txt for more info.
1164 */
1165 #define MIN_GALLOP 7
1166
1167 /* Avoid malloc for small temp arrays. */
1168 #define MERGESTATE_TEMP_SIZE 256
1169
1170 /* One MergeState exists on the stack per invocation of mergesort. It's just
1171 * a convenient way to pass state around among the helper functions.
1172 */
1173 struct s_slice {
1174 sortslice base;
1175 Py_ssize_t len;
1176 };
1177
1178 typedef struct s_MergeState MergeState;
1179 struct s_MergeState {
1180 /* This controls when we get *into* galloping mode. It's initialized
1181 * to MIN_GALLOP. merge_lo and merge_hi tend to nudge it higher for
1182 * random data, and lower for highly structured data.
1183 */
1184 Py_ssize_t min_gallop;
1185
1186 /* 'a' is temp storage to help with merges. It contains room for
1187 * alloced entries.
1188 */
1189 sortslice a; /* may point to temparray below */
1190 Py_ssize_t alloced;
1191
1192 /* A stack of n pending runs yet to be merged. Run #i starts at
1193 * address base[i] and extends for len[i] elements. It's always
1194 * true (so long as the indices are in bounds) that
1195 *
1196 * pending[i].base + pending[i].len == pending[i+1].base
1197 *
1198 * so we could cut the storage for this, but it's a minor amount,
1199 * and keeping all the info explicit simplifies the code.
1200 */
1201 int n;
1202 struct s_slice pending[MAX_MERGE_PENDING];
1203
1204 /* 'a' points to this when possible, rather than muck with malloc. */
1205 PyObject *temparray[MERGESTATE_TEMP_SIZE];
1206
1207 /* This is the function we will use to compare two keys,
1208 * even when none of our special cases apply and we have to use
1209 * safe_object_compare. */
1210 int (*key_compare)(PyObject *, PyObject *, MergeState *);
1211
1212 /* This function is used by unsafe_object_compare to optimize comparisons
1213 * when we know our list is type-homogeneous but we can't assume anything else.
1214 * In the pre-sort check it is set equal to key->ob_type->tp_richcompare */
1215 PyObject *(*key_richcompare)(PyObject *, PyObject *, int);
1216
1217 /* This function is used by unsafe_tuple_compare to compare the first elements
1218 * of tuples. It may be set to safe_object_compare, but the idea is that hopefully
1219 * we can assume more, and use one of the special-case compares. */
1220 int (*tuple_elem_compare)(PyObject *, PyObject *, MergeState *);
1221 };
1222
1223 /* binarysort is the best method for sorting small arrays: it does
1224 few compares, but can do data movement quadratic in the number of
1225 elements.
1226 [lo, hi) is a contiguous slice of a list, and is sorted via
1227 binary insertion. This sort is stable.
1228 On entry, must have lo <= start <= hi, and that [lo, start) is already
1229 sorted (pass start == lo if you don't know!).
1230 If islt() complains return -1, else 0.
1231 Even in case of error, the output slice will be some permutation of
1232 the input (nothing is lost or duplicated).
1233 */
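/* Example (illustrative): with keys 2, 4, 6, 5 and start pointing at the 5,
 * the binary search over the sorted prefix 2, 4, 6 places 5 between 4 and 6,
 * sliding only the 6 one slot to the right. */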
1234 static int
1235 binarysort(MergeState *ms, sortslice lo, PyObject **hi, PyObject **start)
1236 {
1237 Py_ssize_t k;
1238 PyObject **l, **p, **r;
1239 PyObject *pivot;
1240
1241 assert(lo.keys <= start && start <= hi);
1242 /* assert [lo, start) is sorted */
1243 if (lo.keys == start)
1244 ++start;
1245 for (; start < hi; ++start) {
1246 /* set l to where *start belongs */
1247 l = lo.keys;
1248 r = start;
1249 pivot = *r;
1250 /* Invariants:
1251 * pivot >= all in [lo, l).
1252 * pivot < all in [r, start).
1253 * The second is vacuously true at the start.
1254 */
1255 assert(l < r);
1256 do {
1257 p = l + ((r - l) >> 1);
1258 IFLT(pivot, *p)
1259 r = p;
1260 else
1261 l = p+1;
1262 } while (l < r);
1263 assert(l == r);
1264 /* The invariants still hold, so pivot >= all in [lo, l) and
1265 pivot < all in [l, start), so pivot belongs at l. Note
1266 that if there are elements equal to pivot, l points to the
1267 first slot after them -- that's why this sort is stable.
1268 Slide over to make room.
1269 Caution: using memmove is much slower under MSVC 5;
1270 we're not usually moving many slots. */
1271 for (p = start; p > l; --p)
1272 *p = *(p-1);
1273 *l = pivot;
1274 if (lo.values != NULL) {
1275 Py_ssize_t offset = lo.values - lo.keys;
1276 p = start + offset;
1277 pivot = *p;
1278 l += offset;
1279 for (p = start + offset; p > l; --p)
1280 *p = *(p-1);
1281 *l = pivot;
1282 }
1283 }
1284 return 0;
1285
1286 fail:
1287 return -1;
1288 }
1289
1290 /*
1291 Return the length of the run beginning at lo, in the slice [lo, hi). lo < hi
1292 is required on entry. "A run" is the longest ascending sequence, with
1293
1294 lo[0] <= lo[1] <= lo[2] <= ...
1295
1296 or the longest descending sequence, with
1297
1298 lo[0] > lo[1] > lo[2] > ...
1299
1300 Boolean *descending is set to 0 in the former case, or to 1 in the latter.
1301 For its intended use in a stable mergesort, the strictness of the defn of
1302 "descending" is needed so that the caller can safely reverse a descending
1303 sequence without violating stability (strict > ensures there are no equal
1304 elements to get out of order).
1305
1306 Returns -1 in case of error.
1307 */
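/* Example (illustrative): for keys 5, 3, 2, 4 the leading run is 5 > 3 > 2,
 * so count_run() returns 3 with *descending set to 1; the strictness of ">"
 * lets the caller reverse that prefix without reordering equal elements. */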
1308 static Py_ssize_t
1309 count_run(MergeState *ms, PyObject **lo, PyObject **hi, int *descending)
1310 {
1311 Py_ssize_t k;
1312 Py_ssize_t n;
1313
1314 assert(lo < hi);
1315 *descending = 0;
1316 ++lo;
1317 if (lo == hi)
1318 return 1;
1319
1320 n = 2;
1321 IFLT(*lo, *(lo-1)) {
1322 *descending = 1;
1323 for (lo = lo+1; lo < hi; ++lo, ++n) {
1324 IFLT(*lo, *(lo-1))
1325 ;
1326 else
1327 break;
1328 }
1329 }
1330 else {
1331 for (lo = lo+1; lo < hi; ++lo, ++n) {
1332 IFLT(*lo, *(lo-1))
1333 break;
1334 }
1335 }
1336
1337 return n;
1338 fail:
1339 return -1;
1340 }
1341
1342 /*
1343 Locate the proper position of key in a sorted vector; if the vector contains
1344 an element equal to key, return the position immediately to the left of
1345 the leftmost equal element. [gallop_right() does the same except returns
1346 the position to the right of the rightmost equal element (if any).]
1347
1348 "a" is a sorted vector with n elements, starting at a[0]. n must be > 0.
1349
1350 "hint" is an index at which to begin the search, 0 <= hint < n. The closer
1351 hint is to the final result, the faster this runs.
1352
1353 The return value is the int k in 0..n such that
1354
1355 a[k-1] < key <= a[k]
1356
1357 pretending that *(a-1) is minus infinity and a[n] is plus infinity. IOW,
1358 key belongs at index k; or, IOW, the first k elements of a should precede
1359 key, and the last n-k should follow key.
1360
1361 Returns -1 on error. See listsort.txt for info on the method.
1362 */
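/* Worked example (illustrative): in a = {10, 20, 20, 30} with key 20,
 * gallop_left() returns 1 (just left of the run of equal elements), while
 * gallop_right() below returns 3 (just right of it). */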
1363 static Py_ssize_t
1364 gallop_left(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
1365 {
1366 Py_ssize_t ofs;
1367 Py_ssize_t lastofs;
1368 Py_ssize_t k;
1369
1370 assert(key && a && n > 0 && hint >= 0 && hint < n);
1371
1372 a += hint;
1373 lastofs = 0;
1374 ofs = 1;
1375 IFLT(*a, key) {
1376 /* a[hint] < key -- gallop right, until
1377 * a[hint + lastofs] < key <= a[hint + ofs]
1378 */
1379 const Py_ssize_t maxofs = n - hint; /* &a[n-1] is highest */
1380 while (ofs < maxofs) {
1381 IFLT(a[ofs], key) {
1382 lastofs = ofs;
1383 assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
1384 ofs = (ofs << 1) + 1;
1385 }
1386 else /* key <= a[hint + ofs] */
1387 break;
1388 }
1389 if (ofs > maxofs)
1390 ofs = maxofs;
1391 /* Translate back to offsets relative to &a[0]. */
1392 lastofs += hint;
1393 ofs += hint;
1394 }
1395 else {
1396 /* key <= a[hint] -- gallop left, until
1397 * a[hint - ofs] < key <= a[hint - lastofs]
1398 */
1399 const Py_ssize_t maxofs = hint + 1; /* &a[0] is lowest */
1400 while (ofs < maxofs) {
1401 IFLT(*(a-ofs), key)
1402 break;
1403 /* key <= a[hint - ofs] */
1404 lastofs = ofs;
1405 assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
1406 ofs = (ofs << 1) + 1;
1407 }
1408 if (ofs > maxofs)
1409 ofs = maxofs;
1410 /* Translate back to positive offsets relative to &a[0]. */
1411 k = lastofs;
1412 lastofs = hint - ofs;
1413 ofs = hint - k;
1414 }
1415 a -= hint;
1416
1417 assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
1418 /* Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
1419 * right of lastofs but no farther right than ofs. Do a binary
1420 * search, with invariant a[lastofs-1] < key <= a[ofs].
1421 */
1422 ++lastofs;
1423 while (lastofs < ofs) {
1424 Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
1425
1426 IFLT(a[m], key)
1427 lastofs = m+1; /* a[m] < key */
1428 else
1429 ofs = m; /* key <= a[m] */
1430 }
1431 assert(lastofs == ofs); /* so a[ofs-1] < key <= a[ofs] */
1432 return ofs;
1433
1434 fail:
1435 return -1;
1436 }
1437
1438 /*
1439 Exactly like gallop_left(), except that if key already exists in a[0:n],
1440 finds the position immediately to the right of the rightmost equal value.
1441
1442 The return value is the int k in 0..n such that
1443
1444 a[k-1] <= key < a[k]
1445
1446 or -1 if error.
1447
1448 The code duplication is massive, but this is enough different given that
1449 we're sticking to "<" comparisons that it's much harder to follow if
1450 written as one routine with yet another "left or right?" flag.
1451 */
1452 static Py_ssize_t
1453 gallop_right(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
1454 {
1455 Py_ssize_t ofs;
1456 Py_ssize_t lastofs;
1457 Py_ssize_t k;
1458
1459 assert(key && a && n > 0 && hint >= 0 && hint < n);
1460
1461 a += hint;
1462 lastofs = 0;
1463 ofs = 1;
1464 IFLT(key, *a) {
1465 /* key < a[hint] -- gallop left, until
1466 * a[hint - ofs] <= key < a[hint - lastofs]
1467 */
1468 const Py_ssize_t maxofs = hint + 1; /* &a[0] is lowest */
1469 while (ofs < maxofs) {
1470 IFLT(key, *(a-ofs)) {
1471 lastofs = ofs;
1472 assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
1473 ofs = (ofs << 1) + 1;
1474 }
1475 else /* a[hint - ofs] <= key */
1476 break;
1477 }
1478 if (ofs > maxofs)
1479 ofs = maxofs;
1480 /* Translate back to positive offsets relative to &a[0]. */
1481 k = lastofs;
1482 lastofs = hint - ofs;
1483 ofs = hint - k;
1484 }
1485 else {
1486 /* a[hint] <= key -- gallop right, until
1487 * a[hint + lastofs] <= key < a[hint + ofs]
1488 */
1489 const Py_ssize_t maxofs = n - hint; /* &a[n-1] is highest */
1490 while (ofs < maxofs) {
1491 IFLT(key, a[ofs])
1492 break;
1493 /* a[hint + ofs] <= key */
1494 lastofs = ofs;
1495 assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
1496 ofs = (ofs << 1) + 1;
1497 }
1498 if (ofs > maxofs)
1499 ofs = maxofs;
1500 /* Translate back to offsets relative to &a[0]. */
1501 lastofs += hint;
1502 ofs += hint;
1503 }
1504 a -= hint;
1505
1506 assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
1507 /* Now a[lastofs] <= key < a[ofs], so key belongs somewhere to the
1508 * right of lastofs but no farther right than ofs. Do a binary
1509 * search, with invariant a[lastofs-1] <= key < a[ofs].
1510 */
1511 ++lastofs;
1512 while (lastofs < ofs) {
1513 Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
1514
1515 IFLT(key, a[m])
1516 ofs = m; /* key < a[m] */
1517 else
1518 lastofs = m+1; /* a[m] <= key */
1519 }
1520 assert(lastofs == ofs); /* so a[ofs-1] <= key < a[ofs] */
1521 return ofs;
1522
1523 fail:
1524 return -1;
1525 }
1526
1527 /* Conceptually a MergeState's constructor. */
1528 static void
1529 merge_init(MergeState *ms, Py_ssize_t list_size, int has_keyfunc)
1530 {
1531 assert(ms != NULL);
1532 if (has_keyfunc) {
1533 /* The temporary space for merging will need at most half the list
1534 * size rounded up. Use the minimum possible space so we can use the
1535 * rest of temparray for other things. In particular, if there is
1536 * enough extra space, listsort() will use it to store the keys.
1537 */
1538 ms->alloced = (list_size + 1) / 2;
1539
1540 /* ms->alloced describes how many keys will be stored at
1541 ms->temparray, but we also need to store the values. Hence,
1542 ms->alloced is capped at half of MERGESTATE_TEMP_SIZE. */
1543 if (MERGESTATE_TEMP_SIZE / 2 < ms->alloced)
1544 ms->alloced = MERGESTATE_TEMP_SIZE / 2;
1545 ms->a.values = &ms->temparray[ms->alloced];
1546 }
1547 else {
1548 ms->alloced = MERGESTATE_TEMP_SIZE;
1549 ms->a.values = NULL;
1550 }
1551 ms->a.keys = ms->temparray;
1552 ms->n = 0;
1553 ms->min_gallop = MIN_GALLOP;
1554 }
1555
1556 /* Free all the temp memory owned by the MergeState. This must be called
1557 * when you're done with a MergeState, and may be called before then if
1558 * you want to free the temp memory early.
1559 */
1560 static void
1561 merge_freemem(MergeState *ms)
1562 {
1563 assert(ms != NULL);
1564 if (ms->a.keys != ms->temparray)
1565 PyMem_Free(ms->a.keys);
1566 }
1567
1568 /* Ensure enough temp memory for 'need' array slots is available.
1569 * Returns 0 on success and -1 if the memory can't be gotten.
1570 */
1571 static int
1572 merge_getmem(MergeState *ms, Py_ssize_t need)
1573 {
1574 int multiplier;
1575
1576 assert(ms != NULL);
1577 if (need <= ms->alloced)
1578 return 0;
1579
1580 multiplier = ms->a.values != NULL ? 2 : 1;
1581
1582 /* Don't realloc! That can cost cycles to copy the old data, but
1583 * we don't care what's in the block.
1584 */
1585 merge_freemem(ms);
1586 if ((size_t)need > PY_SSIZE_T_MAX / sizeof(PyObject *) / multiplier) {
1587 PyErr_NoMemory();
1588 return -1;
1589 }
1590 ms->a.keys = (PyObject **)PyMem_Malloc(multiplier * need
1591 * sizeof(PyObject *));
1592 if (ms->a.keys != NULL) {
1593 ms->alloced = need;
1594 if (ms->a.values != NULL)
1595 ms->a.values = &ms->a.keys[need];
1596 return 0;
1597 }
1598 PyErr_NoMemory();
1599 return -1;
1600 }
1601 #define MERGE_GETMEM(MS, NEED) ((NEED) <= (MS)->alloced ? 0 : \
1602 merge_getmem(MS, NEED))
1603
1604 /* Merge the na elements starting at ssa with the nb elements starting at
1605 * ssb.keys = ssa.keys + na in a stable way, in-place. na and nb must be > 0.
1606 * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
1607 * should have na <= nb. See listsort.txt for more info. Return 0 if
1608 * successful, -1 if error.
1609 */
1610 static Py_ssize_t
1611 merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na,
1612 sortslice ssb, Py_ssize_t nb)
1613 {
1614 Py_ssize_t k;
1615 sortslice dest;
1616 int result = -1; /* guilty until proved innocent */
1617 Py_ssize_t min_gallop;
1618
1619 assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
1620 assert(ssa.keys + na == ssb.keys);
1621 if (MERGE_GETMEM(ms, na) < 0)
1622 return -1;
1623 sortslice_memcpy(&ms->a, 0, &ssa, 0, na);
1624 dest = ssa;
1625 ssa = ms->a;
1626
1627 sortslice_copy_incr(&dest, &ssb);
1628 --nb;
1629 if (nb == 0)
1630 goto Succeed;
1631 if (na == 1)
1632 goto CopyB;
1633
1634 min_gallop = ms->min_gallop;
1635 for (;;) {
1636 Py_ssize_t acount = 0; /* # of times A won in a row */
1637 Py_ssize_t bcount = 0; /* # of times B won in a row */
1638
1639 /* Do the straightforward thing until (if ever) one run
1640 * appears to win consistently.
1641 */
1642 for (;;) {
1643 assert(na > 1 && nb > 0);
1644 k = ISLT(ssb.keys[0], ssa.keys[0]);
1645 if (k) {
1646 if (k < 0)
1647 goto Fail;
1648 sortslice_copy_incr(&dest, &ssb);
1649 ++bcount;
1650 acount = 0;
1651 --nb;
1652 if (nb == 0)
1653 goto Succeed;
1654 if (bcount >= min_gallop)
1655 break;
1656 }
1657 else {
1658 sortslice_copy_incr(&dest, &ssa);
1659 ++acount;
1660 bcount = 0;
1661 --na;
1662 if (na == 1)
1663 goto CopyB;
1664 if (acount >= min_gallop)
1665 break;
1666 }
1667 }
1668
1669 /* One run is winning so consistently that galloping may
1670 * be a huge win. So try that, and continue galloping until
1671 * (if ever) neither run appears to be winning consistently
1672 * anymore.
1673 */
1674 ++min_gallop;
1675 do {
1676 assert(na > 1 && nb > 0);
1677 min_gallop -= min_gallop > 1;
1678 ms->min_gallop = min_gallop;
1679 k = gallop_right(ms, ssb.keys[0], ssa.keys, na, 0);
1680 acount = k;
1681 if (k) {
1682 if (k < 0)
1683 goto Fail;
1684 sortslice_memcpy(&dest, 0, &ssa, 0, k);
1685 sortslice_advance(&dest, k);
1686 sortslice_advance(&ssa, k);
1687 na -= k;
1688 if (na == 1)
1689 goto CopyB;
1690 /* na==0 is impossible now if the comparison
1691 * function is consistent, but we can't assume
1692 * that it is.
1693 */
1694 if (na == 0)
1695 goto Succeed;
1696 }
1697 sortslice_copy_incr(&dest, &ssb);
1698 --nb;
1699 if (nb == 0)
1700 goto Succeed;
1701
1702 k = gallop_left(ms, ssa.keys[0], ssb.keys, nb, 0);
1703 bcount = k;
1704 if (k) {
1705 if (k < 0)
1706 goto Fail;
1707 sortslice_memmove(&dest, 0, &ssb, 0, k);
1708 sortslice_advance(&dest, k);
1709 sortslice_advance(&ssb, k);
1710 nb -= k;
1711 if (nb == 0)
1712 goto Succeed;
1713 }
1714 sortslice_copy_incr(&dest, &ssa);
1715 --na;
1716 if (na == 1)
1717 goto CopyB;
1718 } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
1719 ++min_gallop; /* penalize it for leaving galloping mode */
1720 ms->min_gallop = min_gallop;
1721 }
1722 Succeed:
1723 result = 0;
1724 Fail:
1725 if (na)
1726 sortslice_memcpy(&dest, 0, &ssa, 0, na);
1727 return result;
1728 CopyB:
1729 assert(na == 1 && nb > 0);
1730 /* The last element of ssa belongs at the end of the merge. */
1731 sortslice_memmove(&dest, 0, &ssb, 0, nb);
1732 sortslice_copy(&dest, nb, &ssa, 0);
1733 return 0;
1734 }
1735
1736 /* Merge the na elements starting at pa with the nb elements starting at
1737 * ssb.keys = ssa.keys + na in a stable way, in-place. na and nb must be > 0.
1738 * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
1739 * should have na >= nb. See listsort.txt for more info. Return 0 if
1740 * successful, -1 if error.
1741 */
1742 static Py_ssize_t
1743 merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na,
1744 sortslice ssb, Py_ssize_t nb)
1745 {
1746 Py_ssize_t k;
1747 sortslice dest, basea, baseb;
1748 int result = -1; /* guilty until proved innocent */
1749 Py_ssize_t min_gallop;
1750
1751 assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
1752 assert(ssa.keys + na == ssb.keys);
1753 if (MERGE_GETMEM(ms, nb) < 0)
1754 return -1;
1755 dest = ssb;
1756 sortslice_advance(&dest, nb-1);
1757 sortslice_memcpy(&ms->a, 0, &ssb, 0, nb);
1758 basea = ssa;
1759 baseb = ms->a;
1760 ssb.keys = ms->a.keys + nb - 1;
1761 if (ssb.values != NULL)
1762 ssb.values = ms->a.values + nb - 1;
1763 sortslice_advance(&ssa, na - 1);
1764
1765 sortslice_copy_decr(&dest, &ssa);
1766 --na;
1767 if (na == 0)
1768 goto Succeed;
1769 if (nb == 1)
1770 goto CopyA;
1771
1772 min_gallop = ms->min_gallop;
1773 for (;;) {
1774 Py_ssize_t acount = 0; /* # of times A won in a row */
1775 Py_ssize_t bcount = 0; /* # of times B won in a row */
1776
1777 /* Do the straightforward thing until (if ever) one run
1778 * appears to win consistently.
1779 */
1780 for (;;) {
1781 assert(na > 0 && nb > 1);
1782 k = ISLT(ssb.keys[0], ssa.keys[0]);
1783 if (k) {
1784 if (k < 0)
1785 goto Fail;
1786 sortslice_copy_decr(&dest, &ssa);
1787 ++acount;
1788 bcount = 0;
1789 --na;
1790 if (na == 0)
1791 goto Succeed;
1792 if (acount >= min_gallop)
1793 break;
1794 }
1795 else {
1796 sortslice_copy_decr(&dest, &ssb);
1797 ++bcount;
1798 acount = 0;
1799 --nb;
1800 if (nb == 1)
1801 goto CopyA;
1802 if (bcount >= min_gallop)
1803 break;
1804 }
1805 }
1806
1807 /* One run is winning so consistently that galloping may
1808 * be a huge win. So try that, and continue galloping until
1809 * (if ever) neither run appears to be winning consistently
1810 * anymore.
1811 */
1812 ++min_gallop;
1813 do {
1814 assert(na > 0 && nb > 1);
1815 min_gallop -= min_gallop > 1;
1816 ms->min_gallop = min_gallop;
1817 k = gallop_right(ms, ssb.keys[0], basea.keys, na, na-1);
1818 if (k < 0)
1819 goto Fail;
1820 k = na - k;
1821 acount = k;
1822 if (k) {
1823 sortslice_advance(&dest, -k);
1824 sortslice_advance(&ssa, -k);
1825 sortslice_memmove(&dest, 1, &ssa, 1, k);
1826 na -= k;
1827 if (na == 0)
1828 goto Succeed;
1829 }
1830 sortslice_copy_decr(&dest, &ssb);
1831 --nb;
1832 if (nb == 1)
1833 goto CopyA;
1834
1835 k = gallop_left(ms, ssa.keys[0], baseb.keys, nb, nb-1);
1836 if (k < 0)
1837 goto Fail;
1838 k = nb - k;
1839 bcount = k;
1840 if (k) {
1841 sortslice_advance(&dest, -k);
1842 sortslice_advance(&ssb, -k);
1843 sortslice_memcpy(&dest, 1, &ssb, 1, k);
1844 nb -= k;
1845 if (nb == 1)
1846 goto CopyA;
1847 /* nb==0 is impossible now if the comparison
1848 * function is consistent, but we can't assume
1849 * that it is.
1850 */
1851 if (nb == 0)
1852 goto Succeed;
1853 }
1854 sortslice_copy_decr(&dest, &ssa);
1855 --na;
1856 if (na == 0)
1857 goto Succeed;
1858 } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
1859 ++min_gallop; /* penalize it for leaving galloping mode */
1860 ms->min_gallop = min_gallop;
1861 }
1862 Succeed:
1863 result = 0;
1864 Fail:
1865 if (nb)
1866 sortslice_memcpy(&dest, -(nb-1), &baseb, 0, nb);
1867 return result;
1868 CopyA:
1869 assert(nb == 1 && na > 0);
1870 /* The first element of ssb belongs at the front of the merge. */
1871 sortslice_memmove(&dest, 1-na, &ssa, 1-na, na);
1872 sortslice_advance(&dest, -na);
1873 sortslice_advance(&ssa, -na);
1874 sortslice_copy(&dest, 0, &ssb, 0);
1875 return 0;
1876 }
1877
1878 /* Merge the two runs at stack indices i and i+1.
1879 * Returns 0 on success, -1 on error.
1880 */
1881 static Py_ssize_t
1882 merge_at(MergeState *ms, Py_ssize_t i)
1883 {
1884 sortslice ssa, ssb;
1885 Py_ssize_t na, nb;
1886 Py_ssize_t k;
1887
1888 assert(ms != NULL);
1889 assert(ms->n >= 2);
1890 assert(i >= 0);
1891 assert(i == ms->n - 2 || i == ms->n - 3);
1892
1893 ssa = ms->pending[i].base;
1894 na = ms->pending[i].len;
1895 ssb = ms->pending[i+1].base;
1896 nb = ms->pending[i+1].len;
1897 assert(na > 0 && nb > 0);
1898 assert(ssa.keys + na == ssb.keys);
1899
1900 /* Record the length of the combined runs; if i is the 3rd-last
1901 * run now, also slide over the last run (which isn't involved
1902 * in this merge). The current run i+1 goes away in any case.
1903 */
1904 ms->pending[i].len = na + nb;
1905 if (i == ms->n - 3)
1906 ms->pending[i+1] = ms->pending[i+2];
1907 --ms->n;
1908
1909 /* Where does b start in a? Elements in a before that can be
1910 * ignored (already in place).
1911 */
1912 k = gallop_right(ms, *ssb.keys, ssa.keys, na, 0);
1913 if (k < 0)
1914 return -1;
1915 sortslice_advance(&ssa, k);
1916 na -= k;
1917 if (na == 0)
1918 return 0;
1919
1920 /* Where does a end in b? Elements in b after that can be
1921 * ignored (already in place).
1922 */
1923 nb = gallop_left(ms, ssa.keys[na-1], ssb.keys, nb, nb-1);
1924 if (nb <= 0)
1925 return nb;
1926
1927 /* Merge what remains of the runs, using a temp array with
1928 * min(na, nb) elements.
1929 */
1930 if (na <= nb)
1931 return merge_lo(ms, ssa, na, ssb, nb);
1932 else
1933 return merge_hi(ms, ssa, na, ssb, nb);
1934 }
1935
1936 /* Examine the stack of runs waiting to be merged, merging adjacent runs
1937 * until the stack invariants are re-established:
1938 *
1939 * 1. len[-3] > len[-2] + len[-1]
1940 * 2. len[-2] > len[-1]
1941 *
1942 * See listsort.txt for more info.
1943 *
1944 * Returns 0 on success, -1 on error.
1945 */
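/* Example (illustrative): two pending runs of lengths 3 and 5 violate
 * invariant 2 (3 > 5 is false), so they are merged at once into one run of
 * length 8; runs of lengths 5 and 3 satisfy it and stay on the stack. */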
1946 static int
1947 merge_collapse(MergeState *ms)
1948 {
1949 struct s_slice *p = ms->pending;
1950
1951 assert(ms);
1952 while (ms->n > 1) {
1953 Py_ssize_t n = ms->n - 2;
1954 if ((n > 0 && p[n-1].len <= p[n].len + p[n+1].len) ||
1955 (n > 1 && p[n-2].len <= p[n-1].len + p[n].len)) {
1956 if (p[n-1].len < p[n+1].len)
1957 --n;
1958 if (merge_at(ms, n) < 0)
1959 return -1;
1960 }
1961 else if (p[n].len <= p[n+1].len) {
1962 if (merge_at(ms, n) < 0)
1963 return -1;
1964 }
1965 else
1966 break;
1967 }
1968 return 0;
1969 }
1970
1971 /* Regardless of invariants, merge all runs on the stack until only one
1972 * remains. This is used at the end of the mergesort.
1973 *
1974 * Returns 0 on success, -1 on error.
1975 */
1976 static int
1977 merge_force_collapse(MergeState *ms)
1978 {
1979 struct s_slice *p = ms->pending;
1980
1981 assert(ms);
1982 while (ms->n > 1) {
1983 Py_ssize_t n = ms->n - 2;
1984 if (n > 0 && p[n-1].len < p[n+1].len)
1985 --n;
1986 if (merge_at(ms, n) < 0)
1987 return -1;
1988 }
1989 return 0;
1990 }
1991
1992 /* Compute a good value for the minimum run length; natural runs shorter
1993 * than this are boosted artificially via binary insertion.
1994 *
1995 * If n < 64, return n (it's too small to bother with fancy stuff).
1996 * Else if n is an exact power of 2, return 32.
1997 * Else return an int k, 32 <= k <= 64, such that n/k is close to, but
1998 * strictly less than, an exact power of 2.
1999 *
2000 * See listsort.txt for more info.
2001 */
2002 static Py_ssize_t
2003 merge_compute_minrun(Py_ssize_t n)
2004 {
2005 Py_ssize_t r = 0; /* becomes 1 if any 1 bits are shifted off */
2006
2007 assert(n >= 0);
2008 while (n >= 64) {
2009 r |= n & 1;
2010 n >>= 1;
2011 }
2012 return n + r;
2013 }
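/* Worked example: for n = 1000 (binary 1111101000), the loop shifts until
 * n < 64: 1000 -> 500 -> 250 -> 125 -> 62, and r becomes 1 because the
 * last shift drops a 1 bit. The result is 62 + 1 = 63, and 1000/63 is
 * just under 16, a power of 2, which keeps the final merges balanced.
 */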
2014
2015 static void
2016 reverse_sortslice(sortslice *s, Py_ssize_t n)
2017 {
2018 reverse_slice(s->keys, &s->keys[n]);
2019 if (s->values != NULL)
2020 reverse_slice(s->values, &s->values[n]);
2021 }
2022
2023 /* Here we define custom comparison functions to optimize for the cases one commonly
2024 * encounters in practice: homogeneous lists, often of one of the basic types. */
2025
2026 /* This struct holds the comparison function and helper functions
2027 * selected in the pre-sort check. */
2028
2029 /* These are the special case compare functions.
2030 * ms->key_compare will always point to one of these: */
2031
2032 /* Heterogeneous compare: default, always safe to fall back on. */
2033 static int
2034 safe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
2035 {
2036 /* No assumptions necessary! */
2037 return PyObject_RichCompareBool(v, w, Py_LT);
2038 }
2039
2040 /* Homogeneous compare: safe for any two comparable objects of the same type.
2041 * (ms->key_richcompare is set to ob_type->tp_richcompare in the
2042 * pre-sort check.)
2043 */
2044 static int
2045 unsafe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
2046 {
2047 PyObject *res_obj; int res;
2048
2049 /* No assumptions, because we check first: */
2050 if (v->ob_type->tp_richcompare != ms->key_richcompare)
2051 return PyObject_RichCompareBool(v, w, Py_LT);
2052
2053 assert(ms->key_richcompare != NULL);
2054 res_obj = (*(ms->key_richcompare))(v, w, Py_LT);
2055
2056 if (res_obj == Py_NotImplemented) {
2057 Py_DECREF(res_obj);
2058 return PyObject_RichCompareBool(v, w, Py_LT);
2059 }
2060 if (res_obj == NULL)
2061 return -1;
2062
2063 if (PyBool_Check(res_obj)) {
2064 res = (res_obj == Py_True);
2065 }
2066 else {
2067 res = PyObject_IsTrue(res_obj);
2068 }
2069 Py_DECREF(res_obj);
2070
2071 /* Note that we can't assert
2072 * res == PyObject_RichCompareBool(v, w, Py_LT);
2073 * because of evil compare functions like this:
2074 * lambda a, b: int(random.random() * 3) - 1
2075 * (which is actually in test_sort.py) */
2076 return res;
2077 }
2078
2079 /* Latin string compare: safe for any two latin (one byte per char) strings. */
2080 static int
2081 unsafe_latin_compare(PyObject *v, PyObject *w, MergeState *ms)
2082 {
2083 Py_ssize_t len;
2084 int res;
2085
2086 /* Modified from Objects/unicodeobject.c:unicode_compare, assuming: */
2087 assert(v->ob_type == w->ob_type);
2088 assert(v->ob_type == &PyUnicode_Type);
2089 assert(PyUnicode_KIND(v) == PyUnicode_KIND(w));
2090 assert(PyUnicode_KIND(v) == PyUnicode_1BYTE_KIND);
2091
2092 len = Py_MIN(PyUnicode_GET_LENGTH(v), PyUnicode_GET_LENGTH(w));
2093 res = memcmp(PyUnicode_DATA(v), PyUnicode_DATA(w), len);
2094
2095 res = (res != 0 ?
2096 res < 0 :
2097 PyUnicode_GET_LENGTH(v) < PyUnicode_GET_LENGTH(w));
2098
2099 assert(res == PyObject_RichCompareBool(v, w, Py_LT));
2100 return res;
2101 }
2102
2103 /* Bounded int compare: compare any two longs that fit in a single machine word. */
2104 static int
2105 unsafe_long_compare(PyObject *v, PyObject *w, MergeState *ms)
2106 {
2107 PyLongObject *vl, *wl; sdigit v0, w0; int res;
2108
2109 /* Modified from Objects/longobject.c:long_compare, assuming: */
2110 assert(v->ob_type == w->ob_type);
2111 assert(v->ob_type == &PyLong_Type);
2112 assert(Py_ABS(Py_SIZE(v)) <= 1);
2113 assert(Py_ABS(Py_SIZE(w)) <= 1);
2114
2115 vl = (PyLongObject*)v;
2116 wl = (PyLongObject*)w;
2117
2118 v0 = Py_SIZE(vl) == 0 ? 0 : (sdigit)vl->ob_digit[0];
2119 w0 = Py_SIZE(wl) == 0 ? 0 : (sdigit)wl->ob_digit[0];
2120
2121 if (Py_SIZE(vl) < 0)
2122 v0 = -v0;
2123 if (Py_SIZE(wl) < 0)
2124 w0 = -w0;
2125
2126 res = v0 < w0;
2127 assert(res == PyObject_RichCompareBool(v, w, Py_LT));
2128 return res;
2129 }
2130
2131 /* Float compare: compare any two floats. */
2132 static int
2133 unsafe_float_compare(PyObject *v, PyObject *w, MergeState *ms)
2134 {
2135 int res;
2136
2137 /* Modified from Objects/floatobject.c:float_richcompare, assuming: */
2138 assert(v->ob_type == w->ob_type);
2139 assert(v->ob_type == &PyFloat_Type);
2140
2141 res = PyFloat_AS_DOUBLE(v) < PyFloat_AS_DOUBLE(w);
2142 assert(res == PyObject_RichCompareBool(v, w, Py_LT));
2143 return res;
2144 }
2145
2146 /* Tuple compare: compare *any* two tuples, using
2147 * ms->tuple_elem_compare to compare the first elements, which is set
2148 * using the same pre-sort check as we use for ms->key_compare,
2149 * but run on the list [x[0] for x in L]. This allows us to optimize compares
2150 * on two levels (as long as [x[0] for x in L] is type-homogeneous). The idea is
2151 * that most tuple compares don't involve x[1:]. */
2152 static int
2153 unsafe_tuple_compare(PyObject *v, PyObject *w, MergeState *ms)
2154 {
2155 PyTupleObject *vt, *wt;
2156 Py_ssize_t i, vlen, wlen;
2157 int k;
2158
2159 /* Modified from Objects/tupleobject.c:tuplerichcompare, assuming: */
2160 assert(v->ob_type == w->ob_type);
2161 assert(v->ob_type == &PyTuple_Type);
2162 assert(Py_SIZE(v) > 0);
2163 assert(Py_SIZE(w) > 0);
2164
2165 vt = (PyTupleObject *)v;
2166 wt = (PyTupleObject *)w;
2167
2168 vlen = Py_SIZE(vt);
2169 wlen = Py_SIZE(wt);
2170
2171 for (i = 0; i < vlen && i < wlen; i++) {
2172 k = PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_EQ);
2173 if (k < 0)
2174 return -1;
2175 if (!k)
2176 break;
2177 }
2178
2179 if (i >= vlen || i >= wlen)
2180 return vlen < wlen;
2181
2182 if (i == 0)
2183 return ms->tuple_elem_compare(vt->ob_item[i], wt->ob_item[i], ms);
2184 else
2185 return PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_LT);
2186 }
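/* For example, when sorting [(1, 'b'), (1, 'a'), (0, 'z')], comparing
 * (0, 'z') with (1, 'b') stops at i == 0 and uses the fast
 * ms->tuple_elem_compare on 0 and 1, while comparing (1, 'b') with
 * (1, 'a') finds the first elements equal and falls back to
 * PyObject_RichCompareBool on 'b' and 'a'.
 */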
2187
2188 /* An adaptive, stable, natural mergesort. See listsort.txt.
2189 * Returns Py_None on success, NULL on error. Even in case of error, the
2190 * list will be some permutation of its input state (nothing is lost or
2191 * duplicated).
2192 */
2193 /*[clinic input]
2194 list.sort
2195
2196 *
2197 key as keyfunc: object = None
2198 reverse: bool(accept={int}) = False
2199
2200 Sort the list in ascending order and return None.
2201
2202 The sort is in-place (i.e. the list itself is modified) and stable (i.e. the
2203 order of two equal elements is maintained).
2204
2205 If a key function is given, apply it once to each list item and sort them,
2206 ascending or descending, according to their function values.
2207
2208 The reverse flag can be set to sort in descending order.
2209 [clinic start generated code]*/
2210
2211 static PyObject *
2212 list_sort_impl(PyListObject *self, PyObject *keyfunc, int reverse)
2213 /*[clinic end generated code: output=57b9f9c5e23fbe42 input=cb56cd179a713060]*/
2214 {
2215 MergeState ms;
2216 Py_ssize_t nremaining;
2217 Py_ssize_t minrun;
2218 sortslice lo;
2219 Py_ssize_t saved_ob_size, saved_allocated;
2220 PyObject **saved_ob_item;
2221 PyObject **final_ob_item;
2222 PyObject *result = NULL; /* guilty until proved innocent */
2223 Py_ssize_t i;
2224 PyObject **keys;
2225
2226 assert(self != NULL);
2227 assert(PyList_Check(self));
2228 if (keyfunc == Py_None)
2229 keyfunc = NULL;
2230
2231 /* The list is temporarily made empty, so that mutations performed
2232 * by comparison functions can't affect the slice of memory we're
2233 * sorting (allowing mutations during sorting is a core-dump
2234 * factory, since ob_item may change).
2235 */
2236 saved_ob_size = Py_SIZE(self);
2237 saved_ob_item = self->ob_item;
2238 saved_allocated = self->allocated;
2239 Py_SIZE(self) = 0;
2240 self->ob_item = NULL;
2241 self->allocated = -1; /* any operation will reset it to >= 0 */
2242
2243 if (keyfunc == NULL) {
2244 keys = NULL;
2245 lo.keys = saved_ob_item;
2246 lo.values = NULL;
2247 }
2248 else {
2249 if (saved_ob_size < MERGESTATE_TEMP_SIZE/2)
2250 /* Leverage stack space we allocated but won't otherwise use */
2251 keys = &ms.temparray[saved_ob_size+1];
2252 else {
2253 keys = PyMem_MALLOC(sizeof(PyObject *) * saved_ob_size);
2254 if (keys == NULL) {
2255 PyErr_NoMemory();
2256 goto keyfunc_fail;
2257 }
2258 }
2259
2260 for (i = 0; i < saved_ob_size ; i++) {
2261 keys[i] = PyObject_CallFunctionObjArgs(keyfunc, saved_ob_item[i],
2262 NULL);
2263 if (keys[i] == NULL) {
2264 for (i=i-1 ; i>=0 ; i--)
2265 Py_DECREF(keys[i]);
2266 if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2)
2267 PyMem_FREE(keys);
2268 goto keyfunc_fail;
2269 }
2270 }
2271
2272 lo.keys = keys;
2273 lo.values = saved_ob_item;
2274 }
2275
2276
2277 /* The pre-sort check: here's where we decide which compare function to use.
2278 * How much optimization is safe? We test for homogeneity with respect to
2279 * several properties that are expensive to check at compare-time, and
2280 * set ms appropriately. */
2281 if (saved_ob_size > 1) {
2282 /* Assume the first element is representative of the whole list. */
2283 int keys_are_in_tuples = (lo.keys[0]->ob_type == &PyTuple_Type &&
2284 Py_SIZE(lo.keys[0]) > 0);
2285
2286 PyTypeObject* key_type = (keys_are_in_tuples ?
2287 PyTuple_GET_ITEM(lo.keys[0], 0)->ob_type :
2288 lo.keys[0]->ob_type);
2289
2290 int keys_are_all_same_type = 1;
2291 int strings_are_latin = 1;
2292 int ints_are_bounded = 1;
2293
2294 /* Prove that assumption by checking every key. */
2295 for (i=0; i < saved_ob_size; i++) {
2296
2297 if (keys_are_in_tuples &&
2298 !(lo.keys[i]->ob_type == &PyTuple_Type && Py_SIZE(lo.keys[i]) != 0)) {
2299 keys_are_in_tuples = 0;
2300 keys_are_all_same_type = 0;
2301 break;
2302 }
2303
2304 /* Note: for lists of tuples, key is the first element of the tuple
2305 * lo.keys[i], not lo.keys[i] itself! We verify type-homogeneity
2306 * for lists of tuples in the if-statement directly above. */
2307 PyObject *key = (keys_are_in_tuples ?
2308 PyTuple_GET_ITEM(lo.keys[i], 0) :
2309 lo.keys[i]);
2310
2311 if (key->ob_type != key_type) {
2312 keys_are_all_same_type = 0;
2313 /* If keys are in tuples, we must loop over the whole list to make
2314 sure all items are tuples */
2315 if (!keys_are_in_tuples) {
2316 break;
2317 }
2318 }
2319
2320 if (keys_are_all_same_type) {
2321 if (key_type == &PyLong_Type &&
2322 ints_are_bounded &&
2323 Py_ABS(Py_SIZE(key)) > 1) {
2324
2325 ints_are_bounded = 0;
2326 }
2327 else if (key_type == &PyUnicode_Type &&
2328 strings_are_latin &&
2329 PyUnicode_KIND(key) != PyUnicode_1BYTE_KIND) {
2330
2331 strings_are_latin = 0;
2332 }
2333 }
2334 }
2335
2336 /* Choose the best compare, given what we now know about the keys. */
2337 if (keys_are_all_same_type) {
2338
2339 if (key_type == &PyUnicode_Type && strings_are_latin) {
2340 ms.key_compare = unsafe_latin_compare;
2341 }
2342 else if (key_type == &PyLong_Type && ints_are_bounded) {
2343 ms.key_compare = unsafe_long_compare;
2344 }
2345 else if (key_type == &PyFloat_Type) {
2346 ms.key_compare = unsafe_float_compare;
2347 }
2348 else if ((ms.key_richcompare = key_type->tp_richcompare) != NULL) {
2349 ms.key_compare = unsafe_object_compare;
2350 }
2351 else {
2352 ms.key_compare = safe_object_compare;
2353 }
2354 }
2355 else {
2356 ms.key_compare = safe_object_compare;
2357 }
2358
2359 if (keys_are_in_tuples) {
2360 /* Make sure we're not dealing with tuples of tuples
2361 * (remember: here, key_type refers to the list [key[0] for key in keys]) */
2362 if (key_type == &PyTuple_Type) {
2363 ms.tuple_elem_compare = safe_object_compare;
2364 }
2365 else {
2366 ms.tuple_elem_compare = ms.key_compare;
2367 }
2368
2369 ms.key_compare = unsafe_tuple_compare;
2370 }
2371 }
2372 /* End of pre-sort check: ms is now set properly! */
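    /* For example: a list of one-byte (latin) str keys selects
     * unsafe_latin_compare, a list of ints that each fit in a single
     * CPython digit selects unsafe_long_compare, a list of floats selects
     * unsafe_float_compare, and a list of non-empty tuples selects
     * unsafe_tuple_compare with tuple_elem_compare chosen by the same
     * rules applied to the tuples' first elements. Other same-type keys
     * use unsafe_object_compare via the type's tp_richcompare, and mixed
     * types fall back to safe_object_compare.
     */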
2373
2374 merge_init(&ms, saved_ob_size, keys != NULL);
2375
2376 nremaining = saved_ob_size;
2377 if (nremaining < 2)
2378 goto succeed;
2379
2380 /* Reverse sort stability achieved by initially reversing the list,
2381 applying a stable forward sort, then reversing the final result. */
2382 if (reverse) {
2383 if (keys != NULL)
2384 reverse_slice(&keys[0], &keys[saved_ob_size]);
2385 reverse_slice(&saved_ob_item[0], &saved_ob_item[saved_ob_size]);
2386 }
2387
2388 /* March over the array once, left to right, finding natural runs,
2389 * and extending short natural runs to minrun elements.
2390 */
2391 minrun = merge_compute_minrun(nremaining);
2392 do {
2393 int descending;
2394 Py_ssize_t n;
2395
2396 /* Identify next run. */
2397 n = count_run(&ms, lo.keys, lo.keys + nremaining, &descending);
2398 if (n < 0)
2399 goto fail;
2400 if (descending)
2401 reverse_sortslice(&lo, n);
2402 /* If short, extend to min(minrun, nremaining). */
2403 if (n < minrun) {
2404 const Py_ssize_t force = nremaining <= minrun ?
2405 nremaining : minrun;
2406 if (binarysort(&ms, lo, lo.keys + force, lo.keys + n) < 0)
2407 goto fail;
2408 n = force;
2409 }
2410 /* Push run onto pending-runs stack, and maybe merge. */
2411 assert(ms.n < MAX_MERGE_PENDING);
2412 ms.pending[ms.n].base = lo;
2413 ms.pending[ms.n].len = n;
2414 ++ms.n;
2415 if (merge_collapse(&ms) < 0)
2416 goto fail;
2417 /* Advance to find next run. */
2418 sortslice_advance(&lo, n);
2419 nremaining -= n;
2420 } while (nremaining);
2421
2422 if (merge_force_collapse(&ms) < 0)
2423 goto fail;
2424 assert(ms.n == 1);
2425 assert(keys == NULL
2426 ? ms.pending[0].base.keys == saved_ob_item
2427 : ms.pending[0].base.keys == &keys[0]);
2428 assert(ms.pending[0].len == saved_ob_size);
2429 lo = ms.pending[0].base;
2430
2431 succeed:
2432 result = Py_None;
2433 fail:
2434 if (keys != NULL) {
2435 for (i = 0; i < saved_ob_size; i++)
2436 Py_DECREF(keys[i]);
2437 if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2)
2438 PyMem_FREE(keys);
2439 }
2440
2441 if (self->allocated != -1 && result != NULL) {
2442 /* The user mucked with the list during the sort,
2443 * and we don't already have another error to report.
2444 */
2445 PyErr_SetString(PyExc_ValueError, "list modified during sort");
2446 result = NULL;
2447 }
2448
2449 if (reverse && saved_ob_size > 1)
2450 reverse_slice(saved_ob_item, saved_ob_item + saved_ob_size);
2451
2452 merge_freemem(&ms);
2453
2454 keyfunc_fail:
2455 final_ob_item = self->ob_item;
2456 i = Py_SIZE(self);
2457 Py_SIZE(self) = saved_ob_size;
2458 self->ob_item = saved_ob_item;
2459 self->allocated = saved_allocated;
2460 if (final_ob_item != NULL) {
2461 /* we cannot use _list_clear() for this because it does not
2462 guarantee that the list is really empty when it returns */
2463 while (--i >= 0) {
2464 Py_XDECREF(final_ob_item[i]);
2465 }
2466 PyMem_FREE(final_ob_item);
2467 }
2468 Py_XINCREF(result);
2469 return result;
2470 }
2471 #undef IFLT
2472 #undef ISLT
2473
2474 int
2475 PyList_Sort(PyObject *v)
2476 {
2477 if (v == NULL || !PyList_Check(v)) {
2478 PyErr_BadInternalCall();
2479 return -1;
2480 }
2481 v = list_sort_impl((PyListObject *)v, NULL, 0);
2482 if (v == NULL)
2483 return -1;
2484 Py_DECREF(v);
2485 return 0;
2486 }
2487
2488 /*[clinic input]
2489 list.reverse
2490
2491 Reverse *IN PLACE*.
2492 [clinic start generated code]*/
2493
2494 static PyObject *
2495 list_reverse_impl(PyListObject *self)
2496 /*[clinic end generated code: output=482544fc451abea9 input=eefd4c3ae1bc9887]*/
2497 {
2498 if (Py_SIZE(self) > 1)
2499 reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
2500 Py_RETURN_NONE;
2501 }
2502
2503 int
2504 PyList_Reverse(PyObject *v)
2505 {
2506 PyListObject *self = (PyListObject *)v;
2507
2508 if (v == NULL || !PyList_Check(v)) {
2509 PyErr_BadInternalCall();
2510 return -1;
2511 }
2512 if (Py_SIZE(self) > 1)
2513 reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
2514 return 0;
2515 }
2516
2517 PyObject *
2518 PyList_AsTuple(PyObject *v)
2519 {
2520 if (v == NULL || !PyList_Check(v)) {
2521 PyErr_BadInternalCall();
2522 return NULL;
2523 }
2524 return _PyTuple_FromArray(((PyListObject *)v)->ob_item, Py_SIZE(v));
2525 }
2526
2527 /*[clinic input]
2528 list.index
2529
2530 value: object
2531 start: slice_index(accept={int}) = 0
2532 stop: slice_index(accept={int}, c_default="PY_SSIZE_T_MAX") = sys.maxsize
2533 /
2534
2535 Return first index of value.
2536
2537 Raises ValueError if the value is not present.
2538 [clinic start generated code]*/
2539
2540 static PyObject *
2541 list_index_impl(PyListObject *self, PyObject *value, Py_ssize_t start,
2542 Py_ssize_t stop)
2543 /*[clinic end generated code: output=ec51b88787e4e481 input=40ec5826303a0eb1]*/
2544 {
2545 Py_ssize_t i;
2546
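    /* Negative start/stop count from the end of the list and are clamped
     * to 0, matching slice semantics; e.g. start = -2 on a 5-element list
     * becomes 3. */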
2547 if (start < 0) {
2548 start += Py_SIZE(self);
2549 if (start < 0)
2550 start = 0;
2551 }
2552 if (stop < 0) {
2553 stop += Py_SIZE(self);
2554 if (stop < 0)
2555 stop = 0;
2556 }
2557 for (i = start; i < stop && i < Py_SIZE(self); i++) {
2558 int cmp = PyObject_RichCompareBool(self->ob_item[i], value, Py_EQ);
2559 if (cmp > 0)
2560 return PyLong_FromSsize_t(i);
2561 else if (cmp < 0)
2562 return NULL;
2563 }
2564 PyErr_Format(PyExc_ValueError, "%R is not in list", value);
2565 return NULL;
2566 }
2567
2568 /*[clinic input]
2569 list.count
2570
2571 value: object
2572 /
2573
2574 Return number of occurrences of value.
2575 [clinic start generated code]*/
2576
2577 static PyObject *
2578 list_count(PyListObject *self, PyObject *value)
2579 /*[clinic end generated code: output=b1f5d284205ae714 input=3bdc3a5e6f749565]*/
2580 {
2581 Py_ssize_t count = 0;
2582 Py_ssize_t i;
2583
2584 for (i = 0; i < Py_SIZE(self); i++) {
2585 int cmp = PyObject_RichCompareBool(self->ob_item[i], value, Py_EQ);
2586 if (cmp > 0)
2587 count++;
2588 else if (cmp < 0)
2589 return NULL;
2590 }
2591 return PyLong_FromSsize_t(count);
2592 }
2593
2594 /*[clinic input]
2595 list.remove
2596
2597 value: object
2598 /
2599
2600 Remove first occurrence of value.
2601
2602 Raises ValueError if the value is not present.
2603 [clinic start generated code]*/
2604
2605 static PyObject *
2606 list_remove(PyListObject *self, PyObject *value)
2607 /*[clinic end generated code: output=f087e1951a5e30d1 input=2dc2ba5bb2fb1f82]*/
2608 {
2609 Py_ssize_t i;
2610
2611 for (i = 0; i < Py_SIZE(self); i++) {
2612 int cmp = PyObject_RichCompareBool(self->ob_item[i], value, Py_EQ);
2613 if (cmp > 0) {
2614 if (list_ass_slice(self, i, i+1,
2615 (PyObject *)NULL) == 0)
2616 Py_RETURN_NONE;
2617 return NULL;
2618 }
2619 else if (cmp < 0)
2620 return NULL;
2621 }
2622 PyErr_SetString(PyExc_ValueError, "list.remove(x): x not in list");
2623 return NULL;
2624 }
2625
2626 static int
2627 list_traverse(PyListObject *o, visitproc visit, void *arg)
2628 {
2629 Py_ssize_t i;
2630
2631 for (i = Py_SIZE(o); --i >= 0; )
2632 Py_VISIT(o->ob_item[i]);
2633 return 0;
2634 }
2635
2636 static PyObject *
2637 list_richcompare(PyObject *v, PyObject *w, int op)
2638 {
2639 PyListObject *vl, *wl;
2640 Py_ssize_t i;
2641
2642 if (!PyList_Check(v) || !PyList_Check(w))
2643 Py_RETURN_NOTIMPLEMENTED;
2644
2645 vl = (PyListObject *)v;
2646 wl = (PyListObject *)w;
2647
2648 if (Py_SIZE(vl) != Py_SIZE(wl) && (op == Py_EQ || op == Py_NE)) {
2649 /* Shortcut: if the lengths differ, the lists differ */
2650 if (op == Py_EQ)
2651 Py_RETURN_FALSE;
2652 else
2653 Py_RETURN_TRUE;
2654 }
2655
2656 /* Search for the first index where items are different */
2657 for (i = 0; i < Py_SIZE(vl) && i < Py_SIZE(wl); i++) {
2658 int k = PyObject_RichCompareBool(vl->ob_item[i],
2659 wl->ob_item[i], Py_EQ);
2660 if (k < 0)
2661 return NULL;
2662 if (!k)
2663 break;
2664 }
2665
2666 if (i >= Py_SIZE(vl) || i >= Py_SIZE(wl)) {
2667 /* No more items to compare -- compare sizes */
2668 Py_RETURN_RICHCOMPARE(Py_SIZE(vl), Py_SIZE(wl), op);
2669 }
2670
2671 /* We have an item that differs -- shortcuts for EQ/NE */
2672 if (op == Py_EQ) {
2673 Py_RETURN_FALSE;
2674 }
2675 if (op == Py_NE) {
2676 Py_RETURN_TRUE;
2677 }
2678
2679 /* Compare the final item again using the proper operator */
2680 return PyObject_RichCompare(vl->ob_item[i], wl->ob_item[i], op);
2681 }
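/* Worked example: [1, 2, 3] < [1, 2, 4] scans to the first differing
 * index (i == 2) and returns the result of PyObject_RichCompare(3, 4,
 * Py_LT), while [1, 2] < [1, 2, 4] runs out of items on the left and
 * compares the sizes 2 and 3 instead.
 */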
2682
2683 /*[clinic input]
2684 list.__init__
2685
2686 iterable: object(c_default="NULL") = ()
2687 /
2688
2689 Built-in mutable sequence.
2690
2691 If no argument is given, the constructor creates a new empty list.
2692 The argument must be an iterable if specified.
2693 [clinic start generated code]*/
2694
2695 static int
2696 list___init___impl(PyListObject *self, PyObject *iterable)
2697 /*[clinic end generated code: output=0f3c21379d01de48 input=b3f3fe7206af8f6b]*/
2698 {
2699 /* Verify list invariants established by PyType_GenericAlloc() */
2700 assert(0 <= Py_SIZE(self));
2701 assert(Py_SIZE(self) <= self->allocated || self->allocated == -1);
2702 assert(self->ob_item != NULL ||
2703 self->allocated == 0 || self->allocated == -1);
2704
2705 /* Empty previous contents */
2706 if (self->ob_item != NULL) {
2707 (void)_list_clear(self);
2708 }
2709 if (iterable != NULL) {
2710 if (_PyObject_HasLen(iterable)) {
2711 Py_ssize_t iter_len = PyObject_Size(iterable);
2712 if (iter_len == -1) {
2713 if (!PyErr_ExceptionMatches(PyExc_TypeError)) {
2714 return -1;
2715 }
2716 PyErr_Clear();
2717 }
2718 if (iter_len > 0 && self->ob_item == NULL
2719 && list_preallocate_exact(self, iter_len)) {
2720 return -1;
2721 }
2722 }
2723 PyObject *rv = list_extend(self, iterable);
2724 if (rv == NULL)
2725 return -1;
2726 Py_DECREF(rv);
2727 }
2728 return 0;
2729 }
2730
2731 /*[clinic input]
2732 list.__sizeof__
2733
2734 Return the size of the list in memory, in bytes.
2735 [clinic start generated code]*/
2736
2737 static PyObject *
2738 list___sizeof___impl(PyListObject *self)
2739 /*[clinic end generated code: output=3417541f95f9a53e input=b8030a5d5ce8a187]*/
2740 {
2741 Py_ssize_t res;
2742
2743 res = _PyObject_SIZE(Py_TYPE(self)) + self->allocated * sizeof(void*);
2744 return PyLong_FromSsize_t(res);
2745 }
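/* For example, on a typical 64-bit build sizeof(PyListObject) is 40 bytes,
 * so a list whose 'allocated' field is 8 reports 40 + 8 * 8 = 104 here;
 * sys.getsizeof() adds the GC header on top of this value.
 */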
2746
2747 static PyObject *list_iter(PyObject *seq);
2748 static PyObject *list_subscript(PyListObject*, PyObject*);
2749
2750 static PyMethodDef list_methods[] = {
2751 {"__getitem__", (PyCFunction)list_subscript, METH_O|METH_COEXIST, "x.__getitem__(y) <==> x[y]"},
2752 LIST___REVERSED___METHODDEF
2753 LIST___SIZEOF___METHODDEF
2754 LIST_CLEAR_METHODDEF
2755 LIST_COPY_METHODDEF
2756 LIST_APPEND_METHODDEF
2757 LIST_INSERT_METHODDEF
2758 LIST_EXTEND_METHODDEF
2759 LIST_POP_METHODDEF
2760 LIST_REMOVE_METHODDEF
2761 LIST_INDEX_METHODDEF
2762 LIST_COUNT_METHODDEF
2763 LIST_REVERSE_METHODDEF
2764 LIST_SORT_METHODDEF
2765 {NULL, NULL} /* sentinel */
2766 };
2767
2768 static PySequenceMethods list_as_sequence = {
2769 (lenfunc)list_length, /* sq_length */
2770 (binaryfunc)list_concat, /* sq_concat */
2771 (ssizeargfunc)list_repeat, /* sq_repeat */
2772 (ssizeargfunc)list_item, /* sq_item */
2773 0, /* sq_slice */
2774 (ssizeobjargproc)list_ass_item, /* sq_ass_item */
2775 0, /* sq_ass_slice */
2776 (objobjproc)list_contains, /* sq_contains */
2777 (binaryfunc)list_inplace_concat, /* sq_inplace_concat */
2778 (ssizeargfunc)list_inplace_repeat, /* sq_inplace_repeat */
2779 };
2780
2781 static PyObject *
2782 list_subscript(PyListObject* self, PyObject* item)
2783 {
2784 if (PyIndex_Check(item)) {
2785 Py_ssize_t i;
2786 i = PyNumber_AsSsize_t(item, PyExc_IndexError);
2787 if (i == -1 && PyErr_Occurred())
2788 return NULL;
2789 if (i < 0)
2790 i += PyList_GET_SIZE(self);
2791 return list_item(self, i);
2792 }
2793 else if (PySlice_Check(item)) {
2794 Py_ssize_t start, stop, step, slicelength, cur, i;
2795 PyObject* result;
2796 PyObject* it;
2797 PyObject **src, **dest;
2798
2799 if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
2800 return NULL;
2801 }
2802 slicelength = PySlice_AdjustIndices(Py_SIZE(self), &start, &stop,
2803 step);
2804
2805 if (slicelength <= 0) {
2806 return PyList_New(0);
2807 }
2808 else if (step == 1) {
2809 return list_slice(self, start, stop);
2810 }
2811 else {
2812 result = list_new_prealloc(slicelength);
2813 if (!result) return NULL;
2814
2815 src = self->ob_item;
2816 dest = ((PyListObject *)result)->ob_item;
2817 for (cur = start, i = 0; i < slicelength;
2818 cur += (size_t)step, i++) {
2819 it = src[cur];
2820 Py_INCREF(it);
2821 dest[i] = it;
2822 }
2823 Py_SIZE(result) = slicelength;
2824 return result;
2825 }
2826 }
2827 else {
2828 PyErr_Format(PyExc_TypeError,
2829 "list indices must be integers or slices, not %.200s",
2830 item->ob_type->tp_name);
2831 return NULL;
2832 }
2833 }
2834
2835 static int
2836 list_ass_subscript(PyListObject* self, PyObject* item, PyObject* value)
2837 {
2838 if (PyIndex_Check(item)) {
2839 Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
2840 if (i == -1 && PyErr_Occurred())
2841 return -1;
2842 if (i < 0)
2843 i += PyList_GET_SIZE(self);
2844 return list_ass_item(self, i, value);
2845 }
2846 else if (PySlice_Check(item)) {
2847 Py_ssize_t start, stop, step, slicelength;
2848
2849 if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
2850 return -1;
2851 }
2852 slicelength = PySlice_AdjustIndices(Py_SIZE(self), &start, &stop,
2853 step);
2854
2855 if (step == 1)
2856 return list_ass_slice(self, start, stop, value);
2857
2858 /* Make sure s[5:2] = [..] inserts at the right place:
2859 before 5, not before 2. */
2860 if ((step < 0 && start < stop) ||
2861 (step > 0 && start > stop))
2862 stop = start;
2863
2864 if (value == NULL) {
2865 /* delete slice */
2866 PyObject **garbage;
2867 size_t cur;
2868 Py_ssize_t i;
2869 int res;
2870
2871 if (slicelength <= 0)
2872 return 0;
2873
2874 if (step < 0) {
2875 stop = start + 1;
2876 start = stop + step*(slicelength - 1) - 1;
2877 step = -step;
2878 }
2879
2880 garbage = (PyObject**)
2881 PyMem_MALLOC(slicelength*sizeof(PyObject*));
2882 if (!garbage) {
2883 PyErr_NoMemory();
2884 return -1;
2885 }
2886
2887 /* Drawing pictures might help in understanding these for
2888 loops. Basically, we memmove the parts of the
2889 list that are *not* part of the slice: step-1
2890 items for each item that is part of the slice,
2891 and then the tail end of the list that was not
2892 covered by the slice */
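            /* Worked example: del a[1:7:2] on an 8-element list (start=1,
               stop=7, step=2, slicelength=3). Each iteration saves the
               doomed item in 'garbage' and memmoves the single
               (step-1 == 1) survivor after it one slot to the left; the
               final memmove below the loop slides the tail item a[7] into
               place, leaving [a0, a2, a4, a6, a7] before the resize. */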
2893 for (cur = start, i = 0;
2894 cur < (size_t)stop;
2895 cur += step, i++) {
2896 Py_ssize_t lim = step - 1;
2897
2898 garbage[i] = PyList_GET_ITEM(self, cur);
2899
2900 if (cur + step >= (size_t)Py_SIZE(self)) {
2901 lim = Py_SIZE(self) - cur - 1;
2902 }
2903
2904 memmove(self->ob_item + cur - i,
2905 self->ob_item + cur + 1,
2906 lim * sizeof(PyObject *));
2907 }
2908 cur = start + (size_t)slicelength * step;
2909 if (cur < (size_t)Py_SIZE(self)) {
2910 memmove(self->ob_item + cur - slicelength,
2911 self->ob_item + cur,
2912 (Py_SIZE(self) - cur) *
2913 sizeof(PyObject *));
2914 }
2915
2916 Py_SIZE(self) -= slicelength;
2917 res = list_resize(self, Py_SIZE(self));
2918
2919 for (i = 0; i < slicelength; i++) {
2920 Py_DECREF(garbage[i]);
2921 }
2922 PyMem_FREE(garbage);
2923
2924 return res;
2925 }
2926 else {
2927 /* assign slice */
2928 PyObject *ins, *seq;
2929 PyObject **garbage, **seqitems, **selfitems;
2930 Py_ssize_t cur, i;
2931
2932 /* protect against a[::-1] = a */
2933 if (self == (PyListObject*)value) {
2934 seq = list_slice((PyListObject*)value, 0,
2935 PyList_GET_SIZE(value));
2936 }
2937 else {
2938 seq = PySequence_Fast(value,
2939 "must assign iterable "
2940 "to extended slice");
2941 }
2942 if (!seq)
2943 return -1;
2944
2945 if (PySequence_Fast_GET_SIZE(seq) != slicelength) {
2946 PyErr_Format(PyExc_ValueError,
2947 "attempt to assign sequence of "
2948 "size %zd to extended slice of "
2949 "size %zd",
2950 PySequence_Fast_GET_SIZE(seq),
2951 slicelength);
2952 Py_DECREF(seq);
2953 return -1;
2954 }
2955
2956 if (!slicelength) {
2957 Py_DECREF(seq);
2958 return 0;
2959 }
2960
2961 garbage = (PyObject**)
2962 PyMem_MALLOC(slicelength*sizeof(PyObject*));
2963 if (!garbage) {
2964 Py_DECREF(seq);
2965 PyErr_NoMemory();
2966 return -1;
2967 }
2968
2969 selfitems = self->ob_item;
2970 seqitems = PySequence_Fast_ITEMS(seq);
2971 for (cur = start, i = 0; i < slicelength;
2972 cur += (size_t)step, i++) {
2973 garbage[i] = selfitems[cur];
2974 ins = seqitems[i];
2975 Py_INCREF(ins);
2976 selfitems[cur] = ins;
2977 }
2978
2979 for (i = 0; i < slicelength; i++) {
2980 Py_DECREF(garbage[i]);
2981 }
2982
2983 PyMem_FREE(garbage);
2984 Py_DECREF(seq);
2985
2986 return 0;
2987 }
2988 }
2989 else {
2990 PyErr_Format(PyExc_TypeError,
2991 "list indices must be integers or slices, not %.200s",
2992 item->ob_type->tp_name);
2993 return -1;
2994 }
2995 }
2996
2997 static PyMappingMethods list_as_mapping = {
2998 (lenfunc)list_length,
2999 (binaryfunc)list_subscript,
3000 (objobjargproc)list_ass_subscript
3001 };
3002
3003 PyTypeObject PyList_Type = {
3004 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3005 "list",
3006 sizeof(PyListObject),
3007 0,
3008 (destructor)list_dealloc, /* tp_dealloc */
3009 0, /* tp_vectorcall_offset */
3010 0, /* tp_getattr */
3011 0, /* tp_setattr */
3012 0, /* tp_as_async */
3013 (reprfunc)list_repr, /* tp_repr */
3014 0, /* tp_as_number */
3015 &list_as_sequence, /* tp_as_sequence */
3016 &list_as_mapping, /* tp_as_mapping */
3017 PyObject_HashNotImplemented, /* tp_hash */
3018 0, /* tp_call */
3019 0, /* tp_str */
3020 PyObject_GenericGetAttr, /* tp_getattro */
3021 0, /* tp_setattro */
3022 0, /* tp_as_buffer */
3023 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
3024 Py_TPFLAGS_BASETYPE | Py_TPFLAGS_LIST_SUBCLASS, /* tp_flags */
3025 list___init____doc__, /* tp_doc */
3026 (traverseproc)list_traverse, /* tp_traverse */
3027 (inquiry)_list_clear, /* tp_clear */
3028 list_richcompare, /* tp_richcompare */
3029 0, /* tp_weaklistoffset */
3030 list_iter, /* tp_iter */
3031 0, /* tp_iternext */
3032 list_methods, /* tp_methods */
3033 0, /* tp_members */
3034 0, /* tp_getset */
3035 0, /* tp_base */
3036 0, /* tp_dict */
3037 0, /* tp_descr_get */
3038 0, /* tp_descr_set */
3039 0, /* tp_dictoffset */
3040 (initproc)list___init__, /* tp_init */
3041 PyType_GenericAlloc, /* tp_alloc */
3042 PyType_GenericNew, /* tp_new */
3043 PyObject_GC_Del, /* tp_free */
3044 };
3045
3046 /*********************** List Iterator **************************/
3047
3048 typedef struct {
3049 PyObject_HEAD
3050 Py_ssize_t it_index;
3051 PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
3052 } listiterobject;
3053
3054 static void listiter_dealloc(listiterobject *);
3055 static int listiter_traverse(listiterobject *, visitproc, void *);
3056 static PyObject *listiter_next(listiterobject *);
3057 static PyObject *listiter_len(listiterobject *, PyObject *);
3058 static PyObject *listiter_reduce_general(void *_it, int forward);
3059 static PyObject *listiter_reduce(listiterobject *, PyObject *);
3060 static PyObject *listiter_setstate(listiterobject *, PyObject *state);
3061
3062 PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it)).");
3063 PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");
3064 PyDoc_STRVAR(setstate_doc, "Set state information for unpickling.");
3065
3066 static PyMethodDef listiter_methods[] = {
3067 {"__length_hint__", (PyCFunction)listiter_len, METH_NOARGS, length_hint_doc},
3068 {"__reduce__", (PyCFunction)listiter_reduce, METH_NOARGS, reduce_doc},
3069 {"__setstate__", (PyCFunction)listiter_setstate, METH_O, setstate_doc},
3070 {NULL, NULL} /* sentinel */
3071 };
3072
3073 PyTypeObject PyListIter_Type = {
3074 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3075 "list_iterator", /* tp_name */
3076 sizeof(listiterobject), /* tp_basicsize */
3077 0, /* tp_itemsize */
3078 /* methods */
3079 (destructor)listiter_dealloc, /* tp_dealloc */
3080 0, /* tp_vectorcall_offset */
3081 0, /* tp_getattr */
3082 0, /* tp_setattr */
3083 0, /* tp_as_async */
3084 0, /* tp_repr */
3085 0, /* tp_as_number */
3086 0, /* tp_as_sequence */
3087 0, /* tp_as_mapping */
3088 0, /* tp_hash */
3089 0, /* tp_call */
3090 0, /* tp_str */
3091 PyObject_GenericGetAttr, /* tp_getattro */
3092 0, /* tp_setattro */
3093 0, /* tp_as_buffer */
3094 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
3095 0, /* tp_doc */
3096 (traverseproc)listiter_traverse, /* tp_traverse */
3097 0, /* tp_clear */
3098 0, /* tp_richcompare */
3099 0, /* tp_weaklistoffset */
3100 PyObject_SelfIter, /* tp_iter */
3101 (iternextfunc)listiter_next, /* tp_iternext */
3102 listiter_methods, /* tp_methods */
3103 0, /* tp_members */
3104 };
3105
3106
3107 static PyObject *
3108 list_iter(PyObject *seq)
3109 {
3110 listiterobject *it;
3111
3112 if (!PyList_Check(seq)) {
3113 PyErr_BadInternalCall();
3114 return NULL;
3115 }
3116 it = PyObject_GC_New(listiterobject, &PyListIter_Type);
3117 if (it == NULL)
3118 return NULL;
3119 it->it_index = 0;
3120 Py_INCREF(seq);
3121 it->it_seq = (PyListObject *)seq;
3122 _PyObject_GC_TRACK(it);
3123 return (PyObject *)it;
3124 }
3125
3126 static void
3127 listiter_dealloc(listiterobject *it)
3128 {
3129 _PyObject_GC_UNTRACK(it);
3130 Py_XDECREF(it->it_seq);
3131 PyObject_GC_Del(it);
3132 }
3133
3134 static int
3135 listiter_traverse(listiterobject *it, visitproc visit, void *arg)
3136 {
3137 Py_VISIT(it->it_seq);
3138 return 0;
3139 }
3140
3141 static PyObject *
3142 listiter_next(listiterobject *it)
3143 {
3144 PyListObject *seq;
3145 PyObject *item;
3146
3147 assert(it != NULL);
3148 seq = it->it_seq;
3149 if (seq == NULL)
3150 return NULL;
3151 assert(PyList_Check(seq));
3152
3153 if (it->it_index < PyList_GET_SIZE(seq)) {
3154 item = PyList_GET_ITEM(seq, it->it_index);
3155 ++it->it_index;
3156 Py_INCREF(item);
3157 return item;
3158 }
3159
3160 it->it_seq = NULL;
3161 Py_DECREF(seq);
3162 return NULL;
3163 }
3164
3165 static PyObject *
3166 listiter_len(listiterobject *it, PyObject *Py_UNUSED(ignored))
3167 {
3168 Py_ssize_t len;
3169 if (it->it_seq) {
3170 len = PyList_GET_SIZE(it->it_seq) - it->it_index;
3171 if (len >= 0)
3172 return PyLong_FromSsize_t(len);
3173 }
3174 return PyLong_FromLong(0);
3175 }
3176
3177 static PyObject *
3178 listiter_reduce(listiterobject *it, PyObject *Py_UNUSED(ignored))
3179 {
3180 return listiter_reduce_general(it, 1);
3181 }
3182
3183 static PyObject *
3184 listiter_setstate(listiterobject *it, PyObject *state)
3185 {
3186 Py_ssize_t index = PyLong_AsSsize_t(state);
3187 if (index == -1 && PyErr_Occurred())
3188 return NULL;
3189 if (it->it_seq != NULL) {
3190 if (index < 0)
3191 index = 0;
3192 else if (index > PyList_GET_SIZE(it->it_seq))
3193 index = PyList_GET_SIZE(it->it_seq); /* iterator exhausted */
3194 it->it_index = index;
3195 }
3196 Py_RETURN_NONE;
3197 }
3198
3199 /*********************** List Reverse Iterator **************************/
3200
3201 typedef struct {
3202 PyObject_HEAD
3203 Py_ssize_t it_index;
3204 PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
3205 } listreviterobject;
3206
3207 static void listreviter_dealloc(listreviterobject *);
3208 static int listreviter_traverse(listreviterobject *, visitproc, void *);
3209 static PyObject *listreviter_next(listreviterobject *);
3210 static PyObject *listreviter_len(listreviterobject *, PyObject *);
3211 static PyObject *listreviter_reduce(listreviterobject *, PyObject *);
3212 static PyObject *listreviter_setstate(listreviterobject *, PyObject *);
3213
3214 static PyMethodDef listreviter_methods[] = {
3215 {"__length_hint__", (PyCFunction)listreviter_len, METH_NOARGS, length_hint_doc},
3216 {"__reduce__", (PyCFunction)listreviter_reduce, METH_NOARGS, reduce_doc},
3217 {"__setstate__", (PyCFunction)listreviter_setstate, METH_O, setstate_doc},
3218 {NULL, NULL} /* sentinel */
3219 };
3220
3221 PyTypeObject PyListRevIter_Type = {
3222 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3223 "list_reverseiterator", /* tp_name */
3224 sizeof(listreviterobject), /* tp_basicsize */
3225 0, /* tp_itemsize */
3226 /* methods */
3227 (destructor)listreviter_dealloc, /* tp_dealloc */
3228 0, /* tp_vectorcall_offset */
3229 0, /* tp_getattr */
3230 0, /* tp_setattr */
3231 0, /* tp_as_async */
3232 0, /* tp_repr */
3233 0, /* tp_as_number */
3234 0, /* tp_as_sequence */
3235 0, /* tp_as_mapping */
3236 0, /* tp_hash */
3237 0, /* tp_call */
3238 0, /* tp_str */
3239 PyObject_GenericGetAttr, /* tp_getattro */
3240 0, /* tp_setattro */
3241 0, /* tp_as_buffer */
3242 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
3243 0, /* tp_doc */
3244 (traverseproc)listreviter_traverse, /* tp_traverse */
3245 0, /* tp_clear */
3246 0, /* tp_richcompare */
3247 0, /* tp_weaklistoffset */
3248 PyObject_SelfIter, /* tp_iter */
3249 (iternextfunc)listreviter_next, /* tp_iternext */
3250 listreviter_methods, /* tp_methods */
3251 0,
3252 };
3253
3254 /*[clinic input]
3255 list.__reversed__
3256
3257 Return a reverse iterator over the list.
3258 [clinic start generated code]*/
3259
3260 static PyObject *
3261 list___reversed___impl(PyListObject *self)
3262 /*[clinic end generated code: output=b166f073208c888c input=eadb6e17f8a6a280]*/
3263 {
3264 listreviterobject *it;
3265
3266 it = PyObject_GC_New(listreviterobject, &PyListRevIter_Type);
3267 if (it == NULL)
3268 return NULL;
3269 assert(PyList_Check(self));
3270 it->it_index = PyList_GET_SIZE(self) - 1;
3271 Py_INCREF(self);
3272 it->it_seq = self;
3273 PyObject_GC_Track(it);
3274 return (PyObject *)it;
3275 }
3276
3277 static void
3278 listreviter_dealloc(listreviterobject *it)
3279 {
3280 PyObject_GC_UnTrack(it);
3281 Py_XDECREF(it->it_seq);
3282 PyObject_GC_Del(it);
3283 }
3284
3285 static int
3286 listreviter_traverse(listreviterobject *it, visitproc visit, void *arg)
3287 {
3288 Py_VISIT(it->it_seq);
3289 return 0;
3290 }
3291
3292 static PyObject *
3293 listreviter_next(listreviterobject *it)
3294 {
3295 PyObject *item;
3296 Py_ssize_t index;
3297 PyListObject *seq;
3298
3299 assert(it != NULL);
3300 seq = it->it_seq;
3301 if (seq == NULL) {
3302 return NULL;
3303 }
3304 assert(PyList_Check(seq));
3305
3306 index = it->it_index;
3307 if (index>=0 && index < PyList_GET_SIZE(seq)) {
3308 item = PyList_GET_ITEM(seq, index);
3309 it->it_index--;
3310 Py_INCREF(item);
3311 return item;
3312 }
3313 it->it_index = -1;
3314 it->it_seq = NULL;
3315 Py_DECREF(seq);
3316 return NULL;
3317 }
3318
3319 static PyObject *
3320 listreviter_len(listreviterobject *it, PyObject *Py_UNUSED(ignored))
3321 {
3322 Py_ssize_t len = it->it_index + 1;
3323 if (it->it_seq == NULL || PyList_GET_SIZE(it->it_seq) < len)
3324 len = 0;
3325 return PyLong_FromSsize_t(len);
3326 }
3327
3328 static PyObject *
3329 listreviter_reduce(listreviterobject *it, PyObject *Py_UNUSED(ignored))
3330 {
3331 return listiter_reduce_general(it, 0);
3332 }
3333
3334 static PyObject *
3335 listreviter_setstate(listreviterobject *it, PyObject *state)
3336 {
3337 Py_ssize_t index = PyLong_AsSsize_t(state);
3338 if (index == -1 && PyErr_Occurred())
3339 return NULL;
3340 if (it->it_seq != NULL) {
3341 if (index < -1)
3342 index = -1;
3343 else if (index > PyList_GET_SIZE(it->it_seq) - 1)
3344 index = PyList_GET_SIZE(it->it_seq) - 1;
3345 it->it_index = index;
3346 }
3347 Py_RETURN_NONE;
3348 }
3349
3350 /* common pickling support */
3351
3352 static PyObject *
3353 listiter_reduce_general(void *_it, int forward)
3354 {
3355 _Py_IDENTIFIER(iter);
3356 _Py_IDENTIFIER(reversed);
3357 PyObject *list;
3358
3359 /* The two iterator structs are distinct types (and it_index means different things in each), so handle them separately. */
3360 if (forward) {
3361 listiterobject *it = (listiterobject *)_it;
3362 if (it->it_seq)
3363 return Py_BuildValue("N(O)n", _PyEval_GetBuiltinId(&PyId_iter),
3364 it->it_seq, it->it_index);
3365 } else {
3366 listreviterobject *it = (listreviterobject *)_it;
3367 if (it->it_seq)
3368 return Py_BuildValue("N(O)n", _PyEval_GetBuiltinId(&PyId_reversed),
3369 it->it_seq, it->it_index);
3370 }
3371 /* empty iterator, create an empty list */
3372 list = PyList_New(0);
3373 if (list == NULL)
3374 return NULL;
3375 return Py_BuildValue("N(N)", _PyEval_GetBuiltinId(&PyId_iter), list);
3376 }
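/* For a forward iterator over lst that has already yielded one item this
 * produces (iter, (lst,), 1); for a reverse iterator it produces
 * (reversed, (lst,), index). Unpickling calls the builtin to recreate the
 * iterator and then __setstate__ to restore the saved position, while an
 * exhausted iterator is rebuilt as iter([]) over a fresh empty list.
 */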
3377