comparison cos/python/Objects/listobject.c @ 27:7f74363f4c82

Added some files for the python port
author windel
date Tue, 27 Dec 2011 18:59:02 +0100
1 /* List object implementation */
2
3 #include "Python.h"
4
5 /* Ensure ob_item has room for at least newsize elements, and set
6 * ob_size to newsize. If newsize > ob_size on entry, the content
7 * of the new slots at exit is undefined heap trash; it's the caller's
8 * responsibility to overwrite them with sane values.
9 * The number of allocated elements may grow, shrink, or stay the same.
10 * Failure is impossible if newsize <= self.allocated on entry, although
11 * that partly relies on an assumption that the system realloc() never
12 * fails when passed a number of bytes <= the number of bytes last
13 * allocated (the C standard doesn't guarantee this, but it's hard to
14 * imagine a realloc implementation where it wouldn't be true).
15 * Note that self->ob_item may change, even if newsize is less
16 * than ob_size on entry.
17 */
18 static int
19 list_resize(PyListObject *self, Py_ssize_t newsize)
20 {
21 PyObject **items;
22 size_t new_allocated;
23 Py_ssize_t allocated = self->allocated;
24
25 /* Bypass realloc() when a previous overallocation is large enough
26 to accommodate the newsize. If the newsize falls lower than half
27 the allocated size, then proceed with the realloc() to shrink the list.
28 */
29 if (allocated >= newsize && newsize >= (allocated >> 1)) {
30 assert(self->ob_item != NULL || newsize == 0);
31 Py_SIZE(self) = newsize;
32 return 0;
33 }
34
35 /* This over-allocates proportional to the list size, making room
36 * for additional growth. The over-allocation is mild, but is
37 * enough to give linear-time amortized behavior over a long
38 * sequence of appends() in the presence of a poorly-performing
39 * system realloc().
40 * The growth pattern is: 0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ...
41 */
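/* For example, growing a list one append at a time, the computation below
 * (including the later "new_allocated += newsize") works out to:
 *   newsize  1:  (1 >> 3) + 3 +  1 =  4
 *   newsize  5:  (5 >> 3) + 3 +  5 =  8
 *   newsize  9:  (9 >> 3) + 6 +  9 = 16
 *   newsize 17: (17 >> 3) + 6 + 17 = 25
 *   newsize 26: (26 >> 3) + 6 + 26 = 35
 * which reproduces the pattern quoted above; asymptotically the
 * over-allocation is about newsize/8, i.e. roughly 12.5%.
 */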
42 new_allocated = (newsize >> 3) + (newsize < 9 ? 3 : 6);
43
44 /* check for integer overflow */
45 if (new_allocated > PY_SIZE_MAX - newsize) {
46 PyErr_NoMemory();
47 return -1;
48 } else {
49 new_allocated += newsize;
50 }
51
52 if (newsize == 0)
53 new_allocated = 0;
54 items = self->ob_item;
55 if (new_allocated <= (PY_SIZE_MAX / sizeof(PyObject *)))
56 PyMem_RESIZE(items, PyObject *, new_allocated);
57 else
58 items = NULL;
59 if (items == NULL) {
60 PyErr_NoMemory();
61 return -1;
62 }
63 self->ob_item = items;
64 Py_SIZE(self) = newsize;
65 self->allocated = new_allocated;
66 return 0;
67 }
68
69
70 /* Empty list reuse scheme to save calls to malloc and free */
71 #ifndef PyList_MAXFREELIST
72 #define PyList_MAXFREELIST 80
73 #endif
74 static PyListObject *free_list[PyList_MAXFREELIST];
75 static int numfree = 0;
76
77 int
78 PyList_ClearFreeList(void)
79 {
80 PyListObject *op;
81 int ret = numfree;
82 while (numfree) {
83 op = free_list[--numfree];
84 assert(PyList_CheckExact(op));
85 PyObject_GC_Del(op);
86 }
87 return ret;
88 }
89
90 void
91 PyList_Fini(void)
92 {
93 PyList_ClearFreeList();
94 }
95
96 PyObject *
97 PyList_New(Py_ssize_t size)
98 {
99 PyListObject *op;
100 size_t nbytes;
101
102 if (size < 0) {
103 PyErr_BadInternalCall();
104 return NULL;
105 }
106 /* Check for overflow without an actual overflow, since a check
107 * that relies on overflowing can be optimised out by the compiler */
108 if ((size_t)size > PY_SIZE_MAX / sizeof(PyObject *))
109 return PyErr_NoMemory();
110 nbytes = size * sizeof(PyObject *);
111 if (numfree) {
112 numfree--;
113 op = free_list[numfree];
114 _Py_NewReference((PyObject *)op);
115 } else {
116 op = PyObject_GC_New(PyListObject, &PyList_Type);
117 if (op == NULL)
118 return NULL;
119 }
120 if (size <= 0)
121 op->ob_item = NULL;
122 else {
123 op->ob_item = (PyObject **) PyMem_MALLOC(nbytes);
124 if (op->ob_item == NULL) {
125 Py_DECREF(op);
126 return PyErr_NoMemory();
127 }
128 memset(op->ob_item, 0, nbytes);
129 }
130 Py_SIZE(op) = size;
131 op->allocated = size;
132 _PyObject_GC_TRACK(op);
133 return (PyObject *) op;
134 }
135
136 Py_ssize_t
137 PyList_Size(PyObject *op)
138 {
139 if (!PyList_Check(op)) {
140 PyErr_BadInternalCall();
141 return -1;
142 }
143 else
144 return Py_SIZE(op);
145 }
146
147 static PyObject *indexerr = NULL;
148
149 PyObject *
150 PyList_GetItem(PyObject *op, Py_ssize_t i)
151 {
152 if (!PyList_Check(op)) {
153 PyErr_BadInternalCall();
154 return NULL;
155 }
156 if (i < 0 || i >= Py_SIZE(op)) {
157 if (indexerr == NULL) {
158 indexerr = PyUnicode_FromString(
159 "list index out of range");
160 if (indexerr == NULL)
161 return NULL;
162 }
163 PyErr_SetObject(PyExc_IndexError, indexerr);
164 return NULL;
165 }
166 return ((PyListObject *)op) -> ob_item[i];
167 }
168
169 int
170 PyList_SetItem(register PyObject *op, register int i,
171 register PyObject *newitem)
172 {
173 register PyObject *olditem;
174 register PyObject **p;
175 if (!PyList_Check(op))
176 {
177 Py_XDECREF(newitem);
178 PyErr_BadInternalCall();
179 return -1;
180 }
181 if (i < 0 || i >= Py_SIZE(op))
182 {
183 Py_XDECREF(newitem);
184 PyErr_SetString(PyExc_IndexError,
185 "list assignment index out of range");
186 return -1;
187 }
188 p = ((PyListObject *)op) -> ob_item + i;
189 olditem = *p;
190 *p = newitem;
191 Py_XDECREF(olditem);
192 return 0;
193 }
194
195 static int
196 ins1(PyListObject *self, Py_ssize_t where, PyObject *v)
197 {
198 Py_ssize_t i, n = Py_SIZE(self);
199 PyObject **items;
200 if (v == NULL) {
201 PyErr_BadInternalCall();
202 return -1;
203 }
204 if (n == PY_SSIZE_T_MAX) {
205 PyErr_SetString(PyExc_OverflowError,
206 "cannot add more objects to list");
207 return -1;
208 }
209
210 if (list_resize(self, n+1) == -1)
211 return -1;
212
213 if (where < 0) {
214 where += n;
215 if (where < 0)
216 where = 0;
217 }
218 if (where > n)
219 where = n;
220 items = self->ob_item;
221 for (i = n; --i >= where; )
222 items[i+1] = items[i];
223 Py_INCREF(v);
224 items[where] = v;
225 return 0;
226 }
227
228 int
229 PyList_Insert(PyObject *op, int where, PyObject *newitem)
230 {
231 if (!PyList_Check(op)) {
232 PyErr_BadInternalCall();
233 return -1;
234 }
235 return ins1((PyListObject *)op, where, newitem);
236 }
237
238 static int
239 app1(PyListObject *self, PyObject *v)
240 {
241 Py_ssize_t n = PyList_GET_SIZE(self);
242
243 assert (v != NULL);
244 if (n == PY_SSIZE_T_MAX) {
245 PyErr_SetString(PyExc_OverflowError,
246 "cannot add more objects to list");
247 return -1;
248 }
249
250 if (list_resize(self, n+1) == -1)
251 return -1;
252
253 Py_INCREF(v);
254 PyList_SET_ITEM(self, n, v);
255 return 0;
256 }
257
258 int
259 PyList_Append(PyObject *op, PyObject *newitem)
260 {
261 if (PyList_Check(op) && (newitem != NULL))
262 return app1((PyListObject *)op, newitem);
263 PyErr_BadInternalCall();
264 return -1;
265 }
266
267 /* Methods */
268
269 static void
270 list_dealloc(PyListObject *op)
271 {
272 Py_ssize_t i;
273 PyObject_GC_UnTrack(op);
274 Py_TRASHCAN_SAFE_BEGIN(op)
275 if (op->ob_item != NULL) {
276 /* Do it backwards, for Christian Tismer.
277 There's a simple test case where somehow this reduces
278 thrashing when a *very* large list is created and
279 immediately deleted. */
280 i = Py_SIZE(op);
281 while (--i >= 0) {
282 Py_XDECREF(op->ob_item[i]);
283 }
284 PyMem_FREE(op->ob_item);
285 }
286 if (numfree < PyList_MAXFREELIST && PyList_CheckExact(op))
287 free_list[numfree++] = op;
288 else
289 Py_TYPE(op)->tp_free((PyObject *)op);
290 Py_TRASHCAN_SAFE_END(op)
291 }
292
293 static PyObject *
294 list_repr(PyListObject *v)
295 {
296 Py_ssize_t i;
297 PyObject *s = NULL;
298 _PyAccu acc;
299 static PyObject *sep = NULL;
300
301 if (Py_SIZE(v) == 0) {
302 return PyUnicode_FromString("[]");
303 }
304
305 if (sep == NULL) {
306 sep = PyUnicode_FromString(", ");
307 if (sep == NULL)
308 return NULL;
309 }
310
311 i = Py_ReprEnter((PyObject*)v);
312 if (i != 0) {
313 return i > 0 ? PyUnicode_FromString("[...]") : NULL;
314 }
315
316 if (_PyAccu_Init(&acc))
317 goto error;
318
319 s = PyUnicode_FromString("[");
320 if (s == NULL || _PyAccu_Accumulate(&acc, s))
321 goto error;
322 Py_CLEAR(s);
323
324 /* Do repr() on each element. Note that this may mutate the list,
325 so must refetch the list size on each iteration. */
326 for (i = 0; i < Py_SIZE(v); ++i) {
327 if (Py_EnterRecursiveCall(" while getting the repr of a list"))
328 goto error;
329 s = PyObject_Repr(v->ob_item[i]);
330 Py_LeaveRecursiveCall();
331 if (i > 0 && _PyAccu_Accumulate(&acc, sep))
332 goto error;
333 if (s == NULL || _PyAccu_Accumulate(&acc, s))
334 goto error;
335 Py_CLEAR(s);
336 }
337 s = PyUnicode_FromString("]");
338 if (s == NULL || _PyAccu_Accumulate(&acc, s))
339 goto error;
340 Py_CLEAR(s);
341
342 Py_ReprLeave((PyObject *)v);
343 return _PyAccu_Finish(&acc);
344
345 error:
346 _PyAccu_Destroy(&acc);
347 Py_XDECREF(s);
348 Py_ReprLeave((PyObject *)v);
349 return NULL;
350 }
351
352 static Py_ssize_t
353 list_length(PyListObject *a)
354 {
355 return Py_SIZE(a);
356 }
357
358 static int
359 list_contains(PyListObject *a, PyObject *el)
360 {
361 Py_ssize_t i;
362 int cmp;
363
364 for (i = 0, cmp = 0 ; cmp == 0 && i < Py_SIZE(a); ++i)
365 cmp = PyObject_RichCompareBool(el, PyList_GET_ITEM(a, i),
366 Py_EQ);
367 return cmp;
368 }
369
370 static PyObject *
372 list_item(PyListObject *a, Py_ssize_t i)
372 {
373 if (i < 0 || i >= Py_SIZE(a)) {
374 if (indexerr == NULL) {
375 indexerr = PyUnicode_FromString(
376 "list index out of range");
377 if (indexerr == NULL)
378 return NULL;
379 }
380 PyErr_SetObject(PyExc_IndexError, indexerr);
381 return NULL;
382 }
383 Py_INCREF(a->ob_item[i]);
384 return a->ob_item[i];
385 }
386
387 static PyObject *
388 list_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
389 {
390 PyListObject *np;
391 PyObject **src, **dest;
392 Py_ssize_t i, len;
393 if (ilow < 0)
394 ilow = 0;
395 else if (ilow > Py_SIZE(a))
396 ilow = Py_SIZE(a);
397 if (ihigh < ilow)
398 ihigh = ilow;
399 else if (ihigh > Py_SIZE(a))
400 ihigh = Py_SIZE(a);
401 len = ihigh - ilow;
402 np = (PyListObject *) PyList_New(len);
403 if (np == NULL)
404 return NULL;
405
406 src = a->ob_item + ilow;
407 dest = np->ob_item;
408 for (i = 0; i < len; i++) {
409 PyObject *v = src[i];
410 Py_INCREF(v);
411 dest[i] = v;
412 }
413 return (PyObject *)np;
414 }
415
416 PyObject *
417 PyList_GetSlice(PyObject *a, int ilow, int ihigh)
418 {
419 if (!PyList_Check(a)) {
420 PyErr_BadInternalCall();
421 return NULL;
422 }
423 return list_slice((PyListObject *)a, ilow, ihigh);
424 }
425
426 static PyObject *
427 list_concat(PyListObject *a, PyObject *bb)
428 {
429 Py_ssize_t size;
430 Py_ssize_t i;
431 PyObject **src, **dest;
432 PyListObject *np;
433 if (!PyList_Check(bb)) {
434 PyErr_Format(PyExc_TypeError,
435 "can only concatenate list (not \"%.200s\") to list",
436 bb->ob_type->tp_name);
437 return NULL;
438 }
439 #define b ((PyListObject *)bb)
440 size = Py_SIZE(a) + Py_SIZE(b);
441 if (size < 0)
442 return PyErr_NoMemory();
443 np = (PyListObject *) PyList_New(size);
444 if (np == NULL) {
445 return NULL;
446 }
447 src = a->ob_item;
448 dest = np->ob_item;
449 for (i = 0; i < Py_SIZE(a); i++) {
450 PyObject *v = src[i];
451 Py_INCREF(v);
452 dest[i] = v;
453 }
454 src = b->ob_item;
455 dest = np->ob_item + Py_SIZE(a);
456 for (i = 0; i < Py_SIZE(b); i++) {
457 PyObject *v = src[i];
458 Py_INCREF(v);
459 dest[i] = v;
460 }
461 return (PyObject *)np;
462 #undef b
463 }
464
465 static PyObject *
466 list_repeat(PyListObject *a, Py_ssize_t n)
467 {
468 Py_ssize_t i, j;
469 Py_ssize_t size;
470 PyListObject *np;
471 PyObject **p, **items;
472 PyObject *elem;
473 if (n < 0)
474 n = 0;
475 if (n > 0 && Py_SIZE(a) > PY_SSIZE_T_MAX / n)
476 return PyErr_NoMemory();
477 size = Py_SIZE(a) * n;
478 if (size == 0)
479 return PyList_New(0);
480 np = (PyListObject *) PyList_New(size);
481 if (np == NULL)
482 return NULL;
483
484 items = np->ob_item;
485 if (Py_SIZE(a) == 1) {
486 elem = a->ob_item[0];
487 for (i = 0; i < n; i++) {
488 items[i] = elem;
489 Py_INCREF(elem);
490 }
491 return (PyObject *) np;
492 }
493 p = np->ob_item;
494 items = a->ob_item;
495 for (i = 0; i < n; i++) {
496 for (j = 0; j < Py_SIZE(a); j++) {
497 *p = items[j];
498 Py_INCREF(*p);
499 p++;
500 }
501 }
502 return (PyObject *) np;
503 }
504
505 static int
506 list_clear(PyListObject *a)
507 {
508 Py_ssize_t i;
509 PyObject **item = a->ob_item;
510 if (item != NULL) {
511 /* Because XDECREF can recursively invoke operations on
512 this list, we make it empty first. */
513 i = Py_SIZE(a);
514 Py_SIZE(a) = 0;
515 a->ob_item = NULL;
516 a->allocated = 0;
517 while (--i >= 0) {
518 Py_XDECREF(item[i]);
519 }
520 PyMem_FREE(item);
521 }
522 /* Never fails; the return value can be ignored.
523 Note that there is no guarantee that the list is actually empty
524 at this point, because XDECREF may have populated it again! */
525 return 0;
526 }
527
528 /* a[ilow:ihigh] = v if v != NULL.
529 * del a[ilow:ihigh] if v == NULL.
530 *
531 * Special speed gimmick: when v is NULL and ihigh - ilow <= 8, it's
532 * guaranteed the call cannot fail.
533 */
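/* Worked example, using the variable names defined in the function below:
 * for a == [0, 1, 2, 3, 4] and a[1:4] = [9, 9] we get norig = 3 items
 * being replaced by n = 2 items, so d = -1; the tail is shifted left by
 * one slot before the new items are copied in, giving [0, 9, 9, 4].
 */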
534 static int
535 list_ass_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
536 {
537 /* Because [X]DECREF can recursively invoke list operations on
538 this list, we must postpone all [X]DECREF activity until
539 after the list is back in its canonical shape. Therefore
540 we must allocate an additional array, 'recycle', into which
541 we temporarily copy the items that are deleted from the
542 list. :-( */
543 PyObject *recycle_on_stack[8];
544 PyObject **recycle = recycle_on_stack; /* will allocate more if needed */
545 PyObject **item;
546 PyObject **vitem = NULL;
547 PyObject *v_as_SF = NULL; /* PySequence_Fast(v) */
548 Py_ssize_t n; /* # of elements in replacement list */
549 Py_ssize_t norig; /* # of elements in list getting replaced */
550 Py_ssize_t d; /* Change in size */
551 Py_ssize_t k;
552 size_t s;
553 int result = -1; /* guilty until proved innocent */
554 #define b ((PyListObject *)v)
555 if (v == NULL)
556 n = 0;
557 else {
558 if (a == b) {
559 /* Special case "a[i:j] = a" -- copy b first */
560 v = list_slice(b, 0, Py_SIZE(b));
561 if (v == NULL)
562 return result;
563 result = list_ass_slice(a, ilow, ihigh, v);
564 Py_DECREF(v);
565 return result;
566 }
567 v_as_SF = PySequence_Fast(v, "can only assign an iterable");
568 if(v_as_SF == NULL)
569 goto Error;
570 n = PySequence_Fast_GET_SIZE(v_as_SF);
571 vitem = PySequence_Fast_ITEMS(v_as_SF);
572 }
573 if (ilow < 0)
574 ilow = 0;
575 else if (ilow > Py_SIZE(a))
576 ilow = Py_SIZE(a);
577
578 if (ihigh < ilow)
579 ihigh = ilow;
580 else if (ihigh > Py_SIZE(a))
581 ihigh = Py_SIZE(a);
582
583 norig = ihigh - ilow;
584 assert(norig >= 0);
585 d = n - norig;
586 if (Py_SIZE(a) + d == 0) {
587 Py_XDECREF(v_as_SF);
588 return list_clear(a);
589 }
590 item = a->ob_item;
591 /* recycle the items that we are about to remove */
592 s = norig * sizeof(PyObject *);
593 if (s > sizeof(recycle_on_stack)) {
594 recycle = (PyObject **)PyMem_MALLOC(s);
595 if (recycle == NULL) {
596 PyErr_NoMemory();
597 goto Error;
598 }
599 }
600 memcpy(recycle, &item[ilow], s);
601
602 if (d < 0) { /* Delete -d items */
603 memmove(&item[ihigh+d], &item[ihigh],
604 (Py_SIZE(a) - ihigh)*sizeof(PyObject *));
605 list_resize(a, Py_SIZE(a) + d);
606 item = a->ob_item;
607 }
608 else if (d > 0) { /* Insert d items */
609 k = Py_SIZE(a);
610 if (list_resize(a, k+d) < 0)
611 goto Error;
612 item = a->ob_item;
613 memmove(&item[ihigh+d], &item[ihigh],
614 (k - ihigh)*sizeof(PyObject *));
615 }
616 for (k = 0; k < n; k++, ilow++) {
617 PyObject *w = vitem[k];
618 Py_XINCREF(w);
619 item[ilow] = w;
620 }
621 for (k = norig - 1; k >= 0; --k)
622 Py_XDECREF(recycle[k]);
623 result = 0;
624 Error:
625 if (recycle != recycle_on_stack)
626 PyMem_FREE(recycle);
627 Py_XDECREF(v_as_SF);
628 return result;
629 #undef b
630 }
631
632 int
633 PyList_SetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
634 {
635 if (!PyList_Check(a)) {
636 PyErr_BadInternalCall();
637 return -1;
638 }
639 return list_ass_slice((PyListObject *)a, ilow, ihigh, v);
640 }
641
642 static PyObject *
643 list_inplace_repeat(PyListObject *self, Py_ssize_t n)
644 {
645 PyObject **items;
646 Py_ssize_t size, i, j, p;
647
648
649 size = PyList_GET_SIZE(self);
650 if (size == 0 || n == 1) {
651 Py_INCREF(self);
652 return (PyObject *)self;
653 }
654
655 if (n < 1) {
656 (void)list_clear(self);
657 Py_INCREF(self);
658 return (PyObject *)self;
659 }
660
661 if (size > PY_SSIZE_T_MAX / n) {
662 return PyErr_NoMemory();
663 }
664
665 if (list_resize(self, size*n) == -1)
666 return NULL;
667
668 p = size;
669 items = self->ob_item;
670 for (i = 1; i < n; i++) { /* Start counting at 1, not 0 */
671 for (j = 0; j < size; j++) {
672 PyObject *o = items[j];
673 Py_INCREF(o);
674 items[p++] = o;
675 }
676 }
677 Py_INCREF(self);
678 return (PyObject *)self;
679 }
680
681 static int
682 list_ass_item(PyListObject *a, Py_ssize_t i, PyObject *v)
683 {
684 PyObject *old_value;
685 if (i < 0 || i >= Py_SIZE(a)) {
686 PyErr_SetString(PyExc_IndexError,
687 "list assignment index out of range");
688 return -1;
689 }
690 if (v == NULL)
691 return list_ass_slice(a, i, i+1, v);
692 Py_INCREF(v);
693 old_value = a->ob_item[i];
694 a->ob_item[i] = v;
695 Py_DECREF(old_value);
696 return 0;
697 }
698
699 static PyObject *
700 listinsert(PyListObject *self, PyObject *args)
701 {
702 Py_ssize_t i;
703 PyObject *v;
704 if (!PyArg_ParseTuple(args, "nO:insert", &i, &v))
705 return NULL;
706 if (ins1(self, i, v) == 0)
707 Py_RETURN_NONE;
708 return NULL;
709 }
710
711 static PyObject *
712 listclear(PyListObject *self)
713 {
714 list_clear(self);
715 Py_RETURN_NONE;
716 }
717
718 static PyObject *
719 listcopy(PyListObject *self)
720 {
721 return list_slice(self, 0, Py_SIZE(self));
722 }
723
724 static PyObject *
725 listappend(PyListObject *self, PyObject *v)
726 {
727 if (app1(self, v) == 0)
728 Py_RETURN_NONE;
729 return NULL;
730 }
731
732 static PyObject *
733 listextend(PyListObject *self, PyObject *b)
734 {
735 PyObject *it; /* iter(v) */
736 Py_ssize_t m; /* size of self */
737 Py_ssize_t n; /* guess for size of b */
738 Py_ssize_t mn; /* m + n */
739 Py_ssize_t i;
740 PyObject *(*iternext)(PyObject *);
741
742 /* Special cases:
743 1) lists and tuples which can use PySequence_Fast ops
744 2) extending self to self requires making a copy first
745 */
746 if (PyList_CheckExact(b) || PyTuple_CheckExact(b) || (PyObject *)self == b) {
747 PyObject **src, **dest;
748 b = PySequence_Fast(b, "argument must be iterable");
749 if (!b)
750 return NULL;
751 n = PySequence_Fast_GET_SIZE(b);
752 if (n == 0) {
753 /* short circuit when b is empty */
754 Py_DECREF(b);
755 Py_RETURN_NONE;
756 }
757 m = Py_SIZE(self);
758 if (list_resize(self, m + n) == -1) {
759 Py_DECREF(b);
760 return NULL;
761 }
762 /* note that we may still have self == b here for the
763 * situation a.extend(a), but the following code works
764 * in that case too. Just make sure to resize self
765 * before calling PySequence_Fast_ITEMS.
766 */
767 /* populate the end of self with b's items */
768 src = PySequence_Fast_ITEMS(b);
769 dest = self->ob_item + m;
770 for (i = 0; i < n; i++) {
771 PyObject *o = src[i];
772 Py_INCREF(o);
773 dest[i] = o;
774 }
775 Py_DECREF(b);
776 Py_RETURN_NONE;
777 }
778
779 it = PyObject_GetIter(b);
780 if (it == NULL)
781 return NULL;
782 iternext = *it->ob_type->tp_iternext;
783
784 /* Guess a result list size. */
785 n = _PyObject_LengthHint(b, 8);
786 if (n == -1) {
787 Py_DECREF(it);
788 return NULL;
789 }
790 m = Py_SIZE(self);
791 mn = m + n;
792 if (mn >= m) {
793 /* Make room. */
794 if (list_resize(self, mn) == -1)
795 goto error;
796 /* Make the list sane again. */
797 Py_SIZE(self) = m;
798 }
799 /* Else m + n overflowed; on the chance that n lied, and there really
800 * is enough room, ignore it. If n was telling the truth, we'll
801 * eventually run out of memory during the loop.
802 */
803
804 /* Run iterator to exhaustion. */
805 for (;;) {
806 PyObject *item = iternext(it);
807 if (item == NULL) {
808 if (PyErr_Occurred()) {
809 if (PyErr_ExceptionMatches(PyExc_StopIteration))
810 PyErr_Clear();
811 else
812 goto error;
813 }
814 break;
815 }
816 if (Py_SIZE(self) < self->allocated) {
817 /* steals ref */
818 PyList_SET_ITEM(self, Py_SIZE(self), item);
819 ++Py_SIZE(self);
820 }
821 else {
822 int status = app1(self, item);
823 Py_DECREF(item); /* append creates a new ref */
824 if (status < 0)
825 goto error;
826 }
827 }
828
829 /* Cut back result list if initial guess was too large. */
830 if (Py_SIZE(self) < self->allocated)
831 list_resize(self, Py_SIZE(self)); /* shrinking can't fail */
832
833 Py_DECREF(it);
834 Py_RETURN_NONE;
835
836 error:
837 Py_DECREF(it);
838 return NULL;
839 }
840
841 PyObject *
842 _PyList_Extend(PyListObject *self, PyObject *b)
843 {
844 return listextend(self, b);
845 }
846
847 static PyObject *
848 list_inplace_concat(PyListObject *self, PyObject *other)
849 {
850 PyObject *result;
851
852 result = listextend(self, other);
853 if (result == NULL)
854 return result;
855 Py_DECREF(result);
856 Py_INCREF(self);
857 return (PyObject *)self;
858 }
859
860 static PyObject *
861 listpop(PyListObject *self, PyObject *args)
862 {
863 Py_ssize_t i = -1;
864 PyObject *v;
865 int status;
866
867 if (!PyArg_ParseTuple(args, "|n:pop", &i))
868 return NULL;
869
870 if (Py_SIZE(self) == 0) {
871 /* Special-case most common failure cause */
872 PyErr_SetString(PyExc_IndexError, "pop from empty list");
873 return NULL;
874 }
875 if (i < 0)
876 i += Py_SIZE(self);
877 if (i < 0 || i >= Py_SIZE(self)) {
878 PyErr_SetString(PyExc_IndexError, "pop index out of range");
879 return NULL;
880 }
881 v = self->ob_item[i];
882 if (i == Py_SIZE(self) - 1) {
883 status = list_resize(self, Py_SIZE(self) - 1);
884 assert(status >= 0);
885 return v; /* and v now owns the reference the list had */
886 }
887 Py_INCREF(v);
888 status = list_ass_slice(self, i, i+1, (PyObject *)NULL);
889 assert(status >= 0);
890 /* Use status, so that in a release build compilers don't
891 * complain about the unused name.
892 */
893 (void) status;
894
895 return v;
896 }
897
898 /* Reverse a slice of a list in place, from lo up to (exclusive) hi. */
899 static void
900 reverse_slice(PyObject **lo, PyObject **hi)
901 {
902 assert(lo && hi);
903
904 --hi;
905 while (lo < hi) {
906 PyObject *t = *lo;
907 *lo = *hi;
908 *hi = t;
909 ++lo;
910 --hi;
911 }
912 }
913
914 /* Lots of code for an adaptive, stable, natural mergesort. There are many
915 * pieces to this algorithm; read listsort.txt for overviews and details.
916 */
917
918 /* A sortslice contains a pointer to an array of keys and a pointer to
919 * an array of corresponding values. In other words, keys[i]
920 * corresponds with values[i]. If values == NULL, then the keys are
921 * also the values.
922 *
923 * Several convenience routines are provided here, so that keys and
924 * values are always moved in sync.
925 */
926
927 typedef struct {
928 PyObject **keys;
929 PyObject **values;
930 } sortslice;
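/* For example, when list.sort() is given a key function, listsort() below
 * fills keys[i] with keyfunc(list[i]) and points values at the original
 * items; comparisons then look only at the keys while every move is
 * applied to both arrays in lockstep, so the items end up ordered by
 * their keys (a decorate-sort-undecorate arrangement).
 */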
931
932 Py_LOCAL_INLINE(void)
933 sortslice_copy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j)
934 {
935 s1->keys[i] = s2->keys[j];
936 if (s1->values != NULL)
937 s1->values[i] = s2->values[j];
938 }
939
940 Py_LOCAL_INLINE(void)
941 sortslice_copy_incr(sortslice *dst, sortslice *src)
942 {
943 *dst->keys++ = *src->keys++;
944 if (dst->values != NULL)
945 *dst->values++ = *src->values++;
946 }
947
948 Py_LOCAL_INLINE(void)
949 sortslice_copy_decr(sortslice *dst, sortslice *src)
950 {
951 *dst->keys-- = *src->keys--;
952 if (dst->values != NULL)
953 *dst->values-- = *src->values--;
954 }
955
956
957 Py_LOCAL_INLINE(void)
958 sortslice_memcpy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j, Py_ssize_t n)
959 {
960 memcpy(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
961 if (s1->values != NULL)
962 memcpy(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
963 }
964
965 Py_LOCAL_INLINE(void)
966 sortslice_memmove(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
967 Py_ssize_t n)
968 {
969 memmove(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
970 if (s1->values != NULL)
971 memmove(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
972 }
973
974 Py_LOCAL_INLINE(void)
975 sortslice_advance(sortslice *slice, Py_ssize_t n)
976 {
977 slice->keys += n;
978 if (slice->values != NULL)
979 slice->values += n;
980 }
981
982 /* Comparison function: PyObject_RichCompareBool with Py_LT.
983 * Returns -1 on error, 1 if x < y, 0 if x >= y.
984 */
985
986 #define ISLT(X, Y) (PyObject_RichCompareBool(X, Y, Py_LT))
987
988 /* Compare X to Y via "<". Goto "fail" if the comparison raises an
989 error. Else "k" is set to true iff X<Y, and an "if (k)" block is
990 started. It makes more sense in context <wink>. X and Y are PyObject*s.
991 */
992 #define IFLT(X, Y) if ((k = ISLT(X, Y)) < 0) goto fail; \
993 if (k)
994
995 /* binarysort is the best method for sorting small arrays: it does
996 few compares, but can do data movement quadratic in the number of
997 elements.
998 [lo, hi) is a contiguous slice of a list, and is sorted via
999 binary insertion. This sort is stable.
1000 On entry, must have lo <= start <= hi, and that [lo, start) is already
1001 sorted (pass start == lo if you don't know!).
1002 If islt() complains return -1, else 0.
1003 Even in case of error, the output slice will be some permutation of
1004 the input (nothing is lost or duplicated).
1005 */
1006 static int
1007 binarysort(sortslice lo, PyObject **hi, PyObject **start)
1008 {
1009 register Py_ssize_t k;
1010 register PyObject **l, **p, **r;
1011 register PyObject *pivot;
1012
1013 assert(lo.keys <= start && start <= hi);
1014 /* assert [lo, start) is sorted */
1015 if (lo.keys == start)
1016 ++start;
1017 for (; start < hi; ++start) {
1018 /* set l to where *start belongs */
1019 l = lo.keys;
1020 r = start;
1021 pivot = *r;
1022 /* Invariants:
1023 * pivot >= all in [lo, l).
1024 * pivot < all in [r, start).
1025 * The second is vacuously true at the start.
1026 */
1027 assert(l < r);
1028 do {
1029 p = l + ((r - l) >> 1);
1030 IFLT(pivot, *p)
1031 r = p;
1032 else
1033 l = p+1;
1034 } while (l < r);
1035 assert(l == r);
1036 /* The invariants still hold, so pivot >= all in [lo, l) and
1037 pivot < all in [l, start), so pivot belongs at l. Note
1038 that if there are elements equal to pivot, l points to the
1039 first slot after them -- that's why this sort is stable.
1040 Slide over to make room.
1041 Caution: using memmove is much slower under MSVC 5;
1042 we're not usually moving many slots. */
1043 for (p = start; p > l; --p)
1044 *p = *(p-1);
1045 *l = pivot;
1046 if (lo.values != NULL) {
1047 Py_ssize_t offset = lo.values - lo.keys;
1048 p = start + offset;
1049 pivot = *p;
1050 l += offset;
1051 for (p = start + offset; p > l; --p)
1052 *p = *(p-1);
1053 *l = pivot;
1054 }
1055 }
1056 return 0;
1057
1058 fail:
1059 return -1;
1060 }
1061
1062 /*
1063 Return the length of the run beginning at lo, in the slice [lo, hi). lo < hi
1064 is required on entry. "A run" is the longest ascending sequence, with
1065
1066 lo[0] <= lo[1] <= lo[2] <= ...
1067
1068 or the longest descending sequence, with
1069
1070 lo[0] > lo[1] > lo[2] > ...
1071
1072 Boolean *descending is set to 0 in the former case, or to 1 in the latter.
1073 For its intended use in a stable mergesort, the strictness of the defn of
1074 "descending" is needed so that the caller can safely reverse a descending
1075 sequence without violating stability (strict > ensures there are no equal
1076 elements to get out of order).
1077
1078 Returns -1 in case of error.
1079 */
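/* Examples: for {1, 2, 2, 5, 3} the run is {1, 2, 2, 5}, so 4 is returned
 * with *descending == 0. For {5, 3, 3} only {5, 3} qualifies, because the
 * second 3 breaks the strictly-descending requirement, so 2 is returned
 * with *descending == 1.
 */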
1080 static Py_ssize_t
1081 count_run(PyObject **lo, PyObject **hi, int *descending)
1082 {
1083 Py_ssize_t k;
1084 Py_ssize_t n;
1085
1086 assert(lo < hi);
1087 *descending = 0;
1088 ++lo;
1089 if (lo == hi)
1090 return 1;
1091
1092 n = 2;
1093 IFLT(*lo, *(lo-1)) {
1094 *descending = 1;
1095 for (lo = lo+1; lo < hi; ++lo, ++n) {
1096 IFLT(*lo, *(lo-1))
1097 ;
1098 else
1099 break;
1100 }
1101 }
1102 else {
1103 for (lo = lo+1; lo < hi; ++lo, ++n) {
1104 IFLT(*lo, *(lo-1))
1105 break;
1106 }
1107 }
1108
1109 return n;
1110 fail:
1111 return -1;
1112 }
1113
1114 /*
1115 Locate the proper position of key in a sorted vector; if the vector contains
1116 an element equal to key, return the position immediately to the left of
1117 the leftmost equal element. [gallop_right() does the same except returns
1118 the position to the right of the rightmost equal element (if any).]
1119
1120 "a" is a sorted vector with n elements, starting at a[0]. n must be > 0.
1121
1122 "hint" is an index at which to begin the search, 0 <= hint < n. The closer
1123 hint is to the final result, the faster this runs.
1124
1125 The return value is the int k in 0..n such that
1126
1127 a[k-1] < key <= a[k]
1128
1129 pretending that *(a-1) is minus infinity and a[n] is plus infinity. IOW,
1130 key belongs at index k; or, IOW, the first k elements of a should precede
1131 key, and the last n-k should follow key.
1132
1133 Returns -1 on error. See listsort.txt for info on the method.
1134 */
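/* Example: with a = {10, 20, 30, 30, 40} (n = 5) and key = 30 the result
 * is 2, the slot just left of the leftmost 30; gallop_right() below
 * returns 4 for the same input, the slot just right of the rightmost 30.
 * Any hint in 0..4 yields the same answer; it only affects how quickly
 * the position is found.
 */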
1135 static Py_ssize_t
1136 gallop_left(PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
1137 {
1138 Py_ssize_t ofs;
1139 Py_ssize_t lastofs;
1140 Py_ssize_t k;
1141
1142 assert(key && a && n > 0 && hint >= 0 && hint < n);
1143
1144 a += hint;
1145 lastofs = 0;
1146 ofs = 1;
1147 IFLT(*a, key) {
1148 /* a[hint] < key -- gallop right, until
1149 * a[hint + lastofs] < key <= a[hint + ofs]
1150 */
1151 const Py_ssize_t maxofs = n - hint; /* &a[n-1] is highest */
1152 while (ofs < maxofs) {
1153 IFLT(a[ofs], key) {
1154 lastofs = ofs;
1155 ofs = (ofs << 1) + 1;
1156 if (ofs <= 0) /* int overflow */
1157 ofs = maxofs;
1158 }
1159 else /* key <= a[hint + ofs] */
1160 break;
1161 }
1162 if (ofs > maxofs)
1163 ofs = maxofs;
1164 /* Translate back to offsets relative to &a[0]. */
1165 lastofs += hint;
1166 ofs += hint;
1167 }
1168 else {
1169 /* key <= a[hint] -- gallop left, until
1170 * a[hint - ofs] < key <= a[hint - lastofs]
1171 */
1172 const Py_ssize_t maxofs = hint + 1; /* &a[0] is lowest */
1173 while (ofs < maxofs) {
1174 IFLT(*(a-ofs), key)
1175 break;
1176 /* key <= a[hint - ofs] */
1177 lastofs = ofs;
1178 ofs = (ofs << 1) + 1;
1179 if (ofs <= 0) /* int overflow */
1180 ofs = maxofs;
1181 }
1182 if (ofs > maxofs)
1183 ofs = maxofs;
1184 /* Translate back to positive offsets relative to &a[0]. */
1185 k = lastofs;
1186 lastofs = hint - ofs;
1187 ofs = hint - k;
1188 }
1189 a -= hint;
1190
1191 assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
1192 /* Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
1193 * right of lastofs but no farther right than ofs. Do a binary
1194 * search, with invariant a[lastofs-1] < key <= a[ofs].
1195 */
1196 ++lastofs;
1197 while (lastofs < ofs) {
1198 Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
1199
1200 IFLT(a[m], key)
1201 lastofs = m+1; /* a[m] < key */
1202 else
1203 ofs = m; /* key <= a[m] */
1204 }
1205 assert(lastofs == ofs); /* so a[ofs-1] < key <= a[ofs] */
1206 return ofs;
1207
1208 fail:
1209 return -1;
1210 }
1211
1212 /*
1213 Exactly like gallop_left(), except that if key already exists in a[0:n],
1214 finds the position immediately to the right of the rightmost equal value.
1215
1216 The return value is the int k in 0..n such that
1217
1218 a[k-1] <= key < a[k]
1219
1220 or -1 if error.
1221
1222 The code duplication is massive, but sticking to "<" comparisons makes
1223 the two routines different enough that a single routine with yet another
1224 "left or right?" flag would be much harder to follow.
1225 */
1226 static Py_ssize_t
1227 gallop_right(PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
1228 {
1229 Py_ssize_t ofs;
1230 Py_ssize_t lastofs;
1231 Py_ssize_t k;
1232
1233 assert(key && a && n > 0 && hint >= 0 && hint < n);
1234
1235 a += hint;
1236 lastofs = 0;
1237 ofs = 1;
1238 IFLT(key, *a) {
1239 /* key < a[hint] -- gallop left, until
1240 * a[hint - ofs] <= key < a[hint - lastofs]
1241 */
1242 const Py_ssize_t maxofs = hint + 1; /* &a[0] is lowest */
1243 while (ofs < maxofs) {
1244 IFLT(key, *(a-ofs)) {
1245 lastofs = ofs;
1246 ofs = (ofs << 1) + 1;
1247 if (ofs <= 0) /* int overflow */
1248 ofs = maxofs;
1249 }
1250 else /* a[hint - ofs] <= key */
1251 break;
1252 }
1253 if (ofs > maxofs)
1254 ofs = maxofs;
1255 /* Translate back to positive offsets relative to &a[0]. */
1256 k = lastofs;
1257 lastofs = hint - ofs;
1258 ofs = hint - k;
1259 }
1260 else {
1261 /* a[hint] <= key -- gallop right, until
1262 * a[hint + lastofs] <= key < a[hint + ofs]
1263 */
1264 const Py_ssize_t maxofs = n - hint; /* &a[n-1] is highest */
1265 while (ofs < maxofs) {
1266 IFLT(key, a[ofs])
1267 break;
1268 /* a[hint + ofs] <= key */
1269 lastofs = ofs;
1270 ofs = (ofs << 1) + 1;
1271 if (ofs <= 0) /* int overflow */
1272 ofs = maxofs;
1273 }
1274 if (ofs > maxofs)
1275 ofs = maxofs;
1276 /* Translate back to offsets relative to &a[0]. */
1277 lastofs += hint;
1278 ofs += hint;
1279 }
1280 a -= hint;
1281
1282 assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
1283 /* Now a[lastofs] <= key < a[ofs], so key belongs somewhere to the
1284 * right of lastofs but no farther right than ofs. Do a binary
1285 * search, with invariant a[lastofs-1] <= key < a[ofs].
1286 */
1287 ++lastofs;
1288 while (lastofs < ofs) {
1289 Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
1290
1291 IFLT(key, a[m])
1292 ofs = m; /* key < a[m] */
1293 else
1294 lastofs = m+1; /* a[m] <= key */
1295 }
1296 assert(lastofs == ofs); /* so a[ofs-1] <= key < a[ofs] */
1297 return ofs;
1298
1299 fail:
1300 return -1;
1301 }
1302
1303 /* The maximum number of entries in a MergeState's pending-runs stack.
1304 * This is enough to sort arrays of size up to about
1305 * 32 * phi ** MAX_MERGE_PENDING
1306 * where phi ~= 1.618. 85 is ridiculously large, more than enough for an array
1307 * with 2**64 elements.
1308 */
1309 #define MAX_MERGE_PENDING 85
1310
1311 /* When we get into galloping mode, we stay there until both runs win less
1312 * often than MIN_GALLOP consecutive times. See listsort.txt for more info.
1313 */
1314 #define MIN_GALLOP 7
1315
1316 /* Avoid malloc for small temp arrays. */
1317 #define MERGESTATE_TEMP_SIZE 256
1318
1319 /* One MergeState exists on the stack per invocation of mergesort. It's just
1320 * a convenient way to pass state around among the helper functions.
1321 */
1322 struct s_slice {
1323 sortslice base;
1324 Py_ssize_t len;
1325 };
1326
1327 typedef struct s_MergeState {
1328 /* This controls when we get *into* galloping mode. It's initialized
1329 * to MIN_GALLOP. merge_lo and merge_hi tend to nudge it higher for
1330 * random data, and lower for highly structured data.
1331 */
1332 Py_ssize_t min_gallop;
1333
1334 /* 'a' is temp storage to help with merges. It contains room for
1335 * alloced entries.
1336 */
1337 sortslice a; /* may point to temparray below */
1338 Py_ssize_t alloced;
1339
1340 /* A stack of n pending runs yet to be merged. Run #i starts at
1341 * address base[i] and extends for len[i] elements. It's always
1342 * true (so long as the indices are in bounds) that
1343 *
1344 * pending[i].base + pending[i].len == pending[i+1].base
1345 *
1346 * so we could cut the storage for this, but it's a minor amount,
1347 * and keeping all the info explicit simplifies the code.
1348 */
1349 int n;
1350 struct s_slice pending[MAX_MERGE_PENDING];
1351
1352 /* 'a' points to this when possible, rather than muck with malloc. */
1353 PyObject *temparray[MERGESTATE_TEMP_SIZE];
1354 } MergeState;
1355
1356 /* Conceptually a MergeState's constructor. */
1357 static void
1358 merge_init(MergeState *ms, Py_ssize_t list_size, int has_keyfunc)
1359 {
1360 assert(ms != NULL);
1361 if (has_keyfunc) {
1362 /* The temporary space for merging will need at most half the list
1363 * size rounded up. Use the minimum possible space so we can use the
1364 * rest of temparray for other things. In particular, if there is
1365 * enough extra space, listsort() will use it to store the keys.
1366 */
1367 ms->alloced = (list_size + 1) / 2;
1368
1369 /* ms->alloced describes how many keys will be stored at
1370 ms->temparray, but we also need to store the values. Hence,
1371 ms->alloced is capped at half of MERGESTATE_TEMP_SIZE. */
1372 if (MERGESTATE_TEMP_SIZE / 2 < ms->alloced)
1373 ms->alloced = MERGESTATE_TEMP_SIZE / 2;
1374 ms->a.values = &ms->temparray[ms->alloced];
1375 }
1376 else {
1377 ms->alloced = MERGESTATE_TEMP_SIZE;
1378 ms->a.values = NULL;
1379 }
1380 ms->a.keys = ms->temparray;
1381 ms->n = 0;
1382 ms->min_gallop = MIN_GALLOP;
1383 }
1384
1385 /* Free all the temp memory owned by the MergeState. This must be called
1386 * when you're done with a MergeState, and may be called before then if
1387 * you want to free the temp memory early.
1388 */
1389 static void
1390 merge_freemem(MergeState *ms)
1391 {
1392 assert(ms != NULL);
1393 if (ms->a.keys != ms->temparray)
1394 PyMem_Free(ms->a.keys);
1395 }
1396
1397 /* Ensure enough temp memory for 'need' array slots is available.
1398 * Returns 0 on success and -1 if the memory can't be gotten.
1399 */
1400 static int
1401 merge_getmem(MergeState *ms, Py_ssize_t need)
1402 {
1403 int multiplier;
1404
1405 assert(ms != NULL);
1406 if (need <= ms->alloced)
1407 return 0;
1408
1409 multiplier = ms->a.values != NULL ? 2 : 1;
1410
1411 /* Don't realloc! That can cost cycles to copy the old data, but
1412 * we don't care what's in the block.
1413 */
1414 merge_freemem(ms);
1415 if ((size_t)need > PY_SSIZE_T_MAX / sizeof(PyObject*) / multiplier) {
1416 PyErr_NoMemory();
1417 return -1;
1418 }
1419 ms->a.keys = (PyObject**)PyMem_Malloc(multiplier * need
1420 * sizeof(PyObject *));
1421 if (ms->a.keys != NULL) {
1422 ms->alloced = need;
1423 if (ms->a.values != NULL)
1424 ms->a.values = &ms->a.keys[need];
1425 return 0;
1426 }
1427 PyErr_NoMemory();
1428 return -1;
1429 }
1430 #define MERGE_GETMEM(MS, NEED) ((NEED) <= (MS)->alloced ? 0 : \
1431 merge_getmem(MS, NEED))
1432
1433 /* Merge the na elements starting at ssa with the nb elements starting at
1434 * ssb.keys = ssa.keys + na in a stable way, in-place. na and nb must be > 0.
1435 * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
1436 * should have na <= nb. See listsort.txt for more info. Return 0 if
1437 * successful, -1 if error.
1438 */
1439 static Py_ssize_t
1440 merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na,
1441 sortslice ssb, Py_ssize_t nb)
1442 {
1443 Py_ssize_t k;
1444 sortslice dest;
1445 int result = -1; /* guilty until proved innocent */
1446 Py_ssize_t min_gallop;
1447
1448 assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
1449 assert(ssa.keys + na == ssb.keys);
1450 if (MERGE_GETMEM(ms, na) < 0)
1451 return -1;
1452 sortslice_memcpy(&ms->a, 0, &ssa, 0, na);
1453 dest = ssa;
1454 ssa = ms->a;
1455
1456 sortslice_copy_incr(&dest, &ssb);
1457 --nb;
1458 if (nb == 0)
1459 goto Succeed;
1460 if (na == 1)
1461 goto CopyB;
1462
1463 min_gallop = ms->min_gallop;
1464 for (;;) {
1465 Py_ssize_t acount = 0; /* # of times A won in a row */
1466 Py_ssize_t bcount = 0; /* # of times B won in a row */
1467
1468 /* Do the straightforward thing until (if ever) one run
1469 * appears to win consistently.
1470 */
1471 for (;;) {
1472 assert(na > 1 && nb > 0);
1473 k = ISLT(ssb.keys[0], ssa.keys[0]);
1474 if (k) {
1475 if (k < 0)
1476 goto Fail;
1477 sortslice_copy_incr(&dest, &ssb);
1478 ++bcount;
1479 acount = 0;
1480 --nb;
1481 if (nb == 0)
1482 goto Succeed;
1483 if (bcount >= min_gallop)
1484 break;
1485 }
1486 else {
1487 sortslice_copy_incr(&dest, &ssa);
1488 ++acount;
1489 bcount = 0;
1490 --na;
1491 if (na == 1)
1492 goto CopyB;
1493 if (acount >= min_gallop)
1494 break;
1495 }
1496 }
1497
1498 /* One run is winning so consistently that galloping may
1499 * be a huge win. So try that, and continue galloping until
1500 * (if ever) neither run appears to be winning consistently
1501 * anymore.
1502 */
1503 ++min_gallop;
1504 do {
1505 assert(na > 1 && nb > 0);
1506 min_gallop -= min_gallop > 1;
1507 ms->min_gallop = min_gallop;
1508 k = gallop_right(ssb.keys[0], ssa.keys, na, 0);
1509 acount = k;
1510 if (k) {
1511 if (k < 0)
1512 goto Fail;
1513 sortslice_memcpy(&dest, 0, &ssa, 0, k);
1514 sortslice_advance(&dest, k);
1515 sortslice_advance(&ssa, k);
1516 na -= k;
1517 if (na == 1)
1518 goto CopyB;
1519 /* na==0 is impossible now if the comparison
1520 * function is consistent, but we can't assume
1521 * that it is.
1522 */
1523 if (na == 0)
1524 goto Succeed;
1525 }
1526 sortslice_copy_incr(&dest, &ssb);
1527 --nb;
1528 if (nb == 0)
1529 goto Succeed;
1530
1531 k = gallop_left(ssa.keys[0], ssb.keys, nb, 0);
1532 bcount = k;
1533 if (k) {
1534 if (k < 0)
1535 goto Fail;
1536 sortslice_memmove(&dest, 0, &ssb, 0, k);
1537 sortslice_advance(&dest, k);
1538 sortslice_advance(&ssb, k);
1539 nb -= k;
1540 if (nb == 0)
1541 goto Succeed;
1542 }
1543 sortslice_copy_incr(&dest, &ssa);
1544 --na;
1545 if (na == 1)
1546 goto CopyB;
1547 } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
1548 ++min_gallop; /* penalize it for leaving galloping mode */
1549 ms->min_gallop = min_gallop;
1550 }
1551 Succeed:
1552 result = 0;
1553 Fail:
1554 if (na)
1555 sortslice_memcpy(&dest, 0, &ssa, 0, na);
1556 return result;
1557 CopyB:
1558 assert(na == 1 && nb > 0);
1559 /* The last element of ssa belongs at the end of the merge. */
1560 sortslice_memmove(&dest, 0, &ssb, 0, nb);
1561 sortslice_copy(&dest, nb, &ssa, 0);
1562 return 0;
1563 }
1564
1565 /* Merge the na elements starting at ssa with the nb elements starting at
1566 * ssb.keys = ssa.keys + na in a stable way, in-place. na and nb must be > 0.
1567 * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
1568 * should have na >= nb. See listsort.txt for more info. Return 0 if
1569 * successful, -1 if error.
1570 */
1571 static Py_ssize_t
1572 merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na,
1573 sortslice ssb, Py_ssize_t nb)
1574 {
1575 Py_ssize_t k;
1576 sortslice dest, basea, baseb;
1577 int result = -1; /* guilty until proved innocent */
1578 Py_ssize_t min_gallop;
1579
1580 assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
1581 assert(ssa.keys + na == ssb.keys);
1582 if (MERGE_GETMEM(ms, nb) < 0)
1583 return -1;
1584 dest = ssb;
1585 sortslice_advance(&dest, nb-1);
1586 sortslice_memcpy(&ms->a, 0, &ssb, 0, nb);
1587 basea = ssa;
1588 baseb = ms->a;
1589 ssb.keys = ms->a.keys + nb - 1;
1590 if (ssb.values != NULL)
1591 ssb.values = ms->a.values + nb - 1;
1592 sortslice_advance(&ssa, na - 1);
1593
1594 sortslice_copy_decr(&dest, &ssa);
1595 --na;
1596 if (na == 0)
1597 goto Succeed;
1598 if (nb == 1)
1599 goto CopyA;
1600
1601 min_gallop = ms->min_gallop;
1602 for (;;) {
1603 Py_ssize_t acount = 0; /* # of times A won in a row */
1604 Py_ssize_t bcount = 0; /* # of times B won in a row */
1605
1606 /* Do the straightforward thing until (if ever) one run
1607 * appears to win consistently.
1608 */
1609 for (;;) {
1610 assert(na > 0 && nb > 1);
1611 k = ISLT(ssb.keys[0], ssa.keys[0]);
1612 if (k) {
1613 if (k < 0)
1614 goto Fail;
1615 sortslice_copy_decr(&dest, &ssa);
1616 ++acount;
1617 bcount = 0;
1618 --na;
1619 if (na == 0)
1620 goto Succeed;
1621 if (acount >= min_gallop)
1622 break;
1623 }
1624 else {
1625 sortslice_copy_decr(&dest, &ssb);
1626 ++bcount;
1627 acount = 0;
1628 --nb;
1629 if (nb == 1)
1630 goto CopyA;
1631 if (bcount >= min_gallop)
1632 break;
1633 }
1634 }
1635
1636 /* One run is winning so consistently that galloping may
1637 * be a huge win. So try that, and continue galloping until
1638 * (if ever) neither run appears to be winning consistently
1639 * anymore.
1640 */
1641 ++min_gallop;
1642 do {
1643 assert(na > 0 && nb > 1);
1644 min_gallop -= min_gallop > 1;
1645 ms->min_gallop = min_gallop;
1646 k = gallop_right(ssb.keys[0], basea.keys, na, na-1);
1647 if (k < 0)
1648 goto Fail;
1649 k = na - k;
1650 acount = k;
1651 if (k) {
1652 sortslice_advance(&dest, -k);
1653 sortslice_advance(&ssa, -k);
1654 sortslice_memmove(&dest, 1, &ssa, 1, k);
1655 na -= k;
1656 if (na == 0)
1657 goto Succeed;
1658 }
1659 sortslice_copy_decr(&dest, &ssb);
1660 --nb;
1661 if (nb == 1)
1662 goto CopyA;
1663
1664 k = gallop_left(ssa.keys[0], baseb.keys, nb, nb-1);
1665 if (k < 0)
1666 goto Fail;
1667 k = nb - k;
1668 bcount = k;
1669 if (k) {
1670 sortslice_advance(&dest, -k);
1671 sortslice_advance(&ssb, -k);
1672 sortslice_memcpy(&dest, 1, &ssb, 1, k);
1673 nb -= k;
1674 if (nb == 1)
1675 goto CopyA;
1676 /* nb==0 is impossible now if the comparison
1677 * function is consistent, but we can't assume
1678 * that it is.
1679 */
1680 if (nb == 0)
1681 goto Succeed;
1682 }
1683 sortslice_copy_decr(&dest, &ssa);
1684 --na;
1685 if (na == 0)
1686 goto Succeed;
1687 } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
1688 ++min_gallop; /* penalize it for leaving galloping mode */
1689 ms->min_gallop = min_gallop;
1690 }
1691 Succeed:
1692 result = 0;
1693 Fail:
1694 if (nb)
1695 sortslice_memcpy(&dest, -(nb-1), &baseb, 0, nb);
1696 return result;
1697 CopyA:
1698 assert(nb == 1 && na > 0);
1699 /* The first element of ssb belongs at the front of the merge. */
1700 sortslice_memmove(&dest, 1-na, &ssa, 1-na, na);
1701 sortslice_advance(&dest, -na);
1702 sortslice_advance(&ssa, -na);
1703 sortslice_copy(&dest, 0, &ssb, 0);
1704 return 0;
1705 }
1706
1707 /* Merge the two runs at stack indices i and i+1.
1708 * Returns 0 on success, -1 on error.
1709 */
1710 static Py_ssize_t
1711 merge_at(MergeState *ms, Py_ssize_t i)
1712 {
1713 sortslice ssa, ssb;
1714 Py_ssize_t na, nb;
1715 Py_ssize_t k;
1716
1717 assert(ms != NULL);
1718 assert(ms->n >= 2);
1719 assert(i >= 0);
1720 assert(i == ms->n - 2 || i == ms->n - 3);
1721
1722 ssa = ms->pending[i].base;
1723 na = ms->pending[i].len;
1724 ssb = ms->pending[i+1].base;
1725 nb = ms->pending[i+1].len;
1726 assert(na > 0 && nb > 0);
1727 assert(ssa.keys + na == ssb.keys);
1728
1729 /* Record the length of the combined runs; if i is the 3rd-last
1730 * run now, also slide over the last run (which isn't involved
1731 * in this merge). The current run i+1 goes away in any case.
1732 */
1733 ms->pending[i].len = na + nb;
1734 if (i == ms->n - 3)
1735 ms->pending[i+1] = ms->pending[i+2];
1736 --ms->n;
1737
1738 /* Where does b start in a? Elements in a before that can be
1739 * ignored (already in place).
1740 */
1741 k = gallop_right(*ssb.keys, ssa.keys, na, 0);
1742 if (k < 0)
1743 return -1;
1744 sortslice_advance(&ssa, k);
1745 na -= k;
1746 if (na == 0)
1747 return 0;
1748
1749 /* Where does a end in b? Elements in b after that can be
1750 * ignored (already in place).
1751 */
1752 nb = gallop_left(ssa.keys[na-1], ssb.keys, nb, nb-1);
1753 if (nb <= 0)
1754 return nb;
1755
1756 /* Merge what remains of the runs, using a temp array with
1757 * min(na, nb) elements.
1758 */
1759 if (na <= nb)
1760 return merge_lo(ms, ssa, na, ssb, nb);
1761 else
1762 return merge_hi(ms, ssa, na, ssb, nb);
1763 }
1764
1765 /* Examine the stack of runs waiting to be merged, merging adjacent runs
1766 * until the stack invariants are re-established:
1767 *
1768 * 1. len[-3] > len[-2] + len[-1]
1769 * 2. len[-2] > len[-1]
1770 *
1771 * See listsort.txt for more info.
1772 *
1773 * Returns 0 on success, -1 on error.
1774 */
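/* Example: if the top of the pending stack holds runs of lengths 10, 5, 6
 * then invariant 1 fails (10 <= 5 + 6). Since 10 > 6, the two topmost
 * runs are merged first, leaving 10, 11; now invariant 2 fails (10 <= 11),
 * so those are merged as well, and the loop stops once both invariants
 * hold for whatever remains on the stack.
 */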
1775 static int
1776 merge_collapse(MergeState *ms)
1777 {
1778 struct s_slice *p = ms->pending;
1779
1780 assert(ms);
1781 while (ms->n > 1) {
1782 Py_ssize_t n = ms->n - 2;
1783 if (n > 0 && p[n-1].len <= p[n].len + p[n+1].len) {
1784 if (p[n-1].len < p[n+1].len)
1785 --n;
1786 if (merge_at(ms, n) < 0)
1787 return -1;
1788 }
1789 else if (p[n].len <= p[n+1].len) {
1790 if (merge_at(ms, n) < 0)
1791 return -1;
1792 }
1793 else
1794 break;
1795 }
1796 return 0;
1797 }
1798
1799 /* Regardless of invariants, merge all runs on the stack until only one
1800 * remains. This is used at the end of the mergesort.
1801 *
1802 * Returns 0 on success, -1 on error.
1803 */
1804 static int
1805 merge_force_collapse(MergeState *ms)
1806 {
1807 struct s_slice *p = ms->pending;
1808
1809 assert(ms);
1810 while (ms->n > 1) {
1811 Py_ssize_t n = ms->n - 2;
1812 if (n > 0 && p[n-1].len < p[n+1].len)
1813 --n;
1814 if (merge_at(ms, n) < 0)
1815 return -1;
1816 }
1817 return 0;
1818 }
1819
1820 /* Compute a good value for the minimum run length; natural runs shorter
1821 * than this are boosted artificially via binary insertion.
1822 *
1823 * If n < 64, return n (it's too small to bother with fancy stuff).
1824 * Else if n is an exact power of 2, return 32.
1825 * Else return an int k, 32 <= k <= 64, such that n/k is close to, but
1826 * strictly less than, an exact power of 2.
1827 *
1828 * See listsort.txt for more info.
1829 */
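/* Examples: merge_compute_minrun(63) == 63, merge_compute_minrun(2048)
 * == 32 (2048 is an exact power of 2), and merge_compute_minrun(2049)
 * == 33, because the 1 bit shifted off the bottom sets r; 2049/33 is
 * just under 64, as the comment above requires.
 */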
1830 static Py_ssize_t
1831 merge_compute_minrun(Py_ssize_t n)
1832 {
1833 Py_ssize_t r = 0; /* becomes 1 if any 1 bits are shifted off */
1834
1835 assert(n >= 0);
1836 while (n >= 64) {
1837 r |= n & 1;
1838 n >>= 1;
1839 }
1840 return n + r;
1841 }
1842
1843 static void
1844 reverse_sortslice(sortslice *s, Py_ssize_t n)
1845 {
1846 reverse_slice(s->keys, &s->keys[n]);
1847 if (s->values != NULL)
1848 reverse_slice(s->values, &s->values[n]);
1849 }
1850
1851 /* An adaptive, stable, natural mergesort. See listsort.txt.
1852 * Returns Py_None on success, NULL on error. Even in case of error, the
1853 * list will be some permutation of its input state (nothing is lost or
1854 * duplicated).
1855 */
1856 static PyObject *
1857 listsort(PyListObject *self, PyObject *args, PyObject *kwds)
1858 {
1859 MergeState ms;
1860 Py_ssize_t nremaining;
1861 Py_ssize_t minrun;
1862 sortslice lo;
1863 Py_ssize_t saved_ob_size, saved_allocated;
1864 PyObject **saved_ob_item;
1865 PyObject **final_ob_item;
1866 PyObject *result = NULL; /* guilty until proved innocent */
1867 int reverse = 0;
1868 PyObject *keyfunc = NULL;
1869 Py_ssize_t i;
1870 static char *kwlist[] = {"key", "reverse", 0};
1871 PyObject **keys;
1872
1873 assert(self != NULL);
1874 assert (PyList_Check(self));
1875 if (args != NULL) {
1876 if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:sort",
1877 kwlist, &keyfunc, &reverse))
1878 return NULL;
1879 if (Py_SIZE(args) > 0) {
1880 PyErr_SetString(PyExc_TypeError,
1881 "must use keyword argument for key function");
1882 return NULL;
1883 }
1884 }
1885 if (keyfunc == Py_None)
1886 keyfunc = NULL;
1887
1888 /* The list is temporarily made empty, so that mutations performed
1889 * by comparison functions can't affect the slice of memory we're
1890 * sorting (allowing mutations during sorting is a core-dump
1891 * factory, since ob_item may change).
1892 */
1893 saved_ob_size = Py_SIZE(self);
1894 saved_ob_item = self->ob_item;
1895 saved_allocated = self->allocated;
1896 Py_SIZE(self) = 0;
1897 self->ob_item = NULL;
1898 self->allocated = -1; /* any operation will reset it to >= 0 */
1899
1900 if (keyfunc == NULL) {
1901 keys = NULL;
1902 lo.keys = saved_ob_item;
1903 lo.values = NULL;
1904 }
1905 else {
1906 if (saved_ob_size < MERGESTATE_TEMP_SIZE/2)
1907 /* Leverage stack space we allocated but won't otherwise use */
1908 keys = &ms.temparray[saved_ob_size+1];
1909 else {
1910 keys = PyMem_MALLOC(sizeof(PyObject *) * saved_ob_size);
1911 if (keys == NULL)
1912 return NULL;
1913 }
1914
1915 for (i = 0; i < saved_ob_size ; i++) {
1916 keys[i] = PyObject_CallFunctionObjArgs(keyfunc, saved_ob_item[i],
1917 NULL);
1918 if (keys[i] == NULL) {
1919 for (i=i-1 ; i>=0 ; i--)
1920 Py_DECREF(keys[i]);
1921 if (keys != &ms.temparray[saved_ob_size+1])
1922 PyMem_FREE(keys);
1923 goto keyfunc_fail;
1924 }
1925 }
1926
1927 lo.keys = keys;
1928 lo.values = saved_ob_item;
1929 }
1930
1931 merge_init(&ms, saved_ob_size, keys != NULL);
1932
1933 nremaining = saved_ob_size;
1934 if (nremaining < 2)
1935 goto succeed;
1936
1937 /* Reverse sort stability achieved by initially reversing the list,
1938 applying a stable forward sort, then reversing the final result. */
1939 if (reverse) {
1940 if (keys != NULL)
1941 reverse_slice(&keys[0], &keys[saved_ob_size]);
1942 reverse_slice(&saved_ob_item[0], &saved_ob_item[saved_ob_size]);
1943 }
1944
1945 /* March over the array once, left to right, finding natural runs,
1946 * and extending short natural runs to minrun elements.
1947 */
1948 minrun = merge_compute_minrun(nremaining);
1949 do {
1950 int descending;
1951 Py_ssize_t n;
1952
1953 /* Identify next run. */
1954 n = count_run(lo.keys, lo.keys + nremaining, &descending);
1955 if (n < 0)
1956 goto fail;
1957 if (descending)
1958 reverse_sortslice(&lo, n);
1959 /* If short, extend to min(minrun, nremaining). */
1960 if (n < minrun) {
1961 const Py_ssize_t force = nremaining <= minrun ?
1962 nremaining : minrun;
1963 if (binarysort(lo, lo.keys + force, lo.keys + n) < 0)
1964 goto fail;
1965 n = force;
1966 }
1967 /* Push run onto pending-runs stack, and maybe merge. */
1968 assert(ms.n < MAX_MERGE_PENDING);
1969 ms.pending[ms.n].base = lo;
1970 ms.pending[ms.n].len = n;
1971 ++ms.n;
1972 if (merge_collapse(&ms) < 0)
1973 goto fail;
1974 /* Advance to find next run. */
1975 sortslice_advance(&lo, n);
1976 nremaining -= n;
1977 } while (nremaining);
1978
1979 if (merge_force_collapse(&ms) < 0)
1980 goto fail;
1981 assert(ms.n == 1);
1982 assert(keys == NULL
1983 ? ms.pending[0].base.keys == saved_ob_item
1984 : ms.pending[0].base.keys == &keys[0]);
1985 assert(ms.pending[0].len == saved_ob_size);
1986 lo = ms.pending[0].base;
1987
1988 succeed:
1989 result = Py_None;
1990 fail:
1991 if (keys != NULL) {
1992 for (i = 0; i < saved_ob_size; i++)
1993 Py_DECREF(keys[i]);
1994 if (keys != &ms.temparray[saved_ob_size+1])
1995 PyMem_FREE(keys);
1996 }
1997
1998 if (self->allocated != -1 && result != NULL) {
1999 /* The user mucked with the list during the sort,
2000 * and we don't already have another error to report.
2001 */
2002 PyErr_SetString(PyExc_ValueError, "list modified during sort");
2003 result = NULL;
2004 }
2005
2006 if (reverse && saved_ob_size > 1)
2007 reverse_slice(saved_ob_item, saved_ob_item + saved_ob_size);
2008
2009 merge_freemem(&ms);
2010
2011 keyfunc_fail:
2012 final_ob_item = self->ob_item;
2013 i = Py_SIZE(self);
2014 Py_SIZE(self) = saved_ob_size;
2015 self->ob_item = saved_ob_item;
2016 self->allocated = saved_allocated;
2017 if (final_ob_item != NULL) {
2018 /* we cannot use list_clear() for this because it does not
2019 guarantee that the list is really empty when it returns */
2020 while (--i >= 0) {
2021 Py_XDECREF(final_ob_item[i]);
2022 }
2023 PyMem_FREE(final_ob_item);
2024 }
2025 Py_XINCREF(result);
2026 return result;
2027 }
2028 #undef IFLT
2029 #undef ISLT
2030
2031 int
2032 PyList_Sort(PyObject *v)
2033 {
2034 if (v == NULL || !PyList_Check(v)) {
2035 PyErr_BadInternalCall();
2036 return -1;
2037 }
2038 v = listsort((PyListObject *)v, (PyObject *)NULL, (PyObject *)NULL);
2039 if (v == NULL)
2040 return -1;
2041 Py_DECREF(v);
2042 return 0;
2043 }
2044
2045 static PyObject *
2046 listreverse(PyListObject *self)
2047 {
2048 if (Py_SIZE(self) > 1)
2049 reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
2050 Py_RETURN_NONE;
2051 }
2052
2053 int
2054 PyList_Reverse(PyObject *v)
2055 {
2056 PyListObject *self = (PyListObject *)v;
2057
2058 if (v == NULL || !PyList_Check(v)) {
2059 PyErr_BadInternalCall();
2060 return -1;
2061 }
2062 if (Py_SIZE(self) > 1)
2063 reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
2064 return 0;
2065 }
2066
2067 PyObject *
2068 PyList_AsTuple(PyObject *v)
2069 {
2070 PyObject *w;
2071 PyObject **p, **q;
2072 Py_ssize_t n;
2073 if (v == NULL || !PyList_Check(v)) {
2074 PyErr_BadInternalCall();
2075 return NULL;
2076 }
2077 n = Py_SIZE(v);
2078 w = PyTuple_New(n);
2079 if (w == NULL)
2080 return NULL;
2081 p = ((PyTupleObject *)w)->ob_item;
2082 q = ((PyListObject *)v)->ob_item;
2083 while (--n >= 0) {
2084 Py_INCREF(*q);
2085 *p = *q;
2086 p++;
2087 q++;
2088 }
2089 return w;
2090 }
2091
2092 static PyObject *
2093 listindex(PyListObject *self, PyObject *args)
2094 {
2095 Py_ssize_t i, start=0, stop=Py_SIZE(self);
2096 PyObject *v;
2097
2098 if (!PyArg_ParseTuple(args, "O|O&O&:index", &v,
2099 _PyEval_SliceIndex, &start,
2100 _PyEval_SliceIndex, &stop))
2101 return NULL;
2102 if (start < 0) {
2103 start += Py_SIZE(self);
2104 if (start < 0)
2105 start = 0;
2106 }
2107 if (stop < 0) {
2108 stop += Py_SIZE(self);
2109 if (stop < 0)
2110 stop = 0;
2111 }
2112 for (i = start; i < stop && i < Py_SIZE(self); i++) {
2113 int cmp = PyObject_RichCompareBool(self->ob_item[i], v, Py_EQ);
2114 if (cmp > 0)
2115 return PyLong_FromSsize_t(i);
2116 else if (cmp < 0)
2117 return NULL;
2118 }
2119 PyErr_Format(PyExc_ValueError, "%R is not in list", v);
2120 return NULL;
2121 }
2122
2123 static PyObject *
2124 listcount(PyListObject *self, PyObject *v)
2125 {
2126 Py_ssize_t count = 0;
2127 Py_ssize_t i;
2128
2129 for (i = 0; i < Py_SIZE(self); i++) {
2130 int cmp = PyObject_RichCompareBool(self->ob_item[i], v, Py_EQ);
2131 if (cmp > 0)
2132 count++;
2133 else if (cmp < 0)
2134 return NULL;
2135 }
2136 return PyLong_FromSsize_t(count);
2137 }
2138
2139 static PyObject *
2140 listremove(PyListObject *self, PyObject *v)
2141 {
2142 Py_ssize_t i;
2143
2144 for (i = 0; i < Py_SIZE(self); i++) {
2145 int cmp = PyObject_RichCompareBool(self->ob_item[i], v, Py_EQ);
2146 if (cmp > 0) {
2147 if (list_ass_slice(self, i, i+1,
2148 (PyObject *)NULL) == 0)
2149 Py_RETURN_NONE;
2150 return NULL;
2151 }
2152 else if (cmp < 0)
2153 return NULL;
2154 }
2155 PyErr_SetString(PyExc_ValueError, "list.remove(x): x not in list");
2156 return NULL;
2157 }
2158
2159 static int
2160 list_traverse(PyListObject *o, visitproc visit, void *arg)
2161 {
2162 Py_ssize_t i;
2163
2164 for (i = Py_SIZE(o); --i >= 0; )
2165 Py_VISIT(o->ob_item[i]);
2166 return 0;
2167 }
2168
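/* Rich comparison for lists.  == and != can be decided from the lengths
 * alone when they differ; otherwise the first index at which the items
 * compare unequal (via ==) is located and only that pair is re-compared
 * with the requested operator.  If no such index exists, the lengths
 * decide the result, e.g. [1, 2] < [1, 2, 3].
 */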
2169 static PyObject *
2170 list_richcompare(PyObject *v, PyObject *w, int op)
2171 {
2172 PyListObject *vl, *wl;
2173 Py_ssize_t i;
2174
2175 if (!PyList_Check(v) || !PyList_Check(w))
2176 Py_RETURN_NOTIMPLEMENTED;
2177
2178 vl = (PyListObject *)v;
2179 wl = (PyListObject *)w;
2180
2181 if (Py_SIZE(vl) != Py_SIZE(wl) && (op == Py_EQ || op == Py_NE)) {
2182 /* Shortcut: if the lengths differ, the lists differ */
2183 PyObject *res;
2184 if (op == Py_EQ)
2185 res = Py_False;
2186 else
2187 res = Py_True;
2188 Py_INCREF(res);
2189 return res;
2190 }
2191
2192 /* Search for the first index where items are different */
2193 for (i = 0; i < Py_SIZE(vl) && i < Py_SIZE(wl); i++) {
2194 int k = PyObject_RichCompareBool(vl->ob_item[i],
2195 wl->ob_item[i], Py_EQ);
2196 if (k < 0)
2197 return NULL;
2198 if (!k)
2199 break;
2200 }
2201
2202 if (i >= Py_SIZE(vl) || i >= Py_SIZE(wl)) {
2203 /* No more items to compare -- compare sizes */
2204 Py_ssize_t vs = Py_SIZE(vl);
2205 Py_ssize_t ws = Py_SIZE(wl);
2206 int cmp;
2207 PyObject *res;
2208 switch (op) {
2209 case Py_LT: cmp = vs < ws; break;
2210 case Py_LE: cmp = vs <= ws; break;
2211 case Py_EQ: cmp = vs == ws; break;
2212 case Py_NE: cmp = vs != ws; break;
2213 case Py_GT: cmp = vs > ws; break;
2214 case Py_GE: cmp = vs >= ws; break;
2215 default: return NULL; /* cannot happen */
2216 }
2217 if (cmp)
2218 res = Py_True;
2219 else
2220 res = Py_False;
2221 Py_INCREF(res);
2222 return res;
2223 }
2224
2225 /* We have an item that differs -- shortcuts for EQ/NE */
2226 if (op == Py_EQ) {
2227 Py_INCREF(Py_False);
2228 return Py_False;
2229 }
2230 if (op == Py_NE) {
2231 Py_INCREF(Py_True);
2232 return Py_True;
2233 }
2234
2235 /* Compare the final item again using the proper operator */
2236 return PyObject_RichCompare(vl->ob_item[i], wl->ob_item[i], op);
2237 }
2238
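/* tp_init for list: list() and list(iterable).  Any existing contents
 * are cleared first, then the optional argument is consumed through
 * listextend(), so list(x) and L.__init__(x) share one code path.
 */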
2239 static int
2240 list_init(PyListObject *self, PyObject *args, PyObject *kw)
2241 {
2242 PyObject *arg = NULL;
2243 static char *kwlist[] = {"sequence", 0};
2244
2245 if (!PyArg_ParseTupleAndKeywords(args, kw, "|O:list", kwlist, &arg))
2246 return -1;
2247
2248 /* Verify list invariants established by PyType_GenericAlloc() */
2249 assert(0 <= Py_SIZE(self));
2250 assert(Py_SIZE(self) <= self->allocated || self->allocated == -1);
2251 assert(self->ob_item != NULL ||
2252 self->allocated == 0 || self->allocated == -1);
2253
2254 /* Empty previous contents */
2255 if (self->ob_item != NULL) {
2256 (void)list_clear(self);
2257 }
2258 if (arg != NULL) {
2259 PyObject *rv = listextend(self, arg);
2260 if (rv == NULL)
2261 return -1;
2262 Py_DECREF(rv);
2263 }
2264 return 0;
2265 }
2266
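/* __sizeof__: the object header plus the *allocated* slots, not just the
 * slots in use, so the reported size reflects over-allocation left
 * behind by append() and insert().
 */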
2267 static PyObject *
2268 list_sizeof(PyListObject *self)
2269 {
2270 Py_ssize_t res;
2271
2272 res = sizeof(PyListObject) + self->allocated * sizeof(void*);
2273 return PyLong_FromSsize_t(res);
2274 }
2275
2276 static PyObject *list_iter(PyObject *seq);
2277 static PyObject *list_reversed(PyListObject* seq, PyObject* unused);
2278
2279 PyDoc_STRVAR(getitem_doc,
2280 "x.__getitem__(y) <==> x[y]");
2281 PyDoc_STRVAR(reversed_doc,
2282 "L.__reversed__() -- return a reverse iterator over the list");
2283 PyDoc_STRVAR(sizeof_doc,
2284 "L.__sizeof__() -- size of L in memory, in bytes");
2285 PyDoc_STRVAR(clear_doc,
2286 "L.clear() -> None -- remove all items from L");
2287 PyDoc_STRVAR(copy_doc,
2288 "L.copy() -> list -- a shallow copy of L");
2289 PyDoc_STRVAR(append_doc,
2290 "L.append(object) -> None -- append object to end");
2291 PyDoc_STRVAR(extend_doc,
2292 "L.extend(iterable) -> None -- extend list by appending elements from the iterable");
2293 PyDoc_STRVAR(insert_doc,
2294 "L.insert(index, object) -- insert object before index");
2295 PyDoc_STRVAR(pop_doc,
2296 "L.pop([index]) -> item -- remove and return item at index (default last).\n"
2297 "Raises IndexError if list is empty or index is out of range.");
2298 PyDoc_STRVAR(remove_doc,
2299 "L.remove(value) -> None -- remove first occurrence of value.\n"
2300 "Raises ValueError if the value is not present.");
2301 PyDoc_STRVAR(index_doc,
2302 "L.index(value, [start, [stop]]) -> integer -- return first index of value.\n"
2303 "Raises ValueError if the value is not present.");
2304 PyDoc_STRVAR(count_doc,
2305 "L.count(value) -> integer -- return number of occurrences of value");
2306 PyDoc_STRVAR(reverse_doc,
2307 "L.reverse() -- reverse *IN PLACE*");
2308 PyDoc_STRVAR(sort_doc,
2309 "L.sort(key=None, reverse=False) -> None -- stable sort *IN PLACE*");
2310
2311 static PyObject *list_subscript(PyListObject*, PyObject*);
2312
2313 static PyMethodDef list_methods[] = {
2314 {"__getitem__", (PyCFunction)list_subscript, METH_O|METH_COEXIST, getitem_doc},
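/* METH_COEXIST lets this entry take precedence over the slot wrapper
   generated for mp_subscript, so L.__getitem__(i) skips the wrapper
   overhead. */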
2315 {"__reversed__",(PyCFunction)list_reversed, METH_NOARGS, reversed_doc},
2316 {"__sizeof__", (PyCFunction)list_sizeof, METH_NOARGS, sizeof_doc},
2317 {"clear", (PyCFunction)listclear, METH_NOARGS, clear_doc},
2318 {"copy", (PyCFunction)listcopy, METH_NOARGS, copy_doc},
2319 {"append", (PyCFunction)listappend, METH_O, append_doc},
2320 {"insert", (PyCFunction)listinsert, METH_VARARGS, insert_doc},
2321 {"extend", (PyCFunction)listextend, METH_O, extend_doc},
2322 {"pop", (PyCFunction)listpop, METH_VARARGS, pop_doc},
2323 {"remove", (PyCFunction)listremove, METH_O, remove_doc},
2324 {"index", (PyCFunction)listindex, METH_VARARGS, index_doc},
2325 {"count", (PyCFunction)listcount, METH_O, count_doc},
2326 {"reverse", (PyCFunction)listreverse, METH_NOARGS, reverse_doc},
2327 {"sort", (PyCFunction)listsort, METH_VARARGS | METH_KEYWORDS, sort_doc},
2328 {NULL, NULL} /* sentinel */
2329 };
2330
2331 static PySequenceMethods list_as_sequence = {
2332 (lenfunc)list_length, /* sq_length */
2333 (binaryfunc)list_concat, /* sq_concat */
2334 (ssizeargfunc)list_repeat, /* sq_repeat */
2335 (ssizeargfunc)list_item, /* sq_item */
2336 0, /* sq_slice */
2337 (ssizeobjargproc)list_ass_item, /* sq_ass_item */
2338 0, /* sq_ass_slice */
2339 (objobjproc)list_contains, /* sq_contains */
2340 (binaryfunc)list_inplace_concat, /* sq_inplace_concat */
2341 (ssizeargfunc)list_inplace_repeat, /* sq_inplace_repeat */
2342 };
2343
2344 PyDoc_STRVAR(list_doc,
2345 "list() -> new empty list\n"
2346 "list(iterable) -> new list initialized from iterable's items");
2347
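/* mp_subscript: integer indices are normalized (negative values count
 * from the end) and dispatched to list_item(); step-1 slices go through
 * list_slice(); any other step builds the result item by item, e.g.
 * a[::2] copies every other element, each with its refcount bumped.
 */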
2348 static PyObject *
2349 list_subscript(PyListObject* self, PyObject* item)
2350 {
2351 if (PyIndex_Check(item)) {
2352 Py_ssize_t i;
2353 i = PyNumber_AsSsize_t(item, PyExc_IndexError);
2354 if (i == -1 && PyErr_Occurred())
2355 return NULL;
2356 if (i < 0)
2357 i += PyList_GET_SIZE(self);
2358 return list_item(self, i);
2359 }
2360 else if (PySlice_Check(item)) {
2361 Py_ssize_t start, stop, step, slicelength, cur, i;
2362 PyObject* result;
2363 PyObject* it;
2364 PyObject **src, **dest;
2365
2366 if (PySlice_GetIndicesEx(item, Py_SIZE(self),
2367 &start, &stop, &step, &slicelength) < 0) {
2368 return NULL;
2369 }
2370
2371 if (slicelength <= 0) {
2372 return PyList_New(0);
2373 }
2374 else if (step == 1) {
2375 return list_slice(self, start, stop);
2376 }
2377 else {
2378 result = PyList_New(slicelength);
2379 if (!result) return NULL;
2380
2381 src = self->ob_item;
2382 dest = ((PyListObject *)result)->ob_item;
2383 for (cur = start, i = 0; i < slicelength;
2384 cur += (size_t)step, i++) {
2385 it = src[cur];
2386 Py_INCREF(it);
2387 dest[i] = it;
2388 }
2389
2390 return result;
2391 }
2392 }
2393 else {
2394 PyErr_Format(PyExc_TypeError,
2395 "list indices must be integers, not %.200s",
2396 item->ob_type->tp_name);
2397 return NULL;
2398 }
2399 }
2400
2401 static int
2402 list_ass_subscript(PyListObject* self, PyObject* item, PyObject* value)
2403 {
2404 if (PyIndex_Check(item)) {
2405 Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
2406 if (i == -1 && PyErr_Occurred())
2407 return -1;
2408 if (i < 0)
2409 i += PyList_GET_SIZE(self);
2410 return list_ass_item(self, i, value);
2411 }
2412 else if (PySlice_Check(item)) {
2413 Py_ssize_t start, stop, step, slicelength;
2414
2415 if (PySlice_GetIndicesEx(item, Py_SIZE(self),
2416 &start, &stop, &step, &slicelength) < 0) {
2417 return -1;
2418 }
2419
2420 if (step == 1)
2421 return list_ass_slice(self, start, stop, value);
2422
2423 /* Make sure s[5:2] = [..] inserts at the right place:
2424 before 5, not before 2. */
2425 if ((step < 0 && start < stop) ||
2426 (step > 0 && start > stop))
2427 stop = start;
2428
2429 if (value == NULL) {
2430 /* delete slice */
2431 PyObject **garbage;
2432 size_t cur;
2433 Py_ssize_t i;
2434
2435 if (slicelength <= 0)
2436 return 0;
2437
2438 if (step < 0) {
2439 stop = start + 1;
2440 start = stop + step*(slicelength - 1) - 1;
2441 step = -step;
2442 }
2443
2444 assert((size_t)slicelength <=
2445 PY_SIZE_MAX / sizeof(PyObject*));
2446
2447 garbage = (PyObject**)
2448 PyMem_MALLOC(slicelength*sizeof(PyObject*));
2449 if (!garbage) {
2450 PyErr_NoMemory();
2451 return -1;
2452 }
2453
2454 /* Drawing a picture may help in following these for
2455 loops.  Basically, we memmove the parts of the
2456 list that are *not* part of the slice: step-1
2457 items for each item that is part of the slice,
2458 and then the tail end of the list that was not
2459 covered by the slice. */
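/* Worked example: for a ten-element list, deleting a[1:8:3] removes
 * indices 1, 4 and 7 (slicelength == 3).  The loop runs three times and
 * memmoves items 2-3 back by one slot, items 5-6 back by two, and items
 * 8-9 back by three; afterwards cur == 10 == Py_SIZE(self), so the tail
 * memmove below is skipped and the size drops to 7.
 */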
2460 for (cur = start, i = 0;
2461 cur < (size_t)stop;
2462 cur += step, i++) {
2463 Py_ssize_t lim = step - 1;
2464
2465 garbage[i] = PyList_GET_ITEM(self, cur);
2466
2467 if (cur + step >= (size_t)Py_SIZE(self)) {
2468 lim = Py_SIZE(self) - cur - 1;
2469 }
2470
2471 memmove(self->ob_item + cur - i,
2472 self->ob_item + cur + 1,
2473 lim * sizeof(PyObject *));
2474 }
2475 cur = start + (size_t)slicelength * step;
2476 if (cur < (size_t)Py_SIZE(self)) {
2477 memmove(self->ob_item + cur - slicelength,
2478 self->ob_item + cur,
2479 (Py_SIZE(self) - cur) *
2480 sizeof(PyObject *));
2481 }
2482
2483 Py_SIZE(self) -= slicelength;
2484 list_resize(self, Py_SIZE(self));
2485
2486 for (i = 0; i < slicelength; i++) {
2487 Py_DECREF(garbage[i]);
2488 }
2489 PyMem_FREE(garbage);
2490
2491 return 0;
2492 }
2493 else {
2494 /* assign slice */
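/* Unlike a step-1 slice, an extended slice cannot change the list's
   length: the right-hand side must supply exactly slicelength items
   (at the Python level, a[::2] = seq raises ValueError unless
   len(seq) == len(a[::2])). */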
2495 PyObject *ins, *seq;
2496 PyObject **garbage, **seqitems, **selfitems;
2497 Py_ssize_t cur, i;
2498
2499 /* protect against a[::-1] = a */
2500 if (self == (PyListObject*)value) {
2501 seq = list_slice((PyListObject*)value, 0,
2502 PyList_GET_SIZE(value));
2503 }
2504 else {
2505 seq = PySequence_Fast(value,
2506 "must assign iterable "
2507 "to extended slice");
2508 }
2509 if (!seq)
2510 return -1;
2511
2512 if (PySequence_Fast_GET_SIZE(seq) != slicelength) {
2513 PyErr_Format(PyExc_ValueError,
2514 "attempt to assign sequence of "
2515 "size %zd to extended slice of "
2516 "size %zd",
2517 PySequence_Fast_GET_SIZE(seq),
2518 slicelength);
2519 Py_DECREF(seq);
2520 return -1;
2521 }
2522
2523 if (!slicelength) {
2524 Py_DECREF(seq);
2525 return 0;
2526 }
2527
2528 garbage = (PyObject**)
2529 PyMem_MALLOC(slicelength*sizeof(PyObject*));
2530 if (!garbage) {
2531 Py_DECREF(seq);
2532 PyErr_NoMemory();
2533 return -1;
2534 }
2535
2536 selfitems = self->ob_item;
2537 seqitems = PySequence_Fast_ITEMS(seq);
2538 for (cur = start, i = 0; i < slicelength;
2539 cur += (size_t)step, i++) {
2540 garbage[i] = selfitems[cur];
2541 ins = seqitems[i];
2542 Py_INCREF(ins);
2543 selfitems[cur] = ins;
2544 }
2545
2546 for (i = 0; i < slicelength; i++) {
2547 Py_DECREF(garbage[i]);
2548 }
2549
2550 PyMem_FREE(garbage);
2551 Py_DECREF(seq);
2552
2553 return 0;
2554 }
2555 }
2556 else {
2557 PyErr_Format(PyExc_TypeError,
2558 "list indices must be integers, not %.200s",
2559 item->ob_type->tp_name);
2560 return -1;
2561 }
2562 }
2563
2564 static PyMappingMethods list_as_mapping = {
2565 (lenfunc)list_length,
2566 (binaryfunc)list_subscript,
2567 (objobjargproc)list_ass_subscript
2568 };
2569
2570 PyTypeObject PyList_Type = {
2571 PyVarObject_HEAD_INIT(&PyType_Type, 0)
2572 "list",
2573 sizeof(PyListObject),
2574 0,
2575 (destructor)list_dealloc, /* tp_dealloc */
2576 0, /* tp_print */
2577 0, /* tp_getattr */
2578 0, /* tp_setattr */
2579 0, /* tp_reserved */
2580 (reprfunc)list_repr, /* tp_repr */
2581 0, /* tp_as_number */
2582 &list_as_sequence, /* tp_as_sequence */
2583 &list_as_mapping, /* tp_as_mapping */
2584 PyObject_HashNotImplemented, /* tp_hash */
2585 0, /* tp_call */
2586 0, /* tp_str */
2587 PyObject_GenericGetAttr, /* tp_getattro */
2588 0, /* tp_setattro */
2589 0, /* tp_as_buffer */
2590 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
2591 Py_TPFLAGS_BASETYPE | Py_TPFLAGS_LIST_SUBCLASS, /* tp_flags */
2592 list_doc, /* tp_doc */
2593 (traverseproc)list_traverse, /* tp_traverse */
2594 (inquiry)list_clear, /* tp_clear */
2595 list_richcompare, /* tp_richcompare */
2596 0, /* tp_weaklistoffset */
2597 list_iter, /* tp_iter */
2598 0, /* tp_iternext */
2599 list_methods, /* tp_methods */
2600 0, /* tp_members */
2601 0, /* tp_getset */
2602 0, /* tp_base */
2603 0, /* tp_dict */
2604 0, /* tp_descr_get */
2605 0, /* tp_descr_set */
2606 0, /* tp_dictoffset */
2607 (initproc)list_init, /* tp_init */
2608 PyType_GenericAlloc, /* tp_alloc */
2609 PyType_GenericNew, /* tp_new */
2610 PyObject_GC_Del, /* tp_free */
2611 };
2612
2613
2614 /*********************** List Iterator **************************/
2615
2616 typedef struct {
2617 PyObject_HEAD
2618 Py_ssize_t it_index; /* Py_ssize_t rather than long, matching listreviterobject and Py_SIZE() */
2619 PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
2620 } listiterobject;
2621
2622 static PyObject *list_iter(PyObject *);
2623 static void listiter_dealloc(listiterobject *);
2624 static int listiter_traverse(listiterobject *, visitproc, void *);
2625 static PyObject *listiter_next(listiterobject *);
2626 static PyObject *listiter_len(listiterobject *);
2627
2628 PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it)).");
2629
2630 static PyMethodDef listiter_methods[] = {
2631 {"__length_hint__", (PyCFunction)listiter_len, METH_NOARGS, length_hint_doc},
2632 {NULL, NULL} /* sentinel */
2633 };
2634
2635 PyTypeObject PyListIter_Type = {
2636 PyVarObject_HEAD_INIT(&PyType_Type, 0)
2637 "list_iterator", /* tp_name */
2638 sizeof(listiterobject), /* tp_basicsize */
2639 0, /* tp_itemsize */
2640 /* methods */
2641 (destructor)listiter_dealloc, /* tp_dealloc */
2642 0, /* tp_print */
2643 0, /* tp_getattr */
2644 0, /* tp_setattr */
2645 0, /* tp_reserved */
2646 0, /* tp_repr */
2647 0, /* tp_as_number */
2648 0, /* tp_as_sequence */
2649 0, /* tp_as_mapping */
2650 0, /* tp_hash */
2651 0, /* tp_call */
2652 0, /* tp_str */
2653 PyObject_GenericGetAttr, /* tp_getattro */
2654 0, /* tp_setattro */
2655 0, /* tp_as_buffer */
2656 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
2657 0, /* tp_doc */
2658 (traverseproc)listiter_traverse, /* tp_traverse */
2659 0, /* tp_clear */
2660 0, /* tp_richcompare */
2661 0, /* tp_weaklistoffset */
2662 PyObject_SelfIter, /* tp_iter */
2663 (iternextfunc)listiter_next, /* tp_iternext */
2664 listiter_methods, /* tp_methods */
2665 0, /* tp_members */
2666 };
2667
2668
2669 static PyObject *
2670 list_iter(PyObject *seq)
2671 {
2672 listiterobject *it;
2673
2674 if (!PyList_Check(seq)) {
2675 PyErr_BadInternalCall();
2676 return NULL;
2677 }
2678 it = PyObject_GC_New(listiterobject, &PyListIter_Type);
2679 if (it == NULL)
2680 return NULL;
2681 it->it_index = 0;
2682 Py_INCREF(seq);
2683 it->it_seq = (PyListObject *)seq;
2684 _PyObject_GC_TRACK(it);
2685 return (PyObject *)it;
2686 }
2687
2688 static void
2689 listiter_dealloc(listiterobject *it)
2690 {
2691 _PyObject_GC_UNTRACK(it);
2692 Py_XDECREF(it->it_seq);
2693 PyObject_GC_Del(it);
2694 }
2695
2696 static int
2697 listiter_traverse(listiterobject *it, visitproc visit, void *arg)
2698 {
2699 Py_VISIT(it->it_seq);
2700 return 0;
2701 }
2702
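/* tp_iternext for list iterators.  Returning NULL without setting an
 * exception signals exhaustion; at that point the reference to the list
 * is dropped and it_seq is cleared, so the list can be freed even while
 * the spent iterator is still alive.
 */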
2703 static PyObject *
2704 listiter_next(listiterobject *it)
2705 {
2706 PyListObject *seq;
2707 PyObject *item;
2708
2709 assert(it != NULL);
2710 seq = it->it_seq;
2711 if (seq == NULL)
2712 return NULL;
2713 assert(PyList_Check(seq));
2714
2715 if (it->it_index < PyList_GET_SIZE(seq)) {
2716 item = PyList_GET_ITEM(seq, it->it_index);
2717 ++it->it_index;
2718 Py_INCREF(item);
2719 return item;
2720 }
2721
2722 Py_DECREF(seq);
2723 it->it_seq = NULL;
2724 return NULL;
2725 }
2726
2727 static PyObject *
2728 listiter_len(listiterobject *it)
2729 {
2730 Py_ssize_t len;
2731 if (it->it_seq) {
2732 len = PyList_GET_SIZE(it->it_seq) - it->it_index;
2733 if (len >= 0)
2734 return PyLong_FromSsize_t(len);
2735 }
2736 return PyLong_FromLong(0);
2737 }
2738 /*********************** List Reverse Iterator **************************/
2739
2740 typedef struct {
2741 PyObject_HEAD
2742 Py_ssize_t it_index;
2743 PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
2744 } listreviterobject;
2745
2746 static PyObject *list_reversed(PyListObject *, PyObject *);
2747 static void listreviter_dealloc(listreviterobject *);
2748 static int listreviter_traverse(listreviterobject *, visitproc, void *);
2749 static PyObject *listreviter_next(listreviterobject *);
2750 static PyObject *listreviter_len(listreviterobject *);
2751
2752 static PyMethodDef listreviter_methods[] = {
2753 {"__length_hint__", (PyCFunction)listreviter_len, METH_NOARGS, length_hint_doc},
2754 {NULL, NULL} /* sentinel */
2755 };
2756
2757 PyTypeObject PyListRevIter_Type = {
2758 PyVarObject_HEAD_INIT(&PyType_Type, 0)
2759 "list_reverseiterator", /* tp_name */
2760 sizeof(listreviterobject), /* tp_basicsize */
2761 0, /* tp_itemsize */
2762 /* methods */
2763 (destructor)listreviter_dealloc, /* tp_dealloc */
2764 0, /* tp_print */
2765 0, /* tp_getattr */
2766 0, /* tp_setattr */
2767 0, /* tp_reserved */
2768 0, /* tp_repr */
2769 0, /* tp_as_number */
2770 0, /* tp_as_sequence */
2771 0, /* tp_as_mapping */
2772 0, /* tp_hash */
2773 0, /* tp_call */
2774 0, /* tp_str */
2775 PyObject_GenericGetAttr, /* tp_getattro */
2776 0, /* tp_setattro */
2777 0, /* tp_as_buffer */
2778 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
2779 0, /* tp_doc */
2780 (traverseproc)listreviter_traverse, /* tp_traverse */
2781 0, /* tp_clear */
2782 0, /* tp_richcompare */
2783 0, /* tp_weaklistoffset */
2784 PyObject_SelfIter, /* tp_iter */
2785 (iternextfunc)listreviter_next, /* tp_iternext */
2786 listreviter_methods, /* tp_methods */
2787 0, /* tp_members */
2788 };
2789
2790 static PyObject *
2791 list_reversed(PyListObject *seq, PyObject *unused)
2792 {
2793 listreviterobject *it;
2794
2795 it = PyObject_GC_New(listreviterobject, &PyListRevIter_Type);
2796 if (it == NULL)
2797 return NULL;
2798 assert(PyList_Check(seq));
2799 it->it_index = PyList_GET_SIZE(seq) - 1;
2800 Py_INCREF(seq);
2801 it->it_seq = seq;
2802 PyObject_GC_Track(it);
2803 return (PyObject *)it;
2804 }
2805
2806 static void
2807 listreviter_dealloc(listreviterobject *it)
2808 {
2809 PyObject_GC_UnTrack(it);
2810 Py_XDECREF(it->it_seq);
2811 PyObject_GC_Del(it);
2812 }
2813
2814 static int
2815 listreviter_traverse(listreviterobject *it, visitproc visit, void *arg)
2816 {
2817 Py_VISIT(it->it_seq);
2818 return 0;
2819 }
2820
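/* tp_iternext for reversed(list).  The index runs downwards; the upper
 * bound check also covers a list that shrank after the iterator was
 * created.  On exhaustion it_index is parked at -1 and the reference to
 * the list is released.
 */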
2821 static PyObject *
2822 listreviter_next(listreviterobject *it)
2823 {
2824 PyObject *item;
2825 Py_ssize_t index = it->it_index;
2826 PyListObject *seq = it->it_seq;
2827
2828 if (index >= 0 && index < PyList_GET_SIZE(seq)) {
2829 item = PyList_GET_ITEM(seq, index);
2830 it->it_index--;
2831 Py_INCREF(item);
2832 return item;
2833 }
2834 it->it_index = -1;
2835 if (seq != NULL) {
2836 it->it_seq = NULL;
2837 Py_DECREF(seq);
2838 }
2839 return NULL;
2840 }
2841
2842 static PyObject *
2843 listreviter_len(listreviterobject *it)
2844 {
2845 Py_ssize_t len = it->it_index + 1;
2846 if (it->it_seq == NULL || PyList_GET_SIZE(it->it_seq) < len)
2847 len = 0;
2848 return PyLong_FromSsize_t(len);
2849 }