Lines matching refs:page (uses of struct page in the SLUB allocator, mm/slub.c)
352 static __always_inline void slab_lock(struct page *page) in slab_lock() argument
354 VM_BUG_ON_PAGE(PageTail(page), page); in slab_lock()
355 bit_spin_lock(PG_locked, &page->flags); in slab_lock()
358 static __always_inline void slab_unlock(struct page *page) in slab_unlock() argument
360 VM_BUG_ON_PAGE(PageTail(page), page); in slab_unlock()
361 __bit_spin_unlock(PG_locked, &page->flags); in slab_unlock()
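The two helpers above implement the per-slab lock: a bit spinlock on PG_locked in the slab page's flags, never taken on a tail page. Reconstructed from the fragments above; only the braces and the comment are added:

static __always_inline void slab_lock(struct page *page)
{
        /* The lock bit lives in the head page's flags, never a tail page's. */
        VM_BUG_ON_PAGE(PageTail(page), page);
        bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        __bit_spin_unlock(PG_locked, &page->flags);
}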
365 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
374 if (cmpxchg_double(&page->freelist, &page->counters, in __cmpxchg_double_slab()
381 slab_lock(page); in __cmpxchg_double_slab()
382 if (page->freelist == freelist_old && in __cmpxchg_double_slab()
383 page->counters == counters_old) { in __cmpxchg_double_slab()
384 page->freelist = freelist_new; in __cmpxchg_double_slab()
385 page->counters = counters_new; in __cmpxchg_double_slab()
386 slab_unlock(page); in __cmpxchg_double_slab()
389 slab_unlock(page); in __cmpxchg_double_slab()
402 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in cmpxchg_double_slab() argument
410 if (cmpxchg_double(&page->freelist, &page->counters, in cmpxchg_double_slab()
420 slab_lock(page); in cmpxchg_double_slab()
421 if (page->freelist == freelist_old && in cmpxchg_double_slab()
422 page->counters == counters_old) { in cmpxchg_double_slab()
423 page->freelist = freelist_new; in cmpxchg_double_slab()
424 page->counters = counters_new; in cmpxchg_double_slab()
425 slab_unlock(page); in cmpxchg_double_slab()
429 slab_unlock(page); in cmpxchg_double_slab()
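Both __cmpxchg_double_slab() and cmpxchg_double_slab() update page->freelist and page->counters as a pair: first by attempting one atomic cmpxchg_double(), then by falling back to the per-slab bit spinlock. A simplified sketch assembled from the fragments above; the CONFIG_HAVE_CMPXCHG_DOUBLE guards, the __CMPXCHG_DOUBLE cache-flag test, the interrupt handling of the locked variant, and the failure statistics of the real functions are omitted:

static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new,
                const char *n)
{
        /* Fast path: swap the freelist pointer and packed counters atomically. */
        if (cmpxchg_double(&page->freelist, &page->counters,
                           freelist_old, counters_old,
                           freelist_new, counters_new))
                return true;

        /* Slow path: serialize on the slab's bit spinlock and compare by hand. */
        slab_lock(page);
        if (page->freelist == freelist_old &&
            page->counters == counters_old) {
                page->freelist = freelist_new;
                page->counters = counters_new;
                slab_unlock(page);
                return true;
        }
        slab_unlock(page);

        return false;
}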
450 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) in get_map() argument
453 void *addr = page_address(page); in get_map()
455 for (p = page->freelist; p; p = get_freepointer(s, p)) in get_map()
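get_map() records every object still on the slab's freelist in a caller-supplied bitmap, so debug and validation code can tell free objects from allocated ones. A sketch; treating the object index as (p - addr) / s->size is an assumption:

static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
        void *p;
        void *addr = page_address(page);

        /* Every object reachable through the freelist is free: mark its bit. */
        for (p = page->freelist; p; p = get_freepointer(s, p))
                set_bit((p - addr) / s->size, map);
}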
509 struct page *page, void *object) in check_valid_pointer() argument
516 base = page_address(page); in check_valid_pointer()
519 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
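The bounds test above encodes the slab layout: a valid object pointer must lie in [base, base + page->objects * s->size) and sit at an exact multiple of s->size from the base. A sketch under those assumptions (the real helper also undoes red-zone and KASAN tag adjustments before testing):

static inline int check_valid_pointer(struct kmem_cache *s,
                                      struct page *page, void *object)
{
        void *base;

        if (!object)
                return 1;

        base = page_address(page);
        if (object < base || object >= base + page->objects * s->size ||
            (object - base) % s->size)
                return 0;               /* outside the slab or mis-aligned */

        return 1;
}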
612 static void print_page_info(struct page *page) in print_page_info() argument
615 page, page->objects, page->inuse, page->freelist, page->flags); in print_page_info()
647 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) in print_trailer() argument
650 u8 *addr = page_address(page); in print_trailer()
654 print_page_info(page); in print_trailer()
689 void object_err(struct kmem_cache *s, struct page *page, in object_err() argument
693 print_trailer(s, page, object); in object_err()
696 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, in slab_err() argument
706 print_page_info(page); in slab_err()
733 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, in check_bytes_and_report() argument
753 print_trailer(s, page, object); in check_bytes_and_report()
797 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) in check_pad_bytes() argument
814 return check_bytes_and_report(s, page, p, "Object padding", in check_pad_bytes()
819 static int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
831 start = page_address(page); in slab_pad_check()
832 length = page_size(page); in slab_pad_check()
847 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); in slab_pad_check()
854 static int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
861 if (!check_bytes_and_report(s, page, object, "Redzone", in check_object()
865 if (!check_bytes_and_report(s, page, object, "Redzone", in check_object()
870 check_bytes_and_report(s, page, p, "Alignment padding", in check_object()
878 (!check_bytes_and_report(s, page, p, "Poison", p, in check_object()
880 !check_bytes_and_report(s, page, p, "Poison", in check_object()
886 check_pad_bytes(s, page, p); in check_object()
897 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { in check_object()
898 object_err(s, page, p, "Freepointer corrupt"); in check_object()
910 static int check_slab(struct kmem_cache *s, struct page *page) in check_slab() argument
916 if (!PageSlab(page)) { in check_slab()
917 slab_err(s, page, "Not a valid slab page"); in check_slab()
921 maxobj = order_objects(compound_order(page), s->size); in check_slab()
922 if (page->objects > maxobj) { in check_slab()
923 slab_err(s, page, "objects %u > max %u", in check_slab()
924 page->objects, maxobj); in check_slab()
927 if (page->inuse > page->objects) { in check_slab()
928 slab_err(s, page, "inuse %u > max %u", in check_slab()
929 page->inuse, page->objects); in check_slab()
933 slab_pad_check(s, page); in check_slab()
941 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) in on_freelist() argument
948 fp = page->freelist; in on_freelist()
949 while (fp && nr <= page->objects) { in on_freelist()
952 if (!check_valid_pointer(s, page, fp)) { in on_freelist()
954 object_err(s, page, object, in on_freelist()
958 slab_err(s, page, "Freepointer corrupt"); in on_freelist()
959 page->freelist = NULL; in on_freelist()
960 page->inuse = page->objects; in on_freelist()
971 max_objects = order_objects(compound_order(page), s->size); in on_freelist()
975 if (page->objects != max_objects) { in on_freelist()
976 slab_err(s, page, "Wrong number of objects. Found %d but should be %d", in on_freelist()
977 page->objects, max_objects); in on_freelist()
978 page->objects = max_objects; in on_freelist()
981 if (page->inuse != page->objects - nr) { in on_freelist()
982 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d", in on_freelist()
983 page->inuse, page->objects - nr); in on_freelist()
984 page->inuse = page->objects - nr; in on_freelist()
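on_freelist() answers whether `search` is on the slab's freelist and, as a side effect, repairs counters that disagree with what it just counted. A condensed sketch; the detailed error reporting and the handling of a corrupt pointer in the middle of the chain are simplified:

static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
        void *fp = page->freelist;
        void *object = NULL;
        int nr = 0, max_objects;

        /* Walk the freelist, bounded by the object count, looking for 'search'. */
        while (fp && nr <= page->objects) {
                if (fp == search)
                        return 1;
                if (!check_valid_pointer(s, page, fp)) {
                        /* Corrupt link: report and truncate the whole chain. */
                        slab_err(s, page, "Freepointer corrupt");
                        page->freelist = NULL;
                        page->inuse = page->objects;
                        return 0;
                }
                object = fp;
                fp = get_freepointer(s, object);
                nr++;
        }

        /* Self-heal counters that do not match what was just counted. */
        max_objects = order_objects(compound_order(page), s->size);
        if (page->objects != max_objects)
                page->objects = max_objects;
        if (page->inuse != page->objects - nr)
                page->inuse = page->objects - nr;

        return search == NULL;
}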
990 static void trace(struct kmem_cache *s, struct page *page, void *object, in trace() argument
997 object, page->inuse, in trace()
998 page->freelist); in trace()
1012 struct kmem_cache_node *n, struct page *page) in add_full() argument
1018 list_add(&page->slab_list, &n->full); in add_full()
1021 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
1027 list_del(&page->slab_list); in remove_full()
1067 static void setup_object_debug(struct kmem_cache *s, struct page *page, in setup_object_debug() argument
1078 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) in setup_page_debug() argument
1084 memset(addr, POISON_INUSE, page_size(page)); in setup_page_debug()
1089 struct page *page, void *object) in alloc_consistency_checks() argument
1091 if (!check_slab(s, page)) in alloc_consistency_checks()
1094 if (!check_valid_pointer(s, page, object)) { in alloc_consistency_checks()
1095 object_err(s, page, object, "Freelist Pointer check fails"); in alloc_consistency_checks()
1099 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) in alloc_consistency_checks()
1106 struct page *page, in alloc_debug_processing() argument
1110 if (!alloc_consistency_checks(s, page, object)) in alloc_debug_processing()
1117 trace(s, page, object, 1); in alloc_debug_processing()
1122 if (PageSlab(page)) { in alloc_debug_processing()
1129 page->inuse = page->objects; in alloc_debug_processing()
1130 page->freelist = NULL; in alloc_debug_processing()
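alloc_debug_processing() is the debug hook on the allocation slow path: verify the slab and the object, then record the allocation; on failure, declare every object used so the corrupted freelist is never walked again. A condensed sketch; the SLAB_CONSISTENCY_CHECKS / SLAB_STORE_USER flag tests and the object re-initialization are left out:

static noinline int alloc_debug_processing(struct kmem_cache *s,
                                           struct page *page,
                                           void *object, unsigned long addr)
{
        if (!alloc_consistency_checks(s, page, object))
                goto bad;

        /* Success: record an allocation trace for this object. */
        trace(s, page, object, 1);
        return 1;

bad:
        if (PageSlab(page)) {
                /*
                 * Damage control: mark all objects used so the corrupted
                 * freelist is never handed out again.
                 */
                page->inuse = page->objects;
                page->freelist = NULL;
        }
        return 0;
}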
1136 struct page *page, void *object, unsigned long addr) in free_consistency_checks() argument
1138 if (!check_valid_pointer(s, page, object)) { in free_consistency_checks()
1139 slab_err(s, page, "Invalid object pointer 0x%p", object); in free_consistency_checks()
1143 if (on_freelist(s, page, object)) { in free_consistency_checks()
1144 object_err(s, page, object, "Object already free"); in free_consistency_checks()
1148 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) in free_consistency_checks()
1151 if (unlikely(s != page->slab_cache)) { in free_consistency_checks()
1152 if (!PageSlab(page)) { in free_consistency_checks()
1153 slab_err(s, page, "Attempt to free object(0x%p) outside of slab", in free_consistency_checks()
1155 } else if (!page->slab_cache) { in free_consistency_checks()
1160 object_err(s, page, object, in free_consistency_checks()
1169 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1173 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
1180 slab_lock(page); in free_debug_processing()
1183 if (!check_slab(s, page)) in free_debug_processing()
1191 if (!free_consistency_checks(s, page, object, addr)) in free_debug_processing()
1197 trace(s, page, object, 0); in free_debug_processing()
1210 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", in free_debug_processing()
1213 slab_unlock(page); in free_debug_processing()
1343 struct page *page, void *object) {} in setup_object_debug() argument
1345 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {} in setup_page_debug() argument
1348 struct page *page, void *object, unsigned long addr) { return 0; } in alloc_debug_processing() argument
1351 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1355 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
1357 static inline int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
1360 struct page *page) {} in add_full() argument
1362 struct page *page) {} in remove_full() argument
1472 static void *setup_object(struct kmem_cache *s, struct page *page, in setup_object() argument
1475 setup_object_debug(s, page, object); in setup_object()
1488 static inline struct page *alloc_slab_page(struct kmem_cache *s, in alloc_slab_page()
1491 struct page *page; in alloc_slab_page() local
1495 page = alloc_pages(flags, order); in alloc_slab_page()
1497 page = __alloc_pages_node(node, flags, order); in alloc_slab_page()
1499 if (page && charge_slab_page(page, flags, order, s)) { in alloc_slab_page()
1500 __free_pages(page, order); in alloc_slab_page()
1501 page = NULL; in alloc_slab_page()
1504 return page; in alloc_slab_page()
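alloc_slab_page() is where SLUB asks the page allocator for memory: allocate a high-order page (node-local if a node was requested), then charge it to slab accounting, undoing the allocation if charging fails. Reconstructed from the fragments; the NUMA_NO_NODE test and the oo_order() use are assumptions consistent with this era:

static inline struct page *alloc_slab_page(struct kmem_cache *s,
                gfp_t flags, int node, struct kmem_cache_order_objects oo)
{
        struct page *page;
        unsigned int order = oo_order(oo);

        if (node == NUMA_NO_NODE)
                page = alloc_pages(flags, order);
        else
                page = __alloc_pages_node(node, flags, order);

        /* Undo the allocation if accounting/charging fails. */
        if (page && charge_slab_page(page, flags, order, s)) {
                __free_pages(page, order);
                page = NULL;
        }

        return page;
}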
1549 static void *next_freelist_entry(struct kmem_cache *s, struct page *page, in next_freelist_entry() argument
1571 static bool shuffle_freelist(struct kmem_cache *s, struct page *page) in shuffle_freelist() argument
1578 if (page->objects < 2 || !s->random_seq) in shuffle_freelist()
1584 page_limit = page->objects * s->size; in shuffle_freelist()
1585 start = fixup_red_left(s, page_address(page)); in shuffle_freelist()
1588 cur = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
1590 cur = setup_object(s, page, cur); in shuffle_freelist()
1591 page->freelist = cur; in shuffle_freelist()
1593 for (idx = 1; idx < page->objects; idx++) { in shuffle_freelist()
1594 next = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
1596 next = setup_object(s, page, next); in shuffle_freelist()
1610 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page) in shuffle_freelist() argument
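shuffle_freelist() builds the initial freelist in a precomputed random order (CONFIG_SLAB_FREELIST_RANDOM); the stub above simply returns false so allocate_slab() falls back to address order. A sketch of the real variant, assembled from its fragments; the choice of the random starting position is paraphrased:

static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
        void *start, *cur, *next;
        unsigned long idx, pos, page_limit, freelist_count;

        if (page->objects < 2 || !s->random_seq)
                return false;

        freelist_count = oo_objects(s->oo);
        pos = get_random_int() % freelist_count;        /* random starting point */

        page_limit = page->objects * s->size;
        start = fixup_red_left(s, page_address(page));

        /* The first entry becomes the head of the randomized freelist. */
        cur = next_freelist_entry(s, page, &pos, start, page_limit,
                                  freelist_count);
        cur = setup_object(s, page, cur);
        page->freelist = cur;

        /* Chain the remaining objects in the precomputed random order. */
        for (idx = 1; idx < page->objects; idx++) {
                next = next_freelist_entry(s, page, &pos, start, page_limit,
                                           freelist_count);
                next = setup_object(s, page, next);
                set_freepointer(s, cur, next);
                cur = next;
        }
        set_freepointer(s, cur, NULL);

        return true;
}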
1616 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab()
1618 struct page *page; in allocate_slab() local
1640 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1641 if (unlikely(!page)) { in allocate_slab()
1648 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1649 if (unlikely(!page)) in allocate_slab()
1654 page->objects = oo_objects(oo); in allocate_slab()
1656 page->slab_cache = s; in allocate_slab()
1657 __SetPageSlab(page); in allocate_slab()
1658 if (page_is_pfmemalloc(page)) in allocate_slab()
1659 SetPageSlabPfmemalloc(page); in allocate_slab()
1661 kasan_poison_slab(page); in allocate_slab()
1663 start = page_address(page); in allocate_slab()
1665 setup_page_debug(s, page, start); in allocate_slab()
1667 shuffle = shuffle_freelist(s, page); in allocate_slab()
1671 start = setup_object(s, page, start); in allocate_slab()
1672 page->freelist = start; in allocate_slab()
1673 for (idx = 0, p = start; idx < page->objects - 1; idx++) { in allocate_slab()
1675 next = setup_object(s, page, next); in allocate_slab()
1682 page->inuse = page->objects; in allocate_slab()
1683 page->frozen = 1; in allocate_slab()
1688 if (!page) in allocate_slab()
1691 inc_slabs_node(s, page_to_nid(page), page->objects); in allocate_slab()
1693 return page; in allocate_slab()
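When the freelist is not randomized, allocate_slab() links the objects in address order, each object's free pointer naming the next one, and hands the slab back fully "in use" and frozen (owned by the allocating cpu, not yet on any node list). An excerpt-style sketch of that part of the function; `next = p + s->size` and the trailing set_freepointer() are assumptions consistent with the loop shown above:

        shuffle = shuffle_freelist(s, page);

        if (!shuffle) {
                /* No randomization: link the objects in address order. */
                start = fixup_red_left(s, start);
                start = setup_object(s, page, start);
                page->freelist = start;
                for (idx = 0, p = start; idx < page->objects - 1; idx++) {
                        next = p + s->size;
                        next = setup_object(s, page, next);
                        set_freepointer(s, p, next);
                        p = next;
                }
                set_freepointer(s, p, NULL);
        }

        page->inuse = page->objects;
        page->frozen = 1;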
1696 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab()
1710 static void __free_slab(struct kmem_cache *s, struct page *page) in __free_slab() argument
1712 int order = compound_order(page); in __free_slab()
1718 slab_pad_check(s, page); in __free_slab()
1719 for_each_object(p, s, page_address(page), in __free_slab()
1720 page->objects) in __free_slab()
1721 check_object(s, page, p, SLUB_RED_INACTIVE); in __free_slab()
1724 __ClearPageSlabPfmemalloc(page); in __free_slab()
1725 __ClearPageSlab(page); in __free_slab()
1727 page->mapping = NULL; in __free_slab()
1730 uncharge_slab_page(page, order, s); in __free_slab()
1731 __free_pages(page, order); in __free_slab()
1736 struct page *page = container_of(h, struct page, rcu_head); in rcu_free_slab() local
1738 __free_slab(page->slab_cache, page); in rcu_free_slab()
1741 static void free_slab(struct kmem_cache *s, struct page *page) in free_slab() argument
1744 call_rcu(&page->rcu_head, rcu_free_slab); in free_slab()
1746 __free_slab(s, page); in free_slab()
1749 static void discard_slab(struct kmem_cache *s, struct page *page) in discard_slab() argument
1751 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
1752 free_slab(s, page); in discard_slab()
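free_slab() picks between immediate release and RCU-deferred release (for SLAB_TYPESAFE_BY_RCU caches), and discard_slab() is the wrapper that also drops the per-node slab count. Reconstructed from the fragments; the flag test is the expected one for this era:

static void free_slab(struct kmem_cache *s, struct page *page)
{
        if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
                call_rcu(&page->rcu_head, rcu_free_slab);
        else
                __free_slab(s, page);
}

static void discard_slab(struct kmem_cache *s, struct page *page)
{
        dec_slabs_node(s, page_to_nid(page), page->objects);
        free_slab(s, page);
}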
1759 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) in __add_partial() argument
1763 list_add_tail(&page->slab_list, &n->partial); in __add_partial()
1765 list_add(&page->slab_list, &n->partial); in __add_partial()
1769 struct page *page, int tail) in add_partial() argument
1772 __add_partial(n, page, tail); in add_partial()
1776 struct page *page) in remove_partial() argument
1779 list_del(&page->slab_list); in remove_partial()
1790 struct kmem_cache_node *n, struct page *page, in acquire_slab() argument
1795 struct page new; in acquire_slab()
1804 freelist = page->freelist; in acquire_slab()
1805 counters = page->counters; in acquire_slab()
1809 new.inuse = page->objects; in acquire_slab()
1818 if (!__cmpxchg_double_slab(s, page, in acquire_slab()
1824 remove_partial(n, page); in acquire_slab()
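acquire_slab() tries to take a slab off a node's partial list for a cpu: snapshot freelist and counters, mark the slab frozen (and, in "mode", claim every object), then publish the change with __cmpxchg_double_slab() and unlink the slab. A condensed sketch; the lockdep assertion and debug checks are omitted:

static inline void *acquire_slab(struct kmem_cache *s,
                struct kmem_cache_node *n, struct page *page,
                int mode, int *objects)
{
        void *freelist;
        unsigned long counters;
        struct page new;

        /* Snapshot the freelist and the packed counters of the partial slab. */
        freelist = page->freelist;
        counters = page->counters;
        new.counters = counters;
        *objects = new.objects - new.inuse;

        if (mode) {
                /* Claim every object: the cpu gets the whole freelist. */
                new.inuse = page->objects;
                new.freelist = NULL;
        } else {
                new.freelist = freelist;
        }
        new.frozen = 1;

        /* Publish the new state; bail out if another cpu raced with us. */
        if (!__cmpxchg_double_slab(s, page,
                        freelist, counters,
                        new.freelist, new.counters,
                        "acquire_slab"))
                return NULL;

        remove_partial(n, page);
        return freelist;
}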
1829 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1830 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1838 struct page *page, *page2; in get_partial_node() local
1853 list_for_each_entry_safe(page, page2, &n->partial, slab_list) { in get_partial_node()
1856 if (!pfmemalloc_match(page, flags)) in get_partial_node()
1859 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1865 c->page = page; in get_partial_node()
1869 put_cpu_partial(s, page, 0); in get_partial_node()
2038 static void deactivate_slab(struct kmem_cache *s, struct page *page, in deactivate_slab() argument
2042 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
2047 struct page new; in deactivate_slab()
2048 struct page old; in deactivate_slab()
2050 if (page->freelist) { in deactivate_slab()
2068 prior = page->freelist; in deactivate_slab()
2069 counters = page->counters; in deactivate_slab()
2075 } while (!__cmpxchg_double_slab(s, page, in deactivate_slab()
2099 old.freelist = page->freelist; in deactivate_slab()
2100 old.counters = page->counters; in deactivate_slab()
2142 remove_partial(n, page); in deactivate_slab()
2144 remove_full(s, n, page); in deactivate_slab()
2147 add_partial(n, page, tail); in deactivate_slab()
2149 add_full(s, n, page); in deactivate_slab()
2153 if (!__cmpxchg_double_slab(s, page, in deactivate_slab()
2168 discard_slab(s, page); in deactivate_slab()
2172 c->page = NULL; in deactivate_slab()
2188 struct page *page, *discard_page = NULL; in unfreeze_partials() local
2190 while ((page = c->partial)) { in unfreeze_partials()
2191 struct page new; in unfreeze_partials()
2192 struct page old; in unfreeze_partials()
2194 c->partial = page->next; in unfreeze_partials()
2196 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
2207 old.freelist = page->freelist; in unfreeze_partials()
2208 old.counters = page->counters; in unfreeze_partials()
2216 } while (!__cmpxchg_double_slab(s, page, in unfreeze_partials()
2222 page->next = discard_page; in unfreeze_partials()
2223 discard_page = page; in unfreeze_partials()
2225 add_partial(n, page, DEACTIVATE_TO_TAIL); in unfreeze_partials()
2234 page = discard_page; in unfreeze_partials()
2238 discard_slab(s, page); in unfreeze_partials()
2251 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) in put_cpu_partial() argument
2254 struct page *oldpage; in put_cpu_partial()
2284 pobjects += page->objects - page->inuse; in put_cpu_partial()
2286 page->pages = pages; in put_cpu_partial()
2287 page->pobjects = pobjects; in put_cpu_partial()
2288 page->next = oldpage; in put_cpu_partial()
2290 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) in put_cpu_partial()
2306 deactivate_slab(s, c->page, c->freelist, c); in flush_slab()
2320 if (c->page) in __flush_cpu_slab()
2338 return c->page || slub_percpu_partial(c); in has_cpu_slab()
2369 static inline int node_match(struct page *page, int node) in node_match() argument
2372 if (node != NUMA_NO_NODE && page_to_nid(page) != node) in node_match()
2379 static int count_free(struct page *page) in count_free() argument
2381 return page->objects - page->inuse; in count_free()
2392 int (*get_count)(struct page *)) in count_partial() argument
2396 struct page *page; in count_partial() local
2399 list_for_each_entry(page, &n->partial, slab_list) in count_partial()
2400 x += get_count(page); in count_partial()
2448 struct page *page; in new_slab_objects() local
2457 page = new_slab(s, flags, node); in new_slab_objects()
2458 if (page) { in new_slab_objects()
2460 if (c->page) in new_slab_objects()
2467 freelist = page->freelist; in new_slab_objects()
2468 page->freelist = NULL; in new_slab_objects()
2471 c->page = page; in new_slab_objects()
2478 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) in pfmemalloc_match() argument
2480 if (unlikely(PageSlabPfmemalloc(page))) in pfmemalloc_match()
2496 static inline void *get_freelist(struct kmem_cache *s, struct page *page) in get_freelist() argument
2498 struct page new; in get_freelist()
2503 freelist = page->freelist; in get_freelist()
2504 counters = page->counters; in get_freelist()
2509 new.inuse = page->objects; in get_freelist()
2512 } while (!__cmpxchg_double_slab(s, page, in get_freelist()
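get_freelist() detaches whatever is left on the cpu slab's freelist in one atomic update, marking every object in use and keeping the slab frozen only if objects remained. Reconstructed from the fragments; the VM_BUG_ON on the frozen bit is omitted:

static inline void *get_freelist(struct kmem_cache *s, struct page *page)
{
        struct page new;
        unsigned long counters;
        void *freelist;

        do {
                freelist = page->freelist;
                counters = page->counters;

                new.counters = counters;
                new.inuse = page->objects;
                /* Keep the slab frozen only if objects were left on it. */
                new.frozen = freelist != NULL;

        } while (!__cmpxchg_double_slab(s, page,
                        freelist, counters,
                        NULL, new.counters,
                        "get_freelist"));

        return freelist;
}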
2543 struct page *page; in ___slab_alloc() local
2545 page = c->page; in ___slab_alloc()
2546 if (!page) in ___slab_alloc()
2550 if (unlikely(!node_match(page, node))) { in ___slab_alloc()
2556 if (unlikely(!node_match(page, searchnode))) { in ___slab_alloc()
2558 deactivate_slab(s, page, c->freelist, c); in ___slab_alloc()
2568 if (unlikely(!pfmemalloc_match(page, gfpflags))) { in ___slab_alloc()
2569 deactivate_slab(s, page, c->freelist, c); in ___slab_alloc()
2578 freelist = get_freelist(s, page); in ___slab_alloc()
2581 c->page = NULL; in ___slab_alloc()
2594 VM_BUG_ON(!c->page->frozen); in ___slab_alloc()
2602 page = c->page = slub_percpu_partial(c); in ___slab_alloc()
2603 slub_set_percpu_partial(c, page); in ___slab_alloc()
2615 page = c->page; in ___slab_alloc()
2616 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) in ___slab_alloc()
2621 !alloc_debug_processing(s, page, freelist, addr)) in ___slab_alloc()
2624 deactivate_slab(s, page, get_freepointer(s, freelist), c); in ___slab_alloc()
2679 struct page *page; in slab_alloc_node() local
2720 page = c->page; in slab_alloc_node()
2721 if (unlikely(!object || !node_match(page, node))) { in slab_alloc_node()
2828 static void __slab_free(struct kmem_cache *s, struct page *page, in __slab_free() argument
2835 struct page new; in __slab_free()
2843 !free_debug_processing(s, page, head, tail, cnt, addr)) in __slab_free()
2851 prior = page->freelist; in __slab_free()
2852 counters = page->counters; in __slab_free()
2871 n = get_node(s, page_to_nid(page)); in __slab_free()
2885 } while (!cmpxchg_double_slab(s, page, in __slab_free()
2897 put_cpu_partial(s, page, 1); in __slab_free()
2917 remove_full(s, n, page); in __slab_free()
2918 add_partial(n, page, DEACTIVATE_TO_TAIL); in __slab_free()
2929 remove_partial(n, page); in __slab_free()
2933 remove_full(s, n, page); in __slab_free()
2938 discard_slab(s, page); in __slab_free()
2957 struct page *page, void *head, void *tail, in do_slab_free() argument
2979 if (likely(page == c->page)) { in do_slab_free()
2992 __slab_free(s, page, head, tail_obj, cnt, addr); in do_slab_free()
2996 static __always_inline void slab_free(struct kmem_cache *s, struct page *page, in slab_free() argument
3005 do_slab_free(s, page, head, tail, cnt, addr); in slab_free()
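do_slab_free() is the freeing fast path: if the objects belong to the current cpu slab, splice them onto the per-cpu freelist with a tid-protected cmpxchg; otherwise fall through to __slab_free(). A simplified sketch; the preemption-safe re-read of the cpu slab, the memory barrier and the statistics of the real function are omitted:

static __always_inline void do_slab_free(struct kmem_cache *s,
                struct page *page, void *head, void *tail,
                int cnt, unsigned long addr)
{
        void *tail_obj = tail ? : head;
        struct kmem_cache_cpu *c;
        unsigned long tid;

redo:
        /* Snapshot the per-cpu slab and its transaction id. */
        tid = this_cpu_read(s->cpu_slab->tid);
        c = raw_cpu_ptr(s->cpu_slab);

        if (likely(page == c->page)) {
                /* Fast path: splice the freed chain onto the cpu freelist. */
                set_freepointer(s, tail_obj, c->freelist);
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                c->freelist, tid,
                                head, next_tid(tid))))
                        goto redo;
        } else {
                /* Slow path: the objects belong to some other slab. */
                __slab_free(s, page, head, tail_obj, cnt, addr);
        }
}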
3026 struct page *page; member
3052 struct page *page; in build_detached_freelist() local
3055 df->page = NULL; in build_detached_freelist()
3065 page = virt_to_head_page(object); in build_detached_freelist()
3068 if (unlikely(!PageSlab(page))) { in build_detached_freelist()
3069 BUG_ON(!PageCompound(page)); in build_detached_freelist()
3071 __free_pages(page, compound_order(page)); in build_detached_freelist()
3076 df->s = page->slab_cache; in build_detached_freelist()
3082 df->page = page; in build_detached_freelist()
3095 if (df->page == virt_to_head_page(object)) { in build_detached_freelist()
3126 if (!df.page) in kmem_cache_free_bulk()
3129 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); in kmem_cache_free_bulk()
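build_detached_freelist() exists to serve the bulk-free API: callers hand kmem_cache_free_bulk() an arbitrary array of objects and it regroups them per slab page before calling slab_free(). A hypothetical usage sketch; the cache, struct and function names are made up for illustration:

#include <linux/slab.h>

struct demo_obj {
        u64 id;
};

static int demo_bulk(struct kmem_cache *cache)
{
        void *objs[16];
        int n;

        /* Returns the number of objects actually allocated, 0 on failure. */
        n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
        if (!n)
                return -ENOMEM;

        /* ... use the objects ... */

        /*
         * The objects may span several slabs; the detached-freelist logic
         * above batches them per slab before handing them to slab_free().
         */
        kmem_cache_free_bulk(cache, n, objs);
        return 0;
}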
3368 struct page *page; in early_kmem_cache_node_alloc() local
3373 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); in early_kmem_cache_node_alloc()
3375 BUG_ON(!page); in early_kmem_cache_node_alloc()
3376 if (page_to_nid(page) != node) { in early_kmem_cache_node_alloc()
3381 n = page->freelist; in early_kmem_cache_node_alloc()
3389 page->freelist = get_freepointer(kmem_cache_node, n); in early_kmem_cache_node_alloc()
3390 page->inuse = 1; in early_kmem_cache_node_alloc()
3391 page->frozen = 0; in early_kmem_cache_node_alloc()
3394 inc_slabs_node(kmem_cache_node, node, page->objects); in early_kmem_cache_node_alloc()
3400 __add_partial(n, page, DEACTIVATE_TO_HEAD); in early_kmem_cache_node_alloc()
3671 static void list_slab_objects(struct kmem_cache *s, struct page *page, in list_slab_objects() argument
3675 void *addr = page_address(page); in list_slab_objects()
3677 unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC); in list_slab_objects()
3680 slab_err(s, page, text, s->name); in list_slab_objects()
3681 slab_lock(page); in list_slab_objects()
3683 get_map(s, page, map); in list_slab_objects()
3684 for_each_object(p, s, addr, page->objects) { in list_slab_objects()
3691 slab_unlock(page); in list_slab_objects()
3704 struct page *page, *h; in free_partial() local
3708 list_for_each_entry_safe(page, h, &n->partial, slab_list) { in free_partial()
3709 if (!page->inuse) { in free_partial()
3710 remove_partial(n, page); in free_partial()
3711 list_add(&page->slab_list, &discard); in free_partial()
3713 list_slab_objects(s, page, in free_partial()
3719 list_for_each_entry_safe(page, h, &discard, slab_list) in free_partial()
3720 discard_slab(s, page); in free_partial()
3811 struct page *page; in kmalloc_large_node() local
3816 page = alloc_pages_node(node, flags, order); in kmalloc_large_node()
3817 if (page) { in kmalloc_large_node()
3818 ptr = page_address(page); in kmalloc_large_node()
3819 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, in kmalloc_large_node()
3866 void __check_heap_object(const void *ptr, unsigned long n, struct page *page, in __check_heap_object() argument
3876 s = page->slab_cache; in __check_heap_object()
3879 if (ptr < page_address(page)) in __check_heap_object()
3884 offset = (ptr - page_address(page)) % s->size; in __check_heap_object()
3919 struct page *page; in __ksize() local
3924 page = virt_to_head_page(object); in __ksize()
3926 if (unlikely(!PageSlab(page))) { in __ksize()
3927 WARN_ON(!PageCompound(page)); in __ksize()
3928 return page_size(page); in __ksize()
3931 return slab_ksize(page->slab_cache); in __ksize()
3937 struct page *page; in kfree() local
3945 page = virt_to_head_page(x); in kfree()
3946 if (unlikely(!PageSlab(page))) { in kfree()
3947 unsigned int order = compound_order(page); in kfree()
3949 BUG_ON(!PageCompound(page)); in kfree()
3951 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, in kfree()
3953 __free_pages(page, order); in kfree()
3956 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); in kfree()
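kfree() dispatches on the page type: objects from a slab go back to their owning cache via slab_free(), while large kmalloc() allocations are bare compound pages that are simply uncharged from the vmstat counter and returned to the page allocator. A condensed sketch; tracing and the KASAN/debug hooks are omitted:

void kfree(const void *x)
{
        struct page *page;
        void *object = (void *)x;

        if (unlikely(ZERO_OR_NULL_PTR(x)))
                return;

        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
                /* Large kmalloc: a bare compound page, not a slab. */
                unsigned int order = compound_order(page);

                BUG_ON(!PageCompound(page));
                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
                                    -(1 << order));
                __free_pages(page, order);
                return;
        }
        /* Slab object: hand it back to its owning cache. */
        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
}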
3976 struct page *page; in __kmem_cache_shrink() local
3977 struct page *t; in __kmem_cache_shrink()
3997 list_for_each_entry_safe(page, t, &n->partial, slab_list) { in __kmem_cache_shrink()
3998 int free = page->objects - page->inuse; in __kmem_cache_shrink()
4006 if (free == page->objects) { in __kmem_cache_shrink()
4007 list_move(&page->slab_list, &discard); in __kmem_cache_shrink()
4010 list_move(&page->slab_list, promote + free - 1); in __kmem_cache_shrink()
4023 list_for_each_entry_safe(page, t, &discard, slab_list) in __kmem_cache_shrink()
4024 discard_slab(s, page); in __kmem_cache_shrink()
4207 struct page *p; in bootstrap()
4374 static int count_inuse(struct page *page) in count_inuse() argument
4376 return page->inuse; in count_inuse()
4379 static int count_total(struct page *page) in count_total() argument
4381 return page->objects; in count_total()
4386 static int validate_slab(struct kmem_cache *s, struct page *page, in validate_slab() argument
4390 void *addr = page_address(page); in validate_slab()
4392 if (!check_slab(s, page) || in validate_slab()
4393 !on_freelist(s, page, NULL)) in validate_slab()
4397 bitmap_zero(map, page->objects); in validate_slab()
4399 get_map(s, page, map); in validate_slab()
4400 for_each_object(p, s, addr, page->objects) { in validate_slab()
4402 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) in validate_slab()
4406 for_each_object(p, s, addr, page->objects) in validate_slab()
4408 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) in validate_slab()
4413 static void validate_slab_slab(struct kmem_cache *s, struct page *page, in validate_slab_slab() argument
4416 slab_lock(page); in validate_slab_slab()
4417 validate_slab(s, page, map); in validate_slab_slab()
4418 slab_unlock(page); in validate_slab_slab()
4425 struct page *page; in validate_slab_node() local
4430 list_for_each_entry(page, &n->partial, slab_list) { in validate_slab_node()
4431 validate_slab_slab(s, page, map); in validate_slab_node()
4441 list_for_each_entry(page, &n->full, slab_list) { in validate_slab_node()
4442 validate_slab_slab(s, page, map); in validate_slab_node()
4597 struct page *page, enum track_item alloc, in process_slab() argument
4600 void *addr = page_address(page); in process_slab()
4603 bitmap_zero(map, page->objects); in process_slab()
4604 get_map(s, page, map); in process_slab()
4606 for_each_object(p, s, addr, page->objects) in process_slab()
4631 struct page *page; in list_locations() local
4637 list_for_each_entry(page, &n->partial, slab_list) in list_locations()
4638 process_slab(&t, s, page, alloc, map); in list_locations()
4639 list_for_each_entry(page, &n->full, slab_list) in list_locations()
4640 process_slab(&t, s, page, alloc, map); in list_locations()
4806 struct page *page; in show_slab_objects() local
4808 page = READ_ONCE(c->page); in show_slab_objects()
4809 if (!page) in show_slab_objects()
4812 node = page_to_nid(page); in show_slab_objects()
4814 x = page->objects; in show_slab_objects()
4816 x = page->inuse; in show_slab_objects()
4823 page = slub_percpu_partial_read_once(c); in show_slab_objects()
4824 if (page) { in show_slab_objects()
4825 node = page_to_nid(page); in show_slab_objects()
4831 x = page->pages; in show_slab_objects()
5060 struct page *page; in slabs_cpu_partial_show() local
5062 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5064 if (page) { in slabs_cpu_partial_show()
5065 pages += page->pages; in slabs_cpu_partial_show()
5066 objects += page->pobjects; in slabs_cpu_partial_show()
5074 struct page *page; in slabs_cpu_partial_show() local
5076 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5078 if (page && len < PAGE_SIZE - 20) in slabs_cpu_partial_show()
5080 page->pobjects, page->pages); in slabs_cpu_partial_show()