Lines matching refs: p (mm/slub.c)
128 void *fixup_red_left(struct kmem_cache *s, void *p) in fixup_red_left() argument
131 p += s->red_left_pad; in fixup_red_left()
133 return p; in fixup_red_left()
223 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
291 void *p; in get_freepointer_safe() local
297 probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p)); in get_freepointer_safe()
298 return freelist_ptr(s, p, freepointer_addr); in get_freepointer_safe()
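
On a free object, SLUB keeps the next-free pointer inside the object itself, at offset s->offset. get_freepointer_safe() reads it with probe_kernel_read() so a corrupted freelist faults gracefully instead of oopsing the walker, and freelist_ptr() decodes the obfuscated value when CONFIG_SLAB_FREELIST_HARDENED is on. A minimal userspace sketch of that XOR-style obfuscation; the cookie and the exact mixing are assumptions (the kernel's mixing differs across versions), the point is only that no raw pointer ever sits in freed memory:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical analog of the CONFIG_SLAB_FREELIST_HARDENED encoding:
 * store ptr ^ per-cache-cookie ^ storage-address instead of ptr. */
static uintptr_t cache_random = 0x5a5aa5a5UL;	/* assumed per-cache cookie */

static void *encode_free_ptr(void *next, void *slot)
{
	return (void *)((uintptr_t)next ^ cache_random ^ (uintptr_t)slot);
}

static void *decode_free_ptr(void *stored, void *slot)	/* freelist_ptr() analog */
{
	return (void *)((uintptr_t)stored ^ cache_random ^ (uintptr_t)slot);
}

int main(void)
{
	_Alignas(void *) char obj[64];
	void *next = (void *)0x1000;
	void **slot = (void **)obj;	/* the freepointer lives inside the object */

	*slot = encode_free_ptr(next, slot);
	printf("stored  %p\n", *slot);				/* scrambled */
	printf("decoded %p\n", decode_free_ptr(*slot, slot));	/* 0x1000 again */
	return 0;
}
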
319 static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr) in slab_index() argument
321 return (kasan_reset_tag(p) - addr) / s->size; in slab_index()
452 void *p; in get_map() local
455 for (p = page->freelist; p; p = get_freepointer(s, p)) in get_map()
456 set_bit(slab_index(p, s, addr), map); in get_map()
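
slab_index() maps an object pointer to its ordinal inside the slab, (p - addr) / s->size, and get_map() walks the page's freelist setting one bit per free object so the debug loops can tell free slots from allocated ones. A sketch over a plain buffer, assuming a toy object size and the intrusive freelist in each free object's first word:

#include <stdio.h>
#include <string.h>

#define OBJ_SIZE 32		/* assumed s->size */
#define NR_OBJS  8

/* slab_index() analog */
static unsigned int obj_index(char *p, char *base)
{
	return (unsigned int)((p - base) / OBJ_SIZE);
}

int main(void)
{
	_Alignas(char *) char slab[OBJ_SIZE * NR_OBJS];
	unsigned long map = 0;	/* one bit per object, like get_map()'s bitmap */
	char *p;

	/* Chain objects 1, 3 and 6 into a freelist: each free object's
	 * first word points at the next free object, NULL terminates. */
	memset(slab, 0, sizeof(slab));
	*(char **)(slab + 1 * OBJ_SIZE) = slab + 3 * OBJ_SIZE;
	*(char **)(slab + 3 * OBJ_SIZE) = slab + 6 * OBJ_SIZE;
	*(char **)(slab + 6 * OBJ_SIZE) = NULL;

	/* get_map() analog: walk the freelist, mark every free slot */
	for (p = slab + 1 * OBJ_SIZE; p; p = *(char **)p)
		map |= 1UL << obj_index(p, slab);

	for (unsigned int i = 0; i < NR_OBJS; i++)
		printf("object %u: %s\n", i,
		       (map & (1UL << i)) ? "free" : "in use");
	return 0;
}
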
467 static inline void *restore_red_left(struct kmem_cache *s, void *p) in restore_red_left() argument
470 p -= s->red_left_pad; in restore_red_left()
472 return p; in restore_red_left()
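
fixup_red_left() and restore_red_left() are a matched pair: with SLAB_RED_ZONE, every slot begins with s->red_left_pad bytes of left red zone, so the pointer handed to callers is the raw slot plus that pad, and the debug code steps back by the same amount to reach the true slot start. A sketch with an assumed fixed pad:

#include <stdio.h>

#define RED_LEFT_PAD 16	/* assumed; the kernel sizes this per cache */

/* fixup_red_left() analog: skip the left red zone */
static char *fixup_red_left(char *slot) { return slot + RED_LEFT_PAD; }

/* restore_red_left() analog: step back to the raw slot start */
static char *restore_red_left(char *obj) { return obj - RED_LEFT_PAD; }

int main(void)
{
	char slot[64];
	char *obj = fixup_red_left(slot);	/* what callers see */

	printf("slot=%p object=%p pad=%td\n",
	       (void *)slot, (void *)obj, obj - restore_red_left(obj));
	return 0;
}
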
539 struct track *p; in get_track() local
542 p = object + s->offset + sizeof(void *); in get_track()
544 p = object + s->inuse; in get_track()
546 return p + alloc; in get_track()
552 struct track *p = get_track(s, object, alloc); in set_track() local
559 nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3); in set_track()
563 p->addrs[nr_entries] = 0; in set_track()
565 p->addr = addr; in set_track()
566 p->cpu = smp_processor_id(); in set_track()
567 p->pid = current->pid; in set_track()
568 p->when = jiffies; in set_track()
570 memset(p, 0, sizeof(struct track)); in set_track()
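
get_track() locates the struct track records that debug caches append past the payload: after the out-of-object free pointer when s->offset is set, otherwise at s->inuse. Two records sit back to back, selected by pointer arithmetic with alloc (TRACK_ALLOC or TRACK_FREE), and set_track() fills in the caller, a saved stack trace, CPU, PID and jiffies, or zeroes the record on clear. A trimmed layout sketch with assumed sizes:

#include <stdio.h>
#include <string.h>

enum track_item { TRACK_ALLOC = 0, TRACK_FREE = 1 };

/* Trimmed-down analog of SLUB's struct track (no addrs[] array) */
struct track {
	unsigned long addr;	/* caller that allocated/freed the object */
	int cpu;
	int pid;
	unsigned long when;	/* jiffies analog */
};

#define OBJECT_SIZE 32		/* assumed payload size (s->inuse analog) */

/* get_track() analog: the two records sit just past the payload */
static struct track *get_track(char *object, enum track_item alloc)
{
	return (struct track *)(object + OBJECT_SIZE) + alloc;
}

int main(void)
{
	long storage[(OBJECT_SIZE + 2 * sizeof(struct track)) / sizeof(long)];
	char *slot = (char *)storage;
	struct track *t = get_track(slot, TRACK_FREE);

	memset(storage, 0, sizeof(storage));
	t->addr = 0xdeadbeef;	/* pretend return address of the freeing caller */
	t->pid = 42;
	printf("free track at offset %td, pid=%d\n", (char *)t - slot, t->pid);
	return 0;
}
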
647 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) in print_trailer() argument
652 print_tracking(s, p); in print_trailer()
657 p, p - addr, get_freepointer(s, p)); in print_trailer()
660 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, in print_trailer()
662 else if (p > addr + 16) in print_trailer()
663 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); in print_trailer()
665 print_section(KERN_ERR, "Object ", p, in print_trailer()
668 print_section(KERN_ERR, "Redzone ", p + s->object_size, in print_trailer()
683 print_section(KERN_ERR, "Padding ", p + off, in print_trailer()
712 u8 *p = object; in init_object() local
715 memset(p - s->red_left_pad, val, s->red_left_pad); in init_object()
718 memset(p, POISON_FREE, s->object_size - 1); in init_object()
719 p[s->object_size - 1] = POISON_END; in init_object()
723 memset(p + s->object_size, val, s->inuse - s->object_size); in init_object()
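
init_object() paints the slot when poisoning and red zones are enabled: the left red zone is filled with val (SLUB_RED_ACTIVE 0xcc for live objects, SLUB_RED_INACTIVE 0xbb for free ones), the payload becomes POISON_FREE bytes terminated by POISON_END, and the gap up to s->inuse becomes the right red zone. A byte-layout sketch using the kernel's poison values but assumed pad and object sizes:

#include <stdio.h>
#include <string.h>

/* Values from include/linux/poison.h */
#define POISON_FREE	0x6b
#define POISON_END	0xa5
#define RED_ACTIVE	0xcc	/* SLUB_RED_ACTIVE */

#define RED_LEFT_PAD	8	/* assumed sizes; real ones are per cache */
#define OBJ_SIZE	16
#define INUSE		24	/* object_size plus right red zone */

int main(void)
{
	unsigned char slot[RED_LEFT_PAD + INUSE];
	unsigned char *p = slot + RED_LEFT_PAD;	/* object as callers see it */

	/* init_object() analog for a live object (val = RED_ACTIVE) */
	memset(p - RED_LEFT_PAD, RED_ACTIVE, RED_LEFT_PAD);	/* left red zone */
	memset(p, POISON_FREE, OBJ_SIZE - 1);			/* poisoned payload */
	p[OBJ_SIZE - 1] = POISON_END;				/* end marker */
	memset(p + OBJ_SIZE, RED_ACTIVE, INUSE - OBJ_SIZE);	/* right red zone */

	for (size_t i = 0; i < sizeof(slot); i++)
		printf("%02x%s", slot[i], (i + 1) % 8 ? " " : "\n");
	return 0;
}
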
797 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) in check_pad_bytes() argument
814 return check_bytes_and_report(s, page, p, "Object padding", in check_pad_bytes()
815 p + off, POISON_INUSE, size_from_object(s) - off); in check_pad_bytes()
857 u8 *p = object; in check_object() local
870 check_bytes_and_report(s, page, p, "Alignment padding", in check_object()
878 (!check_bytes_and_report(s, page, p, "Poison", p, in check_object()
880 !check_bytes_and_report(s, page, p, "Poison", in check_object()
881 p + s->object_size - 1, POISON_END, 1))) in check_object()
886 check_pad_bytes(s, page, p); in check_object()
897 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { in check_object()
898 object_err(s, page, p, "Freepointer corrupt"); in check_object()
904 set_freepointer(s, p, NULL); in check_object()
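
check_object() verifies that layout later: a free object's payload must still read POISON_FREE ending in POISON_END, the padding must be intact, and a free pointer that points outside the slab is reported and reset to NULL so the freelist chain stops at the damaged object instead of following garbage. A minimal scan analog of check_bytes_and_report() (the kernel scans with memchr_inv(); a plain loop stands in here):

#include <stdio.h>
#include <string.h>

#define POISON_FREE	0x6b
#define POISON_END	0xa5

/* Report the first byte that differs from the expected poison value */
static int check_bytes(const unsigned char *start, size_t len,
		       unsigned char value)
{
	for (size_t i = 0; i < len; i++)
		if (start[i] != value) {
			fprintf(stderr, "poison overwritten at +%zu: 0x%02x\n",
				i, start[i]);
			return 0;
		}
	return 1;
}

int main(void)
{
	unsigned char obj[16];

	memset(obj, POISON_FREE, sizeof(obj) - 1);
	obj[sizeof(obj) - 1] = POISON_END;

	obj[5] = 0x42;	/* simulate a use-after-free scribble */

	if (!check_bytes(obj, sizeof(obj) - 1, POISON_FREE) ||
	    obj[sizeof(obj) - 1] != POISON_END)
		fprintf(stderr, "object corrupted\n");
	return 0;
}
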
1621 void *start, *p, *next; in allocate_slab() local
1673 for (idx = 0, p = start; idx < page->objects - 1; idx++) { in allocate_slab()
1674 next = p + s->size; in allocate_slab()
1676 set_freepointer(s, p, next); in allocate_slab()
1677 p = next; in allocate_slab()
1679 set_freepointer(s, p, NULL); in allocate_slab()
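
allocate_slab() threads the fresh page into a freelist with exactly this loop shape: each object's free pointer is set to the slot s->size bytes further on, and the last object gets NULL. The same idea over a flat buffer, with assumed sizes:

#include <stdio.h>
#include <stdlib.h>

#define OBJ_SIZE 32	/* assumed s->size */
#define NR_OBJS  4	/* assumed page->objects */

int main(void)
{
	char *slab = malloc(OBJ_SIZE * NR_OBJS);
	char *p = slab, *next;

	if (!slab)
		return 1;

	/* allocate_slab() analog: link every object to its neighbour */
	for (int idx = 0; idx < NR_OBJS - 1; idx++) {
		next = p + OBJ_SIZE;
		*(char **)p = next;	/* set_freepointer() analog */
		p = next;
	}
	*(char **)p = NULL;		/* last object terminates the list */

	/* Walk it back to prove the chain is intact */
	for (p = slab; p; p = *(char **)p)
		printf("free object at +%td\n", p - slab);

	free(slab);
	return 0;
}
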
1716 void *p; in __free_slab() local
1719 for_each_object(p, s, page_address(page), in __free_slab()
1721 check_object(s, page, p, SLUB_RED_INACTIVE); in __free_slab()
2635 void *p; in __slab_alloc() local
2648 p = ___slab_alloc(s, gfpflags, node, addr, c); in __slab_alloc()
2650 return p; in __slab_alloc()
3047 void **p, struct detached_freelist *df) in build_detached_freelist() argument
3058 object = p[--size]; in build_detached_freelist()
3072 p[size] = NULL; /* mark object processed */ in build_detached_freelist()
3086 p[size] = NULL; /* mark object processed */ in build_detached_freelist()
3090 object = p[--size]; in build_detached_freelist()
3100 p[size] = NULL; /* mark object processed */ in build_detached_freelist()
3117 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
3125 size = build_detached_freelist(s, size, p, &df); in kmem_cache_free_bulk()
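
kmem_cache_free_bulk() loops over build_detached_freelist(), which takes the last unprocessed entry of p[], chains every other entry that lives on the same page onto a freelist detached from the cache structures, NULLs the processed slots, and returns the index where the next round should resume; each detached list is then freed in one slab operation. A grouping sketch, with a toy arena standing in for pages and virt_to_head_page() (the kernel also caps the backward lookahead, which is omitted here):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 64			/* toy page size for the sketch */

static _Alignas(void *) char arena[4 * PAGE_SIZE];	/* pretend 4-page heap */

static int page_of(void *obj)		/* virt_to_head_page() analog */
{
	return (int)(((char *)obj - arena) / PAGE_SIZE);
}

/* build_detached_freelist() analog: take the last unprocessed object,
 * chain every same-page object in p[] onto it through its first word,
 * NULL out processed slots, and return where the next round resumes. */
static size_t build_group(size_t size, void **p, void **list, int *page)
{
	size_t first_skipped = 0;
	void *object;

	do {
		object = p[--size];	/* skip already-processed slots */
	} while (!object && size);
	if (!object)
		return 0;

	*page = page_of(object);
	*(void **)object = NULL;	/* set_freepointer(s, object, NULL) */
	*list = object;
	p[size] = NULL;			/* mark object processed */

	while (size) {
		object = p[--size];
		if (!object)
			continue;
		if (page_of(object) == *page) {
			*(void **)object = *list;	/* grow the list */
			*list = object;
			p[size] = NULL;
			continue;
		}
		if (!first_skipped)
			first_skipped = size + 1;
	}
	return first_skipped;
}

int main(void)
{
	/* Objects interleaved across pages 0, 1, 0, 1 */
	void *p[4] = { arena, arena + PAGE_SIZE,
		       arena + 16, arena + PAGE_SIZE + 16 };
	size_t size = 4;

	while (size) {			/* kmem_cache_free_bulk() analog */
		void *list;
		int page;

		size = build_group(size, p, &list, &page);
		printf("detached list on page %d:", page);
		for (void *o = list; o; o = *(void **)o)
			printf(" +%td", (char *)o - arena);
		printf("\n");
	}
	return 0;
}
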
3136 void **p) in kmem_cache_alloc_bulk() argument
3161 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, in kmem_cache_alloc_bulk()
3163 if (unlikely(!p[i])) in kmem_cache_alloc_bulk()
3167 maybe_wipe_obj_freeptr(s, p[i]); in kmem_cache_alloc_bulk()
3172 p[i] = object; in kmem_cache_alloc_bulk()
3173 maybe_wipe_obj_freeptr(s, p[i]); in kmem_cache_alloc_bulk()
3183 memset(p[j], 0, s->object_size); in kmem_cache_alloc_bulk()
3187 slab_post_alloc_hook(s, flags, size, p); in kmem_cache_alloc_bulk()
3191 slab_post_alloc_hook(s, flags, i, p); in kmem_cache_alloc_bulk()
3192 __kmem_cache_free_bulk(s, i, p); in kmem_cache_alloc_bulk()
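
kmem_cache_alloc_bulk() is all-or-nothing: objects come from the per-CPU freelist or, when that runs dry, from the ___slab_alloc() slow path; any failure frees everything grabbed so far, and __GFP_ZERO clearing is deferred until after the interrupts-off fast path. A sketch of that unwind pattern, with malloc/free standing in for the freelist and slow path:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* kmem_cache_alloc_bulk() analog: allocate 'size' objects or none */
static int alloc_bulk(size_t obj_size, size_t size, void **p, int zero)
{
	size_t i;

	for (i = 0; i < size; i++) {
		p[i] = malloc(obj_size);
		if (!p[i])
			goto error;	/* unwind: free what we got */
	}
	if (zero)		/* __GFP_ZERO analog, done after the loop */
		for (i = 0; i < size; i++)
			memset(p[i], 0, obj_size);
	return (int)size;

error:
	while (i--)
		free(p[i]);
	return 0;
}

int main(void)
{
	void *objs[8];

	if (alloc_bulk(64, 8, objs, 1)) {
		printf("got all 8 objects, first byte = %d\n",
		       ((char *)objs[0])[0]);
		for (int i = 0; i < 8; i++)
			free(objs[i]);
	}
	return 0;
}
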
3676 void *p; in list_slab_objects() local
3684 for_each_object(p, s, addr, page->objects) { in list_slab_objects()
3686 if (!test_bit(slab_index(p, s, addr), map)) { in list_slab_objects()
3687 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr); in list_slab_objects()
3688 print_tracking(s, p); in list_slab_objects()
4207 struct page *p; in bootstrap() local
4209 list_for_each_entry(p, &n->partial, slab_list) in bootstrap()
4210 p->slab_cache = s; in bootstrap()
4213 list_for_each_entry(p, &n->full, slab_list) in bootstrap()
4214 p->slab_cache = s; in bootstrap()
4389 void *p; in validate_slab() local
4400 for_each_object(p, s, addr, page->objects) { in validate_slab()
4401 if (test_bit(slab_index(p, s, addr), map)) in validate_slab()
4402 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) in validate_slab()
4406 for_each_object(p, s, addr, page->objects) in validate_slab()
4407 if (!test_bit(slab_index(p, s, addr), map)) in validate_slab()
4408 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) in validate_slab()
4601 void *p; in process_slab() local
4606 for_each_object(p, s, addr, page->objects) in process_slab()
4607 if (!test_bit(slab_index(p, s, addr), map)) in process_slab()
4608 add_location(t, s, get_track(s, p, alloc)); in process_slab()
4699 u8 *p; in resiliency_test() local
4708 p = kzalloc(16, GFP_KERNEL); in resiliency_test()
4709 p[16] = 0x12; in resiliency_test()
4711 p + 16); in resiliency_test()
4716 p = kzalloc(32, GFP_KERNEL); in resiliency_test()
4717 p[32 + sizeof(void *)] = 0x34; in resiliency_test()
4719 p); in resiliency_test()
4723 p = kzalloc(64, GFP_KERNEL); in resiliency_test()
4724 p += 64 + (get_cycles() & 0xff) * sizeof(void *); in resiliency_test()
4725 *p = 0x56; in resiliency_test()
4727 p); in resiliency_test()
4732 p = kzalloc(128, GFP_KERNEL); in resiliency_test()
4733 kfree(p); in resiliency_test()
4734 *p = 0x78; in resiliency_test()
4735 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); in resiliency_test()
4738 p = kzalloc(256, GFP_KERNEL); in resiliency_test()
4739 kfree(p); in resiliency_test()
4740 p[50] = 0x9a; in resiliency_test()
4741 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p); in resiliency_test()
4744 p = kzalloc(512, GFP_KERNEL); in resiliency_test()
4745 kfree(p); in resiliency_test()
4746 p[512] = 0xab; in resiliency_test()
4747 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); in resiliency_test()
5672 char *p = name; in create_unique_id() local
5676 *p++ = ':'; in create_unique_id()
5685 *p++ = 'd'; in create_unique_id()
5687 *p++ = 'D'; in create_unique_id()
5689 *p++ = 'a'; in create_unique_id()
5691 *p++ = 'F'; in create_unique_id()
5693 *p++ = 'A'; in create_unique_id()
5694 if (p != name + 1) in create_unique_id()
5695 *p++ = '-'; in create_unique_id()
5696 p += sprintf(p, "%07u", s->size); in create_unique_id()
5698 BUG_ON(p > name + ID_STR_LENGTH - 1); in create_unique_id()
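
create_unique_id() builds the sysfs alias name ":<flags>-<size>": one letter per significant cache flag (d = SLAB_CACHE_DMA, D = SLAB_CACHE_DMA32, a = SLAB_RECLAIM_ACCOUNT, F = SLAB_CONSISTENCY_CHECKS, A = SLAB_ACCOUNT), a dash only if any letter was emitted, then the object size as %07u, with BUG_ON() guarding the buffer end. A userspace sketch; the flag bit values and buffer size are assumptions:

#include <stdio.h>
#include <assert.h>

#define ID_STR_LENGTH 64	/* assumed buffer size */

/* Flag bits standing in for the SLAB_* cache flags */
#define F_DMA		(1u << 0)	/* SLAB_CACHE_DMA          -> 'd' */
#define F_DMA32		(1u << 1)	/* SLAB_CACHE_DMA32        -> 'D' */
#define F_RECLAIM	(1u << 2)	/* SLAB_RECLAIM_ACCOUNT    -> 'a' */
#define F_CONSISTENCY	(1u << 3)	/* SLAB_CONSISTENCY_CHECKS -> 'F' */
#define F_ACCOUNT	(1u << 4)	/* SLAB_ACCOUNT            -> 'A' */

/* create_unique_id() analog: ":<flag letters>-<size>" */
static void unique_id(char *name, unsigned int flags, unsigned int size)
{
	char *p = name;

	*p++ = ':';
	if (flags & F_DMA)		*p++ = 'd';
	if (flags & F_DMA32)		*p++ = 'D';
	if (flags & F_RECLAIM)		*p++ = 'a';
	if (flags & F_CONSISTENCY)	*p++ = 'F';
	if (flags & F_ACCOUNT)		*p++ = 'A';
	if (p != name + 1)		/* any flag letter emitted? */
		*p++ = '-';
	p += sprintf(p, "%07u", size);
	assert(p <= name + ID_STR_LENGTH - 1);	/* BUG_ON() analog */
}

int main(void)
{
	char name[ID_STR_LENGTH];

	unique_id(name, F_DMA | F_ACCOUNT, 192);
	printf("%s\n", name);	/* prints ":dA-0000192" */
	return 0;
}
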