
Searched refs:p (Results 1 – 25 of 39) sorted by relevance


/mm/
memory-failure.c
81 static int hwpoison_filter_dev(struct page *p) in hwpoison_filter_dev() argument
93 if (PageSlab(p)) in hwpoison_filter_dev()
96 mapping = page_mapping(p); in hwpoison_filter_dev()
111 static int hwpoison_filter_flags(struct page *p) in hwpoison_filter_flags() argument
116 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == in hwpoison_filter_flags()
136 static int hwpoison_filter_task(struct page *p) in hwpoison_filter_task() argument
141 if (page_cgroup_ino(p) != hwpoison_filter_memcg) in hwpoison_filter_task()
147 static int hwpoison_filter_task(struct page *p) { return 0; } in hwpoison_filter_task() argument
150 int hwpoison_filter(struct page *p) in hwpoison_filter() argument
155 if (hwpoison_filter_dev(p)) in hwpoison_filter()
[all …]
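
The memory-failure.c hits above all belong to the hwpoison filter chain: hwpoison_filter() runs the per-device, per-flags and per-memcg predicates in turn and reports whether an injected error should be ignored. A minimal user-space sketch of that compose-the-predicates shape (the struct, the knob names and the values are illustrative, not the kernel's):

    #include <stdio.h>

    /* Illustrative page descriptor; not the kernel's struct page. */
    struct page {
        unsigned long flags;
        unsigned long dev;
        unsigned long memcg;
    };

    /* Stand-ins for the hwpoison_filter_* knobs the kernel exposes via debugfs. */
    static unsigned long filter_dev = 8;
    static unsigned long filter_flags_mask, filter_flags_value;
    static unsigned long filter_memcg;

    /* Each predicate returns non-zero when the page does NOT match its filter. */
    static int filter_dev_check(struct page *p)
    {
        return filter_dev && p->dev != filter_dev;
    }

    static int filter_flags_check(struct page *p)
    {
        return filter_flags_mask &&
               (p->flags & filter_flags_mask) != filter_flags_value;
    }

    static int filter_memcg_check(struct page *p)
    {
        return filter_memcg && p->memcg != filter_memcg;
    }

    /* Mirrors the shape of hwpoison_filter(): any failing predicate skips the page. */
    static int hwpoison_filter_sketch(struct page *p)
    {
        if (filter_dev_check(p))
            return -1;
        if (filter_flags_check(p))
            return -1;
        if (filter_memcg_check(p))
            return -1;
        return 0;
    }

    int main(void)
    {
        struct page pg = { .flags = 0, .dev = 8, .memcg = 0 };

        printf("skip page: %s\n", hwpoison_filter_sketch(&pg) ? "yes" : "no");
        return 0;
    }

In the kernel the real predicates inspect page_mapping(), stable_page_flags() and page_cgroup_ino(), as the matches above show.
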
swapfile.c
535 static void inc_cluster_info_page(struct swap_info_struct *p, in inc_cluster_info_page() argument
543 alloc_cluster(p, idx); in inc_cluster_info_page()
555 static void dec_cluster_info_page(struct swap_info_struct *p, in dec_cluster_info_page() argument
568 free_cluster(p, idx); in dec_cluster_info_page()
658 static void __del_from_avail_list(struct swap_info_struct *p) in __del_from_avail_list() argument
663 plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]); in __del_from_avail_list()
666 static void del_from_avail_list(struct swap_info_struct *p) in del_from_avail_list() argument
669 __del_from_avail_list(p); in del_from_avail_list()
690 static void add_to_avail_list(struct swap_info_struct *p) in add_to_avail_list() argument
696 WARN_ON(!plist_node_empty(&p->avail_lists[nid])); in add_to_avail_list()
[all …]
oom_kill.c
132 struct task_struct *find_lock_task_mm(struct task_struct *p) in find_lock_task_mm() argument
138 for_each_thread(p, t) { in find_lock_task_mm()
161 static bool oom_unkillable_task(struct task_struct *p) in oom_unkillable_task() argument
163 if (is_global_init(p)) in oom_unkillable_task()
165 if (p->flags & PF_KTHREAD) in oom_unkillable_task()
198 unsigned long oom_badness(struct task_struct *p, unsigned long totalpages) in oom_badness() argument
203 if (oom_unkillable_task(p)) in oom_badness()
206 p = find_lock_task_mm(p); in oom_badness()
207 if (!p) in oom_badness()
215 adj = (long)p->signal->oom_score_adj; in oom_badness()
[all …]
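
The oom_kill.c matches trace the outline of oom_badness(): skip tasks that can never be killed, pin down a thread that still owns an mm via find_lock_task_mm(), then bias a footprint-based score by oom_score_adj. A rough, self-contained sketch of that scoring shape (field names and the simplified formula are mine; the kernel also counts page-table pages and takes the relevant locks):

    #include <stdio.h>

    struct task {
        long oom_score_adj;           /* -1000 .. 1000, as in the kernel ABI */
        unsigned long rss_pages;
        unsigned long swap_pages;
        int unkillable;               /* init task, kernel thread, ... */
    };

    /* Higher score = more likely victim; 0 = never pick this task. */
    static unsigned long badness_sketch(const struct task *t, unsigned long totalpages)
    {
        long points;

        if (t->unkillable || t->oom_score_adj == -1000)
            return 0;

        /* Footprint in pages, then shift by the per-task adjustment. */
        points = (long)(t->rss_pages + t->swap_pages);
        points += t->oom_score_adj * (long)(totalpages / 1000);

        return points > 0 ? (unsigned long)points : 1;
    }

    int main(void)
    {
        struct task t = { .oom_score_adj = 300, .rss_pages = 5000, .swap_pages = 100 };

        printf("badness: %lu\n", badness_sketch(&t, 1UL << 20));
        return 0;
    }
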
sparse-vmemmap.c
148 void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node); in vmemmap_pte_populate() local
149 if (!p) in vmemmap_pte_populate()
151 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); in vmemmap_pte_populate()
159 void *p = vmemmap_alloc_block(size, node); in vmemmap_alloc_block_zero() local
161 if (!p) in vmemmap_alloc_block_zero()
163 memset(p, 0, size); in vmemmap_alloc_block_zero()
165 return p; in vmemmap_alloc_block_zero()
172 void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node); in vmemmap_pmd_populate() local
173 if (!p) in vmemmap_pmd_populate()
175 pmd_populate_kernel(&init_mm, pmd, p); in vmemmap_pmd_populate()
[all …]
util.c
125 void *p; in kmemdup() local
127 p = kmalloc_track_caller(len, gfp); in kmemdup()
128 if (p) in kmemdup()
129 memcpy(p, src, len); in kmemdup()
130 return p; in kmemdup()
170 void *p; in memdup_user() local
172 p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); in memdup_user()
173 if (!p) in memdup_user()
176 if (copy_from_user(p, src, len)) { in memdup_user()
177 kfree(p); in memdup_user()
[all …]
vmstat.c
320 s8 __percpu *p = pcp->vm_stat_diff + item; in __mod_zone_page_state() local
324 x = delta + __this_cpu_read(*p); in __mod_zone_page_state()
332 __this_cpu_write(*p, x); in __mod_zone_page_state()
340 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __mod_node_page_state() local
344 x = delta + __this_cpu_read(*p); in __mod_node_page_state()
352 __this_cpu_write(*p, x); in __mod_node_page_state()
382 s8 __percpu *p = pcp->vm_stat_diff + item; in __inc_zone_state() local
385 v = __this_cpu_inc_return(*p); in __inc_zone_state()
391 __this_cpu_write(*p, -overstep); in __inc_zone_state()
398 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __inc_node_state() local
[all …]
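
All of the vmstat.c matches follow one per-CPU counter idiom: small deltas accumulate in a narrow per-CPU diff (the s8 *p above), and only when a threshold is crossed is the batch folded into the shared zone or node counter. A single-threaded sketch of that fold (threshold value and types are illustrative):

    #include <stdio.h>

    #define STAT_THRESHOLD 32        /* illustrative; the kernel sizes this per zone */

    static long global_count;        /* shared counter, expensive to touch often */
    static signed char cpu_diff;     /* per-CPU delta, cheap to update */

    static void mod_state_sketch(long delta)
    {
        long x = delta + cpu_diff;

        if (x > STAT_THRESHOLD || x < -STAT_THRESHOLD) {
            global_count += x;       /* fold the accumulated batch into the global */
            x = 0;
        }
        cpu_diff = (signed char)x;
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            mod_state_sketch(1);
        printf("global=%ld pending=%d\n", global_count, cpu_diff);
        return 0;
    }
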
slub.c
128 void *fixup_red_left(struct kmem_cache *s, void *p) in fixup_red_left() argument
131 p += s->red_left_pad; in fixup_red_left()
133 return p; in fixup_red_left()
223 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
291 void *p; in get_freepointer_safe() local
297 probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p)); in get_freepointer_safe()
298 return freelist_ptr(s, p, freepointer_addr); in get_freepointer_safe()
319 static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr) in slab_index() argument
321 return (kasan_reset_tag(p) - addr) / s->size; in slab_index()
452 void *p; in get_map() local
[all …]
cma_debug.c
20 struct page *p; member
26 unsigned long *p = data; in cma_debugfs_get() local
28 *val = *p; in cma_debugfs_get()
102 cma_release(cma, mem->p, mem->n); in cma_free_mem()
106 cma_release(cma, mem->p, count); in cma_free_mem()
107 mem->p += count; in cma_free_mem()
134 struct page *p; in cma_alloc_mem() local
140 p = cma_alloc(cma, count, 0, false); in cma_alloc_mem()
141 if (!p) { in cma_alloc_mem()
146 mem->p = p; in cma_alloc_mem()
slab_common.c
104 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) in __kmem_cache_free_bulk() argument
110 kmem_cache_free(s, p[i]); in __kmem_cache_free_bulk()
112 kfree(p[i]); in __kmem_cache_free_bulk()
117 void **p) in __kmem_cache_alloc_bulk() argument
122 void *x = p[i] = kmem_cache_alloc(s, flags); in __kmem_cache_alloc_bulk()
124 __kmem_cache_free_bulk(s, i, p); in __kmem_cache_alloc_bulk()
1421 void *slab_next(struct seq_file *m, void *p, loff_t *pos) in slab_next() argument
1423 return seq_list_next(p, &slab_root_caches, pos); in slab_next()
1426 void slab_stop(struct seq_file *m, void *p) in slab_stop() argument
1473 static int slab_show(struct seq_file *m, void *p) in slab_show() argument
[all …]
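
The slab_common.c fallbacks show the generic bulk pattern: fill p[] one allocation at a time and, if any allocation fails, free the i objects already obtained and report failure. A user-space sketch of that unwind-on-partial-failure shape, with malloc/free standing in for the slab calls:

    #include <stdio.h>
    #include <stdlib.h>

    static void free_bulk_sketch(size_t nr, void **p)
    {
        for (size_t i = 0; i < nr; i++)
            free(p[i]);
    }

    /* Returns the number of objects allocated: size on success, 0 on failure. */
    static size_t alloc_bulk_sketch(size_t obj_size, size_t size, void **p)
    {
        for (size_t i = 0; i < size; i++) {
            p[i] = malloc(obj_size);
            if (!p[i]) {
                free_bulk_sketch(i, p);   /* unwind what we already have */
                return 0;
            }
        }
        return size;
    }

    int main(void)
    {
        void *objs[8];

        if (alloc_bulk_sketch(64, 8, objs)) {
            puts("allocated 8 objects");
            free_bulk_sketch(8, objs);
        }
        return 0;
    }
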
memtest.c
37 u64 *p, *start, *end; in memtest() local
48 for (p = start; p < end; p++) in memtest()
49 *p = pattern; in memtest()
51 for (p = start; p < end; p++, start_phys_aligned += incr) { in memtest()
52 if (*p == pattern) in memtest()
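
The memtest.c fragment is the whole algorithm in miniature: fill a region with a known pattern, then re-read it and report every word that came back different (the kernel then reserves the bad range). A user-space sketch of the same check (buffer size and reporting are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void memtest_sketch(uint64_t pattern, uint64_t *start, uint64_t *end)
    {
        uint64_t *p;

        /* Write the pattern across the whole range. */
        for (p = start; p < end; p++)
            *p = pattern;

        /* Read it back; anything that differs is a suspect word. */
        for (p = start; p < end; p++)
            if (*p != pattern)
                printf("mismatch at %p: %#llx\n",
                       (void *)p, (unsigned long long)*p);
    }

    int main(void)
    {
        enum { WORDS = 4096 };
        uint64_t *buf = malloc(WORDS * sizeof(*buf));

        if (!buf)
            return 1;
        memtest_sketch(0xaaaaaaaaaaaaaaaaULL, buf, buf + WORDS);
        free(buf);
        return 0;
    }
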
mempolicy.c
130 struct mempolicy *get_task_policy(struct task_struct *p) in get_task_policy() argument
132 struct mempolicy *pol = p->mempolicy; in get_task_policy()
287 void __mpol_put(struct mempolicy *p) in __mpol_put() argument
289 if (!atomic_dec_and_test(&p->refcnt)) in __mpol_put()
291 kmem_cache_free(policy_cache, p); in __mpol_put()
833 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) in get_policy_nodemask() argument
836 if (p == &default_policy) in get_policy_nodemask()
839 switch (p->mode) { in get_policy_nodemask()
843 *nodes = p->v.nodes; in get_policy_nodemask()
846 if (!(p->flags & MPOL_F_LOCAL)) in get_policy_nodemask()
[all …]
slab.h
298 struct kmem_cache *p) in slab_equal_or_root() argument
300 return p == s || p == s->memcg_params.root_cache; in slab_equal_or_root()
426 struct kmem_cache *p) in slab_equal_or_root() argument
428 return s == p; in slab_equal_or_root()
578 size_t size, void **p) in slab_post_alloc_hook() argument
584 p[i] = kasan_slab_alloc(s, p[i], flags); in slab_post_alloc_hook()
586 kmemleak_alloc_recursive(p[i], s->object_size, 1, in slab_post_alloc_hook()
644 void *slab_next(struct seq_file *m, void *p, loff_t *pos);
645 void slab_stop(struct seq_file *m, void *p);
647 void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
[all …]
nommu.c
455 struct rb_node *p, *lastp; in validate_nommu_regions() local
465 while ((p = rb_next(lastp))) { in validate_nommu_regions()
466 region = rb_entry(p, struct vm_region, vm_rb); in validate_nommu_regions()
473 lastp = p; in validate_nommu_regions()
488 struct rb_node **p, *parent; in add_nommu_region() local
493 p = &nommu_region_tree.rb_node; in add_nommu_region()
494 while (*p) { in add_nommu_region()
495 parent = *p; in add_nommu_region()
498 p = &(*p)->rb_left; in add_nommu_region()
500 p = &(*p)->rb_right; in add_nommu_region()
[all …]
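
nommu.c, and later backing-dev.c and memcontrol.c, all show the kernel's usual rbtree descent: keep a struct rb_node **p pointing at the link under consideration, walk left or right until *p is NULL, then link the new node into that slot (rb_link_node() plus rb_insert_color() in the real code). The pointer-to-pointer idiom itself is not rbtree-specific; a plain user-space sketch on an unbalanced binary search tree (rebalancing deliberately omitted):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        long key;
        struct node *left, *right;
    };

    static void insert_sketch(struct node **root, struct node *new)
    {
        struct node **p = root;        /* the link we may rewrite */

        while (*p) {
            if (new->key < (*p)->key)
                p = &(*p)->left;
            else
                p = &(*p)->right;
        }
        new->left = new->right = NULL;
        *p = new;                      /* the kernel's rb_link_node() also records the
                                          parent and then rebalances with rb_insert_color() */
    }

    int main(void)
    {
        struct node *root = NULL;
        long keys[] = { 42, 7, 99 };

        for (unsigned i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                return 1;
            n->key = keys[i];
            insert_sketch(&root, n);
        }
        printf("root key: %ld\n", root->key);
        return 0;
    }
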
hwpoison-inject.c
17 struct page *p; in hwpoison_inject() local
27 p = pfn_to_page(pfn); in hwpoison_inject()
28 hpage = compound_head(p); in hwpoison_inject()
32 if (!get_hwpoison_page(p)) in hwpoison_inject()
42 if (!PageLRU(hpage) && !PageHuge(p)) in hwpoison_inject()
58 put_hwpoison_page(p); in hwpoison_inject()
percpu-stats.c
57 int *alloc_sizes, *p; in chunk_map_stats() local
111 for (i = 0, p = alloc_sizes; *p < 0 && i < as_len; i++, p++) { in chunk_map_stats()
112 sum_frag -= *p; in chunk_map_stats()
113 max_frag = max(max_frag, -1 * (*p)); in chunk_map_stats()
backing-dev.c
886 struct rb_node **p = &bdi_tree.rb_node; in bdi_lookup_rb_node() local
892 while (*p) { in bdi_lookup_rb_node()
893 parent = *p; in bdi_lookup_rb_node()
897 p = &(*p)->rb_left; in bdi_lookup_rb_node()
899 p = &(*p)->rb_right; in bdi_lookup_rb_node()
906 return p; in bdi_lookup_rb_node()
919 struct rb_node **p; in bdi_get_by_id() local
922 p = bdi_lookup_rb_node(id, NULL); in bdi_get_by_id()
923 if (*p) { in bdi_get_by_id()
924 bdi = rb_entry(*p, struct backing_dev_info, rb_node); in bdi_get_by_id()
[all …]
vmalloc.c
54 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); in free_work() local
57 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) in free_work()
1820 struct vm_struct *tmp, **p; in vm_area_add_early() local
1823 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { in vm_area_add_early()
1830 vm->next = *p; in vm_area_add_early()
1831 *p = vm; in vm_area_add_early()
1913 struct vfree_deferred *p; in vmalloc_init() local
1918 p = &per_cpu(vfree_deferred, i); in vmalloc_init()
1919 init_llist_head(&p->list); in vmalloc_init()
1920 INIT_WORK(&p->wq, free_work); in vmalloc_init()
[all …]
page_alloc.c
447 #define kasan_free_nondeferred_pages(p, o) kasan_free_pages(p, o) argument
698 struct page *p = page + i; in prep_compound_page() local
699 set_page_count(p, 0); in prep_compound_page()
700 p->mapping = TAIL_MAPPING; in prep_compound_page()
701 set_compound_head(p, page); in prep_compound_page()
1445 struct page *p = page; in __free_pages_core() local
1448 prefetchw(p); in __free_pages_core()
1449 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { in __free_pages_core()
1450 prefetchw(p + 1); in __free_pages_core()
1451 __ClearPageReserved(p); in __free_pages_core()
[all …]
early_ioremap.c
257 char *p; in copy_from_early_mem() local
264 p = early_memremap(src & PAGE_MASK, clen + slop); in copy_from_early_mem()
265 memcpy(dest, p + slop, clen); in copy_from_early_mem()
266 early_memunmap(p, clen + slop); in copy_from_early_mem()
slab.c
2184 struct list_head *p; in drain_freelist() local
2192 p = n->slabs_free.prev; in drain_freelist()
2193 if (p == &n->slabs_free) { in drain_freelist()
2198 page = list_entry(p, struct page, slab_list); in drain_freelist()
3494 size_t size, void **p, unsigned long caller) in cache_alloc_debugcheck_after_bulk() argument
3499 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller); in cache_alloc_debugcheck_after_bulk()
3503 void **p) in kmem_cache_alloc_bulk() argument
3519 p[i] = objp; in kmem_cache_alloc_bulk()
3523 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_); in kmem_cache_alloc_bulk()
3528 memset(p[i], 0, s->object_size); in kmem_cache_alloc_bulk()
[all …]
memcontrol.c
525 struct rb_node **p = &mctz->rb_root.rb_node; in __mem_cgroup_insert_exceeded() local
536 while (*p) { in __mem_cgroup_insert_exceeded()
537 parent = *p; in __mem_cgroup_insert_exceeded()
541 p = &(*p)->rb_left; in __mem_cgroup_insert_exceeded()
550 p = &(*p)->rb_right; in __mem_cgroup_insert_exceeded()
556 rb_link_node(&mz->tree_node, parent, p); in __mem_cgroup_insert_exceeded()
767 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val) in __mod_lruvec_slab_state() argument
769 struct page *page = virt_to_head_page(p); in __mod_lruvec_slab_state()
920 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) in mem_cgroup_from_task() argument
927 if (unlikely(!p)) in mem_cgroup_from_task()
[all …]
/mm/kasan/
init.c
129 pte_t *p; in zero_pmd_populate() local
132 p = pte_alloc_one_kernel(&init_mm); in zero_pmd_populate()
134 p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); in zero_pmd_populate()
135 if (!p) in zero_pmd_populate()
138 pmd_populate_kernel(&init_mm, pmd, p); in zero_pmd_populate()
166 pmd_t *p; in zero_pud_populate() local
169 p = pmd_alloc(&init_mm, pud, addr); in zero_pud_populate()
170 if (!p) in zero_pud_populate()
207 pud_t *p; in zero_p4d_populate() local
210 p = pud_alloc(&init_mm, p4d, addr); in zero_p4d_populate()
[all …]
tags_report.c
69 void *p = reset_tag(addr); in find_first_bad_addr() local
70 void *end = p + size; in find_first_bad_addr()
72 while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p)) in find_first_bad_addr()
73 p += KASAN_SHADOW_SCALE_SIZE; in find_first_bad_addr()
74 return p; in find_first_bad_addr()
generic_report.c
39 void *p = addr; in find_first_bad_addr() local
41 while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p))) in find_first_bad_addr()
42 p += KASAN_SHADOW_SCALE_SIZE; in find_first_bad_addr()
43 return p; in find_first_bad_addr()
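
Both find_first_bad_addr() variants above walk the access one KASAN_SHADOW_SCALE_SIZE granule at a time and stop at the first granule whose shadow byte signals a problem (non-zero for generic KASAN, a tag mismatch for the tag-based mode). A simplified user-space model of the generic scan, with an explicit shadow array in place of kasan_mem_to_shadow() (granule size and shadow contents are illustrative):

    #include <stdio.h>

    #define GRANULE 8                   /* KASAN_SHADOW_SCALE_SIZE in the kernel */

    /* One shadow byte per granule; 0 means the whole granule is addressable. */
    static unsigned char shadow[16] = { 0, 0, 0, 0xfb, 0xfb };

    static size_t first_bad_offset(size_t start, size_t size)
    {
        size_t p = start;

        while (p < start + size && shadow[p / GRANULE] == 0)
            p += GRANULE;
        return p;                       /* first offset backed by a poisoned granule */
    }

    int main(void)
    {
        printf("first bad offset: %zu\n", first_bad_offset(0, 64));
        return 0;
    }
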
common.c
90 bool __kasan_check_read(const volatile void *p, unsigned int size) in __kasan_check_read() argument
92 return check_memory_region((unsigned long)p, size, false, _RET_IP_); in __kasan_check_read()
96 bool __kasan_check_write(const volatile void *p, unsigned int size) in __kasan_check_write() argument
98 return check_memory_region((unsigned long)p, size, true, _RET_IP_); in __kasan_check_write()
