
Searched refs:p (Results 1 – 25 of 41) sorted by relevance

/mm/
memory-failure.c
108 static int hwpoison_filter_dev(struct page *p) in hwpoison_filter_dev() argument
120 if (PageSlab(p)) in hwpoison_filter_dev()
123 mapping = page_mapping(p); in hwpoison_filter_dev()
138 static int hwpoison_filter_flags(struct page *p) in hwpoison_filter_flags() argument
143 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == in hwpoison_filter_flags()
163 static int hwpoison_filter_task(struct page *p) in hwpoison_filter_task() argument
168 if (page_cgroup_ino(p) != hwpoison_filter_memcg) in hwpoison_filter_task()
174 static int hwpoison_filter_task(struct page *p) { return 0; } in hwpoison_filter_task() argument
177 int hwpoison_filter(struct page *p) in hwpoison_filter() argument
182 if (hwpoison_filter_dev(p)) in hwpoison_filter()
[all …]
swapfile.c
549 static void inc_cluster_info_page(struct swap_info_struct *p, in inc_cluster_info_page() argument
557 alloc_cluster(p, idx); in inc_cluster_info_page()
569 static void dec_cluster_info_page(struct swap_info_struct *p, in dec_cluster_info_page() argument
582 free_cluster(p, idx); in dec_cluster_info_page()
667 static void __del_from_avail_list(struct swap_info_struct *p) in __del_from_avail_list() argument
671 assert_spin_locked(&p->lock); in __del_from_avail_list()
673 plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]); in __del_from_avail_list()
676 static void del_from_avail_list(struct swap_info_struct *p) in del_from_avail_list() argument
680 trace_android_vh_del_from_avail_list(p, &skip); in del_from_avail_list()
685 __del_from_avail_list(p); in del_from_avail_list()
[all …]
oom_kill.c
140 struct task_struct *find_lock_task_mm(struct task_struct *p) in find_lock_task_mm() argument
146 for_each_thread(p, t) { in find_lock_task_mm()
169 static bool oom_unkillable_task(struct task_struct *p) in oom_unkillable_task() argument
171 if (is_global_init(p)) in oom_unkillable_task()
173 if (p->flags & PF_KTHREAD) in oom_unkillable_task()
206 long oom_badness(struct task_struct *p, unsigned long totalpages) in oom_badness() argument
211 if (oom_unkillable_task(p)) in oom_badness()
214 p = find_lock_task_mm(p); in oom_badness()
215 if (!p) in oom_badness()
223 adj = (long)p->signal->oom_score_adj; in oom_badness()
[all …]
sparse-vmemmap.c
149 void *p; in vmemmap_pte_populate() local
151 p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap); in vmemmap_pte_populate()
152 if (!p) in vmemmap_pte_populate()
154 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); in vmemmap_pte_populate()
162 void *p = vmemmap_alloc_block(size, node); in vmemmap_alloc_block_zero() local
164 if (!p) in vmemmap_alloc_block_zero()
166 memset(p, 0, size); in vmemmap_alloc_block_zero()
168 return p; in vmemmap_alloc_block_zero()
175 void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node); in vmemmap_pmd_populate() local
176 if (!p) in vmemmap_pmd_populate()
[all …]
util.c
130 void *p; in kmemdup() local
132 p = kmalloc_track_caller(len, gfp); in kmemdup()
133 if (p) in kmemdup()
134 memcpy(p, src, len); in kmemdup()
135 return p; in kmemdup()
175 void *p; in memdup_user() local
177 p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); in memdup_user()
178 if (!p) in memdup_user()
181 if (copy_from_user(p, src, len)) { in memdup_user()
182 kfree(p); in memdup_user()
[all …]
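The util.c hits above (kmemdup(), memdup_user()) all follow the same allocate-then-copy duplication pattern. A minimal userspace sketch of that pattern, using the C standard library instead of the kernel allocators (dup_buffer is an illustrative name, not a kernel function):

#include <stdlib.h>
#include <string.h>

/* Duplicate len bytes of src into a freshly allocated buffer. */
static void *dup_buffer(const void *src, size_t len)
{
        void *p = malloc(len);          /* stands in for kmalloc_track_caller() */

        if (p)
                memcpy(p, src, len);    /* copy only once the allocation has succeeded */
        return p;                       /* NULL signals allocation failure, as in kmemdup() */
}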
vmstat.c
320 s8 __percpu *p = pcp->vm_stat_diff + item; in __mod_zone_page_state() local
324 x = delta + __this_cpu_read(*p); in __mod_zone_page_state()
332 __this_cpu_write(*p, x); in __mod_zone_page_state()
340 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __mod_node_page_state() local
349 x = delta + __this_cpu_read(*p); in __mod_node_page_state()
357 __this_cpu_write(*p, x); in __mod_node_page_state()
387 s8 __percpu *p = pcp->vm_stat_diff + item; in __inc_zone_state() local
390 v = __this_cpu_inc_return(*p); in __inc_zone_state()
396 __this_cpu_write(*p, -overstep); in __inc_zone_state()
403 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __inc_node_state() local
[all …]
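The vmstat.c hits above show per-CPU differential counters: each CPU accumulates small deltas in an s8 and folds them into the global counter only when a threshold is crossed. A hedged, single-threaded sketch of that folding logic (struct and function names here are illustrative, not the kernel's):

struct stat_counter {
        long global;            /* authoritative count, updated rarely */
        signed char diff;       /* local delta, the s8 *p in __mod_zone_page_state() */
        signed char threshold;  /* fold into 'global' once the delta exceeds this */
};

static void mod_state(struct stat_counter *c, int delta)
{
        long x = delta + c->diff;

        if (x > c->threshold || x < -c->threshold) {
                c->global += x;         /* fold the accumulated delta */
                x = 0;
        }
        c->diff = (signed char)x;       /* keep the remainder locally */
}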
slub.c
134 void *fixup_red_left(struct kmem_cache *s, void *p) in fixup_red_left() argument
137 p += s->red_left_pad; in fixup_red_left()
139 return p; in fixup_red_left()
211 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
284 void *p; in get_freepointer_safe() local
291 copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p)); in get_freepointer_safe()
292 return freelist_ptr(s, p, freepointer_addr); in get_freepointer_safe()
440 void *p; in __fill_map() local
444 for (p = page->freelist; p; p = get_freepointer(s, p)) in __fill_map()
445 set_bit(__obj_to_index(s, addr, p), obj_map); in __fill_map()
[all …]
slab_common.c
108 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) in __kmem_cache_free_bulk() argument
114 kmem_cache_free(s, p[i]); in __kmem_cache_free_bulk()
116 kfree(p[i]); in __kmem_cache_free_bulk()
121 void **p) in __kmem_cache_alloc_bulk() argument
126 void *x = p[i] = kmem_cache_alloc(s, flags); in __kmem_cache_alloc_bulk()
128 __kmem_cache_free_bulk(s, i, p); in __kmem_cache_alloc_bulk()
961 void *slab_next(struct seq_file *m, void *p, loff_t *pos) in slab_next() argument
963 return seq_list_next(p, &slab_caches, pos); in slab_next()
966 void slab_stop(struct seq_file *m, void *p) in slab_stop() argument
990 static int slab_show(struct seq_file *m, void *p) in slab_show() argument
[all …]
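__kmem_cache_alloc_bulk() above allocates object by object and, on the first failure, hands everything allocated so far back to __kmem_cache_free_bulk() before reporting failure. A minimal userspace sketch of that roll-back pattern with malloc()/free() in place of the slab allocator (alloc_bulk is an illustrative name):

#include <stdlib.h>

static size_t alloc_bulk(size_t obj_size, size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                p[i] = malloc(obj_size);
                if (!p[i]) {
                        while (i--)     /* undo the partial allocation */
                                free(p[i]);
                        return 0;       /* all or nothing, as in the kernel helper */
                }
        }
        return nr;
}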
slab.h
363 void **p) in memcg_slab_post_alloc_hook() argument
373 if (likely(p[i])) { in memcg_slab_post_alloc_hook()
374 page = virt_to_head_page(p[i]); in memcg_slab_post_alloc_hook()
382 off = obj_to_index(s, page, p[i]); in memcg_slab_post_alloc_hook()
395 void **p, int objects) in memcg_slab_free_hook() argument
407 if (unlikely(!p[i])) in memcg_slab_free_hook()
410 page = virt_to_head_page(p[i]); in memcg_slab_free_hook()
419 off = obj_to_index(s, page, p[i]); in memcg_slab_free_hook()
463 void **p) in memcg_slab_post_alloc_hook() argument
468 void **p, int objects) in memcg_slab_free_hook() argument
[all …]
memtest.c
37 u64 *p, *start, *end; in memtest() local
48 for (p = start; p < end; p++) in memtest()
49 *p = pattern; in memtest()
51 for (p = start; p < end; p++, start_phys_aligned += incr) { in memtest()
52 if (*p == pattern) in memtest()
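memtest() above fills a candidate region with a pattern and then reads every word back, flagging locations that do not match. A hedged sketch of the core write-then-verify loop over an ordinary buffer (the physical-address bookkeeping and bad-region reservation are omitted):

#include <stddef.h>
#include <stdint.h>

/* Fill [start, end) with pattern and count the words that read back wrong. */
static size_t check_pattern(uint64_t *start, uint64_t *end, uint64_t pattern)
{
        uint64_t *p;
        size_t bad = 0;

        for (p = start; p < end; p++)
                *p = pattern;
        for (p = start; p < end; p++)
                if (*p != pattern)
                        bad++;
        return bad;
}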
cma_debug.c
20 struct page *p; member
26 unsigned long *p = data; in cma_debugfs_get() local
28 *val = *p; in cma_debugfs_get()
102 cma_release(cma, mem->p, mem->n); in cma_free_mem()
106 cma_release(cma, mem->p, count); in cma_free_mem()
107 mem->p += count; in cma_free_mem()
134 struct page *p; in cma_alloc_mem() local
140 p = cma_alloc(cma, count, 0, GFP_KERNEL); in cma_alloc_mem()
141 if (!p) { in cma_alloc_mem()
146 mem->p = p; in cma_alloc_mem()
mempolicy.c
156 struct mempolicy *get_task_policy(struct task_struct *p) in get_task_policy() argument
158 struct mempolicy *pol = p->mempolicy; in get_task_policy()
313 void __mpol_put(struct mempolicy *p) in __mpol_put() argument
315 if (!atomic_dec_and_test(&p->refcnt)) in __mpol_put()
317 kmem_cache_free(policy_cache, p); in __mpol_put()
907 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) in get_policy_nodemask() argument
910 if (p == &default_policy) in get_policy_nodemask()
913 switch (p->mode) { in get_policy_nodemask()
916 *nodes = p->v.nodes; in get_policy_nodemask()
919 if (!(p->flags & MPOL_F_LOCAL)) in get_policy_nodemask()
[all …]
nommu.c
436 struct rb_node *p, *lastp; in validate_nommu_regions() local
446 while ((p = rb_next(lastp))) { in validate_nommu_regions()
447 region = rb_entry(p, struct vm_region, vm_rb); in validate_nommu_regions()
454 lastp = p; in validate_nommu_regions()
469 struct rb_node **p, *parent; in add_nommu_region() local
474 p = &nommu_region_tree.rb_node; in add_nommu_region()
475 while (*p) { in add_nommu_region()
476 parent = *p; in add_nommu_region()
479 p = &(*p)->rb_left; in add_nommu_region()
481 p = &(*p)->rb_right; in add_nommu_region()
[all …]
backing-dev.c
763 struct rb_node **p = &bdi_tree.rb_node; in bdi_lookup_rb_node() local
769 while (*p) { in bdi_lookup_rb_node()
770 parent = *p; in bdi_lookup_rb_node()
774 p = &(*p)->rb_left; in bdi_lookup_rb_node()
776 p = &(*p)->rb_right; in bdi_lookup_rb_node()
783 return p; in bdi_lookup_rb_node()
796 struct rb_node **p; in bdi_get_by_id() local
799 p = bdi_lookup_rb_node(id, NULL); in bdi_get_by_id()
800 if (*p) { in bdi_get_by_id()
801 bdi = rb_entry(*p, struct backing_dev_info, rb_node); in bdi_get_by_id()
[all …]
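add_nommu_region(), bdi_lookup_rb_node() and __mem_cgroup_insert_exceeded() (further down) all descend an rbtree through a pointer to the link being followed, so the final *p is exactly the empty slot where the new node gets linked. A hedged sketch of that double-pointer descent on a plain binary search tree; the kernel versions additionally record the parent for rb_link_node() and rebalance with rb_insert_color():

#include <stddef.h>

struct bst_node {
        long key;
        struct bst_node *left, *right;
};

static void bst_insert(struct bst_node **root, struct bst_node *new)
{
        struct bst_node **p = root;

        while (*p) {                    /* walk down, keeping the address of the followed link */
                if (new->key < (*p)->key)
                        p = &(*p)->left;
                else
                        p = &(*p)->right;
        }
        new->left = new->right = NULL;
        *p = new;                       /* *p is the empty slot the walk ended on */
}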
percpu-stats.c
61 int *alloc_sizes, *p; in chunk_map_stats() local
115 for (i = 0, p = alloc_sizes; *p < 0 && i < as_len; i++, p++) { in chunk_map_stats()
116 sum_frag -= *p; in chunk_map_stats()
117 max_frag = max(max_frag, -1 * (*p)); in chunk_map_stats()
hwpoison-inject.c
17 struct page *p; in hwpoison_inject() local
27 p = pfn_to_page(pfn); in hwpoison_inject()
28 hpage = compound_head(p); in hwpoison_inject()
37 if (!PageLRU(hpage) && !PageHuge(p)) in hwpoison_inject()
vmalloc.c
64 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); in free_work() local
67 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) in free_work()
1895 struct vm_struct *tmp, **p; in vm_area_add_early() local
1898 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { in vm_area_add_early()
1905 vm->next = *p; in vm_area_add_early()
1906 *p = vm; in vm_area_add_early()
1988 struct vfree_deferred *p; in vmalloc_init() local
1993 p = &per_cpu(vfree_deferred, i); in vmalloc_init()
1994 init_llist_head(&p->list); in vmalloc_init()
1995 INIT_WORK(&p->wq, free_work); in vmalloc_init()
[all …]
early_ioremap.c
257 char *p; in copy_from_early_mem() local
264 p = early_memremap(src & PAGE_MASK, clen + slop); in copy_from_early_mem()
265 memcpy(dest, p + slop, clen); in copy_from_early_mem()
266 early_memunmap(p, clen + slop); in copy_from_early_mem()
page_alloc.c
727 struct page *p = page + i; in prep_compound_page() local
728 set_page_count(p, 0); in prep_compound_page()
729 p->mapping = TAIL_MAPPING; in prep_compound_page()
730 set_compound_head(p, page); in prep_compound_page()
1631 struct page *p = page; in __free_pages_core() local
1639 prefetchw(p); in __free_pages_core()
1640 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { in __free_pages_core()
1641 prefetchw(p + 1); in __free_pages_core()
1642 __ClearPageReserved(p); in __free_pages_core()
1643 set_page_count(p, 0); in __free_pages_core()
[all …]
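__free_pages_core() above prefetches the next struct page for write while clearing the current one, hiding the memory latency of the tail pages. A hedged sketch of that prefetch-ahead loop on an ordinary array, using the GCC/Clang __builtin_prefetch() intrinsic (the function and array names are illustrative):

#include <stddef.h>

static void clear_all(long *items, size_t n)
{
        size_t i;

        if (!n)
                return;
        __builtin_prefetch(&items[0], 1);               /* warm the first element for write */
        for (i = 0; i + 1 < n; i++) {
                __builtin_prefetch(&items[i + 1], 1);   /* fetch the next while clearing the current */
                items[i] = 0;
        }
        items[n - 1] = 0;                               /* last element, nothing left to prefetch */
}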
memcontrol.c
610 struct rb_node **p = &mctz->rb_root.rb_node; in __mem_cgroup_insert_exceeded() local
621 while (*p) { in __mem_cgroup_insert_exceeded()
622 parent = *p; in __mem_cgroup_insert_exceeded()
626 p = &(*p)->rb_left; in __mem_cgroup_insert_exceeded()
635 p = &(*p)->rb_right; in __mem_cgroup_insert_exceeded()
641 rb_link_node(&mz->tree_node, parent, p); in __mem_cgroup_insert_exceeded()
864 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val) in __mod_lruvec_slab_state() argument
866 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); in __mod_lruvec_slab_state()
871 memcg = mem_cgroup_from_obj(p); in __mod_lruvec_slab_state()
888 void mod_memcg_obj_state(void *p, int idx, int val) in mod_memcg_obj_state() argument
[all …]
slob.c
682 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
684 __kmem_cache_free_bulk(s, size, p); in kmem_cache_free_bulk()
689 void **p) in kmem_cache_alloc_bulk() argument
691 return __kmem_cache_alloc_bulk(s, flags, size, p); in kmem_cache_alloc_bulk()
/mm/kasan/
init.c
125 pte_t *p; in zero_pmd_populate() local
128 p = pte_alloc_one_kernel(&init_mm); in zero_pmd_populate()
130 p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); in zero_pmd_populate()
131 if (!p) in zero_pmd_populate()
134 pmd_populate_kernel(&init_mm, pmd, p); in zero_pmd_populate()
162 pmd_t *p; in zero_pud_populate() local
165 p = pmd_alloc(&init_mm, pud, addr); in zero_pud_populate()
166 if (!p) in zero_pud_populate()
203 pud_t *p; in zero_p4d_populate() local
206 p = pud_alloc(&init_mm, p4d, addr); in zero_p4d_populate()
[all …]
report_sw_tags.c
78 void *p = kasan_reset_tag(addr); in kasan_find_first_bad_addr() local
79 void *end = p + size; in kasan_find_first_bad_addr()
81 while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p)) in kasan_find_first_bad_addr()
82 p += KASAN_GRANULE_SIZE; in kasan_find_first_bad_addr()
83 return p; in kasan_find_first_bad_addr()
report_generic.c
35 void *p = addr; in kasan_find_first_bad_addr() local
37 while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p))) in kasan_find_first_bad_addr()
38 p += KASAN_GRANULE_SIZE; in kasan_find_first_bad_addr()
39 return p; in kasan_find_first_bad_addr()
shadow.c
29 bool __kasan_check_read(const volatile void *p, unsigned int size) in __kasan_check_read() argument
31 return kasan_check_range((unsigned long)p, size, false, _RET_IP_); in __kasan_check_read()
35 bool __kasan_check_write(const volatile void *p, unsigned int size) in __kasan_check_write() argument
37 return kasan_check_range((unsigned long)p, size, true, _RET_IP_); in __kasan_check_write()
