
Searched for references to the identifier n (results 1 – 21 of 21), sorted by relevance.

/mm/
slab.c 210 struct kmem_cache_node *n, int tofree);
220 struct kmem_cache_node *n, struct page *page,
554 struct kmem_cache_node *n; in cache_free_pfmemalloc() local
559 n = get_node(cachep, page_node); in cache_free_pfmemalloc()
561 spin_lock(&n->list_lock); in cache_free_pfmemalloc()
563 spin_unlock(&n->list_lock); in cache_free_pfmemalloc()
594 #define reap_alien(cachep, n) do { } while (0) argument
688 struct kmem_cache_node *n = get_node(cachep, node); in __drain_alien_cache() local
691 spin_lock(&n->list_lock); in __drain_alien_cache()
697 if (n->shared) in __drain_alien_cache()
[all …]
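The recurring shape in these slab.c hits: resolve the per-node structure first, then touch its lists only while holding that node's list_lock. A minimal userspace sketch of the same lock-per-node discipline (node_state, node_table, get_node_state and MAX_NODES are hypothetical stand-ins for the kernel types; mutexes would be initialized at setup):

#include <pthread.h>

#define MAX_NODES 8

struct node_state {
    pthread_mutex_t list_lock;
    /* per-node partial/free lists would live here */
};

static struct node_state node_table[MAX_NODES];

/* stand-in for get_node(cachep, node) */
static struct node_state *get_node_state(int node_id)
{
    return &node_table[node_id];
}

static void touch_node_lists(int node_id)
{
    struct node_state *n = get_node_state(node_id);

    pthread_mutex_lock(&n->list_lock);
    /* move pages between n's lists here */
    pthread_mutex_unlock(&n->list_lock);
}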
usercopy.c 103 static bool overlaps(const unsigned long ptr, unsigned long n, in overlaps() argument
107 unsigned long check_high = check_low + n; in overlaps()
118 unsigned long n, bool to_user) in check_kernel_text_object() argument
124 if (overlaps(ptr, n, textlow, texthigh)) in check_kernel_text_object()
125 usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n); in check_kernel_text_object()
142 if (overlaps(ptr, n, textlow_linear, texthigh_linear)) in check_kernel_text_object()
144 ptr - textlow_linear, n); in check_kernel_text_object()
147 static inline void check_bogus_address(const unsigned long ptr, unsigned long n, in check_bogus_address() argument
151 if (ptr + (n - 1) < ptr) in check_bogus_address()
152 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n); in check_bogus_address()
[all …]
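Two checks carry the weight here: an interval-overlap test against the kernel text, and a wraparound test on ptr + n. A self-contained sketch using the same arithmetic as overlaps() and check_bogus_address() (the function names below are illustrative):

#include <stdbool.h>

/* The copy [ptr, ptr + n) intersects [low, high) unless it lies
 * entirely below or entirely above that range. */
static bool ranges_overlap(unsigned long ptr, unsigned long n,
                           unsigned long low, unsigned long high)
{
    unsigned long check_low = ptr;
    unsigned long check_high = check_low + n;

    return check_low < high && check_high > low;
}

/* If adding n - 1 to ptr wraps past the top of the address space,
 * the sum compares lower than ptr itself. */
static bool range_wraps(unsigned long ptr, unsigned long n)
{
    return ptr + (n - 1) < ptr;
}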
slub.c 368 const char *n) in __cmpxchg_double_slab() argument
396 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __cmpxchg_double_slab()
405 const char *n) in cmpxchg_double_slab() argument
437 pr_info("%s %s: cmpxchg double redo ", n, s->name); in cmpxchg_double_slab()
1012 struct kmem_cache_node *n, struct page *page) in add_full() argument
1017 lockdep_assert_held(&n->list_lock); in add_full()
1018 list_add(&page->slab_list, &n->full); in add_full()
1021 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
1026 lockdep_assert_held(&n->list_lock); in remove_full()
1033 struct kmem_cache_node *n = get_node(s, node); in slabs_node() local
[all …]
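Note that add_full() and remove_full() do not take n->list_lock themselves; lockdep_assert_held() documents and enforces that the caller already holds it. A sketch of the same contract with a plain assertion (the locked flag is a hypothetical stand-in for lockdep state):

#include <assert.h>
#include <pthread.h>

struct node_lists {
    pthread_mutex_t list_lock;
    int locked;              /* hypothetical stand-in for lockdep state */
    /* struct list_head full; ... */
};

/* Caller must already hold n->list_lock, exactly as
 * lockdep_assert_held(&n->list_lock) demands in slub.c. */
static void add_full_sketch(struct node_lists *n)
{
    assert(n->locked);
    /* list_add(&page->slab_list, &n->full); */
}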
Makefile 6 KASAN_SANITIZE_slab_common.o := n
7 KASAN_SANITIZE_slab.o := n
8 KASAN_SANITIZE_slub.o := n
13 KCOV_INSTRUMENT_slab_common.o := n
14 KCOV_INSTRUMENT_slob.o := n
15 KCOV_INSTRUMENT_slab.o := n
16 KCOV_INSTRUMENT_slub.o := n
17 KCOV_INSTRUMENT_page_alloc.o := n
18 KCOV_INSTRUMENT_debug-pagealloc.o := n
19 KCOV_INSTRUMENT_kmemleak.o := n
[all …]
mempolicy.c 1889 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) in offset_il_node() argument
1898 target = (unsigned int)n % nnodes; in offset_il_node()
2291 struct rb_node *n = sp->root.rb_node; in sp_lookup() local
2293 while (n) { in sp_lookup()
2294 struct sp_node *p = rb_entry(n, struct sp_node, nd); in sp_lookup()
2297 n = n->rb_right; in sp_lookup()
2299 n = n->rb_left; in sp_lookup()
2303 if (!n) in sp_lookup()
2307 struct rb_node *prev = rb_prev(n); in sp_lookup()
2313 n = prev; in sp_lookup()
[all …]
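offset_il_node() reduces an arbitrary index n modulo the number of allowed nodes and walks to that many set bits into the nodemask; sp_lookup() below it is a standard rbtree descent. A sketch of the interleave calculation, with a boolean array standing in for the nodemask (MAX_NODE and pick_interleave_node are illustrative):

#include <stdbool.h>

#define MAX_NODE 64

/* Reduce n modulo the number of allowed nodes, then advance to the
 * target'th allowed node, as offset_il_node() does. */
static int pick_interleave_node(const bool mask[MAX_NODE], unsigned long n)
{
    unsigned int nnodes = 0, target;
    int i;

    for (i = 0; i < MAX_NODE; i++)
        nnodes += mask[i];
    if (!nnodes)
        return -1;

    target = (unsigned int)n % nnodes;
    for (i = 0; i < MAX_NODE; i++) {
        if (!mask[i])
            continue;
        if (target-- == 0)
            return i;
    }
    return -1;
}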
vmalloc.c 415 struct rb_node *n = vmap_area_root.rb_node; in __find_vmap_area() local
417 while (n) { in __find_vmap_area()
420 va = rb_entry(n, struct vmap_area, rb_node); in __find_vmap_area()
422 n = n->rb_left; in __find_vmap_area()
424 n = n->rb_right; in __find_vmap_area()
556 augment_tree_propagate_check(struct rb_node *n) in augment_tree_propagate_check() argument
563 if (n == NULL) in augment_tree_propagate_check()
566 va = rb_entry(n, struct vmap_area, rb_node); in augment_tree_propagate_check()
568 node = n; in augment_tree_propagate_check()
586 va = rb_entry(n, struct vmap_area, rb_node); in augment_tree_propagate_check()
[all …]
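__find_vmap_area() is an address lookup over ranges: descend left when addr is below the area's start, right when it is at or past its end, and stop on the node whose range contains addr. The same logic on a plain binary tree (struct area and find_area are hypothetical stand-ins for struct vmap_area and the rbtree walk):

#include <stddef.h>

struct area {
    unsigned long va_start, va_end;
    struct area *left, *right;
};

static struct area *find_area(struct area *n, unsigned long addr)
{
    while (n) {
        if (addr < n->va_start)
            n = n->left;
        else if (addr >= n->va_end)
            n = n->right;
        else
            return n;    /* addr falls inside [va_start, va_end) */
    }
    return NULL;
}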
cma_debug.c 21 unsigned long n; member
101 if (mem->n <= count) { in cma_free_mem()
102 cma_release(cma, mem->p, mem->n); in cma_free_mem()
103 count -= mem->n; in cma_free_mem()
108 mem->n -= count; in cma_free_mem()
147 mem->n = count; in cma_alloc_mem()
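cma_free_mem() frees count pages out of recorded chunks: whole chunks are released while they fit within count, and the last chunk is shrunk in place when only part of it is consumed. The accounting in isolation (struct mem_chunk and free_from_chunk are hypothetical):

struct mem_chunk {
    unsigned long n;    /* pages recorded in this chunk */
};

static unsigned long free_from_chunk(struct mem_chunk *mem, unsigned long count)
{
    if (mem->n <= count) {
        /* whole chunk released, as cma_release() is called above */
        count -= mem->n;
        mem->n = 0;
    } else {
        /* partial free: keep the remainder in the chunk */
        mem->n -= count;
        count = 0;
    }
    return count;    /* pages still left to free */
}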
page_poison.c 51 static void poison_pages(struct page *page, int n) in poison_pages() argument
55 for (i = 0; i < n; i++) in poison_pages()
110 static void unpoison_pages(struct page *page, int n) in unpoison_pages() argument
114 for (i = 0; i < n; i++) in unpoison_pages()
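poison_pages() and unpoison_pages() are symmetric loops over n consecutive pages. A sketch of the poison side (POISON_BYTE is illustrative; the kernel writes its PAGE_POISON pattern):

#include <string.h>

#define PAGE_SIZE   4096
#define POISON_BYTE 0xaa    /* illustrative pattern */

static void poison_range(unsigned char *base, int n)
{
    int i;

    for (i = 0; i < n; i++)
        memset(base + (size_t)i * PAGE_SIZE, POISON_BYTE, PAGE_SIZE);
}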
vmstat.c 485 long o, n, t, z; in mod_zone_state() local
503 n = delta + o; in mod_zone_state()
505 if (n > t || n < -t) { in mod_zone_state()
509 z = n + os; in mod_zone_state()
510 n = -os; in mod_zone_state()
512 } while (this_cpu_cmpxchg(*p, o, n) != o); in mod_zone_state()
542 long o, n, t, z; in mod_node_state() local
560 n = delta + o; in mod_node_state()
562 if (n > t || n < -t) { in mod_node_state()
566 z = n + os; in mod_node_state()
[all …]
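mod_zone_state() and mod_node_state() apply delta to a per-CPU counter in a compare-exchange retry loop; when the new value n crosses the threshold t, the accumulated amount is folded into the shared zone/node counter instead. A simplified userspace sketch of the same loop (a single atomic stands in for the per-CPU variable, and the overstep margin os is elided):

#include <stdatomic.h>

static _Atomic long percpu_diff;
static _Atomic long global_count;

static void mod_state(long delta, long t)
{
    long o, n, z;

    do {
        z = 0;
        o = atomic_load(&percpu_diff);
        n = delta + o;

        if (n > t || n < -t) {
            /* threshold crossed: push everything to the global counter */
            z = n;
            n = 0;
        }
    } while (!atomic_compare_exchange_weak(&percpu_diff, &o, n));

    if (z)
        atomic_fetch_add(&global_count, z);
}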
swapfile.c 298 unsigned int n) in cluster_set_next() argument
300 info->data = n; in cluster_set_next()
304 unsigned int n, unsigned int f) in cluster_set_next_flag() argument
307 info->data = n; in cluster_set_next_flag()
1396 void swapcache_free_entries(swp_entry_t *entries, int n) in swapcache_free_entries() argument
1401 if (n <= 0) in swapcache_free_entries()
1413 sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL); in swapcache_free_entries()
1414 for (i = 0; i < n; ++i) { in swapcache_free_entries()
1498 int count, tmp_count, n; in swp_swapcount() local
1518 n = SWAP_MAP_MAX + 1; in swp_swapcount()
[all …]
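swapcache_free_entries() guards against a non-positive n, sorts the batch so related entries sit together, then frees them in one pass. The same shape in userspace (entry_cmp and free_entries are illustrative names, not the kernel's):

#include <stdlib.h>

static int entry_cmp(const void *a, const void *b)
{
    unsigned long x = *(const unsigned long *)a;
    unsigned long y = *(const unsigned long *)b;

    return (x > y) - (x < y);
}

static void free_entries(unsigned long *entries, int n)
{
    int i;

    if (n <= 0)
        return;

    /* sort first so adjacent entries can share per-device work */
    qsort(entries, n, sizeof(entries[0]), entry_cmp);
    for (i = 0; i < n; i++) {
        /* release entries[i] */
    }
}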
util.c 218 char *strndup_user(const char __user *s, long n) in strndup_user() argument
223 length = strnlen_user(s, n); in strndup_user()
228 if (length > n) in strndup_user()
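strndup_user() measures the string first with a bound of n, rejects anything that does not terminate within the bound, and only then allocates and copies. A userspace analog using strnlen() (bounded_strdup is an illustrative name):

#include <stdlib.h>
#include <string.h>

static char *bounded_strdup(const char *s, size_t n)
{
    size_t length = strnlen(s, n);
    char *p;

    if (length == n)
        return NULL;    /* no terminator within the bound */

    p = malloc(length + 1);
    if (!p)
        return NULL;
    memcpy(p, s, length);
    p[length] = '\0';
    return p;
}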
list_lru.c 216 struct list_head *item, *n; in __list_lru_walk_one() local
221 list_for_each_safe(item, n, &l->list) { in __list_lru_walk_one()
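list_for_each_safe() caches the next pointer in n before each visit so the walk survives the current element being unlinked and freed. The same idea on a minimal singly-linked list (struct item and walk_and_reap are hypothetical):

struct item {
    struct item *next;
};

static void walk_and_reap(struct item *head)
{
    struct item *item, *n;

    for (item = head; item; item = n) {
        n = item->next;    /* saved before item may be freed */
        /* inspect item; unlink and free it if it can be reclaimed */
    }
}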
slab_common.c 1295 const char *n = kmalloc_cache_name("dma-kmalloc", size); in create_kmalloc_caches() local
1297 BUG_ON(!n); in create_kmalloc_caches()
1299 n, size, SLAB_CACHE_DMA | flags, 0, 0); in create_kmalloc_caches()
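The DMA kmalloc caches are named dynamically before creation, and a failed name allocation is treated as fatal (BUG_ON). A sketch of building such a name with snprintf() (make_cache_name is an illustrative helper, not the kernel's kmalloc_cache_name()):

#include <stdio.h>
#include <stdlib.h>

static char *make_cache_name(const char *prefix, unsigned int size)
{
    int len = snprintf(NULL, 0, "%s-%u", prefix, size);
    char *n;

    if (len < 0)
        return NULL;
    n = malloc((size_t)len + 1);
    if (n)
        snprintf(n, (size_t)len + 1, "%s-%u", prefix, size);
    return n;
}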
Kconfig 150 def_bool n
274 may say n to override this.
410 def_bool n
485 If unsure, say "n".
memory.c 4502 int i, n, base, l; in process_huge_page() local
4508 n = (addr_hint - addr) / PAGE_SIZE; in process_huge_page()
4509 if (2 * n <= pages_per_huge_page) { in process_huge_page()
4512 l = n; in process_huge_page()
4514 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) { in process_huge_page()
4520 base = pages_per_huge_page - 2 * (pages_per_huge_page - n); in process_huge_page()
4521 l = pages_per_huge_page - n; in process_huge_page()
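The arithmetic here keeps the faulting subpage cache-hot: locate the hinted subpage n, sweep the half of the huge page far from it first, and save a window of 2*l subpages around the target for last. A sketch of the full ordering (PAGE_SIZE, process_subpage and process_huge_sketch are illustrative stand-ins):

#define PAGE_SIZE 4096UL

static void process_subpage(int i)
{
    (void)i;    /* clear or copy subpage i here */
}

static void process_huge_sketch(unsigned long addr_hint, unsigned long addr,
                                int pages_per_huge_page)
{
    int i, n, base, l;

    n = (addr_hint - addr) / PAGE_SIZE;
    if (2 * n <= pages_per_huge_page) {
        /* target in the first half: process the tail, top down */
        base = 0;
        l = n;
        for (i = pages_per_huge_page - 1; i >= 2 * n; i--)
            process_subpage(i);
    } else {
        /* target in the second half: process the head, bottom up */
        base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
        l = pages_per_huge_page - n;
        for (i = 0; i < base; i++)
            process_subpage(i);
    }
    /* walk the remaining 2*l subpages from both ends of
     * [base, base + 2*l) inward toward the target n */
    for (i = 0; i < l; i++) {
        process_subpage(base + i);
        process_subpage(base + 2 * l - 1 - i);
    }
}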
zswap.c 1205 struct zswap_entry *entry, *n; in zswap_frontswap_invalidate_area() local
1212 rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) in zswap_frontswap_invalidate_area()
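The postorder-safe walk frees each entry only after the walk is done with it. An analog of the same teardown on a plain binary tree (struct tnode and free_tree are hypothetical):

#include <stdlib.h>

struct tnode {
    struct tnode *left, *right;
};

static void free_tree(struct tnode *n)
{
    if (!n)
        return;
    free_tree(n->left);
    free_tree(n->right);
    free(n);    /* safe: both subtrees already released */
}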
page_alloc.c 5546 int n, val; in find_next_best_node() local
5557 for_each_node_state(n, N_MEMORY) { in find_next_best_node()
5560 if (node_isset(n, *used_node_mask)) in find_next_best_node()
5564 val = node_distance(node, n); in find_next_best_node()
5567 val += (n < node); in find_next_best_node()
5570 tmp = cpumask_of_node(n); in find_next_best_node()
5576 val += node_load[n]; in find_next_best_node()
5580 best_node = n; in find_next_best_node()
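find_next_best_node() scores every candidate node and keeps the minimum: skip nodes already in the fallback order, start from the NUMA distance, then add small penalties. A sketch of that scoring loop (NR_NODES and the used[], dist[], load[] arrays are hypothetical stand-ins for the nodemask, node_distance() and node_load[]):

#include <limits.h>

#define NR_NODES 8

static int pick_best_node(int node, const int used[NR_NODES],
                          const int dist[NR_NODES], const int load[NR_NODES])
{
    int n, val, best_node = -1, min_val = INT_MAX;

    for (n = 0; n < NR_NODES; n++) {
        if (used[n])
            continue;

        val = dist[n];          /* prefer nearby nodes */
        val += (n < node);      /* slight preference for higher node ids */
        val += load[n];         /* penalize already-loaded nodes */

        if (val < min_val) {
            min_val = val;
            best_node = n;
        }
    }
    return best_node;
}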
kmemleak.c 1617 loff_t n = *pos; in kmemleak_seq_start() local
1626 if (n-- > 0) in kmemleak_seq_start()
filemap.c 968 int n; in __page_cache_alloc() local
975 n = cpuset_mem_spread_node(); in __page_cache_alloc()
976 page = __alloc_pages_node(n, gfp, 0); in __page_cache_alloc()
memcontrol.c 4919 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) in mem_cgroup_id_get_many() argument
4921 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
4924 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) in mem_cgroup_id_put_many() argument
4926 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
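The get_many/put_many pairing takes or drops n references at once and frees the object only when the count reaches zero. A userspace sketch with a plain C11 atomic standing in for refcount_t (struct obj and the helper names are hypothetical; the object is assumed heap-allocated):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
    atomic_uint refs;
};

static void obj_get_many(struct obj *o, unsigned int n)
{
    atomic_fetch_add(&o->refs, n);
}

static void obj_put_many(struct obj *o, unsigned int n)
{
    /* fetch_sub returns the old value; old == n means these were
     * the last n references, mirroring refcount_sub_and_test() */
    if (atomic_fetch_sub(&o->refs, n) == n)
        free(o);
}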
/mm/kasan/
Makefile 2 KASAN_SANITIZE := n
3 UBSAN_SANITIZE_common.o := n
4 UBSAN_SANITIZE_generic.o := n
5 UBSAN_SANITIZE_generic_report.o := n
6 UBSAN_SANITIZE_tags.o := n
7 KCOV_INSTRUMENT := n