
Searched refs:m (Results 1 – 12 of 12) sorted by relevance

/mm/
vmstat.c
877 static void *frag_start(struct seq_file *m, loff_t *pos) in frag_start() argument
890 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos) in frag_next() argument
898 static void frag_stop(struct seq_file *m, void *arg) in frag_stop() argument
903 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat, in walk_zones_in_node() argument
904 void (*print)(struct seq_file *m, pg_data_t *, struct zone *)) in walk_zones_in_node() argument
915 print(m, pgdat, zone); in walk_zones_in_node()
935 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat, in frag_show_print() argument
940 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in frag_show_print()
942 seq_printf(m, "%6lu ", zone->free_area[order].nr_free); in frag_show_print()
943 seq_putc(m, '\n'); in frag_show_print()
[all …]
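
The vmstat.c hits above are the standard seq_file iterator: frag_start()/frag_next()/frag_stop() step through the positions and frag_show_print() formats one record with seq_printf()/seq_putc(). As a hedged sketch of that contract, not code taken from vmstat.c, the following minimal module exposes an array through the same start/next/stop/show callbacks; all demo_* names and the proc entry name are invented, and it assumes a kernel of roughly this listing's vintage, where /proc files are still registered through a file_operations.

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_vals[] = { 10, 20, 30 };

    /* start(): return a record pointer for position *pos, or NULL when done */
    static void *demo_start(struct seq_file *m, loff_t *pos)
    {
        if ((size_t)*pos >= ARRAY_SIZE(demo_vals))
            return NULL;
        return &demo_vals[*pos];
    }

    /* next(): advance *pos and hand back the next record (or NULL at the end) */
    static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
    {
        ++*pos;
        return demo_start(m, pos);
    }

    /* stop(): nothing to unlock or free in this sketch */
    static void demo_stop(struct seq_file *m, void *v)
    {
    }

    /* show(): format one record into the seq_file buffer */
    static int demo_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "value %d\n", *(int *)v);
        return 0;
    }

    static const struct seq_operations demo_seq_ops = {
        .start = demo_start,
        .next  = demo_next,
        .stop  = demo_stop,
        .show  = demo_show,
    };

    static int demo_open(struct inode *inode, struct file *file)
    {
        return seq_open(file, &demo_seq_ops);
    }

    static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
    };

    static int __init demo_init(void)
    {
        if (!proc_create("seqfile_demo", 0444, NULL, &demo_fops))
            return -ENOMEM;
        return 0;
    }

    static void __exit demo_exit(void)
    {
        remove_proc_entry("seqfile_demo", NULL);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Reading /proc/seqfile_demo would then print one "value N" line per element, with the seq_file core driving the start/show/next/stop cycle on each read, exactly the shape of the frag_* callbacks listed above.
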
slab_common.c
1040 static void print_slabinfo_header(struct seq_file *m) in print_slabinfo_header() argument
1047 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); in print_slabinfo_header()
1049 seq_puts(m, "slabinfo - version: 2.1\n"); in print_slabinfo_header()
1051 seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>"); in print_slabinfo_header()
1052 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); in print_slabinfo_header()
1053 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); in print_slabinfo_header()
1055 …seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallo… in print_slabinfo_header()
1056 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); in print_slabinfo_header()
1058 seq_putc(m, '\n'); in print_slabinfo_header()
1061 void *slab_start(struct seq_file *m, loff_t *pos) in slab_start() argument
[all …]
slob.c
429 unsigned int *m; in __do_kmalloc_node() local
441 m = slob_alloc(size + align, gfp, align, node); in __do_kmalloc_node()
443 if (!m) in __do_kmalloc_node()
445 *m = size; in __do_kmalloc_node()
446 ret = (void *)m + align; in __do_kmalloc_node()
499 unsigned int *m = (unsigned int *)(block - align); in kfree() local
500 slob_free(m, *m + align); in kfree()
511 unsigned int *m; in ksize() local
522 m = (unsigned int *)(block - align); in ksize()
523 return SLOB_UNITS(*m) * SLOB_UNIT; in ksize()
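
The slob.c hits show the bookkeeping trick behind kmalloc()/ksize()/kfree() in SLOB: for small objects it stores the requested size in the word just ahead of the aligned payload and recovers it later by stepping the pointer back. Below is a user-space analogue of that header-before-payload scheme, sketched under invented names (tiny_*), with plain malloc() standing in for slob_alloc() and a fixed header slot standing in for the arch alignment.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* stand-in for the SLOB alignment slot that hides the size header */
    #define HDR_ALIGN sizeof(unsigned int)

    static void *tiny_malloc(size_t size)
    {
        unsigned int *m = malloc(size + HDR_ALIGN);    /* extra room for the header */

        if (!m)
            return NULL;
        *m = (unsigned int)size;                       /* *m = size, as in __do_kmalloc_node() */
        return (char *)m + HDR_ALIGN;                  /* hand out only the payload */
    }

    static size_t tiny_usable_size(void *block)
    {
        unsigned int *m = (unsigned int *)((char *)block - HDR_ALIGN);

        return *m;                                     /* read the header back, as in ksize() */
    }

    static void tiny_free(void *block)
    {
        unsigned int *m = (unsigned int *)((char *)block - HDR_ALIGN);

        free(m);                                       /* release header and payload together */
    }

    int main(void)
    {
        char *p = tiny_malloc(32);

        if (!p)
            return 1;
        strcpy(p, "hello");
        printf("\"%s\" stored in a block of usable size %zu\n", p, tiny_usable_size(p));
        tiny_free(p);
        return 0;
    }
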
memblock.c
872 struct memblock_region *m = &type_a->regions[idx_a]; in __next_mem_range() local
874 phys_addr_t m_start = m->base; in __next_mem_range()
875 phys_addr_t m_end = m->base + m->size; in __next_mem_range()
876 int m_nid = memblock_get_region_node(m); in __next_mem_range()
883 if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) in __next_mem_range()
887 if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) in __next_mem_range()
891 if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) in __next_mem_range()
985 struct memblock_region *m = &type_a->regions[idx_a]; in __next_mem_range_rev() local
987 phys_addr_t m_start = m->base; in __next_mem_range_rev()
988 phys_addr_t m_end = m->base + m->size; in __next_mem_range_rev()
[all …]
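
The memblock.c hits come from __next_mem_range() and its reverse twin, which walk the region array and skip any region the caller's flags rule out: hotpluggable regions when movable_node is enabled, non-mirrored regions when MEMBLOCK_MIRROR is requested, nomap regions unless MEMBLOCK_NOMAP is requested. A user-space sketch of that filtering loop follows; the region contents and the REGION_* flag names are invented for the example.

    #include <stdio.h>

    #define REGION_MIRROR 0x1    /* invented flag, loosely mirroring MEMBLOCK_MIRROR */
    #define REGION_NOMAP  0x2    /* invented flag, loosely mirroring MEMBLOCK_NOMAP */

    struct region {
        unsigned long long base;
        unsigned long long size;
        unsigned int flags;
    };

    static const struct region regions[] = {
        { 0x00000000ULL, 0x1000000ULL, 0 },
        { 0x01000000ULL, 0x0800000ULL, REGION_MIRROR },
        { 0x02000000ULL, 0x0400000ULL, REGION_NOMAP },
    };

    int main(void)
    {
        unsigned int want = REGION_MIRROR;   /* caller asks for mirrored memory only */
        size_t i;

        for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
            const struct region *m = &regions[i];
            unsigned long long m_start = m->base;
            unsigned long long m_end = m->base + m->size;

            /* skip regions the requested flags rule out, as __next_mem_range() does */
            if ((want & REGION_MIRROR) && !(m->flags & REGION_MIRROR))
                continue;
            if (!(want & REGION_NOMAP) && (m->flags & REGION_NOMAP))
                continue;

            printf("usable range: [%#llx-%#llx)\n", m_start, m_end);
        }
        return 0;
    }
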
slab.h
162 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
369 void *slab_start(struct seq_file *m, loff_t *pos);
370 void *slab_next(struct seq_file *m, void *p, loff_t *pos);
371 void slab_stop(struct seq_file *m, void *p);
372 int memcg_slab_show(struct seq_file *m, void *p);
memcontrol.c
3117 static int memcg_numa_stat_show(struct seq_file *m, void *v) in memcg_numa_stat_show() argument
3133 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memcg_numa_stat_show()
3137 seq_printf(m, "%s=%lu", stat->name, nr); in memcg_numa_stat_show()
3141 seq_printf(m, " N%d=%lu", nid, nr); in memcg_numa_stat_show()
3143 seq_putc(m, '\n'); in memcg_numa_stat_show()
3152 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); in memcg_numa_stat_show()
3158 seq_printf(m, " N%d=%lu", nid, nr); in memcg_numa_stat_show()
3160 seq_putc(m, '\n'); in memcg_numa_stat_show()
3167 static int memcg_stat_show(struct seq_file *m, void *v) in memcg_stat_show() argument
3169 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memcg_stat_show()
[all …]
vmalloc.c
2572 static void *s_start(struct seq_file *m, loff_t *pos) in s_start() argument
2591 static void *s_next(struct seq_file *m, void *p, loff_t *pos) in s_next() argument
2603 static void s_stop(struct seq_file *m, void *p) in s_stop() argument
2609 static void show_numa_info(struct seq_file *m, struct vm_struct *v) in show_numa_info() argument
2612 unsigned int nr, *counters = m->private; in show_numa_info()
2629 seq_printf(m, " N%u=%u", nr, counters[nr]); in show_numa_info()
2633 static int s_show(struct seq_file *m, void *p) in s_show() argument
2647 seq_printf(m, "0x%pK-0x%pK %7ld", in s_show()
2651 seq_printf(m, " %pS", v->caller); in s_show()
2654 seq_printf(m, " pages=%d", v->nr_pages); in s_show()
[all …]
slab.c
4018 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) in slabinfo_show_stats() argument
4032 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu", in slabinfo_show_stats()
4044 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", in slabinfo_show_stats()
4169 static void show_symbol(struct seq_file *m, unsigned long address) in show_symbol() argument
4176 seq_printf(m, "%s+%#lx/%#lx", name, offset, size); in show_symbol()
4178 seq_printf(m, " [%s]", modname); in show_symbol()
4182 seq_printf(m, "%p", (void *)address); in show_symbol()
4185 static int leaks_show(struct seq_file *m, void *p) in leaks_show() argument
4191 unsigned long *x = m->private; in leaks_show()
4229 m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL); in leaks_show()
[all …]
zsmalloc.c
420 unsigned long m; in get_zspage_mapping() local
423 m = (unsigned long)page->mapping; in get_zspage_mapping()
424 *fullness = m & FULLNESS_MASK; in get_zspage_mapping()
425 *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK; in get_zspage_mapping()
431 unsigned long m; in set_zspage_mapping() local
434 m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) | in set_zspage_mapping()
436 page->mapping = (struct address_space *)m; in set_zspage_mapping()
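
get_zspage_mapping()/set_zspage_mapping() above overload page->mapping to carry two small integers, the size-class index and the fullness group, packed into a single word with shifts and masks. Here is a standalone sketch of that encoding; the bit widths are chosen for the example rather than taken from zsmalloc.

    #include <assert.h>
    #include <stdio.h>

    #define FULLNESS_BITS  4
    #define FULLNESS_MASK  ((1UL << FULLNESS_BITS) - 1)
    #define CLASS_IDX_BITS 28
    #define CLASS_IDX_MASK ((1UL << CLASS_IDX_BITS) - 1)

    /* pack class index and fullness into one word, as set_zspage_mapping() does */
    static unsigned long pack(unsigned int class_idx, unsigned int fullness)
    {
        return ((unsigned long)(class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
               (fullness & FULLNESS_MASK);
    }

    /* pull both fields back out, as get_zspage_mapping() does */
    static void unpack(unsigned long m, unsigned int *class_idx, unsigned int *fullness)
    {
        *fullness = m & FULLNESS_MASK;
        *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
    }

    int main(void)
    {
        unsigned int class_idx, fullness;
        unsigned long m = pack(42, 3);

        unpack(m, &class_idx, &fullness);
        assert(class_idx == 42 && fullness == 3);
        printf("packed %#lx -> class %u, fullness %u\n", m, class_idx, fullness);
        return 0;
    }
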
hugetlb.c
2026 struct huge_bootmem_page *m; in alloc_bootmem_huge_page() local
2041 m = addr; in alloc_bootmem_huge_page()
2048 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); in alloc_bootmem_huge_page()
2050 list_add(&m->list, &huge_boot_pages); in alloc_bootmem_huge_page()
2051 m->hstate = h; in alloc_bootmem_huge_page()
2067 struct huge_bootmem_page *m; in gather_bootmem_prealloc() local
2069 list_for_each_entry(m, &huge_boot_pages, list) { in gather_bootmem_prealloc()
2070 struct hstate *h = m->hstate; in gather_bootmem_prealloc()
2074 page = pfn_to_page(m->phys >> PAGE_SHIFT); in gather_bootmem_prealloc()
2075 memblock_free_late(__pa(m), in gather_bootmem_prealloc()
[all …]
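
The hugetlb.c hits show a two-phase boot pattern: alloc_bootmem_huge_page() only records each early allocation on the huge_boot_pages list, and gather_bootmem_prealloc() later walks that list to turn the raw memory into proper huge pages. A user-space sketch of the record-now, finish-later idea follows; all struct and function names are invented for the example.

    #include <stdio.h>
    #include <stdlib.h>

    struct boot_chunk {
        void *addr;
        struct boot_chunk *next;
    };

    static struct boot_chunk *boot_chunks;   /* stand-in for huge_boot_pages */

    /* phase 1: grab memory and remember it, do nothing else yet */
    static void record_boot_chunk(size_t size)
    {
        struct boot_chunk *m = malloc(sizeof(*m));

        if (!m)
            return;
        m->addr = malloc(size);
        if (!m->addr) {
            free(m);
            return;
        }
        m->next = boot_chunks;               /* like list_add(&m->list, &huge_boot_pages) */
        boot_chunks = m;
    }

    /* phase 2: walk everything recorded earlier and finish the setup */
    static void gather_boot_chunks(void)
    {
        struct boot_chunk *m;

        for (m = boot_chunks; m; m = m->next)    /* like list_for_each_entry() */
            printf("finishing setup of chunk at %p\n", m->addr);
    }

    int main(void)
    {
        record_boot_chunk(4096);
        record_boot_chunk(4096);
        gather_boot_chunks();
        return 0;
    }
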
backing-dev.c
47 static int bdi_debug_stats_show(struct seq_file *m, void *v) in bdi_debug_stats_show() argument
49 struct backing_dev_info *bdi = m->private; in bdi_debug_stats_show()
74 seq_printf(m, in bdi_debug_stats_show()
slub.c
1915 enum slab_modes l = M_NONE, m = M_NONE; in deactivate_slab() local
1986 m = M_FREE; in deactivate_slab()
1988 m = M_PARTIAL; in deactivate_slab()
1999 m = M_FULL; in deactivate_slab()
2011 if (l != m) { in deactivate_slab()
2021 if (m == M_PARTIAL) { in deactivate_slab()
2026 } else if (m == M_FULL) { in deactivate_slab()
2034 l = m; in deactivate_slab()
2044 if (m == M_FREE) { in deactivate_slab()
5698 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) in slabinfo_show_stats() argument
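
In deactivate_slab() above, m holds the freshly computed state of the page (M_FREE, M_PARTIAL or M_FULL) and l the state it was last committed to; the page is only moved between the node's lists when l != m, so recomputing the state stays cheap. Below is a user-space sketch of that change-only-on-transition bookkeeping; the states and the work done per transition are invented for the example.

    #include <stdio.h>

    enum mode { M_NONE, M_FREE, M_PARTIAL, M_FULL };

    static const char *const mode_name[] = { "none", "free", "partial", "full" };

    /* recompute the desired state from the current occupancy */
    static enum mode classify(int inuse, int objects)
    {
        if (inuse == 0)
            return M_FREE;
        if (inuse < objects)
            return M_PARTIAL;
        return M_FULL;
    }

    int main(void)
    {
        enum mode l = M_NONE, m = M_NONE;
        const int inuse_samples[] = { 0, 0, 3, 8, 8 };
        size_t i;

        for (i = 0; i < sizeof(inuse_samples) / sizeof(inuse_samples[0]); i++) {
            m = classify(inuse_samples[i], 8);
            if (l != m) {      /* only pay for a move when the state actually changed */
                printf("moving object from %s list to %s list\n",
                       mode_name[l], mode_name[m]);
                l = m;
            }
        }
        return 0;
    }
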