
Searched refs:count (Results 1 – 25 of 39) sorted by relevance

/mm/
cma.c
53 return cma->count << PAGE_SHIFT; in cma_get_size()
82 unsigned int count) in cma_clear_bitmap() argument
87 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_clear_bitmap()
98 unsigned i = cma->count >> pageblock_order; in cma_activate_area()
138 cma->count = 0; in cma_activate_area()
199 cma->count = size >> PAGE_SHIFT; in cma_init_reserved_mem()
362 struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) in cma_alloc() argument
371 if (!cma || !cma->count) in cma_alloc()
375 count, align); in cma_alloc()
377 if (!count) in cma_alloc()
[all …]
page_owner.c
122 int i, count; in check_recursive_alloc() local
127 for (i = 0, count = 0; i < trace->nr_entries; i++) { in check_recursive_alloc()
128 if (trace->entries[i] == ip && ++count == 2) in check_recursive_alloc()
250 unsigned long count[MIGRATE_TYPES] = { 0, }; in pagetypeinfo_showmixedcount_print() local
303 count[MIGRATE_MOVABLE]++; in pagetypeinfo_showmixedcount_print()
305 count[pageblock_mt]++; in pagetypeinfo_showmixedcount_print()
317 seq_printf(m, "%12lu ", count[i]); in pagetypeinfo_showmixedcount_print()
322 print_page_owner(char __user *buf, size_t count, unsigned long pfn, in print_page_owner() argument
337 kbuf = kmalloc(count, GFP_KERNEL); in print_page_owner()
341 ret = snprintf(kbuf, count, in print_page_owner()
[all …]
cma_debug.c
60 if (start >= cma->count) in cma_maxchunk_get()
93 static int cma_free_mem(struct cma *cma, int count) in cma_free_mem() argument
97 while (count) { in cma_free_mem()
102 if (mem->n <= count) { in cma_free_mem()
104 count -= mem->n; in cma_free_mem()
107 cma_release(cma, mem->p, count); in cma_free_mem()
108 mem->p += count; in cma_free_mem()
109 mem->n -= count; in cma_free_mem()
110 count = 0; in cma_free_mem()
132 static int cma_alloc_mem(struct cma *cma, int count) in cma_alloc_mem() argument
[all …]
page_counter.c
24 new = atomic_long_sub_return(nr_pages, &counter->count); in page_counter_cancel()
43 new = atomic_long_add_return(nr_pages, &c->count); in page_counter_charge()
84 new = atomic_long_add_return(nr_pages, &c->count); in page_counter_try_charge()
86 atomic_long_sub(nr_pages, &c->count); in page_counter_try_charge()
138 long count; in page_counter_limit() local
151 count = atomic_long_read(&counter->count); in page_counter_limit()
153 if (count > limit) in page_counter_limit()
158 if (atomic_long_read(&counter->count) <= count) in page_counter_limit()
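The page_counter.c hits above show the charge-with-rollback idiom: add to the atomic counter optimistically, then subtract the charge back out if the new total exceeds the limit, so the common under-limit path costs a single atomic add. A minimal userspace sketch of the same pattern; the struct and function names here are illustrative, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct page_counter: a usage count and a limit. */
struct my_counter {
	atomic_long count;
	long limit;
};

/* Charge optimistically; roll back if the new total breaks the limit. */
static bool counter_try_charge(struct my_counter *c, long nr_pages)
{
	long new = atomic_fetch_add(&c->count, nr_pages) + nr_pages;

	if (new > c->limit) {
		atomic_fetch_sub(&c->count, nr_pages);	/* roll back the charge */
		return false;
	}
	return true;
}

int main(void)
{
	struct my_counter c = { .limit = 100 };

	atomic_init(&c.count, 0);
	printf("%d\n", counter_try_charge(&c, 60));	/* 1: 60 <= 100 */
	printf("%d\n", counter_try_charge(&c, 60));	/* 0: would exceed 100, rolled back */
	printf("%ld\n", atomic_load(&c.count));		/* 60 */
	return 0;
}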
swapfile.c
774 unsigned char count; in swap_entry_free() local
777 count = p->swap_map[offset]; in swap_entry_free()
778 has_cache = count & SWAP_HAS_CACHE; in swap_entry_free()
779 count &= ~SWAP_HAS_CACHE; in swap_entry_free()
784 } else if (count == SWAP_MAP_SHMEM) { in swap_entry_free()
789 count = 0; in swap_entry_free()
790 } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) { in swap_entry_free()
791 if (count == COUNT_CONTINUED) { in swap_entry_free()
792 if (swap_count_continued(p, offset, count)) in swap_entry_free()
793 count = SWAP_MAP_MAX | COUNT_CONTINUED; in swap_entry_free()
[all …]
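swap_entry_free() above reads one swap_map byte as a packed value: a flag bit (SWAP_HAS_CACHE) alongside a small reference count in the remaining bits, split apart with a mask-and-clear. A hedged sketch of that unpack step; the bit values below are illustrative, not the kernel's actual constants:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit layout (the real values live in the kernel's swap headers). */
#define HAS_CACHE  0x40u	/* flag packed alongside the count */
#define COUNT_MASK 0x3fu	/* low bits hold the use count     */

/* Split the packed byte into its cache flag and its bare count,
 * mirroring the first lines of swap_entry_free(). */
static void unpack_swap_byte(unsigned char entry,
			     bool *has_cache, unsigned int *count)
{
	*has_cache = entry & HAS_CACHE;
	*count = entry & COUNT_MASK;
}

int main(void)
{
	bool has_cache;
	unsigned int count;

	unpack_swap_byte(0x43, &has_cache, &count);
	printf("has_cache=%d count=%u\n", has_cache, count);	/* 1 and 3 */
	return 0;
}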
vmalloc.c
1116 void vm_unmap_ram(const void *mem, unsigned int count) in vm_unmap_ram() argument
1118 unsigned long size = (unsigned long)count << PAGE_SHIFT; in vm_unmap_ram()
1129 if (likely(count <= VMAP_MAX_ALLOC)) in vm_unmap_ram()
1151 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) in vm_map_ram() argument
1153 unsigned long size = (unsigned long)count << PAGE_SHIFT; in vm_map_ram()
1157 if (likely(count <= VMAP_MAX_ALLOC)) { in vm_map_ram()
1173 vm_unmap_ram(mem, count); in vm_map_ram()
1583 void *vmap(struct page **pages, unsigned int count, in vmap() argument
1591 if (count > totalram_pages) in vmap()
1594 size = (unsigned long)count << PAGE_SHIFT; in vmap()
[all …]
page_idle.c
118 loff_t pos, size_t count) in page_idle_bitmap_read() argument
125 if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) in page_idle_bitmap_read()
132 end_pfn = pfn + count * BITS_PER_BYTE; in page_idle_bitmap_read()
163 loff_t pos, size_t count) in page_idle_bitmap_write() argument
170 if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) in page_idle_bitmap_write()
177 end_pfn = pfn + count * BITS_PER_BYTE; in page_idle_bitmap_write()
nommu.c
271 long vread(char *buf, char *addr, unsigned long count) in vread() argument
274 if ((unsigned long) buf + count < count) in vread()
275 count = -(unsigned long) buf; in vread()
277 memcpy(buf, addr, count); in vread()
278 return count; in vread()
281 long vwrite(char *buf, char *addr, unsigned long count) in vwrite() argument
284 if ((unsigned long) addr + count < count) in vwrite()
285 count = -(unsigned long) addr; in vwrite()
287 memcpy(addr, buf, count); in vwrite()
288 return count; in vwrite()
[all …]
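vread() and vwrite() above use a compact overflow guard: if the pointer-plus-count sum wraps past the top of the address space, the unsigned sum comes out smaller than count, and count is clamped to -(unsigned long)pointer, which is exactly the number of bytes left before the wrap. A standalone sketch of the idiom; the function name is made up for illustration:

#include <stdio.h>

/* Clamp a length so base + len cannot wrap around: on unsigned
 * overflow, -(unsigned long)base is the byte count from base up to
 * the top of the address space. */
static unsigned long clamp_to_end(unsigned long base, unsigned long len)
{
	if (base + len < len)	/* unsigned wraparound detected */
		len = -base;	/* bytes remaining before the wrap */
	return len;
}

int main(void)
{
	unsigned long base = (unsigned long)-4096;	/* 4 KiB below the top */

	printf("%lu\n", clamp_to_end(base, 1 << 20));	/* clamped to 4096 */
	printf("%lu\n", clamp_to_end(4096, 4096));	/* unchanged: 4096 */
	return 0;
}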
maccess.c
86 long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) in strncpy_from_unsafe() argument
92 if (unlikely(count <= 0)) in strncpy_from_unsafe()
100 } while (dst[-1] && ret == 0 && src - unsafe_addr < count); in strncpy_from_unsafe()
quicklist.c
91 unsigned long count = 0; in quicklist_total_size() local
98 count += q->nr_pages; in quicklist_total_size()
100 return count; in quicklist_total_size()
early_ioremap.c
81 int count = 0; in check_early_ioremap_leak() local
86 count++; in check_early_ioremap_leak()
88 if (WARN(count, KERN_WARNING in check_early_ioremap_leak()
91 count)) in check_early_ioremap_leak()
highmem.c
216 int count; in map_new_virtual() local
221 count = get_pkmap_entries_count(color); in map_new_virtual()
227 count = get_pkmap_entries_count(color); in map_new_virtual()
231 if (--count) in map_new_virtual()
huge_memory.c
148 const char *buf, size_t count, in triple_flag_store() argument
154 min(sizeof("defer")-1, count))) { in triple_flag_store()
161 min(sizeof("always")-1, count))) { in triple_flag_store()
166 min(sizeof("madvise")-1, count))) { in triple_flag_store()
171 min(sizeof("never")-1, count))) { in triple_flag_store()
178 return count; in triple_flag_store()
194 const char *buf, size_t count) in enabled_store() argument
198 ret = triple_flag_store(kobj, attr, buf, count, in enabled_store()
224 const char *buf, size_t count, in single_hugepage_flag_store() argument
241 return count; in single_hugepage_flag_store()
[all …]
khugepaged.c
116 const char *buf, size_t count) in scan_sleep_millisecs_store() argument
129 return count; in scan_sleep_millisecs_store()
144 const char *buf, size_t count) in alloc_sleep_millisecs_store() argument
157 return count; in alloc_sleep_millisecs_store()
171 const char *buf, size_t count) in pages_to_scan_store() argument
182 return count; in pages_to_scan_store()
214 const char *buf, size_t count) in khugepaged_defrag_store() argument
216 return single_hugepage_flag_store(kobj, attr, buf, count, in khugepaged_defrag_store()
239 const char *buf, size_t count) in khugepaged_max_ptes_none_store() argument
250 return count; in khugepaged_max_ptes_none_store()
[all …]
cma.h
6 unsigned long count; member
21 return cma->count >> cma->order_per_bit; in cma_bitmap_maxno()
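In cma.h, count is the region size in pages, while the allocation bitmap tracks 2^order_per_bit pages per bit, so cma_bitmap_maxno() reduces to a shift. A tiny worked mirror of that calculation; the struct name below is illustrative:

#include <stdio.h>

/* One bitmap bit covers 1 << order_per_bit pages, so a region of
 * `count` pages needs count >> order_per_bit bits. */
struct cma_like {
	unsigned long count;		/* region size in pages */
	unsigned int order_per_bit;	/* pages per bit = 1 << this */
};

static unsigned long bitmap_maxno(const struct cma_like *cma)
{
	return cma->count >> cma->order_per_bit;
}

int main(void)
{
	struct cma_like cma = { .count = 4096, .order_per_bit = 2 };

	printf("%lu\n", bitmap_maxno(&cma));	/* 4096 pages / 4 per bit = 1024 bits */
	return 0;
}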
bootmem.c
172 unsigned long *map, start, end, pages, cur, count = 0; in free_all_bootmem_core() local
211 count += BITS_PER_LONG; in free_all_bootmem_core()
221 count++; in free_all_bootmem_core()
233 count += pages; in free_all_bootmem_core()
238 bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); in free_all_bootmem_core()
240 return count; in free_all_bootmem_core()
slub.c
1423 unsigned long i, count = oo_objects(s->oo); in init_cache_random_seq() local
1429 err = cache_random_seq_create(s, count, GFP_KERNEL); in init_cache_random_seq()
1438 for (i = 0; i < count; i++) in init_cache_random_seq()
4322 unsigned long count = 0; in validate_slab_node() local
4330 count++; in validate_slab_node()
4332 if (count != n->nr_partial) in validate_slab_node()
4334 s->name, count, n->nr_partial); in validate_slab_node()
4341 count++; in validate_slab_node()
4343 if (count != atomic_long_read(&n->nr_slabs)) in validate_slab_node()
4345 s->name, count, atomic_long_read(&n->nr_slabs)); in validate_slab_node()
[all …]
page_alloc.c
1105 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1119 while (count) { in free_pcppages_bulk()
1139 batch_free = count; in free_pcppages_bulk()
1160 } while (--count && --batch_free && !list_empty(list)); in free_pcppages_bulk()
2228 unsigned long count, struct list_head *list, in rmqueue_bulk() argument
2234 for (i = 0; i < count; ++i) { in rmqueue_bulk()
2289 to_drain = min(pcp->count, batch); in drain_zone_pages()
2292 pcp->count -= to_drain; in drain_zone_pages()
2315 if (pcp->count) { in drain_pages_zone()
2316 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
[all …]
memcontrol.c
575 val += per_cpu(memcg->stat->count[idx], cpu); in mem_cgroup_read_stat()
605 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS], in mem_cgroup_charge_statistics()
608 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE], in mem_cgroup_charge_statistics()
613 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], in mem_cgroup_charge_statistics()
1078 unsigned long count; in mem_cgroup_margin() local
1081 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1083 if (count < limit) in mem_cgroup_margin()
1084 margin = limit - count; in mem_cgroup_margin()
1087 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1089 if (count <= limit) in mem_cgroup_margin()
[all …]
vmstat.c
679 !__this_cpu_read(p->pcp.count)) in refresh_cpu_vm_stats()
693 if (__this_cpu_read(p->pcp.count)) { in refresh_cpu_vm_stats()
796 unsigned long count = 0; in sum_zone_node_page_state() local
799 count += zone_page_state(zones + i, item); in sum_zone_node_page_state()
801 return count; in sum_zone_node_page_state()
1213 unsigned long count[MIGRATE_TYPES] = { 0, }; in pagetypeinfo_showblockcount_print() local
1233 count[mtype]++; in pagetypeinfo_showblockcount_print()
1239 seq_printf(m, "%12lu ", count[mtype]); in pagetypeinfo_showblockcount_print()
1416 pageset->pcp.count, in zoneinfo_show_print()
kmemleak.c
163 int count; member
330 return object->count != KMEMLEAK_BLACK && in color_white()
331 object->count < object->min_count; in color_white()
337 object->count >= object->min_count; in color_gray()
393 pr_notice(" count = %d\n", object->count); in dump_object_info()
564 object->count = 0; /* white color initially */ in create_object()
1272 object->count++; in scan_block()
1406 object->count = 0; in kmemleak_scan()
1484 object->count = object->min_count; in kmemleak_scan()
slab_common.c
1064 size_t count) in freelist_randomize() argument
1069 for (i = 0; i < count; i++) in freelist_randomize()
1073 for (i = count - 1; i > 0; i--) { in freelist_randomize()
1081 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, in cache_random_seq_create() argument
1086 if (count < 2 || cachep->random_seq) in cache_random_seq_create()
1089 cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp); in cache_random_seq_create()
1096 freelist_randomize(&state, cachep->random_seq, count); in cache_random_seq_create()
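freelist_randomize() above is a Fisher–Yates shuffle over slab object indices: fill the sequence 0..count-1, then swap each slot with a randomly chosen earlier-or-equal slot. A userspace sketch of the same algorithm, with rand() standing in for the kernel's seeded PRNG state:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Fisher-Yates shuffle over 0..count-1, as in freelist_randomize(). */
static void randomize(unsigned int *list, size_t count)
{
	size_t i, rand_idx;
	unsigned int tmp;

	for (i = 0; i < count; i++)
		list[i] = (unsigned int)i;

	if (count < 2)	/* mirrors cache_random_seq_create()'s guard */
		return;

	for (i = count - 1; i > 0; i--) {
		rand_idx = (size_t)rand() % (i + 1);	/* pick from 0..i */
		tmp = list[i];
		list[i] = list[rand_idx];
		list[rand_idx] = tmp;
	}
}

int main(void)
{
	unsigned int seq[8];
	size_t i;

	srand((unsigned int)time(NULL));
	randomize(seq, 8);
	for (i = 0; i < 8; i++)
		printf("%u ", seq[i]);	/* some permutation of 0..7 */
	printf("\n");
	return 0;
}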
mlock.c
637 int count = 0; in count_mm_mlocked_page_nr() local
653 count -= (start - vma->vm_start); in count_mm_mlocked_page_nr()
655 count += start + len - vma->vm_start; in count_mm_mlocked_page_nr()
658 count += vma->vm_end - vma->vm_start; in count_mm_mlocked_page_nr()
662 return count >> PAGE_SHIFT; in count_mm_mlocked_page_nr()
nobootmem.c
131 unsigned long count = 0; in free_low_memory_core_early() local
147 count += __free_memory_core(start, end); in free_low_memory_core_early()
149 return count; in free_low_memory_core_early()
hugetlb.c
74 bool free = (spool->count == 0) && (spool->used_hpages == 0); in unlock_or_release_subpool()
99 spool->count = 1; in hugepage_new_subpool()
116 BUG_ON(!spool->count); in hugepage_put_subpool()
117 spool->count--; in hugepage_put_subpool()
2231 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2243 if (count >= h->nr_huge_pages) in try_to_free_low()
2255 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2294 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, in set_max_huge_pages() argument
2314 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
2319 while (count > persistent_huge_pages(h)) { in set_max_huge_pages()
[all …]
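The hugetlb hits show a manual reference count: hugepage_new_subpool() starts count at 1, hugepage_put_subpool() checks and decrements it, and unlock_or_release_subpool() frees the pool only once both the refcount and the in-use page count reach zero. A hedged userspace sketch of that release rule, with the names simplified and the kernel's locking left out:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative subpool: freed only when unreferenced AND empty. */
struct subpool {
	long count;		/* references to the pool   */
	long used_pages;	/* pages handed out from it */
};

/* Mirrors unlock_or_release_subpool()'s condition: free only when the
 * last reference is gone and no pages remain checked out. */
static void release_if_idle(struct subpool *sp)
{
	if (sp->count == 0 && sp->used_pages == 0) {
		printf("subpool released\n");
		free(sp);
	}
}

static void subpool_put(struct subpool *sp)
{
	assert(sp->count > 0);	/* stands in for BUG_ON(!spool->count) */
	sp->count--;
	release_if_idle(sp);
}

static void subpool_return_page(struct subpool *sp)
{
	sp->used_pages--;
	release_if_idle(sp);
}

int main(void)
{
	struct subpool *sp = calloc(1, sizeof(*sp));

	if (!sp)
		return 1;
	sp->count = 1;			/* creator holds the first reference */
	sp->used_pages = 1;
	subpool_put(sp);		/* not freed: one page still in use */
	subpool_return_page(sp);	/* refcount and usage both zero: freed */
	return 0;
}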
