Searched refs:count (Results 1 – 25 of 40) sorted by relevance

/mm/
cma.c
59 return cma->count << PAGE_SHIFT; in cma_get_size()
94 unsigned int count) in cma_clear_bitmap() argument
99 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_clear_bitmap()
122 for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) { in cma_activate_area()
128 for (pfn = base_pfn; pfn < base_pfn + cma->count; in cma_activate_area()
145 for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++) in cma_activate_area()
147 totalcma_pages -= cma->count; in cma_activate_area()
148 cma->count = 0; in cma_activate_area()
216 cma->count = size >> PAGE_SHIFT; in cma_init_reserved_mem()
420 pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); in cma_debug_show_areas()
[all …]
page_pinner.c
23 atomic_t count; member
35 int count; member
128 record->count = page_count(page); in capture_page_state()
180 atomic_set(&page_pinner->count, 0); in __reset_page_pinner()
204 atomic_inc(&page_pinner->count); in __set_page_pinner_handle()
224 print_page_pinner(bool longterm, char __user *buf, size_t count, struct captured_pinner *record) in print_page_pinner() argument
231 count = min_t(size_t, count, PAGE_SIZE); in print_page_pinner()
232 kbuf = kmalloc(count, GFP_KERNEL); in print_page_pinner()
237 ret = snprintf(kbuf, count, "Page pinned for %lld us\n", in print_page_pinner()
243 ret = snprintf(kbuf, count, in print_page_pinner()
[all …]
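
print_page_pinner() above shows the usual debugfs read-side handling of a user-supplied count: clamp it to one page, format into a temporary kernel buffer of that size, then copy the result out (page_owner.c's print_page_owner() below does the same). A minimal sketch of that pattern, with a hypothetical handler name and message, not the page_pinner code itself:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t demo_read(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        char *kbuf;
        int len;
        ssize_t ret;

        if (*ppos)                              /* one-shot read */
                return 0;

        /* Clamp the user-supplied count so the bounce buffer is at most a page. */
        count = min_t(size_t, count, PAGE_SIZE);
        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        len = snprintf(kbuf, count, "Page pinned for %lld us\n", 1234LL);
        if (len >= count) {                     /* message would not fit */
                ret = -ENOSPC;
                goto out;
        }
        if (copy_to_user(buf, kbuf, len)) {
                ret = -EFAULT;
                goto out;
        }
        *ppos += len;
        ret = len;
out:
        kfree(kbuf);
        return ret;
}
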
maccess.c
65 long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) in strncpy_from_kernel_nofault() argument
69 if (unlikely(count <= 0)) in strncpy_from_kernel_nofault()
71 if (!copy_from_kernel_nofault_allowed(unsafe_addr, count)) in strncpy_from_kernel_nofault()
79 } while (dst[-1] && src - unsafe_addr < count); in strncpy_from_kernel_nofault()
170 long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) in strncpy_from_kernel_nofault() argument
176 if (unlikely(count <= 0)) in strncpy_from_kernel_nofault()
178 if (!copy_from_kernel_nofault_allowed(unsafe_addr, count)) in strncpy_from_kernel_nofault()
186 } while (dst[-1] && ret == 0 && src - unsafe_addr < count); in strncpy_from_kernel_nofault()
269 long count) in strncpy_from_user_nofault() argument
274 if (unlikely(count <= 0)) in strncpy_from_user_nofault()
[all …]
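
strncpy_from_kernel_nofault() copies at most count bytes from a possibly invalid kernel address without taking a fault, NUL-terminates the destination, and returns a negative errno if the address could not be read. A hedged usage sketch (the helper name is made up; real callers are typically tracing or crash-diagnostic code):

#include <linux/printk.h>
#include <linux/uaccess.h>

static void dump_possibly_bad_string(const void *unsafe_addr)
{
        char buf[64];
        long len;

        /* count is the size of the destination buffer, terminating NUL included */
        len = strncpy_from_kernel_nofault(buf, unsafe_addr, sizeof(buf));
        if (len < 0) {          /* -EFAULT or -ERANGE: not safely readable */
                pr_info("string at %p is not readable\n", unsafe_addr);
                return;
        }
        pr_info("string at %p: \"%s\"\n", unsafe_addr, buf);
}
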
page_owner.c
308 unsigned long count[MIGRATE_TYPES] = { 0, }; in pagetypeinfo_showmixedcount_print() local
365 count[MIGRATE_MOVABLE]++; in pagetypeinfo_showmixedcount_print()
367 count[pageblock_mt]++; in pagetypeinfo_showmixedcount_print()
382 seq_printf(m, "%12lu ", count[i]); in pagetypeinfo_showmixedcount_print()
387 print_page_owner(char __user *buf, size_t count, unsigned long pfn, in print_page_owner() argument
396 count = min_t(size_t, count, PAGE_SIZE); in print_page_owner()
397 kbuf = kmalloc(count, GFP_KERNEL); in print_page_owner()
401 ret = snprintf(kbuf, count, in print_page_owner()
407 if (ret >= count) in print_page_owner()
413 ret += snprintf(kbuf + ret, count - ret, in print_page_owner()
[all …]
cma_debug.c
92 static int cma_free_mem(struct cma *cma, int count) in cma_free_mem() argument
96 while (count) { in cma_free_mem()
101 if (mem->n <= count) { in cma_free_mem()
103 count -= mem->n; in cma_free_mem()
106 cma_release(cma, mem->p, count); in cma_free_mem()
107 mem->p += count; in cma_free_mem()
108 mem->n -= count; in cma_free_mem()
109 count = 0; in cma_free_mem()
131 static int cma_alloc_mem(struct cma *cma, int count) in cma_alloc_mem() argument
140 p = cma_alloc(cma, count, 0, GFP_KERNEL); in cma_alloc_mem()
[all …]
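
In the CMA code, count is always a number of pages: cma->count holds the size of the area in pages (cma_get_size() in cma.c converts it to bytes with << PAGE_SHIFT), and the debugfs helpers above pass a page count straight into cma_alloc()/cma_release(). A minimal allocation sketch mirroring the calls shown above; the helper name is hypothetical, and the final cma_alloc() argument is gfp flags here only because that is what this tree's cma_debug.c passes (other kernel versions take a bool no_warn there instead):

#include <linux/cma.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>

/* Hypothetical helper: allocate and release "count" contiguous pages. */
static int cma_count_demo(struct cma *cma, size_t count)
{
        struct page *pages;

        /* count is in pages, not bytes; align = 0 means page alignment only */
        pages = cma_alloc(cma, count, 0, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /* ... use the count contiguous pages starting at "pages" ... */

        if (!cma_release(cma, pages, count))    /* must pass the same count back */
                pr_warn("cma_count_demo: range not part of this CMA area\n");
        return 0;
}
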
swapfile.c
1258 unsigned char count; in __swap_entry_free_locked() local
1261 count = p->swap_map[offset]; in __swap_entry_free_locked()
1263 has_cache = count & SWAP_HAS_CACHE; in __swap_entry_free_locked()
1264 count &= ~SWAP_HAS_CACHE; in __swap_entry_free_locked()
1269 } else if (count == SWAP_MAP_SHMEM) { in __swap_entry_free_locked()
1274 count = 0; in __swap_entry_free_locked()
1275 } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) { in __swap_entry_free_locked()
1276 if (count == COUNT_CONTINUED) { in __swap_entry_free_locked()
1277 if (swap_count_continued(p, offset, count)) in __swap_entry_free_locked()
1278 count = SWAP_MAP_MAX | COUNT_CONTINUED; in __swap_entry_free_locked()
[all …]
vmalloc.c
1807 void vm_unmap_ram(const void *mem, unsigned int count) in vm_unmap_ram() argument
1809 unsigned long size = (unsigned long)count << PAGE_SHIFT; in vm_unmap_ram()
1821 if (likely(count <= VMAP_MAX_ALLOC)) { in vm_unmap_ram()
1849 void *vm_map_ram(struct page **pages, unsigned int count, int node) in vm_map_ram() argument
1851 unsigned long size = (unsigned long)count << PAGE_SHIFT; in vm_map_ram()
1855 if (likely(count <= VMAP_MAX_ALLOC)) { in vm_map_ram()
1874 vm_unmap_ram(mem, count); in vm_map_ram()
2396 void *vmap(struct page **pages, unsigned int count, in vmap() argument
2404 if (count > totalram_pages()) in vmap()
2407 size = (unsigned long)count << PAGE_SHIFT; in vmap()
[all …]
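
In vm_map_ram(), vm_unmap_ram() and vmap() the count argument is a number of pages; the code above converts it to bytes with count << PAGE_SHIFT, and vmap() rejects requests larger than totalram_pages(). A minimal sketch of the vm_map_ram()/vm_unmap_ram() pairing, using the three-argument form shown above (older trees add a pgprot_t parameter); the page allocation and the count of 4 are illustrative only:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#define DEMO_NR_PAGES 4         /* small enough for the fast per-CPU vmap path */

static int map_ram_demo(void)
{
        struct page *pages[DEMO_NR_PAGES];
        void *va;
        int i, ret = -ENOMEM;

        for (i = 0; i < DEMO_NR_PAGES; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto free_pages;
        }

        /* count is in pages; NUMA_NO_NODE means no node preference */
        va = vm_map_ram(pages, DEMO_NR_PAGES, NUMA_NO_NODE);
        if (!va)
                goto free_pages;

        memset(va, 0, DEMO_NR_PAGES * PAGE_SIZE);       /* touch the mapping */
        vm_unmap_ram(va, DEMO_NR_PAGES);                /* same count as the map call */
        ret = 0;

free_pages:
        while (--i >= 0)
                __free_page(pages[i]);
        return ret;
}
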
page_idle.c
120 loff_t pos, size_t count) in page_idle_bitmap_read() argument
127 if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) in page_idle_bitmap_read()
134 end_pfn = pfn + count * BITS_PER_BYTE; in page_idle_bitmap_read()
165 loff_t pos, size_t count) in page_idle_bitmap_write() argument
172 if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) in page_idle_bitmap_write()
179 end_pfn = pfn + count * BITS_PER_BYTE; in page_idle_bitmap_write()
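 
page_idle_bitmap_read()/write() require pos and count to be multiples of BITMAP_CHUNK_SIZE (8 bytes), and each byte covers BITS_PER_BYTE page frames, so one 8-byte chunk describes 64 PFNs. A hedged user-space sketch of reading one chunk, assuming the standard /sys/kernel/mm/page_idle/bitmap interface (CONFIG_IDLE_PAGE_TRACKING, run as root; the example PFN is arbitrary):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Example PFN; must be a multiple of 64 so the byte offset is 8-aligned. */
        unsigned long long start_pfn = 0x100000;
        uint64_t chunk;         /* one bitmap chunk: 8 bytes = 64 page frames */
        int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);

        if (fd < 0)
                return 1;

        /* pos and count must both be multiples of 8 bytes; byte offset = pfn / 8. */
        if (pread(fd, &chunk, sizeof(chunk), start_pfn / 8) == sizeof(chunk)) {
                for (int bit = 0; bit < 64; bit++)
                        if (chunk & (1ULL << bit))      /* bit n <=> pfn start_pfn + n */
                                printf("pfn %llu is idle\n", start_pfn + bit);
        }
        close(fd);
        return 0;
}
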
khugepaged.c
132 const char *buf, size_t count) in scan_sleep_millisecs_store() argument
145 return count; in scan_sleep_millisecs_store()
160 const char *buf, size_t count) in alloc_sleep_millisecs_store() argument
173 return count; in alloc_sleep_millisecs_store()
187 const char *buf, size_t count) in pages_to_scan_store() argument
198 return count; in pages_to_scan_store()
230 const char *buf, size_t count) in khugepaged_defrag_store() argument
232 return single_hugepage_flag_store(kobj, attr, buf, count, in khugepaged_defrag_store()
255 const char *buf, size_t count) in khugepaged_max_ptes_none_store() argument
266 return count; in khugepaged_max_ptes_none_store()
[all …]
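
The *_store() handlers above follow the usual sysfs convention for count: parse the string in buf and, on success, return count so the whole write is reported as consumed (ksm.c and backing-dev.c below do the same). A minimal sketch of that shape; the attribute and variable names are placeholders, not khugepaged's:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static unsigned long demo_pages_to_scan = 4096;

static ssize_t demo_pages_store(struct kobject *kobj, struct kobj_attribute *attr,
                                const char *buf, size_t count)
{
        unsigned long pages;
        int err;

        err = kstrtoul(buf, 10, &pages);
        if (err || !pages)
                return -EINVAL;

        demo_pages_to_scan = pages;
        return count;   /* tell sysfs the whole buffer was consumed */
}

static struct kobj_attribute demo_pages_attr = __ATTR_WO(demo_pages);
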
nommu.c
203 long vread(char *buf, char *addr, unsigned long count) in vread() argument
206 if ((unsigned long) buf + count < count) in vread()
207 count = -(unsigned long) buf; in vread()
209 memcpy(buf, addr, count); in vread()
210 return count; in vread()
213 long vwrite(char *buf, char *addr, unsigned long count) in vwrite() argument
216 if ((unsigned long) addr + count < count) in vwrite()
217 count = -(unsigned long) addr; in vwrite()
219 memcpy(addr, buf, count); in vwrite()
220 return count; in vwrite()
[all …]
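
The (unsigned long)buf + count < count test in the nommu vread()/vwrite() stubs detects pointer wraparound: if buf + count overflows, count is clamped to -(unsigned long)buf, i.e. exactly the bytes left before the top of the address space. A worked 32-bit example:

buf = 0xfffff000, count = 0x2000
buf + count = 0x00001000, which wrapped around and is therefore < count
count = -(unsigned long)buf = 0x00001000, so the memcpy() stops at the 4 GiB boundary
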
early_ioremap.c
89 int count = 0; in check_early_ioremap_leak() local
94 count++; in check_early_ioremap_leak()
96 if (WARN(count, KERN_WARNING in check_early_ioremap_leak()
99 count)) in check_early_ioremap_leak()
highmem.c
216 int count; in map_new_virtual() local
221 count = get_pkmap_entries_count(color); in map_new_virtual()
227 count = get_pkmap_entries_count(color); in map_new_virtual()
231 if (--count) in map_new_virtual()
slab_common.c
883 unsigned int count) in freelist_randomize() argument
888 for (i = 0; i < count; i++) in freelist_randomize()
892 for (i = count - 1; i > 0; i--) { in freelist_randomize()
900 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, in cache_random_seq_create() argument
905 if (count < 2 || cachep->random_seq) in cache_random_seq_create()
908 cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp); in cache_random_seq_create()
915 freelist_randomize(&state, cachep->random_seq, count); in cache_random_seq_create()
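
freelist_randomize() above is a Fisher–Yates shuffle: it fills list[] with the indices 0..count-1 and swaps each element with a randomly chosen earlier one; cache_random_seq_create() kcalloc()s the array and stores the result in cachep->random_seq. A standalone sketch of the same shuffle, with plain random() standing in for the kernel's seeded rnd_state:

#include <stdlib.h>

/* Shuffle the index sequence 0..count-1 in place (Fisher-Yates). */
static void freelist_randomize_demo(unsigned int *list, unsigned int count)
{
        unsigned int i, rand, tmp;

        if (!count)
                return;

        for (i = 0; i < count; i++)
                list[i] = i;

        for (i = count - 1; i > 0; i--) {
                rand = (unsigned int)random() % (i + 1);        /* pick one of 0..i */
                tmp = list[i];
                list[i] = list[rand];
                list[rand] = tmp;
        }
}
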
vmstat.c
811 !__this_cpu_read(p->pcp.count)) in refresh_cpu_vm_stats()
825 if (__this_cpu_read(p->pcp.count)) { in refresh_cpu_vm_stats()
978 unsigned long count = 0; in sum_zone_node_page_state() local
981 count += zone_page_state(zones + i, item); in sum_zone_node_page_state()
983 return count; in sum_zone_node_page_state()
995 unsigned long count = 0; in sum_zone_numa_state() local
998 count += zone_numa_state_snapshot(zones + i, item); in sum_zone_numa_state()
1000 return count; in sum_zone_numa_state()
1503 unsigned long count[MIGRATE_TYPES] = { 0, }; in pagetypeinfo_showblockcount_print() local
1518 count[mtype]++; in pagetypeinfo_showblockcount_print()
[all …]
ksm.c
2848 const char *buf, size_t count) in sleep_millisecs_store() argument
2860 return count; in sleep_millisecs_store()
2872 const char *buf, size_t count) in pages_to_scan_store() argument
2883 return count; in pages_to_scan_store()
2894 const char *buf, size_t count) in run_store() argument
2922 count = err; in run_store()
2931 return count; in run_store()
2944 const char *buf, size_t count) in merge_across_nodes_store() argument
2988 return err ? err : count; in merge_across_nodes_store()
3000 const char *buf, size_t count) in use_zero_pages_store() argument
[all …]
cma.h
16 unsigned long count; member
42 return cma->count >> cma->order_per_bit; in cma_bitmap_maxno()
slub.c
1688 unsigned int count = oo_objects(s->oo); in init_cache_random_seq() local
1695 err = cache_random_seq_create(s, count, GFP_KERNEL); in init_cache_random_seq()
1706 for (i = 0; i < count; i++) in init_cache_random_seq()
4627 unsigned long count = 0; in validate_slab_node() local
4635 count++; in validate_slab_node()
4637 if (count != n->nr_partial) in validate_slab_node()
4639 s->name, count, n->nr_partial); in validate_slab_node()
4646 count++; in validate_slab_node()
4648 if (count != atomic_long_read(&n->nr_slabs)) in validate_slab_node()
4650 s->name, count, atomic_long_read(&n->nr_slabs)); in validate_slab_node()
[all …]
kmemleak.c
154 int count; member
309 return object->count != KMEMLEAK_BLACK && in color_white()
310 object->count < object->min_count; in color_white()
316 object->count >= object->min_count; in color_gray()
367 pr_notice(" count = %d\n", object->count); in dump_object_info()
596 object->count = 0; /* white color initially */ in create_object()
1202 object->count++; in update_refs()
1432 object->count = 0; in kmemleak_scan()
1508 object->count = object->min_count; in kmemleak_scan()
hugetlb.c
104 bool free = (spool->count == 0) && (spool->used_hpages == 0); in unlock_or_release_subpool()
129 spool->count = 1; in hugepage_new_subpool()
146 BUG_ON(!spool->count); in hugepage_put_subpool()
147 spool->count--; in hugepage_put_subpool()
2590 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2602 if (count >= h->nr_huge_pages) in try_to_free_low()
2614 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2653 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, in set_max_huge_pages() argument
2678 unsigned long old_count = count; in set_max_huge_pages()
2680 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; in set_max_huge_pages()
[all …]
mlock.c
659 unsigned long count = 0; in count_mm_mlocked_page_nr() local
675 count -= (start - vma->vm_start); in count_mm_mlocked_page_nr()
677 count += start + len - vma->vm_start; in count_mm_mlocked_page_nr()
680 count += vma->vm_end - vma->vm_start; in count_mm_mlocked_page_nr()
684 return count >> PAGE_SHIFT; in count_mm_mlocked_page_nr()
readahead.c
623 ssize_t ksys_readahead(int fd, loff_t offset, size_t count) in ksys_readahead() argument
644 ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED); in ksys_readahead()
650 SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count) in SYSCALL_DEFINE3() argument
652 return ksys_readahead(fd, offset, count); in SYSCALL_DEFINE3()
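
ksys_readahead() treats count as a byte length and forwards it to vfs_fadvise(..., POSIX_FADV_WILLNEED). A user-space usage sketch of the readahead(2) syscall (the file name and the 16 MiB figure are arbitrary):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/var/lib/data.bin", O_RDONLY);

        if (fd < 0)
                return 1;

        /* Ask the kernel to start reading the first 16 MiB into the page cache. */
        if (readahead(fd, 0, 16 * 1024 * 1024) < 0)
                return 1;

        /* ... subsequent reads of that range should mostly hit the cache ... */
        close(fd);
        return 0;
}
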
workingset.c
445 if (node->count && node->count == node->nr_values) { in workingset_update_node()
558 if (WARN_ON_ONCE(node->count != node->nr_values)) in shadow_lru_isolate()
backing-dev.c
134 const char *buf, size_t count) in read_ahead_kb_store() argument
146 return count; in read_ahead_kb_store()
164 struct device_attribute *attr, const char *buf, size_t count) in min_ratio_store() argument
176 ret = count; in min_ratio_store()
183 struct device_attribute *attr, const char *buf, size_t count) in max_ratio_store() argument
195 ret = count; in max_ratio_store()
page_alloc.c
1435 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1449 count = min(pcp->count, count); in free_pcppages_bulk()
1450 while (count) { in free_pcppages_bulk()
1469 batch_free = count; in free_pcppages_bulk()
1475 pcp->count--; in free_pcppages_bulk()
1493 } while (--count && --batch_free && !list_empty(list)); in free_pcppages_bulk()
3000 unsigned long count, struct list_head *list, in rmqueue_bulk() argument
3006 for (i = 0; i < count; ++i) { in rmqueue_bulk()
3065 pcp->count += rmqueue_bulk(zone, order, in get_populated_pcp_list()
3091 to_drain = min(pcp->count, batch); in drain_zone_pages()
[all …]
/mm/damon/
dbgfs.c
26 static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos) in user_input_str() argument
35 kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN); in user_input_str()
39 ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count); in user_input_str()
40 if (ret != count) { in user_input_str()
50 char __user *buf, size_t count, loff_t *ppos) in dbgfs_attrs_read() argument
63 return simple_read_from_buffer(buf, count, ppos, kbuf, ret); in dbgfs_attrs_read()
67 const char __user *buf, size_t count, loff_t *ppos) in dbgfs_attrs_write() argument
74 kbuf = user_input_str(buf, count, ppos); in dbgfs_attrs_write()
92 ret = count; in dbgfs_attrs_write()
132 size_t count, loff_t *ppos) in dbgfs_schemes_read() argument
[all …]
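
DAMON's user_input_str() above is the debugfs write-side counterpart of the read pattern: it allocates count + 1 bytes, copies the user buffer with simple_write_to_buffer(), and NUL-terminates it so callers such as dbgfs_attrs_write() can parse the string and finally return count. A reconstruction of that shape from the lines shown above (the error returns and the lines not shown are filled in from the usual pattern, not copied from the file):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
{
        char *kbuf;
        ssize_t ret;

        /* +1 leaves room for the terminating NUL */
        kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
        if (!kbuf)
                return ERR_PTR(-ENOMEM);

        ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
        if (ret != count) {             /* short or failed copy from user space */
                kfree(kbuf);
                return ERR_PTR(-EIO);
        }
        kbuf[ret] = '\0';

        return kbuf;
}
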
