/mm/
cma.c
    57  return cma->count << PAGE_SHIFT;  in cma_get_size()
    92  unsigned long count)  in cma_clear_bitmap() argument
    98  bitmap_count = cma_bitmap_pages_to_bits(cma, count);  in cma_clear_bitmap()
    121  for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {  in cma_activate_area()
    127  for (pfn = base_pfn; pfn < base_pfn + cma->count;  in cma_activate_area()
    144  for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)  in cma_activate_area()
    146  totalcma_pages -= cma->count;  in cma_activate_area()
    147  cma->count = 0;  in cma_activate_area()
    215  cma->count = size >> PAGE_SHIFT;  in cma_init_reserved_mem()
    419  pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);  in cma_debug_show_areas()
    [all …]
|
page_pinner.c
    23  atomic_t count;  member
    41  int count;  member
    136  record->count = page_count(page);  in capture_page_state()
    182  atomic_set(&page_pinner->count, 0);  in __free_page_pinner()
    191  print_page_pinner(char __user *buf, size_t count, struct captured_pinner *record)  in print_page_pinner() argument
    198  count = min_t(size_t, count, PAGE_SIZE);  in print_page_pinner()
    199  kbuf = kmalloc(count, GFP_KERNEL);  in print_page_pinner()
    204  ret = snprintf(kbuf, count, "At least, pinned for %llu us\n",  in print_page_pinner()
    210  ret = snprintf(kbuf, count,  in print_page_pinner()
    217  if (ret >= count)  in print_page_pinner()
    [all …]
|
maccess.c
    81  long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)  in strncpy_from_kernel_nofault() argument
    85  if (unlikely(count <= 0))  in strncpy_from_kernel_nofault()
    87  if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))  in strncpy_from_kernel_nofault()
    95  } while (dst[-1] && src - unsafe_addr < count);  in strncpy_from_kernel_nofault()
    186  long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)  in strncpy_from_kernel_nofault() argument
    192  if (unlikely(count <= 0))  in strncpy_from_kernel_nofault()
    194  if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))  in strncpy_from_kernel_nofault()
    202  } while (dst[-1] && ret == 0 && src - unsafe_addr < count);  in strncpy_from_kernel_nofault()
    285  long count)  in strncpy_from_user_nofault() argument
    290  if (unlikely(count <= 0))  in strncpy_from_user_nofault()
    [all …]
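These hits all come from one bounded-copy loop: copy bytes until the NUL lands in dst[-1] or count bytes are consumed. A minimal userspace sketch of that loop shape (bounded_strcpy() is a hypothetical name; the kernel versions additionally fault-guard every byte access, which is elided here):

```c
#include <stdio.h>

/*
 * Copy at most `count` bytes, stopping after the NUL terminator, and
 * report how many bytes were consumed. Mirrors the
 * `dst[-1] && src - start < count` termination test above.
 */
static long bounded_strcpy(char *dst, const char *src, long count)
{
	const char *start = src;

	if (count <= 0)
		return -1;	/* the kernel helper returns -ERANGE here */

	do {
		*dst++ = *src++;
	} while (dst[-1] && src - start < count);

	dst[-1] = '\0';		/* force termination on truncation */
	return src - start;	/* bytes consumed, like the real helper */
}

int main(void)
{
	char buf[8];
	long n = bounded_strcpy(buf, "hello, world", sizeof(buf));

	printf("%ld bytes -> \"%s\"\n", n, buf);
	return 0;
}
```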
|
page_owner.c
    291  unsigned long count[MIGRATE_TYPES] = { 0, };  in pagetypeinfo_showmixedcount_print() local
    345  count[MIGRATE_MOVABLE]++;  in pagetypeinfo_showmixedcount_print()
    347  count[pageblock_mt]++;  in pagetypeinfo_showmixedcount_print()
    362  seq_printf(m, "%12lu ", count[i]);  in pagetypeinfo_showmixedcount_print()
    367  print_page_owner(char __user *buf, size_t count, unsigned long pfn,  in print_page_owner() argument
    376  count = min_t(size_t, count, PAGE_SIZE);  in print_page_owner()
    377  kbuf = kmalloc(count, GFP_KERNEL);  in print_page_owner()
    381  ret = snprintf(kbuf, count,  in print_page_owner()
    387  if (ret >= count)  in print_page_owner()
    393  ret += snprintf(kbuf + ret, count - ret,  in print_page_owner()
    [all …]
|
cma_debug.c
    92  static int cma_free_mem(struct cma *cma, int count)  in cma_free_mem() argument
    96  while (count) {  in cma_free_mem()
    101  if (mem->n <= count) {  in cma_free_mem()
    103  count -= mem->n;  in cma_free_mem()
    106  cma_release(cma, mem->p, count);  in cma_free_mem()
    107  mem->p += count;  in cma_free_mem()
    108  mem->n -= count;  in cma_free_mem()
    109  count = 0;  in cma_free_mem()
    131  static int cma_alloc_mem(struct cma *cma, int count)  in cma_alloc_mem() argument
    140  p = cma_alloc(cma, count, 0, false);  in cma_alloc_mem()
    [all …]
|
swapfile.c
    1203  unsigned char count;  in __swap_entry_free_locked() local
    1206  count = p->swap_map[offset];  in __swap_entry_free_locked()
    1208  has_cache = count & SWAP_HAS_CACHE;  in __swap_entry_free_locked()
    1209  count &= ~SWAP_HAS_CACHE;  in __swap_entry_free_locked()
    1214  } else if (count == SWAP_MAP_SHMEM) {  in __swap_entry_free_locked()
    1219  count = 0;  in __swap_entry_free_locked()
    1220  } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {  in __swap_entry_free_locked()
    1221  if (count == COUNT_CONTINUED) {  in __swap_entry_free_locked()
    1222  if (swap_count_continued(p, offset, count))  in __swap_entry_free_locked()
    1223  count = SWAP_MAP_MAX | COUNT_CONTINUED;  in __swap_entry_free_locked()
    [all …]
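Here __swap_entry_free_locked() unpacks a one-byte swap_map entry that encodes a reference count plus flag bits. A userspace sketch of that decoding, assuming the flag values defined in include/linux/swap.h:

```c
#include <stdio.h>

/* Flag encodings as defined in include/linux/swap.h. */
#define SWAP_HAS_CACHE	0x40	/* entry also lives in the swap cache */
#define COUNT_CONTINUED	0x80	/* count overflows into a continuation page */
#define SWAP_MAP_MAX	0x3e	/* largest count storable in one byte */
#define SWAP_MAP_SHMEM	0xbf	/* entry owned by shmem/tmpfs */

/* Decode one swap_map byte the way __swap_entry_free_locked() does. */
static void decode_swap_map(unsigned char entry)
{
	unsigned char count = entry;
	int has_cache = count & SWAP_HAS_CACHE;

	count &= ~SWAP_HAS_CACHE;

	if (count == SWAP_MAP_SHMEM) {
		printf("0x%02x: shmem-owned entry\n", entry);
		return;
	}
	printf("0x%02x: count=%d%s%s\n", entry,
	       count & ~COUNT_CONTINUED,
	       has_cache ? ", in swap cache" : "",
	       (count & COUNT_CONTINUED) ? ", continued" : "");
}

int main(void)
{
	unsigned char samples[] = { 0x01, 0x41, 0x3e, 0x81, 0xbf };

	for (unsigned int i = 0; i < sizeof(samples); i++)
		decode_swap_map(samples[i]);
	return 0;
}
```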
|
page_idle.c
    116  loff_t pos, size_t count)  in page_idle_bitmap_read() argument
    123  if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)  in page_idle_bitmap_read()
    130  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_read()
    161  loff_t pos, size_t count)  in page_idle_bitmap_write() argument
    168  if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)  in page_idle_bitmap_write()
    175  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_write()
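Both handlers reject positions and lengths that are not multiples of the 8-byte BITMAP_CHUNK_SIZE, and convert bytes to PFNs with count * BITS_PER_BYTE. A sketch of a conforming reader for /sys/kernel/mm/page_idle/bitmap (interface per Documentation/admin-guide/mm/idle_page_tracking.rst; needs root and CONFIG_IDLE_PAGE_TRACKING):

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define CHUNK_SIZE	8			/* BITMAP_CHUNK_SIZE */
#define PFNS_PER_CHUNK	(CHUNK_SIZE * 8)	/* count * BITS_PER_BYTE */

int main(void)
{
	uint64_t chunk;
	unsigned long pfn = 0;
	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Read the first few 8-byte chunks; each covers 64 PFNs. */
	for (int i = 0; i < 4; i++, pfn += PFNS_PER_CHUNK) {
		if (pread(fd, &chunk, sizeof(chunk), i * CHUNK_SIZE)
		    != sizeof(chunk))
			break;
		printf("PFNs %lu-%lu: %d idle\n", pfn,
		       pfn + PFNS_PER_CHUNK - 1,
		       __builtin_popcountll(chunk));
	}
	close(fd);
	return 0;
}
```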
|
vmalloc.c
    2150  void vm_unmap_ram(const void *mem, unsigned int count)  in vm_unmap_ram() argument
    2152  unsigned long size = (unsigned long)count << PAGE_SHIFT;  in vm_unmap_ram()
    2164  if (likely(count <= VMAP_MAX_ALLOC)) {  in vm_unmap_ram()
    2192  void *vm_map_ram(struct page **pages, unsigned int count, int node)  in vm_map_ram() argument
    2194  unsigned long size = (unsigned long)count << PAGE_SHIFT;  in vm_map_ram()
    2198  if (likely(count <= VMAP_MAX_ALLOC)) {  in vm_map_ram()
    2216  vm_unmap_ram(mem, count);  in vm_map_ram()
    2768  void *vmap(struct page **pages, unsigned int count,  in vmap() argument
    2777  if (count > totalram_pages())  in vmap()
    2780  size = (unsigned long)count << PAGE_SHIFT;  in vmap()
    [all …]
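Note that vm_unmap_ram() must be given the same count as the matching vm_map_ram() call, since counts up to VMAP_MAX_ALLOC take a separate per-CPU vmap-block fast path. A kernel-side sketch of the pairing; map_scratch_pages() is an invented helper, not kernel API:

```c
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Map `count` freshly allocated pages, zero them, and tear down. */
static int map_scratch_pages(unsigned int count)
{
	struct page *pages[8];
	void *mem = NULL;
	unsigned int i;

	if (!count || count > ARRAY_SIZE(pages))
		return -EINVAL;

	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto free_pages;
	}

	mem = vm_map_ram(pages, count, NUMA_NO_NODE);
	if (mem) {
		memset(mem, 0, (unsigned long)count << PAGE_SHIFT);
		vm_unmap_ram(mem, count);	/* count must match the map call */
	}

free_pages:
	while (i--)
		__free_page(pages[i]);
	return mem ? 0 : -ENOMEM;
}
```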
|
khugepaged.c
    134  const char *buf, size_t count)  in scan_sleep_millisecs_store() argument
    147  return count;  in scan_sleep_millisecs_store()
    162  const char *buf, size_t count)  in alloc_sleep_millisecs_store() argument
    175  return count;  in alloc_sleep_millisecs_store()
    189  const char *buf, size_t count)  in pages_to_scan_store() argument
    200  return count;  in pages_to_scan_store()
    232  const char *buf, size_t count)  in khugepaged_defrag_store() argument
    234  return single_hugepage_flag_store(kobj, attr, buf, count,  in khugepaged_defrag_store()
    257  const char *buf, size_t count)  in khugepaged_max_ptes_none_store() argument
    268  return count;  in khugepaged_max_ptes_none_store()
    [all …]
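Every one of these stores follows the usual sysfs convention, also visible in ksm.c and backing-dev.c below: parse buf, and on success return count so the VFS reports the whole write as consumed. A sketch of the pattern; the attribute name and backing variable are invented for illustration:

```c
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static unsigned long example_millisecs;	/* hypothetical tunable */

static ssize_t example_millisecs_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err)
		return err;	/* negative errno aborts the write */

	example_millisecs = msecs;
	return count;		/* success: all bytes consumed */
}
```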
|
early_ioremap.c
    84  int count = 0;  in check_early_ioremap_leak() local
    89  count++;  in check_early_ioremap_leak()
    91  if (WARN(count, KERN_WARNING  in check_early_ioremap_leak()
    94  count)  in check_early_ioremap_leak()
|
vmstat.c
    859  !__this_cpu_read(pcp->count))  in refresh_cpu_vm_stats()
    873  if (__this_cpu_read(pcp->count)) {  in refresh_cpu_vm_stats()
    999  unsigned long count = 0;  in sum_zone_node_page_state() local
    1002  count += zone_page_state(zones + i, item);  in sum_zone_node_page_state()
    1004  return count;  in sum_zone_node_page_state()
    1012  unsigned long count = 0;  in sum_zone_numa_event_state() local
    1016  count += zone_numa_event_state(zones + i, item);  in sum_zone_numa_event_state()
    1018  return count;  in sum_zone_numa_event_state()
    1550  unsigned long count[MIGRATE_TYPES] = { 0, };  in pagetypeinfo_showblockcount_print() local
    1565  count[mtype]++;  in pagetypeinfo_showblockcount_print()
    [all …]
|
ksm.c
    2839  const char *buf, size_t count)  in sleep_millisecs_store() argument
    2851  return count;  in sleep_millisecs_store()
    2863  const char *buf, size_t count)  in pages_to_scan_store() argument
    2874  return count;  in pages_to_scan_store()
    2885  const char *buf, size_t count)  in run_store() argument
    2913  count = err;  in run_store()
    2922  return count;  in run_store()
    2935  const char *buf, size_t count)  in merge_across_nodes_store() argument
    2979  return err ? err : count;  in merge_across_nodes_store()
    2991  const char *buf, size_t count)  in use_zero_pages_store() argument
    [all …]
|
cma.h
    15  unsigned long count;  member
    40  return cma->count >> cma->order_per_bit;  in cma_bitmap_maxno()
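Together with cma_bitmap_pages_to_bits() in cma.c above, this defines the CMA bitmap granularity: one bit tracks 2^order_per_bit pages, so the bitmap holds count >> order_per_bit bits and allocations are rounded up to whole bits. A standalone sketch of the arithmetic (simplified userspace analogues of the kernel helpers, not the real structs):

```c
#include <stdio.h>

struct cma {
	unsigned long count;		/* area size in pages */
	unsigned int order_per_bit;	/* pages per bitmap bit, as a power of 2 */
};

static unsigned long cma_bitmap_maxno(const struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	unsigned long per_bit = 1UL << cma->order_per_bit;

	/* round up to whole bitmap bits */
	return (pages + per_bit - 1) >> cma->order_per_bit;
}

int main(void)
{
	struct cma cma = { .count = 1024, .order_per_bit = 2 };

	printf("bitmap bits: %lu\n", cma_bitmap_maxno(&cma));	/* 256 */
	printf("bits for 7 pages: %lu\n",
	       cma_bitmap_pages_to_bits(&cma, 7));		/* 2 */
	return 0;
}
```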
|
slab_common.c
    1004  unsigned int count)  in freelist_randomize() argument
    1009  for (i = 0; i < count; i++)  in freelist_randomize()
    1013  for (i = count - 1; i > 0; i--) {  in freelist_randomize()
    1021  int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,  in cache_random_seq_create() argument
    1026  if (count < 2 || cachep->random_seq)  in cache_random_seq_create()
    1029  cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);  in cache_random_seq_create()
    1036  freelist_randomize(&state, cachep->random_seq, count);  in cache_random_seq_create()
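freelist_randomize() is a Fisher-Yates shuffle over the identity permutation 0..count-1 (hence the count < 2 early-out: there is nothing to shuffle). A userspace sketch, with rand() standing in for the kernel's seeded rnd_state:

```c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Fill list with 0..count-1, then Fisher-Yates shuffle it in place. */
static void freelist_randomize(unsigned int *list, unsigned int count)
{
	unsigned int i, rand_idx, tmp;

	for (i = 0; i < count; i++)
		list[i] = i;

	for (i = count - 1; i > 0; i--) {
		rand_idx = rand() % (i + 1);	/* pick from unshuffled prefix */
		tmp = list[i];
		list[i] = list[rand_idx];
		list[rand_idx] = tmp;
	}
}

int main(void)
{
	unsigned int seq[16];

	srand((unsigned int)time(NULL));
	freelist_randomize(seq, 16);
	for (unsigned int i = 0; i < 16; i++)
		printf("%u ", seq[i]);
	printf("\n");
	return 0;
}
```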
|
slub.c
    1806  unsigned int count = oo_objects(s->oo);  in init_cache_random_seq() local
    1813  err = cache_random_seq_create(s, count, GFP_KERNEL);  in init_cache_random_seq()
    1824  for (i = 0; i < count; i++)  in init_cache_random_seq()
    5031  unsigned long count = 0;  in validate_slab_node() local
    5039  count++;  in validate_slab_node()
    5041  if (count != n->nr_partial) {  in validate_slab_node()
    5043  s->name, count, n->nr_partial);  in validate_slab_node()
    5052  count++;  in validate_slab_node()
    5054  if (count != atomic_long_read(&n->nr_slabs)) {  in validate_slab_node()
    5056  s->name, count, atomic_long_read(&n->nr_slabs));  in validate_slab_node()
    [all …]
|
nommu.c
    203  long vread(char *buf, char *addr, unsigned long count)  in vread() argument
    206  if ((unsigned long) buf + count < count)  in vread()
    207  count = -(unsigned long) buf;  in vread()
    209  memcpy(buf, addr, count);  in vread()
    210  return count;  in vread()
    316  void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)  in vmap() argument
    329  void *vm_map_ram(struct page **pages, unsigned int count, int node)  in vm_map_ram() argument
    336  void vm_unmap_ram(const void *mem, unsigned int count)  in vm_unmap_ram() argument
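The hit at line 206 is an overflow clamp: if buf + count wraps past the top of the address space, the unsigned sum comes out smaller than count, and -(unsigned long)buf is exactly the number of bytes left before the wrap point. A small demonstration:

```c
#include <stdio.h>

int main(void)
{
	/* pretend the destination sits 11 bytes below the top of memory */
	unsigned long buf = ~0UL - 10;
	unsigned long count = 100;

	if (buf + count < count) {	/* sum wrapped, so it is < count */
		count = -buf;		/* bytes remaining before the wrap */
		printf("clamped count to %lu\n", count);	/* 11 */
	}
	return 0;
}
```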
|
kmemleak.c
    155  int count;  member
    310  return object->count != KMEMLEAK_BLACK &&  in color_white()
    311  object->count < object->min_count;  in color_white()
    317  object->count >= object->min_count;  in color_gray()
    368  pr_notice(" count = %d\n", object->count);  in dump_object_info()
    603  object->count = 0;	/* white color initially */  in create_object()
    1210  object->count++;  in update_refs()
    1440  object->count = 0;  in kmemleak_scan()
    1516  object->count = object->min_count;  in kmemleak_scan()
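Here count implements kmemleak's coloring: each scan resets it to 0 (line 1440), update_refs() increments it once per referencing pointer found, and objects that end below min_count stay "white", i.e. leak candidates, while "gray" objects are reachable and get scanned themselves. A userspace sketch of the predicates; the KMEMLEAK_BLACK value and the min_count check in color_gray() are recalled from mm/kmemleak.c rather than shown in the hits:

```c
#include <stdbool.h>
#include <stdio.h>

#define KMEMLEAK_BLACK	-1	/* excluded from reporting */

struct kmemleak_object {
	int count;	/* references found during the current scan */
	int min_count;	/* references required to be considered reachable */
};

static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
	       object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
	       object->count >= object->min_count;
}

int main(void)
{
	struct kmemleak_object obj = { .count = 0, .min_count = 1 };

	printf("before scan: %s\n", color_white(&obj) ? "white" : "gray");
	obj.count++;	/* update_refs(): a pointer to obj was found */
	printf("after scan:  %s\n", color_gray(&obj) ? "gray" : "white");
	return 0;
}
```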
|
mlock.c
    619  unsigned long count = 0;  in count_mm_mlocked_page_nr() local
    635  count -= (start - vma->vm_start);  in count_mm_mlocked_page_nr()
    637  count += start + len - vma->vm_start;  in count_mm_mlocked_page_nr()
    640  count += vma->vm_end - vma->vm_start;  in count_mm_mlocked_page_nr()
    644  return count >> PAGE_SHIFT;  in count_mm_mlocked_page_nr()
|
hugetlb.c
    90  if (spool->count)  in subpool_is_free()
    126  spool->count = 1;  in hugepage_new_subpool()
    145  BUG_ON(!spool->count);  in hugepage_put_subpool()
    146  spool->count--;  in hugepage_put_subpool()
    3017  static void try_to_free_low(struct hstate *h, unsigned long count,  in try_to_free_low() argument
    3034  if (count >= h->nr_huge_pages)  in try_to_free_low()
    3049  static inline void try_to_free_low(struct hstate *h, unsigned long count,  in try_to_free_low() argument
    3089  static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,  in set_max_huge_pages() argument
    3122  unsigned long old_count = count;  in set_max_huge_pages()
    3124  count += h->nr_huge_pages - h->nr_huge_pages_node[nid];  in set_max_huge_pages()
    [all …]
|
page_alloc.c
    1572  static void free_pcppages_bulk(struct zone *zone, int count,  in free_pcppages_bulk() argument
    1588  count = min(pcp->count, count);  in free_pcppages_bulk()
    1589  while (count > 0) {  in free_pcppages_bulk()
    1608  batch_free = count;  in free_pcppages_bulk()
    1617  count -= 1 << order;  in free_pcppages_bulk()
    1641  } while (count > 0 && --batch_free && !list_empty(list));  in free_pcppages_bulk()
    1643  pcp->count -= nr_freed;  in free_pcppages_bulk()
    3224  unsigned long count, struct list_head *list,  in rmqueue_bulk() argument
    3231  for (i = 0; i < count; ++i) {  in rmqueue_bulk()
    3305  pcp->count += alloced << order;  in get_populated_pcp_list()
    [all …]
|
highmem.c
    208  int count;  in map_new_virtual() local
    213  count = get_pkmap_entries_count(color);  in map_new_virtual()
    219  count = get_pkmap_entries_count(color);  in map_new_virtual()
    223  if (--count)  in map_new_virtual()
|
truncate.c
    475  unsigned long count = 0;  in __invalidate_mapping_pages() local
    487  count += invalidate_exceptional_entry(mapping,  in __invalidate_mapping_pages()
    506  count += ret;  in __invalidate_mapping_pages()
    513  return count;  in __invalidate_mapping_pages()
|
readahead.c
    628  ssize_t ksys_readahead(int fd, loff_t offset, size_t count)  in ksys_readahead() argument
    649  ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);  in ksys_readahead()
    655  SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)  in SYSCALL_DEFINE3() argument
    657  return ksys_readahead(fd, offset, count);  in SYSCALL_DEFINE3()
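As the hits show, readahead(2) is implemented as vfs_fadvise(..., POSIX_FADV_WILLNEED): it populates the page cache without copying data to the caller. A userspace usage example via the glibc wrapper:

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ask the kernel to pull the first 1 MiB into the page cache */
	if (readahead(fd, 0, 1 << 20) != 0)
		perror("readahead");
	return 0;
}
```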
|
backing-dev.c
    135  const char *buf, size_t count)  in read_ahead_kb_store() argument
    147  return count;  in read_ahead_kb_store()
    163  struct device_attribute *attr, const char *buf, size_t count)  in min_ratio_store() argument
    175  ret = count;  in min_ratio_store()
    182  struct device_attribute *attr, const char *buf, size_t count)  in max_ratio_store() argument
    194  ret = count;  in max_ratio_store()
|
/mm/damon/ |
dbgfs.c
    26  static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)  in user_input_str() argument
    35  kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);  in user_input_str()
    39  ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);  in user_input_str()
    40  if (ret != count) {  in user_input_str()
    50  char __user *buf, size_t count, loff_t *ppos)  in dbgfs_attrs_read() argument
    63  return simple_read_from_buffer(buf, count, ppos, kbuf, ret);  in dbgfs_attrs_read()
    67  const char __user *buf, size_t count, loff_t *ppos)  in dbgfs_attrs_write() argument
    74  kbuf = user_input_str(buf, count, ppos);  in dbgfs_attrs_write()
    92  ret = count;  in dbgfs_attrs_write()
    132  size_t count, loff_t *ppos)  in dbgfs_schemes_read() argument
    [all …]
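user_input_str() is the standard debugfs write idiom: copy the user buffer into a freshly allocated, NUL-terminated kernel string via simple_write_to_buffer(). A kernel-side sketch assembled from the hits above; it is a simplified reconstruction, not the verbatim mm/damon/dbgfs.c source:

```c
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static char *user_input_str(const char __user *buf, size_t count,
			    loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;

	/* +1 leaves room for the terminating NUL */
	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return ERR_PTR(-ENOMEM);

	ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
	if (ret != count) {		/* short or failed copy */
		kfree(kbuf);
		return ERR_PTR(-EIO);
	}
	kbuf[ret] = '\0';

	return kbuf;
}
```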
|