Occurrences of the identifier "count" in files under mm/. Each hit shows the source line number, the code on that line, and the enclosing function; "(argument)", "(local)", and "(member)" mark lines where count is declared rather than merely used. Entries ending in "[all …]" were truncated by the search tool.
mm/cma.c
    53  return cma->count << PAGE_SHIFT;  in cma_get_size()
    82  unsigned int count)  in cma_clear_bitmap()  (argument)
    87  bitmap_count = cma_bitmap_pages_to_bits(cma, count);  in cma_clear_bitmap()
    98  unsigned i = cma->count >> pageblock_order;  in cma_activate_area()
   104  cma->count = 0;  in cma_activate_area()
   140  cma->count = 0;  in cma_activate_area()
   201  cma->count = size >> PAGE_SHIFT;  in cma_init_reserved_mem()
   379  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)  in cma_alloc()  (argument)
   388  if (!cma || !cma->count)  in cma_alloc()
   392  count, align);  in cma_alloc()
  [all …]
mm/page_owner.c
    99  print_page_owner(char __user *buf, size_t count, unsigned long pfn,  in print_page_owner()  (argument)
   110  kbuf = kmalloc(count, GFP_KERNEL);  in print_page_owner()
   114  ret = snprintf(kbuf, count,  in print_page_owner()
   118  if (ret >= count)  in print_page_owner()
   124  ret += snprintf(kbuf + ret, count - ret,  in print_page_owner()
   143  if (ret >= count)  in print_page_owner()
   146  ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);  in print_page_owner()
   147  if (ret >= count)  in print_page_owner()
   150  ret += snprintf(kbuf + ret, count - ret, "\n");  in print_page_owner()
   151  if (ret >= count)  in print_page_owner()
  [all …]
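The print_page_owner() hits are one long instance of the bounded-append idiom: every snprintf() writes at kbuf + ret with count - ret bytes of room, and ret >= count after any step means the buffer filled up and the output was truncated. A minimal userspace sketch of the same idiom (the report text here is made up, not what page_owner actually prints):

    #include <stdio.h>

    /* Append pieces into a fixed-size buffer the way print_page_owner()
     * does: each snprintf() gets only the space that is left, and
     * ret >= count signals that the output no longer fits. */
    static int build_report(char *buf, size_t count)
    {
        int ret;

        ret = snprintf(buf, count, "Page allocated via order %u\n", 2u);
        if (ret >= (int)count)
            return -1;                        /* truncated */

        ret += snprintf(buf + ret, count - ret, "PFN %lu\n", 123456UL);
        if (ret >= (int)count)
            return -1;

        ret += snprintf(buf + ret, count - ret, "\n");
        if (ret >= (int)count)
            return -1;

        return ret;                           /* total length written */
    }

    int main(void)
    {
        char buf[128];

        if (build_report(buf, sizeof(buf)) >= 0)
            fputs(buf, stdout);
        return 0;
    }

The kernel code jumps to an error label instead of returning -1, but the shape is the same: checking ret >= count after every append is what keeps kbuf + ret and count - ret in bounds.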
mm/page_counter.c
    24  new = atomic_long_sub_return(nr_pages, &counter->count);  in page_counter_cancel()
    43  new = atomic_long_add_return(nr_pages, &c->count);  in page_counter_charge()
    84  new = atomic_long_add_return(nr_pages, &c->count);  in page_counter_try_charge()
    86  atomic_long_sub(nr_pages, &c->count);  in page_counter_try_charge()
   138  long count;  in page_counter_limit()  (local)
   151  count = atomic_long_read(&counter->count);  in page_counter_limit()
   153  if (count > limit)  in page_counter_limit()
   158  if (atomic_long_read(&counter->count) <= count)  in page_counter_limit()
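The page_counter_try_charge() lines show the lockless charge pattern: add first with atomic_long_add_return(), and if the new total overshoots the limit, back the charge out with atomic_long_sub() and fail. A minimal userspace sketch with C11 atomics (struct counter and try_charge are stand-ins, not the kernel types; atomic_fetch_add returns the old value, so the new total is old + nr_pages):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for struct page_counter: a running count and a limit. */
    struct counter {
        atomic_long count;
        long limit;
    };

    /* Charge optimistically, then roll back if the limit was overrun --
     * the add_return/sub pair visible in page_counter_try_charge(). */
    static bool try_charge(struct counter *c, long nr_pages)
    {
        long new = atomic_fetch_add(&c->count, nr_pages) + nr_pages;

        if (new > c->limit) {
            atomic_fetch_sub(&c->count, nr_pages);  /* undo the charge */
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct counter c = { .count = 0, .limit = 100 };

        printf("charge 80 -> %d\n", try_charge(&c, 80));   /* 1: fits   */
        printf("charge 30 -> %d\n", try_charge(&c, 30));   /* 0: undone */
        printf("count = %ld\n", atomic_load(&c.count));    /* 80        */
        return 0;
    }

The cost of this scheme is a transient overshoot that other CPUs can observe between the add and the rollback; the counter trades that for never taking a lock on the charge path.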
mm/cma_debug.c
    93  static int cma_free_mem(struct cma *cma, int count)  in cma_free_mem()  (argument)
    97  while (count) {  in cma_free_mem()
   102  if (mem->n <= count) {  in cma_free_mem()
   104  count -= mem->n;  in cma_free_mem()
   107  cma_release(cma, mem->p, count);  in cma_free_mem()
   108  mem->p += count;  in cma_free_mem()
   109  mem->n -= count;  in cma_free_mem()
   110  count = 0;  in cma_free_mem()
   132  static int cma_alloc_mem(struct cma *cma, int count)  in cma_alloc_mem()  (argument)
   141  p = cma_alloc(cma, count, 0);  in cma_alloc_mem()
  [all …]
mm/maccess.c
   160  long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)  in strncpy_from_unsafe()  (argument)
   166  if (unlikely(count <= 0))  in strncpy_from_unsafe()
   175  } while (dst[-1] && ret == 0 && src - unsafe_addr < count);  in strncpy_from_unsafe()
   203  long count)  in strncpy_from_unsafe_user()  (argument)
   208  if (unlikely(count <= 0))  in strncpy_from_unsafe_user()
   213  ret = strncpy_from_user(dst, unsafe_addr, count);  in strncpy_from_unsafe_user()
   217  if (ret >= count) {  in strncpy_from_unsafe_user()
   218  ret = count;  in strncpy_from_unsafe_user()
   243  long strnlen_unsafe_user(const void __user *unsafe_addr, long count)  in strnlen_unsafe_user()  (argument)
   250  ret = strnlen_user(unsafe_addr, count);  in strnlen_unsafe_user()
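strncpy_from_unsafe() copies byte by byte until it has written a NUL, hit an error, or moved count bytes; that is exactly the loop condition on line 175. A userspace sketch of the same loop, with a trivial get_byte() standing in for the kernel's fault-tolerant read (in maccess.c the access can fault and return nonzero):

    #include <stdio.h>

    /* Stand-in for the kernel's faulting byte read; here it cannot fail. */
    static int get_byte(char *dst, const char *src)
    {
        *dst = *src;
        return 0;
    }

    /* Copy at most count bytes, stopping after the NUL terminator or on
     * error, mirroring the loop at maccess.c:175. Returns bytes copied
     * including the NUL, or the error from get_byte(). */
    static long bounded_strcpy(char *dst, const char *unsafe_addr, long count)
    {
        const char *src = unsafe_addr;
        int ret = 0;

        if (count <= 0)
            return 0;

        do {
            ret = get_byte(dst++, src++);
        } while (dst[-1] && ret == 0 && src - unsafe_addr < count);

        return ret ? ret : src - unsafe_addr;
    }

    int main(void)
    {
        char buf[8];
        long n = bounded_strcpy(buf, "hello", sizeof(buf));

        printf("copied %ld bytes: %s\n", n, buf);  /* 6 bytes: "hello" + NUL */
        return 0;
    }

Like the kernel version, this leaves dst unterminated when the source string is longer than count, so callers have to treat a return equal to count with care.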
mm/swapfile.c
   765  unsigned char count;  in swap_entry_free()  (local)
   768  count = p->swap_map[offset];  in swap_entry_free()
   769  has_cache = count & SWAP_HAS_CACHE;  in swap_entry_free()
   770  count &= ~SWAP_HAS_CACHE;  in swap_entry_free()
   775  } else if (count == SWAP_MAP_SHMEM) {  in swap_entry_free()
   780  count = 0;  in swap_entry_free()
   781  } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {  in swap_entry_free()
   782  if (count == COUNT_CONTINUED) {  in swap_entry_free()
   783  if (swap_count_continued(p, offset, count))  in swap_entry_free()
   784  count = SWAP_MAP_MAX | COUNT_CONTINUED;  in swap_entry_free()
  [all …]
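swap_entry_free() unpacks a single swap_map byte into two things: a SWAP_HAS_CACHE flag and a usage count, with special count values for shmem-owned entries and for counts that continue into an overflow page. A self-contained sketch of that decoding; the flag values are copied from include/linux/swap.h as I remember them for this era of kernel, so verify them against your tree:

    #include <stdbool.h>
    #include <stdio.h>

    /* Values as defined in include/linux/swap.h (repeated here so the
     * example is self-contained; check your tree). */
    #define SWAP_MAP_MAX     0x3e  /* max in-byte reference count */
    #define SWAP_HAS_CACHE   0x40  /* entry also lives in the swap cache */
    #define COUNT_CONTINUED  0x80  /* count continues in a continuation page */
    #define SWAP_MAP_SHMEM   0xbf  /* entry owned by shmem/tmpfs */

    /* Split one swap_map byte into its cache flag and usage count, the
     * same unpacking swap_entry_free() does at lines 768-770. */
    static void decode_swap_map(unsigned char entry)
    {
        bool has_cache = entry & SWAP_HAS_CACHE;
        unsigned char count = entry & ~SWAP_HAS_CACHE;

        if (count == SWAP_MAP_SHMEM)
            printf("0x%02x: shmem-owned entry\n", entry);
        else if (count & COUNT_CONTINUED)
            printf("0x%02x: count continued elsewhere, cache=%d\n",
                   entry, has_cache);
        else
            printf("0x%02x: count=%d, cache=%d\n", entry, count, has_cache);
    }

    int main(void)
    {
        decode_swap_map(0x01);              /* one reference, no cache  */
        decode_swap_map(0x40 | 0x02);       /* two references + cache   */
        decode_swap_map(SWAP_MAP_SHMEM);    /* shmem marker             */
        return 0;
    }

Note the check order: SWAP_MAP_SHMEM has the COUNT_CONTINUED bit set, so it must be tested before the continuation branch, which is the order swap_entry_free() uses too.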
mm/vmalloc.c
  1086  void vm_unmap_ram(const void *mem, unsigned int count)  in vm_unmap_ram()  (argument)
  1088  unsigned long size = count << PAGE_SHIFT;  in vm_unmap_ram()
  1099  if (likely(count <= VMAP_MAX_ALLOC))  in vm_unmap_ram()
  1121  void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)  in vm_map_ram()  (argument)
  1123  unsigned long size = count << PAGE_SHIFT;  in vm_map_ram()
  1127  if (likely(count <= VMAP_MAX_ALLOC)) {  in vm_map_ram()
  1143  vm_unmap_ram(mem, count);  in vm_map_ram()
  1557  void *vmap(struct page **pages, unsigned int count,  in vmap()  (argument)
  1564  if (count > totalram_pages)  in vmap()
  1567  area = get_vm_area_caller((count << PAGE_SHIFT), flags,  in vmap()
  [all …]
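vm_map_ram() and vm_unmap_ram() take count in pages, not bytes; both sides shift it by PAGE_SHIFT to get the byte size, and small requests (count <= VMAP_MAX_ALLOC) take the fast per-CPU vmap-block path. A hedged kernel-side sketch of balanced use, assuming a module context that already owns the pages (touch_pages_mapped is my name, not a kernel function; this is not runnable outside the kernel):

    #include <linux/mm.h>
    #include <linux/numa.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    /*
     * Map nr already-allocated pages at a temporary contiguous virtual
     * address, touch the range, then tear the mapping down. The unmap
     * must get the same page count as the map: vm_unmap_ram() rebuilds
     * the mapping size as count << PAGE_SHIFT (vmalloc.c:1088).
     */
    static int touch_pages_mapped(struct page **pages, unsigned int nr)
    {
        void *va = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);

        if (!va)
            return -ENOMEM;

        memset(va, 0, (size_t)nr << PAGE_SHIFT);

        vm_unmap_ram(va, nr);   /* same count as the map call */
        return 0;
    }

vmap(), by contrast, builds a longer-lived mapping and sanity-checks the request first: line 1564 rejects any count larger than totalram_pages.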
mm/page_idle.c
   117  loff_t pos, size_t count)  in page_idle_bitmap_read()  (argument)
   124  if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)  in page_idle_bitmap_read()
   131  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_read()
   162  loff_t pos, size_t count)  in page_idle_bitmap_write()  (argument)
   169  if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)  in page_idle_bitmap_write()
   176  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_write()
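Both bitmap handlers turn a file window into a pfn window with the same arithmetic: one bit per page frame, so count bytes cover count * BITS_PER_BYTE frames, and pos and count must be whole chunks (BITMAP_CHUNK_SIZE is the u64 granule the bitmap is accessed in; I am assuming 8 bytes here). A small demo of the mapping:

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_BYTE      8
    #define BITMAP_CHUNK_SIZE  sizeof(uint64_t)  /* assumed 8-byte chunks */

    /* Which pfns does a read/write of count bytes at offset pos cover?
     * One bit per page, so the window is count * BITS_PER_BYTE frames. */
    static int bitmap_window(long pos, size_t count,
                             unsigned long *pfn, unsigned long *end_pfn)
    {
        if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
            return -1;  /* the kernel returns -EINVAL here */

        *pfn = pos * BITS_PER_BYTE;
        *end_pfn = *pfn + count * BITS_PER_BYTE;
        return 0;
    }

    int main(void)
    {
        unsigned long pfn, end;

        if (bitmap_window(16, 8, &pfn, &end) == 0)
            printf("pfns [%lu, %lu)\n", pfn, end);  /* pfns [128, 192) */
        return 0;
    }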
mm/nobootmem.c
   127  unsigned long count = 0;  in free_low_memory_core_early()  (local)
   138  count += __free_memory_core(start, end);  in free_low_memory_core_early()
   147  count += __free_memory_core(start, start + size);  in free_low_memory_core_early()
   152  count += __free_memory_core(start, start + size);  in free_low_memory_core_early()
   156  return count;  in free_low_memory_core_early()
mm/huge_memory.c
   254  const char *buf, size_t count,  in double_flag_store()  (argument)
   259  min(sizeof("always")-1, count))) {  in double_flag_store()
   263  min(sizeof("madvise")-1, count))) {  in double_flag_store()
   267  min(sizeof("never")-1, count))) {  in double_flag_store()
   273  return count;  in double_flag_store()
   285  const char *buf, size_t count)  in enabled_store()  (argument)
   289  ret = double_flag_store(kobj, attr, buf, count,  in enabled_store()
   319  const char *buf, size_t count,  in single_flag_store()  (argument)
   336  return count;  in single_flag_store()
   353  const char *buf, size_t count)  in defrag_store()  (argument)
  [all …]
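double_flag_store() matches the user's write against fixed keywords with memcmp() over min(sizeof("...")-1, count) and, on success, returns count so sysfs treats the whole write as consumed; the same return-count convention shows up in backing-dev.c and ksm.c further down. A userspace sketch of the matching (enum thp_mode and mode_store are mine, not kernel names):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    enum thp_mode { THP_ALWAYS, THP_MADVISE, THP_NEVER };

    /* Match a sysfs write buffer against fixed keywords the way
     * double_flag_store() does: compare at most count bytes, and on
     * success report the full count as consumed. */
    static ssize_t mode_store(const char *buf, size_t count, enum thp_mode *mode)
    {
        if (!memcmp("always", buf, min(sizeof("always") - 1, count)))
            *mode = THP_ALWAYS;
        else if (!memcmp("madvise", buf, min(sizeof("madvise") - 1, count)))
            *mode = THP_MADVISE;
        else if (!memcmp("never", buf, min(sizeof("never") - 1, count)))
            *mode = THP_NEVER;
        else
            return -EINVAL;

        return count;  /* sysfs convention: whole write consumed */
    }

    int main(void)
    {
        enum thp_mode m;
        ssize_t ret = mode_store("madvise\n", 8, &m);

        printf("ret=%zd mode=%d\n", ret, m);  /* ret=8 mode=1 */
        return 0;
    }

Because only min(len, count) bytes are compared, a strict prefix such as "alw" also matches "always"; that quirk is inherited from the kernel code being illustrated.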
mm/quicklist.c
    91  unsigned long count = 0;  in quicklist_total_size()  (local)
    98  count += q->nr_pages;  in quicklist_total_size()
   100  return count;  in quicklist_total_size()
mm/early_ioremap.c
    81  int count = 0;  in check_early_ioremap_leak()  (local)
    86  count++;  in check_early_ioremap_leak()
    88  if (WARN(count, KERN_WARNING  in check_early_ioremap_leak()
    91  count))  in check_early_ioremap_leak()
mm/nommu.c
   298  long vread(char *buf, char *addr, unsigned long count)  in vread()  (argument)
   301  if ((unsigned long) buf + count < count)  in vread()
   302  count = -(unsigned long) buf;  in vread()
   304  memcpy(buf, addr, count);  in vread()
   305  return count;  in vread()
   308  long vwrite(char *buf, char *addr, unsigned long count)  in vwrite()  (argument)
   311  if ((unsigned long) addr + count < count)  in vwrite()
   312  count = -(unsigned long) addr;  in vwrite()
   314  memcpy(addr, buf, count);  in vwrite()
   315  return count;  in vwrite()
  [all …]
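The first thing vread() and vwrite() do with count is clamp it against address-space wraparound: if buf + count wraps past the top of memory, count is cut down to -(unsigned long)buf, which is precisely the number of bytes between buf and the wrap point. A small demo of the check:

    #include <stdio.h>

    /* Clamp count so that addr + count cannot wrap: -(unsigned long)addr
     * is the distance from addr to the top of the address space, i.e.
     * the bytes remaining before the addition would overflow. */
    static unsigned long clamp_count(unsigned long addr, unsigned long count)
    {
        if (addr + count < count)   /* the addition wrapped */
            count = -addr;
        return count;
    }

    int main(void)
    {
        unsigned long top = ~0UL - 15;    /* 16 bytes below the top */

        printf("%lu\n", clamp_count(top, 4096));   /* clamped to 16 */
        printf("%lu\n", clamp_count(4096, 4096));  /* unchanged: 4096 */
        return 0;
    }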
mm/highmem.c
   220  int count;  in map_new_virtual()  (local)
   225  count = get_pkmap_entries_count(color);  in map_new_virtual()
   231  count = get_pkmap_entries_count(color);  in map_new_virtual()
   235  if (--count)  in map_new_virtual()
mm/page_alloc.c
   845  static void free_pcppages_bulk(struct zone *zone, int count,  in free_pcppages_bulk()  (argument)
   861  count = min(pcp->count, count);  in free_pcppages_bulk()
   862  while (count) {  in free_pcppages_bulk()
   882  batch_free = count;  in free_pcppages_bulk()
   900  } while (--count && --batch_free && !list_empty(list));  in free_pcppages_bulk()
  1898  unsigned long count, struct list_head *list,  in rmqueue_bulk()  (argument)
  1904  for (i = 0; i < count; ++i) {  in rmqueue_bulk()
  1948  to_drain = min(pcp->count, batch);  in drain_zone_pages()
  1951  pcp->count -= to_drain;  in drain_zone_pages()
  1974  if (pcp->count) {  in drain_pages_zone()
  [all …]
mm/cma.h
     6  unsigned long count;  (member)
    21  return cma->count >> cma->order_per_bit;  in cma_bitmap_maxno()
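cma_bitmap_maxno() encodes the sizing rule for the CMA allocation bitmap: one bit covers 2^order_per_bit pages, so an area of count pages needs count >> order_per_bit bits. A worked example (the 128 MiB / order_per_bit = 4 numbers are illustrative, not from the source):

    #include <stdio.h>

    /* One CMA bitmap bit tracks 2^order_per_bit pages, so an area of
     * count pages needs count >> order_per_bit bits (cma_bitmap_maxno). */
    static unsigned long bitmap_maxno(unsigned long count, unsigned order_per_bit)
    {
        return count >> order_per_bit;
    }

    int main(void)
    {
        unsigned long pages = (128UL << 20) >> 12;    /* 128 MiB of 4 KiB pages */
        unsigned long bits = bitmap_maxno(pages, 4);  /* 16-page granules */

        printf("%lu pages -> %lu bits (%lu bytes)\n", pages, bits, bits / 8);
        /* prints: 32768 pages -> 2048 bits (256 bytes) */
        return 0;
    }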
mm/vmstat.c
   498  !__this_cpu_read(p->pcp.count))  in refresh_cpu_vm_stats()
   512  if (__this_cpu_read(p->pcp.count)) {  in refresh_cpu_vm_stats()
  1008  unsigned long count[MIGRATE_TYPES] = { 0, };  in pagetypeinfo_showblockcount_print()  (local)
  1025  count[mtype]++;  in pagetypeinfo_showblockcount_print()
  1031  seq_printf(m, "%12lu ", count[mtype]);  in pagetypeinfo_showblockcount_print()
  1059  unsigned long count[MIGRATE_TYPES] = { 0, };  in pagetypeinfo_showmixedcount_print()  (local)
  1106  count[MIGRATE_MOVABLE]++;  in pagetypeinfo_showmixedcount_print()
  1108  count[pageblock_mt]++;  in pagetypeinfo_showmixedcount_print()
  1120  seq_printf(m, "%12lu ", count[i]);  in pagetypeinfo_showmixedcount_print()
  1255  pageset->pcp.count,  in zoneinfo_show_print()
mm/bootmem.c
   175  unsigned long *map, start, end, pages, cur, count = 0;  in free_all_bootmem_core()  (local)
   214  count += BITS_PER_LONG;  in free_all_bootmem_core()
   224  count++;  in free_all_bootmem_core()
   236  count += pages;  in free_all_bootmem_core()
   241  bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);  in free_all_bootmem_core()
   243  return count;  in free_all_bootmem_core()
mm/kmemleak.c
   162  int count;  (member)
   327  return object->count != KMEMLEAK_BLACK &&  in color_white()
   328  object->count < object->min_count;  in color_white()
   334  object->count >= object->min_count;  in color_gray()
   390  pr_notice(" count = %d\n", object->count);  in dump_object_info()
   561  object->count = 0; /* white color initially */  in create_object()
  1224  object->count++;  in scan_block()
  1358  object->count = 0;  in kmemleak_scan()
  1432  object->count = object->min_count;  in kmemleak_scan()
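kmemleak counts, in object->count, how many references to each object the scan found, and compares that against object->min_count: fewer than expected is "white" (a leak candidate), enough is "gray" (the object is reachable, so its own memory is scanned for further pointers). A sketch of the two predicates; I am assuming KMEMLEAK_BLACK is -1, its value in the kmemleak sources as I know them:

    #include <stdbool.h>
    #include <stdio.h>

    #define KMEMLEAK_BLACK (-1)  /* assumed: marks "never report" objects */

    struct object {
        int count;      /* references found by the last scan */
        int min_count;  /* references expected if not leaked */
    };

    /* White: fewer references found than expected -> leak candidate. */
    static bool color_white(const struct object *o)
    {
        return o->count != KMEMLEAK_BLACK && o->count < o->min_count;
    }

    /* Gray: enough references found -> scan its contents for pointers. */
    static bool color_gray(const struct object *o)
    {
        return o->min_count != KMEMLEAK_BLACK && o->count >= o->min_count;
    }

    int main(void)
    {
        struct object leaked = { .count = 0, .min_count = 1 };
        struct object live   = { .count = 2, .min_count = 1 };

        printf("leaked: white=%d gray=%d\n",
               color_white(&leaked), color_gray(&leaked));  /* 1 0 */
        printf("live:   white=%d gray=%d\n",
               color_white(&live), color_gray(&live));      /* 0 1 */
        return 0;
    }

The scan loop above resets every count to zero (line 1358) and increments it once per pointer found (line 1224), so the colors are recomputed from scratch on each pass.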
mm/slub.c
  4264  unsigned long count = 0;  in validate_slab_node()  (local)
  4272  count++;  in validate_slab_node()
  4274  if (count != n->nr_partial)  in validate_slab_node()
  4276  s->name, count, n->nr_partial);  in validate_slab_node()
  4283  count++;  in validate_slab_node()
  4285  if (count != atomic_long_read(&n->nr_slabs))  in validate_slab_node()
  4287  s->name, count, atomic_long_read(&n->nr_slabs));  in validate_slab_node()
  4291  return count;  in validate_slab_node()
  4297  unsigned long count = 0;  in validate_slab_cache()  (local)
  4307  count += validate_slab_node(s, n, map);  in validate_slab_cache()
  [all …]
mm/memcontrol.c
   663  val += per_cpu(memcg->stat->count[idx], cpu);  in mem_cgroup_read_stat()
   693  __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],  in mem_cgroup_charge_statistics()
   696  __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],  in mem_cgroup_charge_statistics()
   700  __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],  in mem_cgroup_charge_statistics()
  1184  unsigned long count;  in mem_cgroup_margin()  (local)
  1187  count = page_counter_read(&memcg->memory);  in mem_cgroup_margin()
  1189  if (count < limit)  in mem_cgroup_margin()
  1190  margin = limit - count;  in mem_cgroup_margin()
  1193  count = page_counter_read(&memcg->memsw);  in mem_cgroup_margin()
  1195  if (count <= limit)  in mem_cgroup_margin()
  [all …]
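mem_cgroup_margin() answers "how many more pages can this cgroup charge?": the memory counter's headroom (limit - count while below the limit), further capped by the memory+swap counter when swap accounting is active. A sketch of that shape, with plain unsigned longs standing in for page_counter reads:

    #include <stdio.h>

    /* Pages still chargeable against one counter (0 at or over limit). */
    static unsigned long headroom(unsigned long count, unsigned long limit)
    {
        return count < limit ? limit - count : 0;
    }

    /* The margin is the memory counter's headroom, tightened by the
     * memory+swap counter when swap accounting is on -- the shape of
     * mem_cgroup_margin(). */
    static unsigned long margin(unsigned long mem, unsigned long mem_limit,
                                unsigned long memsw, unsigned long memsw_limit,
                                int do_swap_account)
    {
        unsigned long m = headroom(mem, mem_limit);

        if (do_swap_account) {
            unsigned long msw = headroom(memsw, memsw_limit);

            if (msw < m)
                m = msw;
        }
        return m;
    }

    int main(void)
    {
        /* 100-page memory limit, 120-page memory+swap limit. */
        printf("%lu\n", margin(80, 100, 115, 120, 1));  /* min(20, 5) = 5 */
        printf("%lu\n", margin(80, 100, 115, 120, 0));  /* swap ignored: 20 */
        return 0;
    }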
mm/hugetlb.c
    89  bool free = (spool->count == 0) && (spool->used_hpages == 0);  in unlock_or_release_subpool()
   114  spool->count = 1;  in hugepage_new_subpool()
   131  BUG_ON(!spool->count);  in hugepage_put_subpool()
   132  spool->count--;  in hugepage_put_subpool()
  2150  static void try_to_free_low(struct hstate *h, unsigned long count,  in try_to_free_low()  (argument)
  2162  if (count >= h->nr_huge_pages)  in try_to_free_low()
  2174  static inline void try_to_free_low(struct hstate *h, unsigned long count,  in try_to_free_low()  (argument)
  2213  static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,  in set_max_huge_pages()  (argument)
  2233  while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {  in set_max_huge_pages()
  2238  while (count > persistent_huge_pages(h)) {  in set_max_huge_pages()
  [all …]
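The subpool lines sketch a lock-protected refcount with deferred teardown: the pool starts with count = 1, hugepage_put_subpool() decrements under the lock, and the structure is freed only once both count and used_hpages are zero, so a pool with pages still outstanding survives its last reference. A userspace rendering with a pthread mutex (all names here are mine, not the kernel's):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for struct hugepage_subpool. */
    struct subpool {
        pthread_mutex_t lock;
        long count;        /* references to the pool itself */
        long used_hpages;  /* huge pages currently handed out */
    };

    /* Drop the lock, and free the pool only when *both* the refcount and
     * the outstanding-page count are zero -- the condition tested by
     * unlock_or_release_subpool() at hugetlb.c:89. */
    static void unlock_or_release(struct subpool *sp)
    {
        bool last = (sp->count == 0) && (sp->used_hpages == 0);

        pthread_mutex_unlock(&sp->lock);
        if (last) {
            pthread_mutex_destroy(&sp->lock);
            free(sp);
        }
    }

    static void put_subpool(struct subpool *sp)
    {
        pthread_mutex_lock(&sp->lock);
        sp->count--;               /* as in hugepage_put_subpool() */
        unlock_or_release(sp);
    }

    int main(void)
    {
        struct subpool *sp = calloc(1, sizeof(*sp));

        if (!sp)
            return 1;
        pthread_mutex_init(&sp->lock, NULL);
        sp->count = 1;             /* creator's reference (hugetlb.c:114) */

        put_subpool(sp);           /* last ref, no pages out -> freed */
        puts("subpool released");
        return 0;
    }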
mm/backing-dev.c
   146  const char *buf, size_t count)  in read_ahead_kb_store()  (argument)
   158  return count;  in read_ahead_kb_store()
   176  struct device_attribute *attr, const char *buf, size_t count)  in min_ratio_store()  (argument)
   188  ret = count;  in min_ratio_store()
   195  struct device_attribute *attr, const char *buf, size_t count)  in max_ratio_store()  (argument)
   207  ret = count;  in max_ratio_store()
mm/fadvise.c
   135  unsigned long count = invalidate_mapping_pages(mapping,  in SYSCALL_DEFINE4()  (local)
   144  if (count < (end_index - start_index + 1)) {  in SYSCALL_DEFINE4()
mm/ksm.c
  2143  const char *buf, size_t count)  in sleep_millisecs_store()  (argument)
  2154  return count;  in sleep_millisecs_store()
  2166  const char *buf, size_t count)  in pages_to_scan_store()  (argument)
  2177  return count;  in pages_to_scan_store()
  2188  const char *buf, size_t count)  in run_store()  (argument)
  2216  count = err;  in run_store()
  2225  return count;  in run_store()
  2238  const char *buf, size_t count)  in merge_across_nodes_store()  (argument)
  2282  return err ? err : count;  in merge_across_nodes_store()