/mm/
D | cma_debug.c |
   165  struct dentry *tmp;  in cma_debugfs_add_one()  local
   170  tmp = debugfs_create_dir(name, root_dentry);  in cma_debugfs_add_one()
   172  debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);  in cma_debugfs_add_one()
   173  debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);  in cma_debugfs_add_one()
   174  debugfs_create_file("base_pfn", 0444, tmp,  in cma_debugfs_add_one()
   176  debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);  in cma_debugfs_add_one()
   177  debugfs_create_file("order_per_bit", 0444, tmp,  in cma_debugfs_add_one()
   179  debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);  in cma_debugfs_add_one()
   180  debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);  in cma_debugfs_add_one()
   185  debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap);  in cma_debugfs_add_one()
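The cma_debug.c hits above use tmp to hold the per-area debugfs directory that the individual files are then created under. A minimal, hypothetical module-style sketch of that create-a-dir-then-hang-files-off-it pattern (module name and counter are invented, not taken from cma_debug.c):

#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *demo_dir;   /* plays the role of "tmp" in the listing */
static u32 demo_count;

static int __init demo_init(void)
{
    /* create the directory first, then attach entries to it as children */
    demo_dir = debugfs_create_dir("tmp_demo", NULL);
    debugfs_create_u32("count", 0444, demo_dir, &demo_count);
    return 0;
}

static void __exit demo_exit(void)
{
    debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

cma_debug.c itself registers debugfs_create_file() with its own file_operations for each entry; debugfs_create_u32() is just the shortest way to show the same parent/child relationship.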
D | mincore.c |
   235  unsigned char *tmp;  in SYSCALL_DEFINE3()  local
   254  tmp = (void *) __get_free_page(GFP_USER);  in SYSCALL_DEFINE3()
   255  if (!tmp)  in SYSCALL_DEFINE3()
   265  retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);  in SYSCALL_DEFINE3()
   270  if (copy_to_user(vec, tmp, retval)) {  in SYSCALL_DEFINE3()
   279  free_page((unsigned long) tmp);  in SYSCALL_DEFINE3()
D | zswap.c |
   938  u8 *src, *tmp = NULL;  in zswap_writeback_entry()  local
   946  tmp = kmalloc(PAGE_SIZE, GFP_ATOMIC);  in zswap_writeback_entry()
   947  if (!tmp)  in zswap_writeback_entry()
   964  kfree(tmp);  in zswap_writeback_entry()
   972  memcpy(tmp, src, entry->length);  in zswap_writeback_entry()
   973  src = tmp;  in zswap_writeback_entry()
  1051  kfree(tmp);  in zswap_writeback_entry()
  1254  u8 *src, *dst, *tmp;  in zswap_frontswap_load()  local
  1278  tmp = kmalloc(entry->length, GFP_ATOMIC);  in zswap_frontswap_load()
  1279  if (!tmp) {  in zswap_frontswap_load()
  [all …]
D | balloon_compaction.c |
    43  struct page *page, *tmp;  in balloon_page_list_enqueue()  local
    48  list_for_each_entry_safe(page, tmp, pages, lru) {  in balloon_page_list_enqueue()
    79  struct page *page, *tmp;  in balloon_page_list_dequeue()  local
    84  list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {  in balloon_page_list_dequeue()
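This entry, and several below it (kmemleak.c, khugepaged.c, dmapool.c, hugetlb.c, page_alloc.c, memcontrol.c, zsmalloc.c), uses tmp as the second cursor of list_for_each_entry_safe(). A small sketch of why that cursor exists, using an invented item type (only the "lru" member name is kept for resemblance); it would build as part of a kernel module:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
    struct list_head lru;
    int id;
};

static void demo_drain(struct list_head *items)
{
    struct demo_item *item, *tmp;

    /* the _safe variant caches the next node in "tmp", so the current
     * entry may be unlinked and freed inside the loop body */
    list_for_each_entry_safe(item, tmp, items, lru) {
        list_del(&item->lru);
        kfree(item);
    }
}

With plain list_for_each_entry() the loop would have to read item->lru.next after item had already been freed.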
D | vmalloc.c |
   817  struct vmap_area *tmp;  in find_vmap_area_exceed_addr()  local
   819  tmp = rb_entry(n, struct vmap_area, rb_node);  in find_vmap_area_exceed_addr()
   820  if (tmp->va_end > addr) {  in find_vmap_area_exceed_addr()
   821  va = tmp;  in find_vmap_area_exceed_addr()
   822  if (tmp->va_start <= addr)  in find_vmap_area_exceed_addr()
  1943  struct vmap_block *tmp;  in free_vmap_block()  local
  1945  tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));  in free_vmap_block()
  1946  BUG_ON(tmp != vb);  in free_vmap_block()
  2263  struct vm_struct *tmp, **p;  in vm_area_add_early()  local
  2266  for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {  in vm_area_add_early()
  [all …]
D | mprotect.c |
   531  unsigned long nstart, end, tmp, reqprot;  in do_mprotect_pkey()  local
   634  tmp = vma->vm_end;  in do_mprotect_pkey()
   635  if (tmp > end)  in do_mprotect_pkey()
   636  tmp = end;  in do_mprotect_pkey()
   639  error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);  in do_mprotect_pkey()
   644  error = mprotect_fixup(&tlb, vma, &prev, nstart, tmp, newflags);  in do_mprotect_pkey()
   648  nstart = tmp;  in do_mprotect_pkey()
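Here tmp is the end of the current chunk: the VMA end clamped to the caller's end address. mlock.c and madvise.c below use the identical clamp-and-advance idiom. A self-contained userspace sketch of just that loop, with a toy VMA type standing in for struct vm_area_struct (no gap or permission checks, unlike the real do_mprotect_pkey()):

#include <stdio.h>

struct toy_vma {
    unsigned long vm_start, vm_end;
    struct toy_vma *vm_next;
};

static int apply_range(unsigned long start, unsigned long end)
{
    printf("fixup [%#lx, %#lx)\n", start, end);
    return 0;
}

static int walk_clamped(struct toy_vma *vma, unsigned long start, unsigned long end)
{
    unsigned long nstart = start, tmp;
    int error = 0;

    for (; vma && nstart < end; vma = vma->vm_next) {
        tmp = vma->vm_end;
        if (tmp > end)          /* clamp the chunk to the requested range */
            tmp = end;
        error = apply_range(nstart, tmp);
        if (error)
            break;
        nstart = tmp;           /* next chunk starts where this one ended */
    }
    return error;
}

int main(void)
{
    struct toy_vma b = { 0x3000, 0x5000, NULL };
    struct toy_vma a = { 0x1000, 0x3000, &b };

    return walk_clamped(&a, 0x2000, 0x4000);
}

Each iteration covers [nstart, tmp) and the next one resumes at tmp, so a request spanning several VMAs is applied piecewise.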
D | mlock.c |
   562  unsigned long nstart, end, tmp;  in apply_vma_lock_flags()  local
   587  tmp = vma->vm_end;  in apply_vma_lock_flags()
   588  if (tmp > end)  in apply_vma_lock_flags()
   589  tmp = end;  in apply_vma_lock_flags()
   590  error = mlock_fixup(vma, &prev, nstart, tmp, newflags);  in apply_vma_lock_flags()
   593  nstart = tmp;  in apply_vma_lock_flags()
D | mempolicy.c |
   191  nodemask_t tmp;  in mpol_relative_nodemask()  local
   192  nodes_fold(tmp, *orig, nodes_weight(*rel));  in mpol_relative_nodemask()
   193  nodes_onto(*ret, tmp, *rel);  in mpol_relative_nodemask()
   318  nodemask_t tmp;  in mpol_rebind_nodemask()  local
   321  nodes_and(tmp, pol->w.user_nodemask, *nodes);  in mpol_rebind_nodemask()
   323  mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);  in mpol_rebind_nodemask()
   325  nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,  in mpol_rebind_nodemask()
   330  if (nodes_empty(tmp))  in mpol_rebind_nodemask()
   331  tmp = *nodes;  in mpol_rebind_nodemask()
   333  pol->nodes = tmp;  in mpol_rebind_nodemask()
  [all …]
D | debug.c |
    70  unsigned long tmp = (unsigned long)page->mapping;  in __dump_page()  local
    72  if (tmp & PAGE_MAPPING_ANON)  in __dump_page()
    75  mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);  in __dump_page()
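__dump_page() copies page->mapping into tmp because the low bits of that pointer carry tag bits rather than address bits. A userspace illustration of the same tag-and-mask trick; the DEMO_* constants only mimic the kernel's PAGE_MAPPING_ANON / PAGE_MAPPING_FLAGS values (0x1 / 0x3 in current trees), so treat them as an assumption:

#include <stdio.h>

#define DEMO_MAPPING_ANON   0x1UL
#define DEMO_MAPPING_FLAGS  0x3UL

struct demo_mapping { const char *name; };

int main(void)
{
    struct demo_mapping m = { "anon_vma" };
    /* pretend the pointer was tagged the way anonymous mappings tag it */
    unsigned long tmp = (unsigned long)&m | DEMO_MAPPING_ANON;
    struct demo_mapping *mapping;

    if (tmp & DEMO_MAPPING_ANON)
        printf("anonymous mapping\n");

    /* strip the tag bits to recover the real pointer, as __dump_page() does */
    mapping = (void *)(tmp & ~DEMO_MAPPING_FLAGS);
    printf("recovered pointer -> %s\n", mapping->name);
    return 0;
}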
D | mmap.c |
  2290  struct vm_area_struct *tmp;  in find_vma_from_tree()  local
  2292  tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);  in find_vma_from_tree()
  2294  if (tmp->vm_end > addr) {  in find_vma_from_tree()
  2295  vma = tmp;  in find_vma_from_tree()
  2296  if (tmp->vm_start <= addr)  in find_vma_from_tree()
  2815  struct vm_area_struct *tmp = start;  in unlock_range()  local
  2817  while (tmp && tmp->vm_start < limit) {  in unlock_range()
  2818  if (tmp->vm_flags & VM_LOCKED) {  in unlock_range()
  2819  mm->locked_vm -= vma_pages(tmp);  in unlock_range()
  2820  munlock_vma_pages_all(tmp);  in unlock_range()
  [all …]
D | page_pinner.c |
   318  u64 tmp;  in read_buffer()  local
   336  tmp = pp_buffer.index - 1 - i + pp_buf_size;  in read_buffer()
   337  idx = do_div(tmp, pp_buf_size);  in read_buffer()
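read_buffer() needs a scratch tmp because do_div() is destructive: it divides the 64-bit value in place and returns the 32-bit remainder, which is the ring-buffer index wanted here. page_alloc.c's __setup_per_zone_wmarks() further down leans on the same macro. A userspace stand-in (the buffer size is a guess, purely for illustration) that shows the in/out behaviour:

#include <stdio.h>
#include <stdint.h>

/* simplified stand-in for the kernel's do_div(): quotient left in n,
 * remainder returned; the kernel macro exists because 64-by-32 division
 * is not natively available on every 32-bit architecture */
#define do_div_demo(n, base) ({ uint32_t __rem = (n) % (base); (n) /= (base); __rem; })

int main(void)
{
    uint64_t tmp = 4098;                   /* e.g. index arithmetic result */
    uint32_t idx = do_div_demo(tmp, 4096); /* 4096 = assumed buffer size */

    printf("quotient=%llu  remainder(idx)=%u\n", (unsigned long long)tmp, idx);
    return 0;
}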
D | kmemleak.c |
   473  struct hlist_node *tmp;  in free_object_rcu()  local
   482  hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {  in free_object_rcu()
  1382  struct kmemleak_object *object, *tmp;  in scan_gray_list()  local
  1397  tmp = list_entry(object->gray_list.next, typeof(*object),  in scan_gray_list()
  1404  object = tmp;  in scan_gray_list()
  1868  struct kmemleak_object *object, *tmp;  in __kmemleak_do_cleanup()  local
  1874  list_for_each_entry_safe(object, tmp, &object_list, object_list) {  in __kmemleak_do_cleanup()
D | madvise.c |
  1176  unsigned long tmp;  in madvise_walk_vmas()  local
  1204  tmp = vma->vm_end;  in madvise_walk_vmas()
  1205  if (end < tmp)  in madvise_walk_vmas()
  1206  tmp = end;  in madvise_walk_vmas()
  1209  error = visit(vma, &prev, start, tmp, arg);  in madvise_walk_vmas()
  1212  start = tmp;  in madvise_walk_vmas()
D | sparse.c |
   656  DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };  in clear_subsection_map()
   663  bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);  in clear_subsection_map()
   665  if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),  in clear_subsection_map()
D | swapfile.c |
   624  unsigned long tmp, max;  in scan_swap_map_try_ssd_cluster()  local
   651  tmp = cluster->next;  in scan_swap_map_try_ssd_cluster()
   654  if (tmp < max) {  in scan_swap_map_try_ssd_cluster()
   655  ci = lock_cluster(si, tmp);  in scan_swap_map_try_ssd_cluster()
   656  while (tmp < max) {  in scan_swap_map_try_ssd_cluster()
   657  if (!si->swap_map[tmp])  in scan_swap_map_try_ssd_cluster()
   659  tmp++;  in scan_swap_map_try_ssd_cluster()
   663  if (tmp >= max) {  in scan_swap_map_try_ssd_cluster()
   667  cluster->next = tmp + 1;  in scan_swap_map_try_ssd_cluster()
   668  *offset = tmp;  in scan_swap_map_try_ssd_cluster()
  [all …]
D | khugepaged.c |
   573  struct page *page, *tmp;  in release_pte_pages()  local
   584  list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {  in release_pte_pages()
   749  struct page *src_page, *tmp;  in __collapse_huge_page_copy()  local
   792  list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {  in __collapse_huge_page_copy()
  1941  struct page *page, *tmp;  in collapse_file()  local
  1948  list_for_each_entry_safe(page, tmp, &pagelist, lru) {  in collapse_file()
D | dmapool.c |
   269  struct dma_page *page, *tmp;  in dma_pool_destroy()  local
   285  list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {  in dma_pool_destroy()
D | memory_hotplug.c |
  2384  uint8_t *online_types, *tmp;  in offline_and_remove_memory()  local
  2409  tmp = online_types;  in offline_and_remove_memory()
  2410  rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);  in offline_and_remove_memory()
  2427  tmp = online_types;  in offline_and_remove_memory()
  2428  walk_memory_blocks(start, size, &tmp,  in offline_and_remove_memory()
D | memory.c |
  3403  vm_fault_t tmp;  in wp_page_shared()  local
  3406  tmp = do_page_mkwrite(vmf);  in wp_page_shared()
  3407  if (unlikely(!tmp || (tmp &  in wp_page_shared()
  3410  return tmp;  in wp_page_shared()
  3412  tmp = finish_mkwrite_fault(vmf);  in wp_page_shared()
  3413  if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {  in wp_page_shared()
  3416  return tmp;  in wp_page_shared()
  4565  vm_fault_t ret, tmp;  in do_shared_fault()  local
  4579  tmp = do_page_mkwrite(vmf);  in do_shared_fault()
  4580  if (unlikely(!tmp ||  in do_shared_fault()
  [all …]
D | hugetlb.c |
  2225  struct page *page, *tmp;  in gather_surplus_pages()  local
  2286  list_for_each_entry_safe(page, tmp, &surplus_list, lru) {  in gather_surplus_pages()
  2299  list_for_each_entry_safe(page, tmp, &surplus_list, lru)  in gather_surplus_pages()
  3921  unsigned long tmp = h->max_huge_pages;  in hugetlb_sysctl_handler_common()  local
  3928  &tmp);  in hugetlb_sysctl_handler_common()
  3934  NUMA_NO_NODE, tmp, *length);  in hugetlb_sysctl_handler_common()
  3960  unsigned long tmp;  in hugetlb_overcommit_handler()  local
  3966  tmp = h->nr_overcommit_huge_pages;  in hugetlb_overcommit_handler()
  3972  &tmp);  in hugetlb_overcommit_handler()
  3978  h->nr_overcommit_huge_pages = tmp;  in hugetlb_overcommit_handler()
  [all …]
D | page_alloc.c |
  1581  struct page *page, *tmp;  in free_pcppages_bulk()  local
  1653  list_for_each_entry_safe(page, tmp, &head, lru) {  in free_pcppages_bulk()
  6265  char tmp[MIGRATE_TYPES + 1];  in show_migration_types()  local
  6266  char *p = tmp;  in show_migration_types()
  6275  printk(KERN_CONT "(%s) ", tmp);  in show_migration_types()
  8801  u64 tmp;  in __setup_per_zone_wmarks()  local
  8804  tmp = (u64)pages_min * zone_managed_pages(zone);  in __setup_per_zone_wmarks()
  8805  do_div(tmp, lowmem_pages);  in __setup_per_zone_wmarks()
  8826  zone->_watermark[WMARK_MIN] = tmp;  in __setup_per_zone_wmarks()
  8834  tmp = max_t(u64, tmp >> 2,  in __setup_per_zone_wmarks()
  [all …]
D | shmem.c |
  4025  char tmp[16];  in shmem_enabled_store()  local
  4028  if (count + 1 > sizeof(tmp))  in shmem_enabled_store()
  4030  memcpy(tmp, buf, count);  in shmem_enabled_store()
  4031  tmp[count] = '\0';  in shmem_enabled_store()
  4032  if (count && tmp[count - 1] == '\n')  in shmem_enabled_store()
  4033  tmp[count - 1] = '\0';  in shmem_enabled_store()
  4035  huge = shmem_parse_huge(tmp);  in shmem_enabled_store()
D | slab.c |
  4136  char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;  in slabinfo_write()  local
  4146  tmp = strchr(kbuf, ' ');  in slabinfo_write()
  4147  if (!tmp)  in slabinfo_write()
  4149  *tmp = '\0';  in slabinfo_write()
  4150  tmp++;  in slabinfo_write()
  4151  if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)  in slabinfo_write()
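slabinfo_write() points tmp just past the cache name so the three tunables can be parsed on their own. The same split-then-sscanf pattern works verbatim in userspace; the buffer contents below are invented for the sketch:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char kbuf[] = "dentry 120 60 8";      /* "<cache name> <limit> <batch> <shared>" */
    int limit, batchcount, shared;
    char *tmp = strchr(kbuf, ' ');

    if (!tmp)
        return 1;
    *tmp = '\0';                          /* kbuf now holds only the cache name */
    tmp++;
    if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
        return 1;
    printf("cache=%s limit=%d batch=%d shared=%d\n", kbuf, limit, batchcount, shared);
    return 0;
}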
D | memcontrol.c |
  4477  struct mem_cgroup_eventfd_list *ev, *tmp;  in mem_cgroup_oom_unregister_event()  local
  4481  list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {  in mem_cgroup_oom_unregister_event()
  5166  int tmp = node;  in alloc_mem_cgroup_per_node_info()  local
  5176  tmp = -1;  in alloc_mem_cgroup_per_node_info()
  5177  pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);  in alloc_mem_cgroup_per_node_info()
  5372  struct mem_cgroup_event *event, *tmp;  in mem_cgroup_css_offline()  local
  5381  list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {  in mem_cgroup_css_offline()
D | zsmalloc.c |
  2171  struct zspage *zspage, *tmp;  in async_free_zspage()  local
  2187  list_for_each_entry_safe(zspage, tmp, &free_pages, list) {  in async_free_zspage()