
Searched refs:tmp (Results 1 – 24 of 24) sorted by relevance

/mm/
cma_debug.c:165 struct dentry *tmp; in cma_debugfs_add_one() local
170 tmp = debugfs_create_dir(name, root_dentry); in cma_debugfs_add_one()
172 debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops); in cma_debugfs_add_one()
173 debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops); in cma_debugfs_add_one()
174 debugfs_create_file("base_pfn", 0444, tmp, in cma_debugfs_add_one()
176 debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops); in cma_debugfs_add_one()
177 debugfs_create_file("order_per_bit", 0444, tmp, in cma_debugfs_add_one()
179 debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops); in cma_debugfs_add_one()
180 debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops); in cma_debugfs_add_one()
185 debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap); in cma_debugfs_add_one()
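
The cma_debug.c hits above show the standard debugfs pattern: create a directory once, then hang files off the returned dentry, handing each file its data pointer and fops. A minimal module-style sketch of the same registration; the "demo" names are hypothetical, the debugfs calls are the real API:

    #include <linux/module.h>
    #include <linux/debugfs.h>

    static struct dentry *demo_dir;
    static u32 demo_count;

    static int __init demo_init(void)
    {
            struct dentry *tmp;

            /* NULL parent means the debugfs root (/sys/kernel/debug/demo) */
            tmp = debugfs_create_dir("demo", NULL);
            demo_dir = tmp;
            /* 0444 = world-readable, like "count" and "base_pfn" above;
             * simple integers need no custom file_operations */
            debugfs_create_u32("count", 0444, tmp, &demo_count);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            debugfs_remove_recursive(demo_dir);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
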
mincore.c:234 unsigned char *tmp; in SYSCALL_DEFINE3() local
253 tmp = (void *) __get_free_page(GFP_USER); in SYSCALL_DEFINE3()
254 if (!tmp) in SYSCALL_DEFINE3()
264 retval = do_mincore(start, min(pages, PAGE_SIZE), tmp); in SYSCALL_DEFINE3()
269 if (copy_to_user(vec, tmp, retval)) { in SYSCALL_DEFINE3()
278 free_page((unsigned long) tmp); in SYSCALL_DEFINE3()
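
mincore.c is the kernel side of the mincore(2) syscall: results are staged in a scratch page (tmp) and pushed out with copy_to_user() one chunk at a time. The userspace view of the same call, as a minimal sketch (one status byte per page; _DEFAULT_SOURCE may be needed for glibc to declare mincore()):

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/mman.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            size_t len = 4 * page;
            unsigned char vec[4];           /* one status byte per page */
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED)
                    return 1;
            buf[0] = 1;                     /* fault in the first page only */
            if (mincore(buf, len, vec) == 0)
                    for (int i = 0; i < 4; i++)
                            printf("page %d: %s\n", i,
                                   (vec[i] & 1) ? "resident" : "not resident");
            return 0;
    }
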
balloon_compaction.c:43 struct page *page, *tmp; in balloon_page_list_enqueue() local
48 list_for_each_entry_safe(page, tmp, pages, lru) { in balloon_page_list_enqueue()
79 struct page *page, *tmp; in balloon_page_list_dequeue() local
84 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_list_dequeue()
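
This is the most common tmp in the listing: list_for_each_entry_safe() (also in kmemleak.c, khugepaged.c, dmapool.c, hugetlb.c, page_alloc.c, memcontrol.c, and zsmalloc.c below) loads the next entry into tmp before the loop body runs, so the current entry can be unlinked or freed mid-walk. A runnable re-creation of the underlying idiom with a hand-rolled list, not the kernel's list.h:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; int val; };

    int main(void)
    {
            struct node *head = NULL, *n, *tmp;

            for (int i = 0; i < 3; i++) {
                    n = malloc(sizeof(*n));
                    n->val = i;
                    n->next = head;
                    head = n;
            }
            /* Grab ->next into tmp before freeing the current node --
             * exactly what list_for_each_entry_safe()'s second cursor
             * does for the kernel's doubly linked lists. */
            for (n = head; n; n = tmp) {
                    tmp = n->next;
                    printf("freeing %d\n", n->val);
                    free(n);
            }
            return 0;
    }
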
mmap.c:2388 struct vm_area_struct *tmp; in __find_vma() local
2390 tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); in __find_vma()
2392 if (tmp->vm_end > addr) { in __find_vma()
2393 vma = tmp; in __find_vma()
2394 if (tmp->vm_start <= addr) in __find_vma()
3023 struct vm_area_struct *tmp = vma; in __do_munmap() local
3024 while (tmp && tmp->vm_start < end) { in __do_munmap()
3025 if (tmp->vm_flags & VM_LOCKED) { in __do_munmap()
3026 mm->locked_vm -= vma_pages(tmp); in __do_munmap()
3027 munlock_vma_pages_all(tmp); in __do_munmap()
[all …]
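
__find_vma() above descends the VMA rbtree remembering the best candidate so far: any node whose vm_end is past addr might be the answer, and the walk keeps refining it leftward. The same descent on a plain binary search tree, with a hypothetical node type standing in for vm_area_struct:

    #include <stdio.h>
    #include <stddef.h>

    struct vma { unsigned long start, end; struct vma *l, *r; };

    /* Find the lowest VMA with end > addr, mirroring __find_vma():
     * record each candidate, then descend to look for a lower one. */
    static struct vma *find_vma(struct vma *node, unsigned long addr)
    {
            struct vma *vma = NULL;

            while (node) {
                    struct vma *tmp = node;

                    if (tmp->end > addr) {
                            vma = tmp;              /* candidate */
                            if (tmp->start <= addr)
                                    break;          /* addr falls inside it */
                            node = node->l;         /* a lower VMA may still fit */
                    } else {
                            node = node->r;
                    }
            }
            return vma;
    }

    int main(void)
    {
            struct vma a = { 0, 10, NULL, NULL };
            struct vma c = { 40, 50, NULL, NULL };
            struct vma b = { 20, 30, &a, &c };      /* root */
            struct vma *hit = find_vma(&b, 15);     /* 15 is in a gap */

            if (hit)
                    printf("[%lu, %lu)\n", hit->start, hit->end);  /* [20, 30) */
            return 0;
    }
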
vmalloc.c:1600 struct vmap_block *tmp; in free_vmap_block() local
1602 tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start)); in free_vmap_block()
1603 BUG_ON(tmp != vb); in free_vmap_block()
1895 struct vm_struct *tmp, **p; in vm_area_add_early() local
1898 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { in vm_area_add_early()
1899 if (tmp->addr >= vm->addr) { in vm_area_add_early()
1900 BUG_ON(tmp->addr < vm->addr + vm->size); in vm_area_add_early()
1903 BUG_ON(tmp->addr + tmp->size > vm->addr); in vm_area_add_early()
1978 struct vm_struct *tmp; in vmalloc_init() local
1999 for (tmp = vmlist; tmp; tmp = tmp->next) { in vmalloc_init()
[all …]
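
vm_area_add_early() walks vmlist with a pointer-to-pointer (p = &vmlist; ...; p = &tmp->next), so splicing the new area in needs no head special case: *p is always the link to rewrite. A runnable re-creation of that sorted insert, with a hypothetical node type in place of vm_struct:

    #include <stdio.h>
    #include <stdlib.h>

    struct vm { struct vm *next; unsigned long addr; };

    static struct vm *vmlist;

    static void vm_add(struct vm *vm)
    {
            struct vm *tmp, **p;

            /* Stop at the first node at or past the new address;
             * p then points at the link we must rewrite. */
            for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next)
                    if (tmp->addr >= vm->addr)
                            break;
            vm->next = *p;
            *p = vm;
    }

    int main(void)
    {
            unsigned long addrs[] = { 30, 10, 20 };

            for (int i = 0; i < 3; i++) {
                    struct vm *vm = malloc(sizeof(*vm));
                    vm->addr = addrs[i];
                    vm_add(vm);
            }
            for (struct vm *tmp = vmlist; tmp; tmp = tmp->next)
                    printf("%lu\n", tmp->addr);     /* 10 20 30 */
            return 0;
    }
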
mempolicy.c:188 nodemask_t tmp; in mpol_relative_nodemask() local
189 nodes_fold(tmp, *orig, nodes_weight(*rel)); in mpol_relative_nodemask()
190 nodes_onto(*ret, tmp, *rel); in mpol_relative_nodemask()
326 nodemask_t tmp; in mpol_rebind_nodemask() local
329 nodes_and(tmp, pol->w.user_nodemask, *nodes); in mpol_rebind_nodemask()
331 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); in mpol_rebind_nodemask()
333 nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed, in mpol_rebind_nodemask()
338 if (nodes_empty(tmp)) in mpol_rebind_nodemask()
339 tmp = *nodes; in mpol_rebind_nodemask()
341 pol->v.nodes = tmp; in mpol_rebind_nodemask()
[all …]
mlock.c:602 unsigned long nstart, end, tmp; in apply_vma_lock_flags() local
627 tmp = vma->vm_end; in apply_vma_lock_flags()
628 if (tmp > end) in apply_vma_lock_flags()
629 tmp = end; in apply_vma_lock_flags()
630 error = mlock_fixup(vma, &prev, nstart, tmp, newflags); in apply_vma_lock_flags()
633 nstart = tmp; in apply_vma_lock_flags()
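
mlock.c uses tmp the same way mprotect.c and madvise.c do below: clamp the current VMA's end to the end of the request, fix up that one chunk, then advance nstart. A reduced version of the chunking loop, with a hypothetical apply_one() standing in for mlock_fixup()/mprotect_fixup():

    #include <stdio.h>

    static void apply_one(unsigned long start, unsigned long end)
    {
            printf("fixup [%lu, %lu)\n", start, end);
    }

    int main(void)
    {
            /* Fake VMA end boundaries covering a request of [5, 25). */
            unsigned long vma_ends[] = { 10, 20, 30 };
            unsigned long nstart = 5, end = 25, tmp;

            for (int i = 0; i < 3 && nstart < end; i++) {
                    tmp = vma_ends[i];
                    if (tmp > end)          /* clamp to the request */
                            tmp = end;
                    apply_one(nstart, tmp); /* one VMA-sized chunk */
                    nstart = tmp;           /* next chunk starts here */
            }
            return 0;
    }
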
mprotect.c:517 unsigned long nstart, end, tmp, reqprot; in do_mprotect_pkey() local
618 tmp = vma->vm_end; in do_mprotect_pkey()
619 if (tmp > end) in do_mprotect_pkey()
620 tmp = end; in do_mprotect_pkey()
621 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
624 nstart = tmp; in do_mprotect_pkey()
debug.c:80 unsigned long tmp = (unsigned long)page->mapping; in __dump_page() local
82 if (tmp & PAGE_MAPPING_ANON) in __dump_page()
85 mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS); in __dump_page()
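
__dump_page() treats page->mapping as a tagged pointer: because the pointed-to structures are word-aligned, the low bits are always zero and can carry type flags (PAGE_MAPPING_ANON and friends), with ~PAGE_MAPPING_FLAGS recovering the real pointer. A standalone illustration; the flag values here are made up but mirror the kernel's low-bit mask:

    #include <stdio.h>
    #include <stdint.h>

    #define MAPPING_ANON  0x1UL         /* tag bits live in the low,       */
    #define MAPPING_FLAGS 0x3UL         /* always-zero bits of the pointer */

    struct mapping { const char *name; };

    int main(void)
    {
            static struct mapping m = { "anon_vma" }; /* aligned: low bits 0 */
            uintptr_t tagged = (uintptr_t)&m | MAPPING_ANON;

            if (tagged & MAPPING_ANON)
                    printf("anonymous mapping\n");
            /* Mask the tag off to get the usable pointer back. */
            struct mapping *real = (void *)(tagged & ~MAPPING_FLAGS);
            printf("recovered: %s\n", real->name);
            return 0;
    }
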
madvise.c:1099 unsigned long end, tmp; in do_madvise() local
1166 tmp = vma->vm_end; in do_madvise()
1167 if (end < tmp) in do_madvise()
1168 tmp = end; in do_madvise()
1171 error = madvise_vma(vma, &prev, start, tmp, behavior); in do_madvise()
1174 start = tmp; in do_madvise()
kmemleak.c:467 struct hlist_node *tmp; in free_object_rcu() local
476 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { in free_object_rcu()
1374 struct kmemleak_object *object, *tmp; in scan_gray_list() local
1389 tmp = list_entry(object->gray_list.next, typeof(*object), in scan_gray_list()
1396 object = tmp; in scan_gray_list()
1854 struct kmemleak_object *object, *tmp; in __kmemleak_do_cleanup() local
1860 list_for_each_entry_safe(object, tmp, &object_list, object_list) { in __kmemleak_do_cleanup()
swapfile.c:618 unsigned long tmp, max; in scan_swap_map_try_ssd_cluster() local
645 tmp = cluster->next; in scan_swap_map_try_ssd_cluster()
648 if (tmp < max) { in scan_swap_map_try_ssd_cluster()
649 ci = lock_cluster(si, tmp); in scan_swap_map_try_ssd_cluster()
650 while (tmp < max) { in scan_swap_map_try_ssd_cluster()
651 if (!si->swap_map[tmp]) in scan_swap_map_try_ssd_cluster()
653 tmp++; in scan_swap_map_try_ssd_cluster()
657 if (tmp >= max) { in scan_swap_map_try_ssd_cluster()
661 cluster->next = tmp + 1; in scan_swap_map_try_ssd_cluster()
662 *offset = tmp; in scan_swap_map_try_ssd_cluster()
[all …]
sparse.c:676 DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 }; in clear_subsection_map()
683 bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION); in clear_subsection_map()
685 if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION), in clear_subsection_map()
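
clear_subsection_map() sanity-checks with the bitmap API: bitmap_and() the bits being cleared against the bits actually set, then bitmap_equal() to verify nothing is being cleared twice. A userspace approximation with single-word bitmaps (the kernel helpers do the same over arbitrary-length arrays):

    #include <stdio.h>

    int main(void)
    {
            unsigned long map = 0x0f;               /* bits we want to clear */
            unsigned long subsection_map = 0x3f;    /* bits currently set    */
            unsigned long tmp = map & subsection_map;   /* ~ bitmap_and()    */

            /* ~ bitmap_equal(): every bit being cleared must be set,
             * otherwise someone is freeing a subsection twice. */
            if (tmp != map) {
                    printf("WARN: clearing bits that were never set\n");
                    return 1;
            }
            subsection_map &= ~map;                 /* the actual clear */
            printf("subsection_map now %#lx\n", subsection_map);
            return 0;
    }
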
khugepaged.c:571 struct page *page, *tmp; in release_pte_pages() local
582 list_for_each_entry_safe(page, tmp, compound_pagelist, lru) { in release_pte_pages()
747 struct page *src_page, *tmp; in __collapse_huge_page_copy() local
790 list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) { in __collapse_huge_page_copy()
1946 struct page *page, *tmp; in collapse_file() local
1953 list_for_each_entry_safe(page, tmp, &pagelist, lru) { in collapse_file()
memory_hotplug.c:1924 uint8_t *online_types, *tmp; in offline_and_remove_memory() local
1949 tmp = online_types; in offline_and_remove_memory()
1950 rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block); in offline_and_remove_memory()
1967 tmp = online_types; in offline_and_remove_memory()
1968 walk_memory_blocks(start, size, &tmp, in offline_and_remove_memory()
dmapool.c:269 struct dma_page *page, *tmp; in dma_pool_destroy() local
285 list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) { in dma_pool_destroy()
hugetlb.c:2015 struct page *page, *tmp; in gather_surplus_pages() local
2074 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { in gather_surplus_pages()
2089 list_for_each_entry_safe(page, tmp, &surplus_list, lru) in gather_surplus_pages()
3460 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common() local
3467 &tmp); in hugetlb_sysctl_handler_common()
3473 NUMA_NO_NODE, tmp, *length); in hugetlb_sysctl_handler_common()
3499 unsigned long tmp; in hugetlb_overcommit_handler() local
3505 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
3511 &tmp); in hugetlb_overcommit_handler()
3517 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
[all …]
memory.c:3361 vm_fault_t tmp; in wp_page_shared() local
3364 tmp = do_page_mkwrite(vmf); in wp_page_shared()
3365 if (unlikely(!tmp || (tmp & in wp_page_shared()
3368 return tmp; in wp_page_shared()
3370 tmp = finish_mkwrite_fault(vmf); in wp_page_shared()
3371 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { in wp_page_shared()
3374 return tmp; in wp_page_shared()
4446 vm_fault_t ret, tmp; in do_shared_fault() local
4458 tmp = do_page_mkwrite(vmf); in do_shared_fault()
4459 if (unlikely(!tmp || in do_shared_fault()
[all …]
page_alloc.c:1442 struct page *page, *tmp; in free_pcppages_bulk() local
1503 list_for_each_entry_safe(page, tmp, &head, lru) { in free_pcppages_bulk()
5682 char tmp[MIGRATE_TYPES + 1]; in show_migration_types() local
5683 char *p = tmp; in show_migration_types()
5692 printk(KERN_CONT "(%s) ", tmp); in show_migration_types()
8146 u64 tmp, low; in __setup_per_zone_wmarks() local
8149 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
8150 do_div(tmp, lowmem_pages); in __setup_per_zone_wmarks()
8173 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
8181 tmp = max_t(u64, tmp >> 2, in __setup_per_zone_wmarks()
[all …]
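
__setup_per_zone_wmarks() gives each zone a share of pages_min proportional to its managed pages: tmp = pages_min * zone_managed / lowmem_pages, widened to u64 before the multiply and divided with do_div() because 32-bit targets lack native 64-bit division. The same proportional split in plain C, with made-up page counts:

    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
            uint64_t pages_min = 11520;         /* global reserve, in pages */
            uint64_t lowmem_pages = 4000000;    /* all managed lowmem pages */
            uint64_t zone_managed[] = { 1000000, 3000000 };

            for (int i = 0; i < 2; i++) {
                    /* Widen before multiplying so the product cannot
                     * overflow; the divide is what do_div(tmp, ...) does. */
                    uint64_t tmp = pages_min * zone_managed[i];
                    tmp /= lowmem_pages;
                    printf("zone %d WMARK_MIN = %" PRIu64 " pages\n", i, tmp);
            }
            return 0;
    }
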
shmem.c:4045 char tmp[16]; in shmem_enabled_store() local
4048 if (count + 1 > sizeof(tmp)) in shmem_enabled_store()
4050 memcpy(tmp, buf, count); in shmem_enabled_store()
4051 tmp[count] = '\0'; in shmem_enabled_store()
4052 if (count && tmp[count - 1] == '\n') in shmem_enabled_store()
4053 tmp[count - 1] = '\0'; in shmem_enabled_store()
4055 huge = shmem_parse_huge(tmp); in shmem_enabled_store()
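
shmem_enabled_store() is the standard sysfs store pattern: bound-check count against a small stack buffer, copy, NUL-terminate, and strip the trailing newline that echo(1) appends before parsing. The same trimming step standalone, with printf standing in for shmem_parse_huge():

    #include <stdio.h>
    #include <string.h>

    /* sysfs hands store() a buffer that is not NUL-terminated and
     * usually ends in '\n'. */
    static int store(const char *buf, size_t count)
    {
            char tmp[16];

            if (count + 1 > sizeof(tmp))
                    return -1;                  /* -EINVAL in the kernel */
            memcpy(tmp, buf, count);
            tmp[count] = '\0';
            if (count && tmp[count - 1] == '\n')
                    tmp[count - 1] = '\0';
            printf("parsed: \"%s\"\n", tmp);    /* shmem_parse_huge(tmp) here */
            return 0;
    }

    int main(void)
    {
            return store("always\n", 7);
    }
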
vmscan.c:188 int tmp; in kswapd_per_node_setup() local
190 if (kstrtoint(str, 0, &tmp) < 0) in kswapd_per_node_setup()
193 if (tmp > MAX_KSWAPD_THREADS || tmp <= 0) in kswapd_per_node_setup()
196 kswapd_threads = tmp; in kswapd_per_node_setup()
slab.c:4113 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; in slabinfo_write() local
4123 tmp = strchr(kbuf, ' '); in slabinfo_write()
4124 if (!tmp) in slabinfo_write()
4126 *tmp = '\0'; in slabinfo_write()
4127 tmp++; in slabinfo_write()
4128 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) in slabinfo_write()
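
slabinfo_write() splits its input in place: strchr() finds the first space, overwrites it with NUL so kbuf holds just the cache name, then sscanf() pulls the three tunables out of the remainder. The same parse in isolation, on a hypothetical input line:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char kbuf[64] = "dentry 120 60 8";
            int limit, batchcount, shared;
            char *tmp = strchr(kbuf, ' ');

            if (!tmp)
                    return 1;                   /* -EINVAL in the kernel */
            *tmp = '\0';                        /* kbuf is now just the name */
            tmp++;
            if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
                    return 1;
            printf("%s: limit=%d batch=%d shared=%d\n",
                   kbuf, limit, batchcount, shared);
            return 0;
    }
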
memcontrol.c:4539 struct mem_cgroup_eventfd_list *ev, *tmp; in mem_cgroup_oom_unregister_event() local
4543 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
5243 int tmp = node; in alloc_mem_cgroup_per_node_info() local
5253 tmp = -1; in alloc_mem_cgroup_per_node_info()
5254 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); in alloc_mem_cgroup_per_node_info()
5479 struct mem_cgroup_event *event, *tmp; in mem_cgroup_css_offline() local
5488 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
zsmalloc.c:2178 struct zspage *zspage, *tmp; in async_free_zspage() local
2194 list_for_each_entry_safe(zspage, tmp, &free_pages, list) { in async_free_zspage()