
Searched refs:tmp (Results 1 – 22 of 22) sorted by relevance

/mm/

cma_debug.c
166 struct dentry *tmp; in cma_debugfs_add_one() local
172 tmp = debugfs_create_dir(name, cma_debugfs_root); in cma_debugfs_add_one()
174 debugfs_create_file("alloc", S_IWUSR, tmp, cma, in cma_debugfs_add_one()
177 debugfs_create_file("free", S_IWUSR, tmp, cma, in cma_debugfs_add_one()
180 debugfs_create_file("base_pfn", S_IRUGO, tmp, in cma_debugfs_add_one()
182 debugfs_create_file("count", S_IRUGO, tmp, in cma_debugfs_add_one()
184 debugfs_create_file("order_per_bit", S_IRUGO, tmp, in cma_debugfs_add_one()
186 debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops); in cma_debugfs_add_one()
187 debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops); in cma_debugfs_add_one()
190 debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32*)cma->bitmap, u32s); in cma_debugfs_add_one()
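
The cma_debug.c hits above show the standard debugfs idiom: tmp holds the directory dentry returned by debugfs_create_dir(), and each debugfs_create_file() call hangs a file off it, bound to a private data pointer and a file_operations table. A minimal sketch of that pattern; my_subsys, my_value, my_get, and my_fops are hypothetical names, not part of the source above:

#include <linux/debugfs.h>
#include <linux/fs.h>

static u32 my_value;	/* hypothetical backing datum */

static int my_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(my_fops, my_get, NULL, "%llu\n");

static void my_debugfs_init(void)
{
	/* create the directory once, then hang read-only files off it */
	struct dentry *tmp = debugfs_create_dir("my_subsys", NULL);

	debugfs_create_file("value", S_IRUGO, tmp, &my_value, &my_fops);
}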

mincore.c
227 unsigned char *tmp; in SYSCALL_DEFINE3() local
244 tmp = (void *) __get_free_page(GFP_USER); in SYSCALL_DEFINE3()
245 if (!tmp) in SYSCALL_DEFINE3()
255 retval = do_mincore(start, min(pages, PAGE_SIZE), tmp); in SYSCALL_DEFINE3()
260 if (copy_to_user(vec, tmp, retval)) { in SYSCALL_DEFINE3()
269 free_page((unsigned long) tmp); in SYSCALL_DEFINE3()
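
The mincore.c hits show the bounce-buffer idiom: allocate one free page as kernel scratch space, fill it, copy_to_user() the result, and free the page on every exit path. A sketch under that reading; fill_status() is a hypothetical stand-in for do_mincore():

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

extern long fill_status(unsigned char *buf, unsigned long n);	/* hypothetical */

static long copy_status_to_user(unsigned char __user *vec, unsigned long pages)
{
	unsigned char *tmp = (void *)__get_free_page(GFP_USER);
	long ret;

	if (!tmp)
		return -ENOMEM;
	/* process at most one page worth of results per pass */
	ret = fill_status(tmp, min(pages, PAGE_SIZE));
	if (ret > 0 && copy_to_user(vec, tmp, ret))
		ret = -EFAULT;
	free_page((unsigned long)tmp);	/* freed on every exit path */
	return ret;
}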

vmalloc.c
327 struct rb_node *tmp; in __insert_vmap_area() local
346 tmp = rb_prev(&va->rb_node); in __insert_vmap_area()
347 if (tmp) { in __insert_vmap_area()
349 prev = rb_entry(tmp, struct vmap_area, rb_node); in __insert_vmap_area()
432 struct vmap_area *tmp; in alloc_vmap_area() local
433 tmp = rb_entry(n, struct vmap_area, rb_node); in alloc_vmap_area()
434 if (tmp->va_end >= addr) { in alloc_vmap_area()
435 first = tmp; in alloc_vmap_area()
436 if (tmp->va_start <= addr) in alloc_vmap_area()
912 struct vmap_block *tmp; in free_vmap_block() local
[all …]
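
In the vmalloc.c hits, tmp is an rb-tree cursor: alloc_vmap_area() walks the vmap_area tree for the first area whose va_end covers the target address. A sketch that paraphrases that descent (the real function keeps more state around the loop):

#include <linux/rbtree.h>
#include <linux/vmalloc.h>

static struct vmap_area *first_area_covering(struct rb_root *root,
					     unsigned long addr)
{
	struct rb_node *n = root->rb_node;
	struct vmap_area *first = NULL;

	while (n) {
		struct vmap_area *tmp = rb_entry(n, struct vmap_area, rb_node);

		if (tmp->va_end >= addr) {
			first = tmp;	/* candidate; try to tighten it */
			if (tmp->va_start <= addr)
				break;	/* addr falls inside this area */
			n = n->rb_left;
		} else {
			n = n->rb_right;
		}
	}
	return first;
}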

mempolicy.c
172 nodemask_t tmp; in mpol_relative_nodemask() local
173 nodes_fold(tmp, *orig, nodes_weight(*rel)); in mpol_relative_nodemask()
174 nodes_onto(*ret, tmp, *rel); in mpol_relative_nodemask()
316 nodemask_t tmp; in mpol_rebind_nodemask() local
319 nodes_and(tmp, pol->w.user_nodemask, *nodes); in mpol_rebind_nodemask()
321 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); in mpol_rebind_nodemask()
328 nodes_remap(tmp, pol->v.nodes, in mpol_rebind_nodemask()
330 pol->w.cpuset_mems_allowed = step ? tmp : *nodes; in mpol_rebind_nodemask()
332 tmp = pol->w.cpuset_mems_allowed; in mpol_rebind_nodemask()
338 if (nodes_empty(tmp)) in mpol_rebind_nodemask()
[all …]
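
The mempolicy.c hits use tmp as an on-stack nodemask_t scratch: the new mask is computed into tmp with nodes_and()/nodes_remap() and only then published, so readers never see a half-built mask. A minimal sketch of that compute-then-publish shape; remap_nodes() itself is hypothetical:

#include <linux/nodemask.h>

static void remap_nodes(nodemask_t *out, const nodemask_t *user,
			const nodemask_t *allowed)
{
	nodemask_t tmp;

	nodes_and(tmp, *user, *allowed);	/* compute into scratch */
	if (nodes_empty(tmp))
		tmp = *allowed;			/* fall back rather than go empty */
	*out = tmp;				/* publish in one store */
}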

mmap.c
2133 struct vm_area_struct *tmp; in find_vma() local
2135 tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); in find_vma()
2137 if (tmp->vm_end > addr) { in find_vma()
2138 vma = tmp; in find_vma()
2139 if (tmp->vm_start <= addr) in find_vma()
2690 struct vm_area_struct *tmp = vma; in do_munmap() local
2691 while (tmp && tmp->vm_start < end) { in do_munmap()
2692 if (tmp->vm_flags & VM_LOCKED) { in do_munmap()
2693 mm->locked_vm -= vma_pages(tmp); in do_munmap()
2694 munlock_vma_pages_all(tmp); in do_munmap()
[all …]

mprotect.c
364 unsigned long nstart, end, tmp, reqprot; in do_mprotect_pkey() local
457 tmp = vma->vm_end; in do_mprotect_pkey()
458 if (tmp > end) in do_mprotect_pkey()
459 tmp = end; in do_mprotect_pkey()
460 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
463 nstart = tmp; in do_mprotect_pkey()

mlock.c
580 unsigned long nstart, end, tmp; in apply_vma_lock_flags() local
605 tmp = vma->vm_end; in apply_vma_lock_flags()
606 if (tmp > end) in apply_vma_lock_flags()
607 tmp = end; in apply_vma_lock_flags()
608 error = mlock_fixup(vma, &prev, nstart, tmp, newflags); in apply_vma_lock_flags()
611 nstart = tmp; in apply_vma_lock_flags()

madvise.c
685 unsigned long end, tmp; in SYSCALL_DEFINE3() local
749 tmp = vma->vm_end; in SYSCALL_DEFINE3()
750 if (end < tmp) in SYSCALL_DEFINE3()
751 tmp = end; in SYSCALL_DEFINE3()
754 error = madvise_vma(vma, &prev, start, tmp, behavior); in SYSCALL_DEFINE3()
757 start = tmp; in SYSCALL_DEFINE3()
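
The mprotect.c, mlock.c, and madvise.c entries above are three instances of one VMA-walking idiom: clamp tmp to the smaller of the current VMA's end and the requested end, fix up that span, then advance the start to tmp and move on. A generic sketch of the shared loop; fixup_one() is a hypothetical stand-in for mprotect_fixup(), mlock_fixup(), or madvise_vma():

#include <linux/mm_types.h>

extern int fixup_one(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end);	/* hypothetical */

static int walk_range(struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	unsigned long nstart = start, tmp;
	int error = 0;

	while (vma && nstart < end) {
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;	/* clamp to the caller's range */
		error = fixup_one(vma, nstart, tmp);
		if (error)
			break;
		nstart = tmp;		/* advance past the span just fixed */
		vma = vma->vm_next;
	}
	return error;
}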

kmemleak-test.c
100 struct test_node *elem, *tmp; in kmemleak_test_exit() local
106 list_for_each_entry_safe(elem, tmp, &test_list, list) in kmemleak_test_exit()

balloon_compaction.c
59 struct page *page, *tmp; in balloon_page_dequeue() local
65 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_dequeue()
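
Here (and again in the hugetlb.c, memcontrol.c, khugepaged.c, and zsmalloc.c hits below) tmp is the lookahead cursor of list_for_each_entry_safe(): it caches the next element up front, so the body may unlink and free the current one without breaking the walk. A self-contained sketch with a hypothetical element type:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
};

static void drain_list(struct list_head *head)
{
	struct item *elem, *tmp;

	/* tmp already points at the next node, so deleting elem is safe */
	list_for_each_entry_safe(elem, tmp, head, list) {
		list_del(&elem->list);
		kfree(elem);
	}
}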

page_alloc.c
4275 char tmp[MIGRATE_TYPES + 1]; in show_migration_types() local
4276 char *p = tmp; in show_migration_types()
4285 printk(KERN_CONT "(%s) ", tmp); in show_migration_types()
4653 const struct cpumask *tmp = cpumask_of_node(0); in find_next_best_node() local
4674 tmp = cpumask_of_node(n); in find_next_best_node()
4675 if (!cpumask_empty(tmp)) in find_next_best_node()
5054 struct memblock_region *r = NULL, *tmp; in memmap_init_zone() local
5090 for_each_memblock(memory, tmp) in memmap_init_zone()
5091 if (pfn < memblock_region_memory_end_pfn(tmp)) in memmap_init_zone()
5093 r = tmp; in memmap_init_zone()
[all …]

kmemleak.c
445 struct hlist_node *tmp; in free_object_rcu() local
454 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { in free_object_rcu()
1349 struct kmemleak_object *object, *tmp; in scan_gray_list() local
1364 tmp = list_entry(object->gray_list.next, typeof(*object), in scan_gray_list()
1371 object = tmp; in scan_gray_list()

swapfile.c
451 unsigned long tmp; in scan_swap_map_try_ssd_cluster() local
478 tmp = cluster->next; in scan_swap_map_try_ssd_cluster()
479 while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) * in scan_swap_map_try_ssd_cluster()
481 if (!si->swap_map[tmp]) { in scan_swap_map_try_ssd_cluster()
485 tmp++; in scan_swap_map_try_ssd_cluster()
491 cluster->next = tmp + 1; in scan_swap_map_try_ssd_cluster()
492 *offset = tmp; in scan_swap_map_try_ssd_cluster()
493 *scan_base = tmp; in scan_swap_map_try_ssd_cluster()
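
In the swapfile.c hit, tmp is a linear probe within one SSD cluster: it walks swap_map usage counts forward until it finds a zero (free) slot, then the cluster's next pointer and the caller's offset/scan_base are all advanced to it. A simplified sketch of just the probe, ignoring the cluster-boundary math of the real loop:

#include <linux/types.h>

static bool probe_cluster(unsigned char *swap_map, unsigned long start,
			  unsigned long max, unsigned long *offset)
{
	unsigned long tmp = start;

	while (tmp < max) {
		if (!swap_map[tmp]) {	/* usage count 0: slot is free */
			*offset = tmp;
			return true;
		}
		tmp++;
	}
	return false;
}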

hugetlb.c
1696 struct page *page, *tmp; in gather_surplus_pages() local
1753 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { in gather_surplus_pages()
1768 list_for_each_entry_safe(page, tmp, &surplus_list, lru) in gather_surplus_pages()
2917 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common() local
2923 table->data = &tmp; in hugetlb_sysctl_handler_common()
2931 NUMA_NO_NODE, tmp, *length); in hugetlb_sysctl_handler_common()
2958 unsigned long tmp; in hugetlb_overcommit_handler() local
2964 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
2969 table->data = &tmp; in hugetlb_overcommit_handler()
2977 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()

memory.c
2317 int tmp; in wp_page_shared() local
2320 tmp = do_page_mkwrite(vma, old_page, fe->address); in wp_page_shared()
2321 if (unlikely(!tmp || (tmp & in wp_page_shared()
2324 return tmp; in wp_page_shared()
3240 int ret, tmp; in do_shared_fault() local
3252 tmp = do_page_mkwrite(vma, fault_page, fe->address); in do_shared_fault()
3253 if (unlikely(!tmp || in do_shared_fault()
3254 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { in do_shared_fault()
3256 return tmp; in do_shared_fault()

shmem.c
3865 char tmp[16]; in shmem_enabled_store() local
3868 if (count + 1 > sizeof(tmp)) in shmem_enabled_store()
3870 memcpy(tmp, buf, count); in shmem_enabled_store()
3871 tmp[count] = '\0'; in shmem_enabled_store()
3872 if (count && tmp[count - 1] == '\n') in shmem_enabled_store()
3873 tmp[count - 1] = '\0'; in shmem_enabled_store()
3875 huge = shmem_parse_huge(tmp); in shmem_enabled_store()
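
The shmem.c hit is the usual sysfs store() hygiene: bound the copy by the on-stack buffer, NUL-terminate (sysfs input is not terminated), and strip a trailing newline before parsing. A sketch of the same steps; parse_mode() is a hypothetical stand-in for shmem_parse_huge():

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

extern int parse_mode(const char *s);	/* hypothetical */

static ssize_t mode_store(const char *buf, size_t count)
{
	char tmp[16];

	if (count + 1 > sizeof(tmp))
		return -EINVAL;		/* refuse input that cannot fit */
	memcpy(tmp, buf, count);
	tmp[count] = '\0';		/* sysfs input is not NUL-terminated */
	if (count && tmp[count - 1] == '\n')
		tmp[count - 1] = '\0';	/* tolerate echo's trailing newline */
	return parse_mode(tmp) < 0 ? -EINVAL : count;
}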

slab_common.c
82 char tmp; in kmem_cache_sanity_check() local
90 res = probe_kernel_address(s->name, tmp); in kmem_cache_sanity_check()

memcontrol.c
3565 struct mem_cgroup_eventfd_list *ev, *tmp; in mem_cgroup_oom_unregister_event() local
3569 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
4118 int tmp = node; in alloc_mem_cgroup_per_node_info() local
4128 tmp = -1; in alloc_mem_cgroup_per_node_info()
4129 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); in alloc_mem_cgroup_per_node_info()
4288 struct mem_cgroup_event *event, *tmp; in mem_cgroup_css_offline() local
4296 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()

slab.c
4215 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; in slabinfo_write() local
4225 tmp = strchr(kbuf, ' '); in slabinfo_write()
4226 if (!tmp) in slabinfo_write()
4228 *tmp = '\0'; in slabinfo_write()
4229 tmp++; in slabinfo_write()
4230 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) in slabinfo_write()
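
The slab.c hit parses "name limit batchcount shared" in two steps: strchr() finds the first space, the name is terminated in place, and sscanf() pulls the three integers from the tail. A sketch of just that split-and-scan:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int parse_tune_line(char *kbuf, int *limit, int *batch, int *shared)
{
	char *tmp = strchr(kbuf, ' ');

	if (!tmp)
		return -EINVAL;
	*tmp = '\0';		/* kbuf now holds only the cache name */
	tmp++;
	if (sscanf(tmp, " %d %d %d", limit, batch, shared) != 3)
		return -EINVAL;
	return 0;
}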

khugepaged.c
1306 struct page *page, *new_page, *tmp; in collapse_shmem() local
1485 list_for_each_entry_safe(page, tmp, &pagelist, lru) { in collapse_shmem()

slub.c
324 struct page tmp; in set_page_slub_counters() local
325 tmp.counters = counters_new; in set_page_slub_counters()
332 page->frozen = tmp.frozen; in set_page_slub_counters()
333 page->inuse = tmp.inuse; in set_page_slub_counters()
334 page->objects = tmp.objects; in set_page_slub_counters()
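
In the slub.c hit, tmp is a throwaway struct page used purely as a decoder: storing the packed word into tmp.counters lets the overlapping bitfields of the union (frozen, inuse, objects) be read back by name. A sketch mirroring that hit:

#include <linux/mm_types.h>

static void unpack_counters(struct page *page, unsigned long counters_new)
{
	struct page tmp;

	/* counters overlays the three bitfields below in one union */
	tmp.counters = counters_new;
	page->frozen  = tmp.frozen;
	page->inuse   = tmp.inuse;
	page->objects = tmp.objects;
}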

zsmalloc.c
2190 struct zspage *zspage, *tmp; in async_free_zspage() local
2206 list_for_each_entry_safe(zspage, tmp, &free_pages, list) { in async_free_zspage()