
Searched refs:tmp (Results 1 – 11 of 11) sorted by relevance

/mm/
vmalloc.c
282 struct rb_node *tmp; in __insert_vmap_area() local
285 struct vmap_area *tmp; in __insert_vmap_area() local
288 tmp = rb_entry(parent, struct vmap_area, rb_node); in __insert_vmap_area()
289 if (va->va_start < tmp->va_end) in __insert_vmap_area()
291 else if (va->va_end > tmp->va_start) in __insert_vmap_area()
301 tmp = rb_prev(&va->rb_node); in __insert_vmap_area()
302 if (tmp) { in __insert_vmap_area()
304 prev = rb_entry(tmp, struct vmap_area, rb_node); in __insert_vmap_area()
347 struct vmap_area *tmp; in alloc_vmap_area() local
348 tmp = rb_entry(n, struct vmap_area, rb_node); in alloc_vmap_area()
[all …]
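
The vmalloc.c hits are the red-black-tree bookkeeping for vmap areas: the inner tmp is the cursor while descending the tree, and the outer tmp is reused afterwards to find the predecessor node. Below is a rough sketch of the __insert_vmap_area() shape, assuming the file-local struct vmap_area fields (va_start, va_end, rb_node, list) used in mm/vmalloc.c; it is a reconstruction from the hits above, not the verbatim source.

#include <linux/rbtree.h>
#include <linux/rculist.h>
#include <linux/kernel.h>

static void insert_vmap_area_sketch(struct rb_root *root, struct list_head *head,
				    struct vmap_area *va)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;		/* shadows the outer tmp during the walk */

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;	/* new range sorts before this node */
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;	/* new range sorts after this node */
		else
			BUG();			/* overlapping areas must never be inserted */
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, root);

	/* Reuse the outer tmp to keep an address-sorted list alongside the tree. */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, head);
}

The alloc_vmap_area() hits (347-348) are the matching lookup side: the same rb_entry() cast on a cursor node while searching the tree for a free range.
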
mincore.c
185 unsigned char *tmp; in SYSCALL_DEFINE3() local
202 tmp = (void *) __get_free_page(GFP_USER); in SYSCALL_DEFINE3()
203 if (!tmp) in SYSCALL_DEFINE3()
213 retval = do_mincore(start, tmp, min(pages, PAGE_SIZE)); in SYSCALL_DEFINE3()
218 if (copy_to_user(vec, tmp, retval)) { in SYSCALL_DEFINE3()
227 free_page((unsigned long) tmp); in SYSCALL_DEFINE3()
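
The mincore.c hits use tmp as a one-page kernel staging buffer: do_mincore() fills it with one residency byte per page, the chunk is copied out with copy_to_user(), and the page is freed at the end. A simplified sketch of that allocate/fill/copy/free loop (the real sys_mincore() also validates the arguments before this point):

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static long mincore_loop_sketch(unsigned long start, unsigned long pages,
				unsigned char __user *vec)
{
	unsigned char *tmp;
	long retval = 0;

	tmp = (void *) __get_free_page(GFP_USER);	/* kernel-side scratch page */
	if (!tmp)
		return -EAGAIN;

	while (pages) {
		/* do_mincore() (static helper in mm/mincore.c) reports on at most
		 * one page's worth of result bytes per iteration. */
		retval = do_mincore(start, tmp, min(pages, PAGE_SIZE));
		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {	/* hand this chunk to user space */
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}

	free_page((unsigned long) tmp);		/* scratch page is never exposed to the caller */
	return retval;
}
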
madvise.c
286 unsigned long end, tmp; in SYSCALL_DEFINE3() local
339 tmp = vma->vm_end; in SYSCALL_DEFINE3()
340 if (end < tmp) in SYSCALL_DEFINE3()
341 tmp = end; in SYSCALL_DEFINE3()
344 error = madvise_vma(vma, &prev, start, tmp, behavior); in SYSCALL_DEFINE3()
347 start = tmp; in SYSCALL_DEFINE3()
mprotect.c
224 unsigned long vm_flags, nstart, end, tmp, reqprot; in SYSCALL_DEFINE3() local
296 tmp = vma->vm_end; in SYSCALL_DEFINE3()
297 if (tmp > end) in SYSCALL_DEFINE3()
298 tmp = end; in SYSCALL_DEFINE3()
299 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); in SYSCALL_DEFINE3()
302 nstart = tmp; in SYSCALL_DEFINE3()
mempolicy.c
153 nodemask_t tmp; in mpol_relative_nodemask() local
154 nodes_fold(tmp, *orig, nodes_weight(*rel)); in mpol_relative_nodemask()
155 nodes_onto(*ret, tmp, *rel); in mpol_relative_nodemask()
266 nodemask_t tmp; in mpol_rebind_nodemask() local
269 nodes_and(tmp, pol->w.user_nodemask, *nodes); in mpol_rebind_nodemask()
271 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); in mpol_rebind_nodemask()
273 nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed, in mpol_rebind_nodemask()
278 pol->v.nodes = tmp; in mpol_rebind_nodemask()
279 if (!node_isset(current->il_next, tmp)) { in mpol_rebind_nodemask()
280 current->il_next = next_node(current->il_next, tmp); in mpol_rebind_nodemask()
[all …]
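
The mempolicy.c hits all use an on-stack nodemask_t tmp so a remapped node set is computed in full before it is committed to the policy (or compared against current->il_next). A rough sketch of the relative-nodemask helper visible at lines 153-155:

#include <linux/nodemask.h>

static void relative_nodemask_sketch(nodemask_t *ret, const nodemask_t *orig,
				     const nodemask_t *rel)
{
	nodemask_t tmp;		/* scratch mask, never exposed half-built */

	/* Fold the original mask down to the weight of the relative mask,
	 * then map the folded bits onto the nodes actually present in rel. */
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

The rebind hits (266-280) follow the same idea: build tmp with nodes_and()/nodes_remap(), then assign it to pol->v.nodes in a single step.
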
mlock.c
446 unsigned long nstart, end, tmp; in do_mlock() local
472 tmp = vma->vm_end; in do_mlock()
473 if (tmp > end) in do_mlock()
474 tmp = end; in do_mlock()
475 error = mlock_fixup(vma, &prev, nstart, tmp, newflags); in do_mlock()
478 nstart = tmp; in do_mlock()
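
The madvise.c, mprotect.c, and mlock.c hits are one shared idiom: tmp holds the end of the current VMA, clamped to the requested end, so each per-VMA helper (madvise_vma(), mprotect_fixup(), mlock_fixup()) never crosses a VMA boundary, and the loop then resumes at start = tmp. A generic sketch of that loop shape, with a hypothetical fixup() callback standing in for the three real helpers and with the hole/error checks of the real loops left out:

#include <linux/mm.h>

static int walk_range_sketch(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end,
			     int (*fixup)(struct vm_area_struct *vma,
					  struct vm_area_struct **prev,
					  unsigned long start, unsigned long end))
{
	struct vm_area_struct *prev = vma;
	unsigned long tmp;
	int error = 0;

	while (vma && vma->vm_start < end) {
		tmp = vma->vm_end;	/* end of this VMA ... */
		if (tmp > end)
			tmp = end;	/* ... clamped to the requested range */

		error = fixup(vma, &prev, start, tmp);
		if (error)
			break;

		start = tmp;		/* next iteration picks up where this VMA stopped */
		vma = prev->vm_next;	/* fixup may have split or merged VMAs */
	}
	return error;
}
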
hugetlb.c
777 struct page *page, *tmp; in gather_surplus_pages() local
833 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { in gather_surplus_pages()
843 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { in gather_surplus_pages()
1484 unsigned long tmp; in hugetlb_sysctl_handler() local
1487 tmp = h->max_huge_pages; in hugetlb_sysctl_handler()
1489 table->data = &tmp; in hugetlb_sysctl_handler()
1494 h->max_huge_pages = set_max_huge_pages(h, tmp); in hugetlb_sysctl_handler()
1516 unsigned long tmp; in hugetlb_overcommit_handler() local
1519 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
1521 table->data = &tmp; in hugetlb_overcommit_handler()
[all …]
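
In the gather_surplus_pages() hits the second cursor tmp exists purely so the surplus list can be modified while it is being walked: list_for_each_entry_safe() caches the next entry in tmp before the body unlinks the current one. A small sketch of that idiom, with a hypothetical disposal step standing in for what the real code does with each surplus page:

#include <linux/list.h>
#include <linux/mm.h>

static void drain_surplus_sketch(struct list_head *surplus_list)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, surplus_list, lru) {
		list_del(&page->lru);	/* safe: tmp already points past this entry */
		put_page(page);		/* hypothetical disposal; the real code either
					 * hands the page to the hstate or frees it */
	}
}

The sysctl handler hits use tmp differently: the current count is copied to a stack variable, table->data is pointed at it for proc parsing, and only the validated result is written back through set_max_huge_pages().
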
mmap.c
340 struct vm_area_struct *tmp = mm->mmap; in validate_mm() local
341 while (tmp) { in validate_mm()
342 tmp = tmp->vm_next; in validate_mm()
1936 struct vm_area_struct *tmp = vma; in do_munmap() local
1937 while (tmp && tmp->vm_start < end) { in do_munmap()
1938 if (tmp->vm_flags & VM_LOCKED) { in do_munmap()
1939 mm->locked_vm -= vma_pages(tmp); in do_munmap()
1940 munlock_vma_pages_all(tmp); in do_munmap()
1942 tmp = tmp->vm_next; in do_munmap()
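
The mmap.c hits use tmp as a plain VMA cursor: validate_mm() walks the whole list to count it, and do_munmap() walks only the VMAs inside the range being unmapped so locked pages can be munlocked before the VMAs are detached. A sketch of the do_munmap() fragment (munlock_vma_pages_all() is an mm-internal helper declared in mm/internal.h):

#include <linux/mm.h>
#include "internal.h"		/* munlock_vma_pages_all(); only buildable inside mm/ */

static void munlock_range_sketch(struct mm_struct *mm,
				 struct vm_area_struct *vma, unsigned long end)
{
	struct vm_area_struct *tmp = vma;

	while (tmp && tmp->vm_start < end) {
		if (tmp->vm_flags & VM_LOCKED) {
			mm->locked_vm -= vma_pages(tmp);	/* drop the locked-page accounting */
			munlock_vma_pages_all(tmp);		/* clear the mlocked state on its pages */
		}
		tmp = tmp->vm_next;
	}
}
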
memcontrol.c
597 struct page_cgroup *pc, *tmp; in mem_cgroup_isolate_pages() local
608 list_for_each_entry_safe_reverse(pc, tmp, src, lru) { in mem_cgroup_isolate_pages()
1847 unsigned long long min_limit, min_memsw_limit, tmp; in memcg_get_hierarchical_limit() local
1860 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); in memcg_get_hierarchical_limit()
1861 min_limit = min(min_limit, tmp); in memcg_get_hierarchical_limit()
1862 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); in memcg_get_hierarchical_limit()
1863 min_memsw_limit = min(min_memsw_limit, tmp); in memcg_get_hierarchical_limit()
2114 int zone, tmp = node; in alloc_mem_cgroup_per_zone_info() local
2124 tmp = -1; in alloc_mem_cgroup_per_zone_info()
2125 pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp); in alloc_mem_cgroup_per_zone_info()
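
The last memcontrol.c hits reuse tmp as a NUMA node id: it starts as the requested node and falls back to -1 ("any node") before the kmalloc_node() call, which is my reading of the tmp = -1 assignment at line 2124. A sketch of that fallback; the exact condition guarding the fallback is an assumption, not something visible in the hits:

#include <linux/slab.h>
#include <linux/nodemask.h>

static void *alloc_on_node_sketch(size_t size, int node)
{
	int tmp = node;

	if (!node_state(node, N_NORMAL_MEMORY))	/* assumed check: node has no usable memory */
		tmp = -1;				/* let the allocator pick a node */

	return kmalloc_node(size, GFP_KERNEL, tmp);
}
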
page_alloc.c
2138 node_to_cpumask_ptr(tmp, 0); in find_next_best_node()
2159 node_to_cpumask_ptr_next(tmp, n); in find_next_best_node()
2160 if (!cpus_empty(*tmp)) in find_next_best_node()
4307 u64 tmp; in setup_per_zone_pages_min() local
4310 tmp = (u64)pages_min * zone->present_pages; in setup_per_zone_pages_min()
4311 do_div(tmp, lowmem_pages); in setup_per_zone_pages_min()
4335 zone->pages_min = tmp; in setup_per_zone_pages_min()
4338 zone->pages_low = zone->pages_min + (tmp >> 2); in setup_per_zone_pages_min()
4339 zone->pages_high = zone->pages_min + (tmp >> 1); in setup_per_zone_pages_min()
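
In setup_per_zone_pages_min() tmp carries this zone's share of the global pages_min: pages_min scaled by the zone's fraction of low memory, done as a 64-bit multiply plus do_div() to avoid overflow; pages_low and pages_high are then 25% and 50% above that share. A sketch of just that arithmetic, assuming the pre-watermark-array struct zone fields shown in the hits:

#include <linux/mmzone.h>
#include <linux/types.h>
#include <asm/div64.h>

static void zone_watermarks_sketch(struct zone *zone,
				   unsigned long pages_min,
				   unsigned long lowmem_pages)
{
	u64 tmp;

	tmp = (u64)pages_min * zone->present_pages;
	do_div(tmp, lowmem_pages);	/* tmp = this zone's proportional pages_min */

	zone->pages_min  = tmp;
	zone->pages_low  = zone->pages_min + (tmp >> 2);	/* +25% */
	zone->pages_high = zone->pages_min + (tmp >> 1);	/* +50% */
}

The find_next_best_node() hits (2138-2160) are unrelated: there tmp is a per-node cpumask pointer from node_to_cpumask_ptr(), consulted to see whether a candidate node has any CPUs.
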
slab.c
2166 char tmp; in kmem_cache_create() local
2174 res = probe_kernel_address(pc->name, tmp); in kmem_cache_create()
4217 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; in slabinfo_write() local
4227 tmp = strchr(kbuf, ' '); in slabinfo_write()
4228 if (!tmp) in slabinfo_write()
4230 *tmp = '\0'; in slabinfo_write()
4231 tmp++; in slabinfo_write()
4232 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) in slabinfo_write()
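
The slabinfo_write() hits use tmp to split a /proc/slabinfo write of the form "cache-name limit batchcount shared": find the first space, terminate the name there, and sscanf() the three tunables from the remainder. A sketch of just that parsing step (the kmem_cache_create() hit at 2166 is separate: there a one-byte tmp is only the target of probe_kernel_address(), checking that an existing cache's name pointer is still readable):

#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static int parse_slabinfo_line_sketch(char *kbuf, char **name,
				      int *limit, int *batchcount, int *shared)
{
	char *tmp;

	tmp = strchr(kbuf, ' ');	/* first space separates the name from the tunables */
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';			/* terminate the cache name in place */
	tmp++;

	*name = kbuf;
	if (sscanf(tmp, " %d %d %d", limit, batchcount, shared) != 3)
		return -EINVAL;

	return 0;
}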