/mm/

D | vmalloc.c
    310 struct rb_node *tmp;  in __insert_vmap_area() local
    329 tmp = rb_prev(&va->rb_node);  in __insert_vmap_area()
    330 if (tmp) {  in __insert_vmap_area()
    332 prev = rb_entry(tmp, struct vmap_area, rb_node);  in __insert_vmap_area()
    411 struct vmap_area *tmp;  in alloc_vmap_area() local
    412 tmp = rb_entry(n, struct vmap_area, rb_node);  in alloc_vmap_area()
    413 if (tmp->va_end >= addr) {  in alloc_vmap_area()
    414 first = tmp;  in alloc_vmap_area()
    415 if (tmp->va_start <= addr)  in alloc_vmap_area()
    850 struct vmap_block *tmp;  in free_vmap_block() local
    [all …]

D | mincore.c
    273 unsigned char *tmp;  in SYSCALL_DEFINE3() local
    290 tmp = (void *) __get_free_page(GFP_USER);  in SYSCALL_DEFINE3()
    291 if (!tmp)  in SYSCALL_DEFINE3()
    301 retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);  in SYSCALL_DEFINE3()
    306 if (copy_to_user(vec, tmp, retval)) {  in SYSCALL_DEFINE3()
    315 free_page((unsigned long) tmp);  in SYSCALL_DEFINE3()

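The mincore() hits above are a bounce-buffer pattern: one scratch page (tmp) is filled by do_mincore() and copied out to user space chunk by chunk, rather than allocating a buffer sized to the whole vector. A rough userspace sketch of the same idea; the fill_status() helper, the CHUNK size and the fake status bits are invented for illustration, not kernel API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define CHUNK 4096  /* stand-in for PAGE_SIZE */

    /* Illustrative stand-in for do_mincore(): one status byte per page. */
    static size_t fill_status(unsigned char *buf, size_t pages, size_t done)
    {
        for (size_t i = 0; i < pages; i++)
            buf[i] = (unsigned char)((done + i) & 1);   /* fake "resident" bits */
        return pages;
    }

    int main(void)
    {
        size_t total_pages = 10000;
        unsigned char *vec = malloc(total_pages);        /* the "user" vector */
        unsigned char *tmp = malloc(CHUNK);              /* one scratch chunk */
        if (!vec || !tmp)
            return 1;

        size_t done = 0;
        while (done < total_pages) {
            size_t pages = total_pages - done;
            if (pages > CHUNK)
                pages = CHUNK;                           /* min(pages, CHUNK) */
            size_t got = fill_status(tmp, pages, done);
            memcpy(vec + done, tmp, got);                /* copy_to_user() analogue */
            done += got;
        }

        printf("filled %zu page-status bytes via a %d-byte bounce buffer\n",
               done, CHUNK);
        free(tmp);
        free(vec);
        return 0;
    }

The fixed-size scratch buffer keeps the per-call allocation bounded no matter how large a range the caller asks about.
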
D | mempolicy.c
    179 nodemask_t tmp;  in mpol_relative_nodemask() local
    180 nodes_fold(tmp, *orig, nodes_weight(*rel));  in mpol_relative_nodemask()
    181 nodes_onto(*ret, tmp, *rel);  in mpol_relative_nodemask()
    323 nodemask_t tmp;  in mpol_rebind_nodemask() local
    326 nodes_and(tmp, pol->w.user_nodemask, *nodes);  in mpol_rebind_nodemask()
    328 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);  in mpol_rebind_nodemask()
    335 nodes_remap(tmp, pol->v.nodes,  in mpol_rebind_nodemask()
    337 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;  in mpol_rebind_nodemask()
    339 tmp = pol->w.cpuset_mems_allowed;  in mpol_rebind_nodemask()
    345 if (nodes_empty(tmp))  in mpol_rebind_nodemask()
    [all …]

D | madvise.c
    464 unsigned long end, tmp;  in SYSCALL_DEFINE3() local
    526 tmp = vma->vm_end;  in SYSCALL_DEFINE3()
    527 if (end < tmp)  in SYSCALL_DEFINE3()
    528 tmp = end;  in SYSCALL_DEFINE3()
    531 error = madvise_vma(vma, &prev, start, tmp, behavior);  in SYSCALL_DEFINE3()
    534 start = tmp;  in SYSCALL_DEFINE3()

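madvise(), and likewise mprotect() and mlock() below, walk the requested [start, end) range one VMA at a time, with tmp clamping each step to whichever comes first: the VMA end or the overall end. A sketch of that clamp-and-advance loop over plain userspace structs (struct region and apply_to_region() are made-up names, not kernel API):

    #include <stdio.h>

    /* Illustrative stand-in for a VMA: a half-open [start, end) region. */
    struct region {
        unsigned long start;
        unsigned long end;
    };

    static int apply_to_region(const struct region *r,
                               unsigned long start, unsigned long end)
    {
        printf("applying to [%lu, %lu) within region [%lu, %lu)\n",
               start, end, r->start, r->end);
        return 0;
    }

    int main(void)
    {
        struct region regions[] = { {0, 100}, {100, 250}, {300, 400} };
        unsigned long start = 50, end = 350;

        for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
            struct region *r = &regions[i];
            unsigned long tmp;

            if (r->end <= start)      /* region entirely before the range */
                continue;
            if (r->start >= end)      /* past the range: done */
                break;

            tmp = r->end;             /* clamp this step to the region end ... */
            if (tmp > end)
                tmp = end;            /* ... or to the overall end, whichever is lower */

            if (apply_to_region(r, start > r->start ? start : r->start, tmp))
                return 1;

            start = tmp;              /* advance; the next step begins here */
        }
        return 0;
    }
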
D | mprotect.c
    338 unsigned long vm_flags, nstart, end, tmp, reqprot;  in SYSCALL_DEFINE3() local
    411 tmp = vma->vm_end;  in SYSCALL_DEFINE3()
    412 if (tmp > end)  in SYSCALL_DEFINE3()
    413 tmp = end;  in SYSCALL_DEFINE3()
    414 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);  in SYSCALL_DEFINE3()
    417 nstart = tmp;  in SYSCALL_DEFINE3()

D | mmap.c
    2091 struct vm_area_struct *tmp;  in find_vma() local
    2093 tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);  in find_vma()
    2095 if (tmp->vm_end > addr) {  in find_vma()
    2096 vma = tmp;  in find_vma()
    2097 if (tmp->vm_start <= addr)  in find_vma()
    2641 struct vm_area_struct *tmp = vma;  in do_munmap() local
    2642 while (tmp && tmp->vm_start < end) {  in do_munmap()
    2643 if (tmp->vm_flags & VM_LOCKED) {  in do_munmap()
    2644 mm->locked_vm -= vma_pages(tmp);  in do_munmap()
    2645 munlock_vma_pages_all(tmp);  in do_munmap()
    [all …]

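The find_vma() lines show the classic rbtree descent: remember the lowest node seen so far whose vm_end lies above the address, and keep walking left only while that candidate can still be improved. Below is a compact sketch of the same search over an ordinary binary search tree of non-overlapping ranges; struct node and find_first_ending_after() are invented for the example:

    #include <stdio.h>
    #include <stddef.h>

    /* Nodes hold non-overlapping [start, end) ranges, keyed by start. */
    struct node {
        unsigned long start, end;
        struct node *left, *right;
    };

    /* Return the lowest node whose end is above addr, or NULL. */
    static struct node *find_first_ending_after(struct node *root, unsigned long addr)
    {
        struct node *best = NULL;

        while (root) {
            if (root->end > addr) {
                best = root;            /* candidate: ends past addr */
                if (root->start <= addr)
                    break;              /* addr falls inside this node */
                root = root->left;      /* a lower node might also qualify */
            } else {
                /* non-overlapping + sorted: everything to the left ends even earlier */
                root = root->right;
            }
        }
        return best;
    }

    int main(void)
    {
        /* Tree:        [200,300)
         *             /         \
         *        [50,100)     [400,500)            */
        struct node a = { 50, 100, NULL, NULL };
        struct node c = { 400, 500, NULL, NULL };
        struct node b = { 200, 300, &a, &c };

        struct node *hit = find_first_ending_after(&b, 120);
        if (hit)
            printf("first range ending after 120: [%lu, %lu)\n", hit->start, hit->end);
        return 0;
    }
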
D | mlock.c
    616 unsigned long nstart, end, tmp;  in do_mlock() local
    644 tmp = vma->vm_end;  in do_mlock()
    645 if (tmp > end)  in do_mlock()
    646 tmp = end;  in do_mlock()
    647 error = mlock_fixup(vma, &prev, nstart, tmp, newflags);  in do_mlock()
    650 nstart = tmp;  in do_mlock()

D | kmemleak-test.c
    100 struct test_node *elem, *tmp;  in kmemleak_test_exit() local
    106 list_for_each_entry_safe(elem, tmp, &test_list, list)  in kmemleak_test_exit()

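Here, as in balloon_compaction.c, memcontrol.c, hugetlb.c and zsmalloc.c below, tmp is the second cursor of list_for_each_entry_safe(): it caches the next element so the current one can be unlinked and freed mid-walk. The same idea in plain C with a singly linked list (types and names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct test_node {
        int cookie;
        struct test_node *next;
    };

    int main(void)
    {
        /* Build a small list: 0 -> 1 -> 2 -> 3 -> 4 */
        struct test_node *head = NULL;
        for (int i = 4; i >= 0; i--) {
            struct test_node *n = malloc(sizeof(*n));
            if (!n)
                return 1;
            n->cookie = i;
            n->next = head;
            head = n;
        }

        /*
         * Free every element.  'tmp' caches the next pointer *before* the
         * current element is freed; reading elem->next afterwards would be
         * a use-after-free -- exactly what the "safe" list iterators guard
         * against in the kernel snippets above.
         */
        struct test_node *elem = head, *tmp;
        while (elem) {
            tmp = elem->next;
            printf("freeing node %d\n", elem->cookie);
            free(elem);
            elem = tmp;
        }
        return 0;
    }
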
D | balloon_compaction.c
    59 struct page *page, *tmp;  in balloon_page_dequeue() local
    65 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {  in balloon_page_dequeue()

D | memcontrol.c
    3049 struct memcg_cache_params *params, *tmp;  in memcg_unregister_all_caches() local
    3055 list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {  in memcg_unregister_all_caches()
    4242 unsigned long long min_limit, min_memsw_limit, tmp;  in memcg_get_hierarchical_limit() local
    4253 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);  in memcg_get_hierarchical_limit()
    4254 min_limit = min(min_limit, tmp);  in memcg_get_hierarchical_limit()
    4255 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);  in memcg_get_hierarchical_limit()
    4256 min_memsw_limit = min(min_memsw_limit, tmp);  in memcg_get_hierarchical_limit()
    4803 struct mem_cgroup_eventfd_list *ev, *tmp;  in mem_cgroup_oom_unregister_event() local
    4807 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {  in mem_cgroup_oom_unregister_event()
    5271 int zone, tmp = node;  in alloc_mem_cgroup_per_zone_info() local
    [all …]

D | hugetlb.c
    1233 struct page *page, *tmp;  in gather_surplus_pages() local
    1290 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {  in gather_surplus_pages()
    1305 list_for_each_entry_safe(page, tmp, &surplus_list, lru)  in gather_surplus_pages()
    2293 unsigned long tmp = h->max_huge_pages;  in hugetlb_sysctl_handler_common() local
    2299 table->data = &tmp;  in hugetlb_sysctl_handler_common()
    2307 NUMA_NO_NODE, tmp, *length);  in hugetlb_sysctl_handler_common()
    2334 unsigned long tmp;  in hugetlb_overcommit_handler() local
    2340 tmp = h->nr_overcommit_huge_pages;  in hugetlb_overcommit_handler()
    2345 table->data = &tmp;  in hugetlb_overcommit_handler()
    2353 h->nr_overcommit_huge_pages = tmp;  in hugetlb_overcommit_handler()
    [all …]

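The two sysctl handlers above read the current count into a stack tmp, point table->data at it, and only store the value back into the hstate once the proc helper has parsed and range-checked the input. A hedged userspace sketch of that parse-into-a-temporary, commit-on-success shape; parse_ulong_in_range() and the 0..1024 bound are invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    static unsigned long nr_overcommit_pages = 16;   /* the live setting */

    /* Parse str as an unsigned long in [min, max]; 0 on success, -EINVAL otherwise. */
    static int parse_ulong_in_range(const char *str, unsigned long min,
                                    unsigned long max, unsigned long *out)
    {
        char *end;
        errno = 0;
        unsigned long val = strtoul(str, &end, 10);
        if (errno || end == str || *end != '\0' || val < min || val > max)
            return -EINVAL;
        *out = val;
        return 0;
    }

    static int overcommit_handler(const char *input)
    {
        unsigned long tmp = nr_overcommit_pages;   /* start from the current value */
        int err = parse_ulong_in_range(input, 0, 1024, &tmp);

        if (err)
            return err;            /* bad input: the live setting is untouched */

        nr_overcommit_pages = tmp; /* commit only after validation succeeded */
        return 0;
    }

    int main(void)
    {
        overcommit_handler("64");
        printf("after \"64\":   %lu\n", nr_overcommit_pages);
        overcommit_handler("bogus");
        printf("after \"bogus\": %lu (unchanged)\n", nr_overcommit_pages);
        return 0;
    }

Parsing into the temporary means a rejected write can never leave the live value half-updated.
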
D | kmemleak.c
    442 struct hlist_node *tmp;  in free_object_rcu() local
    451 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {  in free_object_rcu()
    1261 struct kmemleak_object *object, *tmp;  in scan_gray_list() local
    1276 tmp = list_entry(object->gray_list.next, typeof(*object),  in scan_gray_list()
    1283 object = tmp;  in scan_gray_list()

D | swapfile.c
    442 unsigned long tmp;  in scan_swap_map_try_ssd_cluster() local
    469 tmp = cluster->next;  in scan_swap_map_try_ssd_cluster()
    470 while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *  in scan_swap_map_try_ssd_cluster()
    472 if (!si->swap_map[tmp]) {  in scan_swap_map_try_ssd_cluster()
    476 tmp++;  in scan_swap_map_try_ssd_cluster()
    482 cluster->next = tmp + 1;  in scan_swap_map_try_ssd_cluster()
    483 *offset = tmp;  in scan_swap_map_try_ssd_cluster()
    484 *scan_base = tmp;  in scan_swap_map_try_ssd_cluster()

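scan_swap_map_try_ssd_cluster() advances tmp from the cluster's next hint until it finds an unused swap_map slot inside the current cluster window, then publishes the position and moves the hint past it. A simplified standalone sketch of that scan-cursor idea (CLUSTER_SIZE, slot_map and find_free_in_cluster() are invented names):

    #include <stdio.h>
    #include <string.h>

    #define CLUSTER_SIZE 8
    #define NR_SLOTS     32

    static unsigned char slot_map[NR_SLOTS];   /* 0 = free, non-zero = in use */

    /*
     * Scan from *next within the cluster that contains it, looking for a free
     * slot.  On success store it in *offset, advance the hint and return 1.
     */
    static int find_free_in_cluster(unsigned long *next, unsigned long *offset)
    {
        unsigned long cluster_end = (*next / CLUSTER_SIZE + 1) * CLUSTER_SIZE;
        unsigned long tmp = *next;

        while (tmp < NR_SLOTS && tmp < cluster_end) {
            if (!slot_map[tmp]) {
                slot_map[tmp] = 1;     /* claim it */
                *offset = tmp;
                *next = tmp + 1;       /* the next search starts after this slot */
                return 1;
            }
            tmp++;
        }
        return 0;                      /* cluster exhausted */
    }

    int main(void)
    {
        unsigned long next = 8, offset;

        memset(slot_map + 8, 1, 5);    /* slots 8..12 already used */
        if (find_free_in_cluster(&next, &offset))
            printf("allocated slot %lu, next hint %lu\n", offset, next);
        return 0;
    }
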
D | memory.c
    2092 int tmp;  in do_wp_page() local
    2095 tmp = do_page_mkwrite(vma, old_page, address);  in do_wp_page()
    2096 if (unlikely(!tmp || (tmp &  in do_wp_page()
    2099 return tmp;  in do_wp_page()
    2939 int ret, tmp;  in do_shared_fault() local
    2951 tmp = do_page_mkwrite(vma, fault_page, address);  in do_shared_fault()
    2952 if (unlikely(!tmp ||  in do_shared_fault()
    2953 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {  in do_shared_fault()
    2955 return tmp;  in do_shared_fault()

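In do_wp_page() and do_shared_fault(), tmp holds the VM_FAULT_* bitmask returned by do_page_mkwrite(), and the caller bails out if the callback returned zero or any error/nopage bit. A tiny sketch of that stash-the-flags, test-a-mask pattern, with invented FAULT_* bits and helpers:

    #include <stdio.h>

    /* Invented result bits, mimicking the VM_FAULT_* style. */
    #define FAULT_LOCKED  0x01
    #define FAULT_ERROR   0x02
    #define FAULT_NOPAGE  0x04

    static int make_writable(int page_id)
    {
        if (page_id < 0)
            return FAULT_ERROR;       /* simulate a failing callback */
        return FAULT_LOCKED;          /* success: page returned locked */
    }

    static int handle_write_fault(int page_id)
    {
        int tmp = make_writable(page_id);

        /* Bail out if the callback failed outright or reported error/nopage. */
        if (!tmp || (tmp & (FAULT_ERROR | FAULT_NOPAGE)))
            return tmp;

        printf("page %d made writable (flags 0x%x)\n", page_id, tmp);
        return 0;
    }

    int main(void)
    {
        handle_write_fault(3);
        printf("bad page -> flags 0x%x\n", handle_write_fault(-1));
        return 0;
    }
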
D | slab_common.c
    82 char tmp;  in kmem_cache_sanity_check() local
    90 res = probe_kernel_address(s->name, tmp);  in kmem_cache_sanity_check()

D | slab.c
    4011 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;  in slabinfo_write() local
    4021 tmp = strchr(kbuf, ' ');  in slabinfo_write()
    4022 if (!tmp)  in slabinfo_write()
    4024 *tmp = '\0';  in slabinfo_write()
    4025 tmp++;  in slabinfo_write()
    4026 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)  in slabinfo_write()

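slabinfo_write() tokenizes the user's "name limit batchcount shared" line by null-terminating at the first space and handing the remainder to sscanf(). The same two-step parse as a standalone program (the example input line is made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char kbuf[] = "dentry 128 64 8";       /* sample tunables line */
        int limit, batchcount, shared;
        char *tmp;

        tmp = strchr(kbuf, ' ');               /* find the end of the cache name */
        if (!tmp)
            return 1;
        *tmp = '\0';                           /* kbuf is now just "dentry" */
        tmp++;                                 /* tmp points at "128 64 8" */

        if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
            return 1;

        printf("cache=%s limit=%d batchcount=%d shared=%d\n",
               kbuf, limit, batchcount, shared);
        return 0;
    }
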
D | page_alloc.c
    3203 char tmp[MIGRATE_TYPES + 1];  in show_migration_types() local
    3204 char *p = tmp;  in show_migration_types()
    3213 printk("(%s) ", tmp);  in show_migration_types()
    3549 const struct cpumask *tmp = cpumask_of_node(0);  in find_next_best_node() local
    3570 tmp = cpumask_of_node(n);  in find_next_best_node()
    3571 if (!cpumask_empty(tmp))  in find_next_best_node()

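show_migration_types() assembles a short summary in the stack buffer tmp, appending one character per migrate type present in the bitmask and printing the buffer in a single call. A sketch of that build-into-a-small-buffer step with invented type letters:

    #include <stdio.h>

    #define NR_TYPES 5

    /* One letter per (invented) type, indexed by bit position. */
    static const char type_letter[NR_TYPES] = { 'U', 'M', 'R', 'C', 'I' };

    static void show_types(unsigned int type_mask)
    {
        char tmp[NR_TYPES + 1];    /* worst case: every type present + NUL */
        char *p = tmp;

        for (int i = 0; i < NR_TYPES; i++)
            if (type_mask & (1u << i))
                *p++ = type_letter[i];
        *p = '\0';

        printf("(%s)\n", tmp);
    }

    int main(void)
    {
        show_types(0x1 | 0x4);     /* prints "(UR)" */
        return 0;
    }

Sizing the buffer at NR_TYPES + 1 bounds the worst case, so no length checks are needed inside the loop.
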
D | zsmalloc.c
    883 struct page *nextp, *tmp, *head_extra;  in free_zspage() local
    897 list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {  in free_zspage()

D | slub.c
    342 struct page tmp;  in set_page_slub_counters() local
    343 tmp.counters = counters_new;  in set_page_slub_counters()
    350 page->frozen = tmp.frozen;  in set_page_slub_counters()
    351 page->inuse = tmp.inuse;  in set_page_slub_counters()
    352 page->objects = tmp.objects;  in set_page_slub_counters()

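set_page_slub_counters() decodes the packed counters word in a temporary struct page and then copies the frozen/inuse/objects fields one at a time, so that other members which merely share the union's storage are left untouched. A userspace sketch with an invented packed layout (struct pageinfo is not the real struct page, just an analogue):

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Invented layout: a 64-bit 'counters' word overlays a 32-bit group of
     * bitfields plus a 32-bit refcount, loosely mimicking struct page.
     */
    struct pageinfo {
        union {
            uint64_t counters;               /* overlays both halves below */
            struct {
                union {
                    struct {
                        unsigned inuse   : 16;
                        unsigned objects : 15;
                        unsigned frozen  : 1;
                    };
                    uint32_t mapcount;
                };
                uint32_t refcount;           /* must not be clobbered here */
            };
        };
    };

    static void set_counters(struct pageinfo *page, uint64_t counters_new)
    {
        struct pageinfo tmp;

        /*
         * Unpack in a temporary and copy only the allocator fields.
         * Assigning page->counters = counters_new directly would also
         * overwrite the refcount half of the overlay.
         */
        tmp.counters = counters_new;
        page->frozen  = tmp.frozen;
        page->inuse   = tmp.inuse;
        page->objects = tmp.objects;
    }

    int main(void)
    {
        struct pageinfo page = { .counters = 0 };
        struct pageinfo pack = { .counters = 0 };

        page.refcount = 3;                   /* pretend the page has references */

        pack.inuse = 42;                     /* build a new packed value ... */
        pack.objects = 64;
        pack.frozen = 1;

        set_counters(&page, pack.counters);  /* ... and apply it */

        printf("inuse=%u objects=%u frozen=%u refcount=%u\n",
               (unsigned)page.inuse, (unsigned)page.objects,
               (unsigned)page.frozen, (unsigned)page.refcount);
        return 0;
    }
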