Lines Matching refs:address in mm/ksm.c
138 unsigned long address; member
204 unsigned long address; /* + low bits used for flags below */ member
536 unsigned long addr = rmap_item->address; in break_cow()
555 unsigned long addr = rmap_item->address; in get_mergeable_page()
646 rmap_item->address &= PAGE_MASK; in remove_node_from_stable_tree()
775 if (rmap_item->address & STABLE_FLAG) { in remove_rmap_item_from_tree()
796 rmap_item->address &= PAGE_MASK; in remove_rmap_item_from_tree()
798 } else if (rmap_item->address & UNSTABLE_FLAG) { in remove_rmap_item_from_tree()
807 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); in remove_rmap_item_from_tree()
813 rmap_item->address &= PAGE_MASK; in remove_rmap_item_from_tree()
1044 pvmw.address = page_address_in_vma(page, vma); in write_protect_page()
1045 if (pvmw.address == -EFAULT) in write_protect_page()
1051 pvmw.address, in write_protect_page()
1052 pvmw.address + PAGE_SIZE); in write_protect_page()
1066 flush_cache_page(vma, pvmw.address, page_to_pfn(page)); in write_protect_page()
1081 entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); in write_protect_page()
1087 set_pte_at(mm, pvmw.address, pvmw.pte, entry); in write_protect_page()
1097 set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); in write_protect_page()
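
The write_protect_page() hits walk the pte mapping the candidate page (page_vma_mapped_walk() fills pvmw.address), flush it, and re-install it with write permission cleared, so the contents cannot change under the later compare/merge. Those are kernel-internal primitives; the closest loose userspace analogue is remapping a single page read-only. The sketch below only illustrates that end state and uses no KSM interface:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;

            memset(p, 'k', page);                   /* dirty the anonymous page */

            /* Drop write permission on exactly this one page. */
            if (mprotect(p, page, PROT_READ))
                    return 1;

            printf("page at %p is read-only now; any store would fault\n", (void *)p);
            munmap(p, page);
            return 0;
    }
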
1284 vma = find_mergeable_vma(mm, rmap_item->address); in try_to_merge_with_ksm_page()
1979 rmap_item->address |= UNSTABLE_FLAG; in unstable_tree_search_insert()
1980 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); in unstable_tree_search_insert()
2017 rmap_item->address |= STABLE_FLAG; in stable_tree_append()
2114 vma = find_mergeable_vma(mm, rmap_item->address); in cmp_and_merge_page()
2116 ZERO_PAGE(rmap_item->address)); in cmp_and_merge_page()
2196 if ((rmap_item->address & PAGE_MASK) == addr) in get_next_rmap_item()
2198 if (rmap_item->address > addr) in get_next_rmap_item()
2209 rmap_item->address = addr; in get_next_rmap_item()
2275 ksm_scan.address = 0; in scan_get_next_rmap_item()
2284 vma = find_vma(mm, ksm_scan.address); in scan_get_next_rmap_item()
2289 if (ksm_scan.address < vma->vm_start) in scan_get_next_rmap_item()
2290 ksm_scan.address = vma->vm_start; in scan_get_next_rmap_item()
2292 ksm_scan.address = vma->vm_end; in scan_get_next_rmap_item()
2294 while (ksm_scan.address < vma->vm_end) { in scan_get_next_rmap_item()
2297 *page = follow_page(vma, ksm_scan.address, FOLL_GET); in scan_get_next_rmap_item()
2299 ksm_scan.address += PAGE_SIZE; in scan_get_next_rmap_item()
2304 flush_anon_page(vma, *page, ksm_scan.address); in scan_get_next_rmap_item()
2307 ksm_scan.rmap_list, ksm_scan.address); in scan_get_next_rmap_item()
2311 ksm_scan.address += PAGE_SIZE; in scan_get_next_rmap_item()
2318 ksm_scan.address += PAGE_SIZE; in scan_get_next_rmap_item()
2324 ksm_scan.address = 0; in scan_get_next_rmap_item()
2336 if (ksm_scan.address == 0) { in scan_get_next_rmap_item()
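
The scan_get_next_rmap_item() hits all revolve around one resumable cursor: ksm_scan.address is clamped up to vm_start, stepped one page at a time until vm_end, and reset to 0 once an mm is exhausted, which is what the == 0 check above detects on the next pass. A toy model of that cursor over a couple of fake ranges (no page tables, no follow_page()):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct vma { unsigned long vm_start, vm_end; };

    /*
     * Resumable cursor over a set of ranges, advancing one page per call,
     * mirroring how ksm_scan.address walks each VMA and wraps back to 0.
     */
    static unsigned long next_address(const struct vma *vmas, int nr,
                                      unsigned long *cursor)
    {
            for (int i = 0; i < nr; i++) {
                    unsigned long addr = *cursor;

                    if (addr < vmas[i].vm_start)
                            addr = vmas[i].vm_start;        /* clamp into the VMA */
                    if (addr >= vmas[i].vm_end)
                            continue;                       /* VMA already done */

                    *cursor = addr + PAGE_SIZE;             /* resume point */
                    return addr;
            }
            *cursor = 0;                                    /* mm finished: wrap */
            return 0;
    }

    int main(void)
    {
            struct vma vmas[] = {
                    { 0x1000, 0x3000 },
                    { 0x8000, 0x9000 },
            };
            unsigned long cursor = 0, addr;

            while ((addr = next_address(vmas, 2, &cursor)) != 0)
                    printf("scan %#lx\n", addr);
            return 0;
    }
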
2560 struct vm_area_struct *vma, unsigned long address) in ksm_might_need_to_copy() argument
2572 page->index == linear_page_index(vma, address)) { in ksm_might_need_to_copy()
2578 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in ksm_might_need_to_copy()
2580 copy_user_highpage(new_page, page, address, vma); in ksm_might_need_to_copy()
2623 addr = rmap_item->address & ~KSM_FLAG_MASK; in rmap_walk_ksm()
2656 unsigned long address) in reuse_ksm_page() argument
2674 page->index = linear_page_index(vma, address); in reuse_ksm_page()