
Searched refs:entry (Results 1 – 25 of 29) sorted by relevance


/mm/
zswap.c
250 struct zswap_entry *entry; in zswap_entry_cache_alloc() local
251 entry = kmem_cache_alloc(zswap_entry_cache, gfp); in zswap_entry_cache_alloc()
252 if (!entry) in zswap_entry_cache_alloc()
254 entry->refcount = 1; in zswap_entry_cache_alloc()
255 RB_CLEAR_NODE(&entry->rbnode); in zswap_entry_cache_alloc()
256 return entry; in zswap_entry_cache_alloc()
259 static void zswap_entry_cache_free(struct zswap_entry *entry) in zswap_entry_cache_free() argument
261 kmem_cache_free(zswap_entry_cache, entry); in zswap_entry_cache_free()
270 struct zswap_entry *entry; in zswap_rb_search() local
273 entry = rb_entry(node, struct zswap_entry, rbnode); in zswap_rb_search()
[all …]

swap_state.c
79 swp_entry_t entry = swp_entry(i, 1); in total_swapcache_pages() local
82 if (!swp_swap_info(entry)) in total_swapcache_pages()
85 si = get_swap_device(entry); in total_swapcache_pages()
114 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp) in add_to_swap_cache() argument
116 struct address_space *address_space = swap_address_space(entry); in add_to_swap_cache()
117 pgoff_t idx = swp_offset(entry); in add_to_swap_cache()
135 set_page_private(page + i, entry.val + i); in add_to_swap_cache()
158 void __delete_from_swap_cache(struct page *page, swp_entry_t entry) in __delete_from_swap_cache() argument
160 struct address_space *address_space = swap_address_space(entry); in __delete_from_swap_cache()
162 pgoff_t idx = swp_offset(entry); in __delete_from_swap_cache()
[all …]
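
The swap_state.c hits above all build and take apart swp_entry_t values with swp_entry(), swp_type() and swp_offset(): a swap slot is identified by a (type, offset) pair packed into a single word, which add_to_swap_cache() stashes in page_private(). A minimal userspace sketch of that packing, assuming a 5-bit type field in the high bits (the real width and layout are arch- and config-dependent, and every MODEL_*/model_* name below is made up for illustration):

    /* Standalone model of swp_entry_t packing; field width is an assumption. */
    #include <assert.h>
    #include <stdio.h>

    typedef struct { unsigned long val; } swp_entry_t;

    #define MODEL_TYPE_BITS   5UL	/* assumed; the kernel derives this from MAX_SWAPFILES */
    #define MODEL_TYPE_SHIFT  (sizeof(unsigned long) * 8 - MODEL_TYPE_BITS)

    static swp_entry_t model_swp_entry(unsigned long type, unsigned long offset)
    {
    	swp_entry_t e;

    	/* type lives in the high bits, offset in the low bits */
    	e.val = (type << MODEL_TYPE_SHIFT) | (offset & ((1UL << MODEL_TYPE_SHIFT) - 1));
    	return e;
    }

    static unsigned long model_swp_type(swp_entry_t e)
    {
    	return e.val >> MODEL_TYPE_SHIFT;
    }

    static unsigned long model_swp_offset(swp_entry_t e)
    {
    	return e.val & ((1UL << MODEL_TYPE_SHIFT) - 1);
    }

    int main(void)
    {
    	swp_entry_t e = model_swp_entry(3, 12345);

    	assert(model_swp_type(e) == 3 && model_swp_offset(e) == 12345);
    	printf("type=%lu offset=%lu\n", model_swp_type(e), model_swp_offset(e));
    	return 0;
    }

The round trip is what the swapfile.c and page_io.c hits below rely on: given only entry.val recovered from page_private(), the owning swap device (type) and the slot within it (offset) can be reconstructed.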

huge_memory.c
618 pmd_t entry; in __do_huge_pmd_anonymous_page() local
637 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
638 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page()
643 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
703 pmd_t entry; in set_huge_zero_page() local
706 entry = mk_pmd(zero_page, vma->vm_page_prot); in set_huge_zero_page()
707 entry = pmd_mkhuge(entry); in set_huge_zero_page()
710 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_page()
782 pmd_t entry; in insert_pfn_pmd() local
792 entry = pmd_mkyoung(*pmd); in insert_pfn_pmd()
[all …]

swapfile.c
130 swp_entry_t entry = swp_entry(si->type, offset); in __try_to_reclaim_swap() local
134 page = find_get_page(swap_address_space(entry), offset); in __try_to_reclaim_swap()
980 swp_entry_t entry; in scan_swap_map() local
983 n_ret = scan_swap_map_slots(si, usage, 1, &entry); in scan_swap_map()
986 return swp_offset(entry); in scan_swap_map()
1104 static struct swap_info_struct *__swap_info_get(swp_entry_t entry) in __swap_info_get() argument
1109 if (!entry.val) in __swap_info_get()
1111 p = swp_swap_info(entry); in __swap_info_get()
1116 offset = swp_offset(entry); in __swap_info_get()
1122 pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val); in __swap_info_get()
[all …]

page_vma_mapped.c
40 swp_entry_t entry; in map_pte() local
43 entry = pte_to_swp_entry(*pvmw->pte); in map_pte()
44 if (!is_device_private_entry(entry)) in map_pte()
86 swp_entry_t entry; in check_pte() local
89 entry = pte_to_swp_entry(*pvmw->pte); in check_pte()
91 if (!is_migration_entry(entry)) in check_pte()
94 pfn = migration_entry_to_pfn(entry); in check_pte()
96 swp_entry_t entry; in check_pte() local
99 entry = pte_to_swp_entry(*pvmw->pte); in check_pte()
100 if (!is_device_private_entry(entry)) in check_pte()
[all …]

swap_slots.c
278 int free_swap_slot(swp_entry_t entry) in free_swap_slot() argument
300 cache->slots_ret[cache->n_ret++] = entry; in free_swap_slot()
304 swapcache_free_entries(&entry, 1); in free_swap_slot()
312 swp_entry_t entry, *pentry; in get_swap_page() local
315 entry.val = 0; in get_swap_page()
319 get_swap_pages(1, &entry, HPAGE_PMD_NR); in get_swap_page()
340 entry = *pentry; in get_swap_page()
349 if (entry.val) in get_swap_page()
353 get_swap_pages(1, &entry, 1); in get_swap_page()
355 if (mem_cgroup_try_charge_swap(page, entry)) { in get_swap_page()
[all …]

memory.c
707 swp_entry_t entry = pte_to_swp_entry(pte); in copy_one_pte() local
709 if (likely(!non_swap_entry(entry))) { in copy_one_pte()
710 if (swap_duplicate(entry) < 0) in copy_one_pte()
711 return entry.val; in copy_one_pte()
722 } else if (is_migration_entry(entry)) { in copy_one_pte()
723 page = migration_entry_to_page(entry); in copy_one_pte()
727 if (is_write_migration_entry(entry) && in copy_one_pte()
733 make_migration_entry_read(&entry); in copy_one_pte()
734 pte = swp_entry_to_pte(entry); in copy_one_pte()
739 } else if (is_device_private_entry(entry)) { in copy_one_pte()
[all …]

rmap.c
910 pte_t entry; in page_mkclean_one() local
917 entry = ptep_clear_flush(vma, address, pte); in page_mkclean_one()
918 entry = pte_wrprotect(entry); in page_mkclean_one()
919 entry = pte_mkclean(entry); in page_mkclean_one()
920 set_pte_at(vma->vm_mm, address, pte, entry); in page_mkclean_one()
925 pmd_t entry; in page_mkclean_one() local
931 entry = pmdp_invalidate(vma, address, pmd); in page_mkclean_one()
932 entry = pmd_wrprotect(entry); in page_mkclean_one()
933 entry = pmd_mkclean(entry); in page_mkclean_one()
934 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_mkclean_one()
[all …]

workingset.c
199 unsigned long entry = xa_to_value(shadow); in unpack_shadow() local
203 workingset = entry & 1; in unpack_shadow()
204 entry >>= 1; in unpack_shadow()
205 nid = entry & ((1UL << NODES_SHIFT) - 1); in unpack_shadow()
206 entry >>= NODES_SHIFT; in unpack_shadow()
207 memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1); in unpack_shadow()
208 entry >>= MEM_CGROUP_ID_SHIFT; in unpack_shadow()
212 *evictionp = entry << bucket_order; in unpack_shadow()
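
The workingset.c hit shows unpack_shadow() recovering a workingset flag, a node id, a memcg id and an eviction counter from one shadow value by repeated shift-and-mask. A round-trip sketch of that layout, compilable on its own; the MODEL_* field widths are placeholder assumptions (the kernel sizes them from NODES_SHIFT, MEM_CGROUP_ID_SHIFT and bucket_order, and stores the packed word as an XArray value entry):

    /* Standalone model of the shadow-entry bit packing; all widths are assumptions. */
    #include <assert.h>

    #define MODEL_NODES_SHIFT      6
    #define MODEL_MEMCG_ID_SHIFT  16
    #define MODEL_BUCKET_ORDER     0

    static unsigned long pack_shadow_model(unsigned int memcgid, unsigned int nid,
    					unsigned long eviction, int workingset)
    {
    	unsigned long entry = eviction >> MODEL_BUCKET_ORDER;

    	entry = (entry << MODEL_MEMCG_ID_SHIFT) | memcgid;
    	entry = (entry << MODEL_NODES_SHIFT) | nid;
    	entry = (entry << 1) | (workingset ? 1 : 0);
    	return entry;
    }

    static void unpack_shadow_model(unsigned long entry, unsigned int *memcgid,
    				unsigned int *nid, unsigned long *eviction,
    				int *workingset)
    {
    	/* reverse the packing in the same order as unpack_shadow() above */
    	*workingset = entry & 1;
    	entry >>= 1;
    	*nid = entry & ((1UL << MODEL_NODES_SHIFT) - 1);
    	entry >>= MODEL_NODES_SHIFT;
    	*memcgid = entry & ((1UL << MODEL_MEMCG_ID_SHIFT) - 1);
    	entry >>= MODEL_MEMCG_ID_SHIFT;
    	*eviction = entry << MODEL_BUCKET_ORDER;
    }

    int main(void)
    {
    	unsigned int memcgid, nid;
    	unsigned long eviction;
    	int workingset;

    	unpack_shadow_model(pack_shadow_model(42, 3, 100, 1),
    			    &memcgid, &nid, &eviction, &workingset);
    	assert(memcgid == 42 && nid == 3 && eviction == 100 && workingset == 1);
    	return 0;
    }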

migrate.c
215 swp_entry_t entry; in remove_migration_pte() local
242 entry = pte_to_swp_entry(*pvmw.pte); in remove_migration_pte()
243 if (is_write_migration_entry(entry)) in remove_migration_pte()
248 entry = make_device_private_entry(new, pte_write(pte)); in remove_migration_pte()
249 pte = swp_entry_to_pte(entry); in remove_migration_pte()
311 swp_entry_t entry; in __migration_entry_wait() local
319 entry = pte_to_swp_entry(pte); in __migration_entry_wait()
320 if (!is_migration_entry(entry)) in __migration_entry_wait()
323 page = migration_entry_to_page(entry); in __migration_entry_wait()
2005 pmd_t *pmd, pmd_t entry, in migrate_misplaced_transhuge_page() argument
[all …]

pgtable-generic.c
57 pte_t entry, int dirty) in ptep_set_access_flags() argument
59 int changed = !pte_same(*ptep, entry); in ptep_set_access_flags()
61 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
98 pmd_t entry, int dirty) in pmdp_set_access_flags() argument
100 int changed = !pmd_same(*pmdp, entry); in pmdp_set_access_flags()
103 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
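
The pgtable-generic.c hits share one idiom: ptep_set_access_flags() and pmdp_set_access_flags() compare the current entry against the proposed one, write it back only if something differs, and return whether a change was made so the caller knows a TLB flush is needed. A minimal sketch of that "update only if changed" idiom on a plain value; model_pte_t and model_set_access_flags() are illustrative names, not kernel API:

    /* Illustrative "write back only if changed" idiom; not a real page-table accessor. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long model_pte_t;

    static bool model_set_access_flags(model_pte_t *slot, model_pte_t entry)
    {
    	bool changed = (*slot != entry);

    	if (changed)
    		*slot = entry;	/* the caller flushes the TLB only when this returns true */
    	return changed;
    }

    int main(void)
    {
    	model_pte_t pte = 0x1;

    	printf("first update changed:  %d\n", model_set_access_flags(&pte, 0x3));	/* 1 */
    	printf("second update changed: %d\n", model_set_access_flags(&pte, 0x3));	/* 0 */
    	return 0;
    }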

hugetlb.c
3367 pte_t entry; in make_huge_pte() local
3370 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, in make_huge_pte()
3373 entry = huge_pte_wrprotect(mk_huge_pte(page, in make_huge_pte()
3376 entry = pte_mkyoung(entry); in make_huge_pte()
3377 entry = pte_mkhuge(entry); in make_huge_pte()
3378 entry = arch_make_huge_pte(entry, vma, page, writable); in make_huge_pte()
3380 return entry; in make_huge_pte()
3386 pte_t entry; in set_huge_ptep_writable() local
3388 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); in set_huge_ptep_writable()
3389 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) in set_huge_ptep_writable()
[all …]

frontswap.c
248 swp_entry_t entry = { .val = page_private(page), }; in __frontswap_store() local
249 int type = swp_type(entry); in __frontswap_store()
251 pgoff_t offset = swp_offset(entry); in __frontswap_store()
297 swp_entry_t entry = { .val = page_private(page), }; in __frontswap_load() local
298 int type = swp_type(entry); in __frontswap_load()
300 pgoff_t offset = swp_offset(entry); in __frontswap_load()

truncate.c
35 pgoff_t index, void *entry) in __clear_shadow_entry() argument
40 if (xas_load(&xas) != entry) in __clear_shadow_entry()
47 void *entry) in clear_shadow_entry() argument
50 __clear_shadow_entry(mapping, index, entry); in clear_shadow_entry()
112 pgoff_t index, void *entry) in invalidate_exceptional_entry() argument
117 clear_shadow_entry(mapping, index, entry); in invalidate_exceptional_entry()
126 pgoff_t index, void *entry) in invalidate_exceptional_entry2() argument
133 clear_shadow_entry(mapping, index, entry); in invalidate_exceptional_entry2()

mprotect.c
126 swp_entry_t entry = pte_to_swp_entry(oldpte); in change_pte_range() local
128 if (is_write_migration_entry(entry)) { in change_pte_range()
134 make_migration_entry_read(&entry); in change_pte_range()
135 newpte = swp_entry_to_pte(entry); in change_pte_range()
143 if (is_write_device_private_entry(entry)) { in change_pte_range()
150 make_device_private_entry_read(&entry); in change_pte_range()
151 newpte = swp_entry_to_pte(entry); in change_pte_range()

mincore.c
153 swp_entry_t entry = pte_to_swp_entry(pte); in mincore_pte_range() local
155 if (non_swap_entry(entry)) { in mincore_pte_range()
163 *vec = mincore_page(swap_address_space(entry), in mincore_pte_range()
164 swp_offset(entry)); in mincore_pte_range()

hmm.c
477 swp_entry_t entry = pte_to_swp_entry(pte); in hmm_vma_handle_pte() local
479 if (!non_swap_entry(entry)) { in hmm_vma_handle_pte()
492 if (is_device_private_entry(entry)) { in hmm_vma_handle_pte()
495 cpu_flags |= is_write_device_private_entry(entry) ? in hmm_vma_handle_pte()
502 swp_offset(entry)); in hmm_vma_handle_pte()
507 if (is_migration_entry(entry)) { in hmm_vma_handle_pte()
737 pte_t entry; in hmm_vma_walk_hugetlb_entry() local
741 entry = huge_ptep_get(pte); in hmm_vma_walk_hugetlb_entry()
746 cpu_flags = pte_to_hmm_pfn_flags(range, entry); in hmm_vma_walk_hugetlb_entry()
755 pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT); in hmm_vma_walk_hugetlb_entry()

madvise.c
195 swp_entry_t entry; in swapin_walk_pmd_entry() local
205 entry = pte_to_swp_entry(pte); in swapin_walk_pmd_entry()
206 if (unlikely(non_swap_entry(entry))) in swapin_walk_pmd_entry()
209 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, in swapin_walk_pmd_entry()
592 swp_entry_t entry; in madvise_free_pte_range() local
594 entry = pte_to_swp_entry(ptent); in madvise_free_pte_range()
595 if (non_swap_entry(entry)) in madvise_free_pte_range()
598 free_swap_and_cache(entry); in madvise_free_pte_range()

slab.c
189 void *entry[]; /* member
583 memcpy(to->entry + to->avail, from->entry + from->avail -nr, in transfer_objects()
700 free_block(cachep, ac->entry, ac->avail, node, list); in __drain_alien_cache()
770 ac->entry[ac->avail++] = objp; in __cache_free_alien()
902 free_block(cachep, n->shared->entry, in setup_kmem_cache_node()
964 free_block(cachep, nc->entry, nc->avail, node, &list); in cpuup_canceled()
974 free_block(cachep, shared->entry, in cpuup_canceled()
2131 free_block(cachep, ac->entry, tofree, node, list); in drain_array_locked()
2133 memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); in drain_array_locked()
2148 free_block(cachep, ac->entry, ac->avail, node, &list); in do_drain()
[all …]

memory-failure.c
1461 struct memory_failure_entry entry = { in memory_failure_queue() local
1468 if (kfifo_put(&mf_cpu->fifo, entry)) in memory_failure_queue()
1481 struct memory_failure_entry entry = { 0, }; in memory_failure_work_func() local
1488 gotten = kfifo_get(&mf_cpu->fifo, &entry); in memory_failure_work_func()
1492 if (entry.flags & MF_SOFT_OFFLINE) in memory_failure_work_func()
1493 soft_offline_page(pfn_to_page(entry.pfn), entry.flags); in memory_failure_work_func()
1495 memory_failure(entry.pfn, entry.flags); in memory_failure_work_func()

page_io.c
77 swp_entry_t entry; in swap_slot_free_notify() local
109 entry.val = page_private(page); in swap_slot_free_notify()
110 if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) { in swap_slot_free_notify()
113 offset = swp_offset(entry); in swap_slot_free_notify()

sparse-vmemmap.c
147 pte_t entry; in vmemmap_pte_populate() local
151 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); in vmemmap_pte_populate()
152 set_pte_at(&init_mm, addr, pte, entry); in vmemmap_pte_populate()

gup.c
147 pte_t entry = *pte; in follow_pfn_pte() local
150 entry = pte_mkdirty(entry); in follow_pfn_pte()
151 entry = pte_mkyoung(entry); in follow_pfn_pte()
153 if (!pte_same(*pte, entry)) { in follow_pfn_pte()
154 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
189 swp_entry_t entry; in follow_page_pte() local
199 entry = pte_to_swp_entry(pte); in follow_page_pte()
200 if (!is_migration_entry(entry)) in follow_page_pte()

ksm.c
1063 pte_t entry; in write_protect_page() local
1081 entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); in write_protect_page()
1087 set_pte_at(mm, pvmw.address, pvmw.pte, entry); in write_protect_page()
1090 if (pte_dirty(entry)) in write_protect_page()
1093 if (pte_protnone(entry)) in write_protect_page()
1094 entry = pte_mkclean(pte_clear_savedwrite(entry)); in write_protect_page()
1096 entry = pte_mkclean(pte_wrprotect(entry)); in write_protect_page()
1097 set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); in write_protect_page()

memcontrol.c
1970 INIT_LIST_HEAD(&owait.wait.entry); in mem_cgroup_oom_synchronize()
3079 static int mem_cgroup_move_swap_account(swp_entry_t entry, in mem_cgroup_move_swap_account() argument
3087 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { in mem_cgroup_move_swap_account()
3095 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, in mem_cgroup_move_swap_account() argument
5319 pte_t ptent, swp_entry_t *entry) in mc_handle_swap_pte() argument
5349 entry->val = ent.val; in mc_handle_swap_pte()
5355 pte_t ptent, swp_entry_t *entry) in mc_handle_swap_pte() argument
5362 unsigned long addr, pte_t ptent, swp_entry_t *entry) in mc_handle_file_pte() argument
5384 *entry = swp; in mc_handle_file_pte()
6558 swp_entry_t entry = { .val = page_private(page) }; in mem_cgroup_commit_charge() local
[all …]
