
Searched refs:vmf (Results 1 – 16 of 16) sorted by relevance

/include/trace/events/
fs_dax.h:11 TP_PROTO(struct inode *inode, struct vm_fault *vmf,
13 TP_ARGS(inode, vmf, max_pgoff, result),
29 __entry->vm_start = vmf->vma->vm_start;
30 __entry->vm_end = vmf->vma->vm_end;
31 __entry->vm_flags = vmf->vma->vm_flags;
32 __entry->address = vmf->address;
33 __entry->flags = vmf->flags;
34 __entry->pgoff = vmf->pgoff;
56 TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
58 TP_ARGS(inode, vmf, max_pgoff, result))
[all …]
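
These hits are the DAX fault trace-event class in fs_dax.h, which snapshots the faulting VMA range, flags, address and pgoff out of the vm_fault. Below is a hedged sketch of how fs/dax.c typically brackets a PMD fault attempt with events of this class; the event names dax_pmd_fault/dax_pmd_fault_done are assumed from that usage (they are not among the hits above) and the my_* wrapper is hypothetical.

#include <linux/mm.h>
#include <trace/events/fs_dax.h>

/* Hypothetical wrapper; the trace_dax_pmd_fault*() calls mirror how
 * fs/dax.c brackets its PMD fault handling (assumed, not shown above). */
static vm_fault_t my_dax_pmd_fault(struct inode *inode, struct vm_fault *vmf,
				   pgoff_t max_pgoff)
{
	vm_fault_t result = VM_FAULT_FALLBACK;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
	/* ... try to install a PMD-sized DAX mapping here ... */
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}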
/include/linux/
huge_mm.h:11 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
15 void huge_pmd_set_accessed(struct vm_fault *vmf);
21 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
23 static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) in huge_pud_set_accessed() argument
28 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
41 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
42 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
456 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
602 static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) in do_huge_pmd_numa_page() argument
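
huge_mm.h holds the PMD/PUD-sized fault helpers; vmf_insert_pfn_pmd(vmf, pfn, write) at line 41 is the one a driver calls from its huge_fault handler. A hedged sketch of such a handler follows; struct my_region, my_region_phys() and the use of phys_to_pfn_t() from linux/pfn_t.h are assumptions for illustration, not part of the results above.

#include <linux/huge_mm.h>
#include <linux/pfn_t.h>

/* Hypothetical huge_fault handler mapping a PMD-sized device region. */
static vm_fault_t my_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	struct my_region *r = vmf->vma->vm_private_data;	/* hypothetical */
	pfn_t pfn;

	if (order != HPAGE_PMD_ORDER)
		return VM_FAULT_FALLBACK;	/* let the core retry with base pages */

	pfn = phys_to_pfn_t(my_region_phys(r, vmf->pgoff),	/* hypothetical */
			    PFN_DEV | PFN_MAP);
	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}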
page_size_compat.h:44 extern vm_fault_t shmem_fault(struct vm_fault *vmf);
48 extern vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf);
mempolicy.h:170 int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
286 struct vm_fault *vmf, in mpol_misplaced() argument
mm.h:617 vm_fault_t (*fault)(struct vm_fault *vmf);
618 vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
619 vm_fault_t (*map_pages)(struct vm_fault *vmf,
625 vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
628 vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
891 static inline void release_fault_lock(struct vm_fault *vmf) in release_fault_lock() argument
893 if (vmf->flags & FAULT_FLAG_VMA_LOCK) in release_fault_lock()
894 vma_end_read(vmf->vma); in release_fault_lock()
896 mmap_read_unlock(vmf->vma->vm_mm); in release_fault_lock()
899 static inline void assert_fault_locked(struct vm_fault *vmf) in assert_fault_locked() argument
[all …]
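
The mm.h matches are the vm_operations_struct fault callbacks (lines 617-628) plus the per-VMA-lock helpers release_fault_lock() and assert_fault_locked(). A minimal driver-side sketch of filling in .fault is below; my_dev_priv, my_dev fields and the PFN math are invented for illustration, while vmf_insert_pfn() is an existing mm.h helper that simply is not among the vmf hits.

#include <linux/mm.h>

struct my_dev_priv {			/* hypothetical driver state */
	unsigned long base_pfn;
	unsigned long nr_pages;
};

static vm_fault_t my_drv_fault(struct vm_fault *vmf)
{
	struct my_dev_priv *priv = vmf->vma->vm_private_data;

	if (vmf->pgoff >= priv->nr_pages)
		return VM_FAULT_SIGBUS;

	/* install a PTE for the faulting address */
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      priv->base_pfn + vmf->pgoff);
}

static const struct vm_operations_struct my_drv_vm_ops = {
	.fault = my_drv_fault,
};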
memremap.h:90 vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
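
memremap.h's single hit is the migrate_to_ram() callback of struct dev_pagemap_ops, which device-private memory providers (GPU drivers and the like) implement to pull data back to system RAM when the CPU faults on a device-private page. A hedged skeleton, with the migration body elided and the my_* names invented:

#include <linux/memremap.h>

/* Hypothetical device-private memory provider. */
static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	/* migrate the device-private page backing vmf->page back to RAM,
	 * then let the fault be retried (details elided) */
	return 0;
}

static const struct dev_pagemap_ops my_pgmap_ops = {
	.migrate_to_ram = my_migrate_to_ram,
};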
userfaultfd_k.h:85 extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
300 static inline vm_fault_t handle_userfault(struct vm_fault *vmf, in handle_userfault() argument
dax.h:245 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
247 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
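
dax.h exposes dax_iomap_fault() and dax_finish_sync_fault(), which fsdax filesystems call from their huge_fault handler. Both signatures are truncated in the listing; the trailing arguments used below (a pfn_t out-pointer, an optional error pointer and the filesystem's iomap_ops) are assumed from how the in-tree filesystems call them, so treat this as a sketch rather than the exact prototype.

#include <linux/dax.h>
#include <linux/iomap.h>
#include <linux/pfn_t.h>

extern const struct iomap_ops my_iomap_ops;	/* hypothetical fs iomap_ops */

/* Hypothetical fsdax fault handler; trailing dax_iomap_fault() args assumed. */
static vm_fault_t my_fs_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	pfn_t pfn;
	vm_fault_t ret;

	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &my_iomap_ops);
	/* For MAP_SYNC mappings a filesystem would check VM_FAULT_NEEDDSYNC
	 * here and call dax_finish_sync_fault(vmf, order, pfn) once its
	 * metadata is stable. */
	return ret;
}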
iomap.h:313 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
pagemap.h:1103 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
1208 struct vm_fault *vmf) in folio_lock_or_retry() argument
1212 return __folio_lock_or_retry(folio, vmf); in folio_lock_or_retry()
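
pagemap.h's folio_lock_or_retry() wraps __folio_lock_or_retry(): it tries a non-blocking lock first and may otherwise drop the fault lock and return VM_FAULT_RETRY so the fault is restarted. A small hypothetical helper showing the return-value contract:

#include <linux/pagemap.h>

/* Hypothetical helper; illustrates the calling pattern only. */
static vm_fault_t my_lock_folio_in_fault(struct folio *folio,
					  struct vm_fault *vmf)
{
	vm_fault_t ret = folio_lock_or_retry(folio, vmf);

	if (ret)		/* VM_FAULT_RETRY: the fault lock was dropped */
		return ret;

	/* folio is now locked; the caller must folio_unlock() it later */
	return 0;
}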
netfs.h:428 vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
buffer_head.h:277 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
mm_types.h:1369 struct vm_fault *vmf);
kvm_host.h:1485 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
/include/trace/hooks/
mm.h:140 TP_PROTO(struct vm_fault *vmf, unsigned long *fault_around_pages),
141 TP_ARGS(vmf, fault_around_pages), 1);
245 TP_PROTO(struct vm_fault *vmf, bool *should_around),
246 TP_ARGS(vmf, should_around));
326 TP_PROTO(struct vm_fault *vmf, struct folio *folio, bool *skip),
327 TP_ARGS(vmf, folio, skip));
375 TP_PROTO(struct vm_fault *vmf, unsigned long fault_around_bytes),
376 TP_ARGS(vmf, fault_around_bytes));
378 TP_PROTO(struct vm_fault *vmf, struct folio *folio),
379 TP_ARGS(vmf, folio));
[all …]
/include/drm/ttm/
ttm_bo.h:425 struct vm_fault *vmf);
426 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
429 vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
434 vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
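
ttm_bo.h provides ready-made fault handling for TTM-managed buffer objects, so GPU drivers usually plug ttm_bo_vm_fault() (or build on ttm_bo_vm_fault_reserved()) straight into their vm_operations_struct. A sketch of that wiring, assuming the companion ttm_bo_vm_open/close/access helpers declared in the same header:

#include <drm/ttm/ttm_bo.h>

/* Sketch of a TTM-based driver's vm_ops reusing the TTM helpers. */
static const struct vm_operations_struct my_ttm_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};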