/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 */

#ifndef _XPM_H
#define _XPM_H

#include <linux/mm.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/xpm_types.h>
#include <linux/hck/lite_hck_xpm.h>

/**
 * When an inode is destroyed, the corresponding XPM cache node must be
 * destroyed as well.
 */
static inline void xpm_delete_cache_node_hook(struct inode *file_node)
{
	CALL_HCK_LITE_HOOK(xpm_delete_cache_node_lhck, file_node);
}
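
/*
 * Illustrative sketch only (not part of this header): an inode teardown
 * path could call the hook so the XPM cache entry does not outlive its
 * inode. The eviction callback below is hypothetical.
 *
 *	static void example_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *		xpm_delete_cache_node_hook(inode);
 *	}
 */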

/**
 * Check whether the input address range lies outside the XPM region.
 */
static inline bool xpm_region_outer_hook(unsigned long addr_start,
	unsigned long addr_end, unsigned long flags)
{
	bool ret = true;

	CALL_HCK_LITE_HOOK(xpm_region_outer_lhck, addr_start,
		addr_end, flags, &ret);
	return ret;
}
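
/*
 * Illustrative sketch only (hypothetical caller): a mapping sanity check
 * could reject a fixed mapping that overlaps the XPM region, since the
 * hook returns true only when the whole range lies outside it (or when
 * no hook is registered).
 *
 *	if (!xpm_region_outer_hook(addr, addr + len, vm_flags))
 *		return -EPERM;
 */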

/**
 * Get an unmapped area inside the XPM region.
 */
static inline unsigned long xpm_get_unmapped_area_hook(unsigned long addr,
	unsigned long len, unsigned long map_flags,
	unsigned long unmapped_flags)
{
	unsigned long ret = 0;

	CALL_HCK_LITE_HOOK(xpm_get_unmapped_area_lhck, addr, len,
		map_flags, unmapped_flags, &ret);
	return ret;
}
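
/*
 * Illustrative sketch only (hypothetical caller, local names are
 * placeholders): a get_unmapped_area()-style path could let the XPM
 * region supply the address first; the default return value of 0 means
 * the hook made no placement and the normal search should continue.
 *
 *	unsigned long xpm_addr;
 *
 *	xpm_addr = xpm_get_unmapped_area_hook(addr, len, flags, xpm_flags);
 *	if (xpm_addr)
 *		return xpm_addr;
 */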

/*
 * Check a page's XPM flags for conflicts: make sure a process will not
 * map an RO page into a writable VMA, or a WT page into an
 * executable/XPM memory region.
 */
static inline vm_fault_t xpm_integrity_check_hook(struct vm_area_struct *vma,
	unsigned int vflags, unsigned long addr, struct page *page)
{
	vm_fault_t ret = 0;

	CALL_HCK_LITE_HOOK(xpm_integrity_check_lhck, vma, vflags,
		addr, page, &ret);
	return ret;
}
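
/*
 * Illustrative sketch only (hypothetical fault-path caller): a non-zero
 * vm_fault_t from the hook aborts installing the faulted page.
 *
 *	vm_fault_t err = xpm_integrity_check_hook(vma, vflags,
 *						  vmf->address, page);
 *	if (err)
 *		return err;
 */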

static inline
vm_fault_t xpm_integrity_validate_hook(struct vm_area_struct *vma,
	unsigned int vflags, unsigned long addr, struct page *page)
{
	vm_fault_t ret = 0;

	CALL_HCK_LITE_HOOK(xpm_integrity_validate_lhck, vma, vflags,
		addr, page, &ret);
	return ret;
}

static inline
void xpm_integrity_update_hook(struct vm_area_struct *vma,
	unsigned int vflags, struct page *page)
{
	CALL_HCK_LITE_HOOK(xpm_integrity_update_lhck, vma, vflags, page);
}

static inline bool xpm_integrity_check_one_page_merge(struct page *page,
	struct page *kpage)
{
	bool ret = true;

	CALL_HCK_LITE_HOOK(xpm_integrity_equal_lhck, page, kpage, &ret);
	return ret;
}
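
/*
 * Illustrative sketch only (hypothetical page-merge caller, KSM-style):
 * skip the merge when the two pages' integrity states are not reported
 * as equal by the xpm_integrity_equal_lhck hook. The default (no hook
 * registered) is true, which allows the merge.
 *
 *	if (!xpm_integrity_check_one_page_merge(page, kpage))
 *		goto out_unlock;
 */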

#ifdef CONFIG_ARM64
#define pte_user_mkexec(oldpte, ptent) \
	((!pte_user_exec(oldpte) && pte_user_exec(ptent)))
#else
#define pte_user_mkexec(oldpte, ptent) 1
#endif
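
/*
 * pte_user_mkexec(oldpte, ptent) evaluates to true exactly when a PTE
 * gains user-execute permission on arm64, and unconditionally to 1 on
 * other architectures. Illustrative sketch only (hypothetical
 * protection-change caller): re-validate integrity at the point a page
 * becomes user-executable.
 *
 *	if (pte_user_mkexec(oldpte, ptent))
 *		ret = xpm_integrity_validate_hook(vma, vflags, addr,
 *						  pte_page(ptent));
 */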

#endif /* _XPM_H */