// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 */

#include <asm/page.h>

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/hck/lite_hck_xpm.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

#include "exec_signature_info.h"
#include "xpm_common.h"
#include "xpm_debugfs.h"
#include "xpm_hck_hooks.h"
#include "xpm_log.h"
#include "xpm_report.h"

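/*
 * Hook: drop the cached executable signature info attached to this inode
 * when its cache node is deleted.
 */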
static void xpm_delete_cache_node(struct inode *file_node)
{
	delete_exec_file_signature_info(file_node);
}

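/*
 * Hook: report through @ret whether [addr_start, addr_end) lies entirely
 * outside the current task's xpm_region. Requests carrying
 * VM_UNMAPPED_AREA_XPM are allocated inside the region on purpose and
 * are left untouched.
 */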
static void xpm_region_outer(unsigned long addr_start, unsigned long addr_end,
	unsigned long flags, bool *ret)
{
	struct mm_struct *mm = current->mm;

	if (!mm)
		return;

	/*
	 * VM_UNMAPPED_AREA_XPM marks an address that is meant to be
	 * allocated inside the xpm_region, so just ignore it here.
	 */
	if (flags & VM_UNMAPPED_AREA_XPM)
		return;

	*ret = ((addr_start >= mm->xpm_region.addr_end) ||
		(addr_end <= mm->xpm_region.addr_start));
}

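/*
 * Hook: constrain address selection with respect to the xpm_region.
 * MAP_FIXED mappings that overlap the region are rejected, and MAP_XPM
 * mappings are forced to be allocated from inside the region via
 * vm_unmapped_area().
 */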
void xpm_get_unmapped_area(unsigned long addr, unsigned long len,
	unsigned long map_flags, unsigned long unmapped_flags,
	unsigned long *ret)
{
	struct vm_unmapped_area_info info;
	struct mm_struct *mm = current->mm;

	if (!mm)
		return;

	if ((mm->xpm_region.addr_start == 0) && (mm->xpm_region.addr_end == 0))
		return;

	if ((map_flags & MAP_FIXED) && !(addr >= mm->xpm_region.addr_end ||
		addr + len <= mm->xpm_region.addr_start)) {
		xpm_log_error("xpm region not allow mmap with MAP_FIXED");
		*ret = -EFAULT;
		return;
	}

	if (map_flags & MAP_XPM) {
		if (addr) {
			xpm_log_error("xpm region not allow specify addr");
			*ret = -EPERM;
			return;
		}

		info.flags = VM_UNMAPPED_AREA_XPM | unmapped_flags;
		info.length = len;
		info.low_limit = mm->xpm_region.addr_start;
		info.high_limit = mm->xpm_region.addr_end;
		info.align_mask = 0;
		info.align_offset = 0;

		*ret = vm_unmapped_area(&info);
	}
}

/*
 * An xpm readonly region is an area where any page mapped
 * into it will be marked with XPMReadonly.
 *
 * Return true if the region is readonly, otherwise false.
 */
static bool is_xpm_readonly_region(struct vm_area_struct *vma)
{
	/* xpm region */
	if (vma->vm_flags & VM_XPM)
		return true;

	/* !anonymous && executable */
	if (!xpm_is_anonymous_vma(vma) && (vma->vm_flags & VM_EXEC))
		return true;

	return false;
}

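/*
 * Hook: fail the fault with SIGSEGV on an integrity violation, i.e. a
 * write fault targeting an XPMReadonly page, or a writetainted page
 * being mapped into an xpm readonly region.
 */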
void xpm_integrity_check(struct vm_area_struct *vma, unsigned int vflags,
	unsigned long addr, struct page *page, vm_fault_t *ret)
{
	if (!page)
		return;

	/* integrity violation: write a readonly page */
	if ((vflags & FAULT_FLAG_WRITE) && (vma->vm_flags & VM_WRITE) &&
			PageXPMReadonly(page)) {
		report_integrity_event(INTEGRITY_RO, vma, page);
		*ret = xpm_ret(VM_FAULT_SIGSEGV);
		return;
	}

	/* integrity violation: execute a writetainted page */
	if (PageXPMWritetainted(page) && is_xpm_readonly_region(vma)) {
		report_integrity_event(INTEGRITY_WT, vma, page);
		*ret = xpm_ret(VM_FAULT_SIGSEGV);
		return;
	}
}

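/*
 * Hook: update the page's xpm state after it passes the check, marking
 * it writetainted on a real write, or readonly when it is mapped into
 * an xpm readonly region.
 */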
void xpm_integrity_update(struct vm_area_struct *vma, unsigned int vflags,
	struct page *page)
{
	/* set writetainted only if a real write occurred */
	if ((vflags & FAULT_FLAG_WRITE) && (vma->vm_flags & VM_WRITE) &&
			!PageXPMWritetainted(page)) {
		SetPageXPMWritetainted(page);
		return;
	}

	/* set xpm readonly flag */
	if (is_xpm_readonly_region(vma) && !PageXPMReadonly(page))
		SetPageXPMReadonly(page);
}

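/*
 * Hook: combined fault-path helper that runs the integrity check and,
 * only if it passes, updates the page's xpm state.
 */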
void xpm_integrity_validate(struct vm_area_struct *vma, unsigned int vflags,
	unsigned long addr, struct page *page, vm_fault_t *ret)
{
	if (!page)
		return;

	xpm_integrity_check(vma, vflags, addr, page, ret);
	if (!*ret)
		xpm_integrity_update(vma, vflags, page);
}

/*
 * Check whether the xpm integrity state of the two pages matches.
 *
 * Return true if equal, otherwise false.
 */
void xpm_integrity_equal(struct page *page, struct page *kpage, bool *ret)
{
	if (!page || !kpage)
		return;

	*ret = ((PageXPMWritetainted(page) == PageXPMWritetainted(kpage)) &&
		(PageXPMReadonly(page) == PageXPMReadonly(kpage)));
}

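/*
 * Register all of the lite HCK hooks exposed by this file.
 */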
void xpm_register_hck_hooks(void)
{
	REGISTER_HCK_LITE_HOOK(xpm_delete_cache_node_lhck,
		xpm_delete_cache_node);

	REGISTER_HCK_LITE_HOOK(xpm_region_outer_lhck, xpm_region_outer);
	REGISTER_HCK_LITE_HOOK(xpm_get_unmapped_area_lhck,
		xpm_get_unmapped_area);

	/* xpm integrity */
	REGISTER_HCK_LITE_HOOK(xpm_integrity_equal_lhck, xpm_integrity_equal);
	REGISTER_HCK_LITE_HOOK(xpm_integrity_check_lhck, xpm_integrity_check);
	REGISTER_HCK_LITE_HOOK(xpm_integrity_update_lhck, xpm_integrity_update);
	REGISTER_HCK_LITE_HOOK(xpm_integrity_validate_lhck,
		xpm_integrity_validate);
}