/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
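
/*
 * Illustrative sketch (assumed caller, not part of this header): on
 * architectures with aliasing D-caches, a driver that modifies a user
 * page through a kernel mapping flushes it before unmapping, so user
 * mappings see the update:
 *
 *	void *addr = kmap_atomic(page);
 *
 *	memcpy(addr, buf, len);
 *	flush_kernel_dcache_page(page);
 *	kunmap_atomic(addr);
 */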

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;
static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
	atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_dec(void)
{
	atomic_long_dec(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline void totalhigh_pages_set(long val)
{
	atomic_long_set(&_totalhigh_pages, val);
}

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline unsigned long totalhigh_pages(void) { return 0UL; }

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
	preempt_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */
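
/*
 * Illustrative sketch (assumed caller, not part of this header): kmap()
 * may sleep, so it suits longer-lived mappings and is undone with
 * kunmap() on the page; kmap_atomic() is for short, non-preemptible
 * sections and is undone with kunmap_atomic() on the returned address:
 *
 *	void *addr = kmap(page);
 *
 *	copy_and_maybe_sleep(addr);	(hypothetical helper)
 *	kunmap(page);
 */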

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap();
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
	BUILD_BUG_ON(__same_type((addr), struct page *));       \
	__kunmap_atomic(addr);                                  \
} while (0)
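
/*
 * Illustrative sketch (assumed caller, not part of this header): the
 * BUILD_BUG_ON() above rejects passing the page itself at compile time:
 *
 *	char *addr = kmap_atomic(page);
 *
 *	addr[0] = 0;
 *	kunmap_atomic(addr);	correct: unmap by returned address
 *	kunmap_atomic(page);	would not compile: __same_type() trips
 */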


/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
#ifndef CONFIG_CMA
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
#else
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
						vaddr);
#endif
}
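
/*
 * Illustrative sketch (assumed caller, not part of this header): a fault
 * path allocating a zeroed anonymous page would use the movable variant:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */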

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
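
/*
 * Illustrative sketch (assumed caller, not part of this header): a
 * filesystem zeroing the tail of a page after a partial write, with a
 * hypothetical "from" offset:
 *
 *	zero_user_segment(page, from, PAGE_SIZE);
 */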

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_atomic(page);

	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_atomic(page);

	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}
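
/*
 * Illustrative sketch (assumed caller, not part of this header): copying
 * a buffer into a (possibly highmem) page without open-coding the map:
 *
 *	memcpy_to_page(page, offset, buf, len);
 *
 * which is equivalent to the kmap_atomic()/memcpy()/kunmap_atomic()
 * sequence above.
 */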

#endif /* _LINUX_HIGHMEM_H */