// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	September 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

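/*
 * Install @pte in the fixmap slot @idx and flush the stale TLB entry
 * for that virtual address on the local CPU.
 */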
static inline void set_fixmap_pte(int idx, pte_t pte)
{
	unsigned long vaddr = __fix_to_virt(idx);
	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);

	set_pte_ext(ptep, pte, 0);
	local_flush_tlb_kernel_page(vaddr);
}

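/*
 * Return the PTE currently installed at the fixmap address @vaddr,
 * used by the CONFIG_DEBUG_HIGHMEM sanity checks below.
 */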
static inline pte_t get_fixmap_pte(unsigned long vaddr)
{
	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);

	return *ptep;
}

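/*
 * Map a page into the kernel's address space.  May sleep, so it must
 * not be called from atomic context; lowmem pages simply return their
 * existing linear-map address.
 */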
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

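/*
 * Undo a kmap().  Only highmem pages carry a pkmap mapping that must be
 * released; lowmem pages are permanently mapped and need no work.
 */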
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

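/*
 * Atomically map a highmem page into a per-CPU fixmap slot.  Preemption
 * and page faults are disabled until the matching __kunmap_atomic().
 * If the page already has a permanent kmap mapping, that mapping is
 * reused, which on VIVT caches also avoids aliasing the same page at
 * two different virtual addresses.
 */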
void *kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
		kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so the contained TLB flush ensures the TLB is updated
	 * with the new mapping.
	 */
	set_fixmap_pte(idx, mk_pte(page, kmap_prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

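/*
 * Tear down a kmap_atomic() mapping.  Fixmap addresses release their
 * per-CPU slot (cleaning the VIVT data cache first, since the virtual
 * address is about to be reused); pkmap addresses were obtained via
 * kmap_high_get() and are dropped with kunmap_high().
 */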
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(idx));
		set_fixmap_pte(idx, __pte(0));
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

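/*
 * Like kmap_atomic(), but keyed by page frame number.  Lowmem pfns are
 * returned through the linear mapping; highmem pfns get a per-CPU
 * fixmap slot without checking for an existing permanent kmap.
 */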
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;
	struct page *page = pfn_to_page(pfn);

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));

	return (void *)vaddr;
}