// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

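/* pte of the first kmap fixmap slot, cached by kmap_init() at boot */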
static pte_t *kmap_pte;

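/* pfn bounds of highmem, typically filled in by the arch's early memory setup */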
unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
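
/*
 * Typical kmap()/kunmap() use, as a minimal sketch ("page", "buf" and
 * "len" are illustrative names, not defined in this file):
 *
 *	char *vaddr = kmap(page);
 *	memcpy(buf, vaddr, len);
 *	kunmap(page);
 *
 * kmap() may sleep, so this pattern is only valid in process context.
 */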

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap: it
 * needs no global lock, and unlike kmap it never has to perform a global
 * TLB invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
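
/*
 * A minimal atomic-kmap sketch (illustrative only; "page", "buf" and "len"
 * are assumed to exist in the caller):
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(buf, vaddr, len);
 *	kunmap_atomic(vaddr);
 *
 * Nothing between kmap_atomic() and kunmap_atomic() may sleep.
 */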

void *kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

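	/*
	 * Each CPU owns a window of KM_TYPE_NR fixmap slots; the push below
	 * hands out the next free slot for this CPU, and the slot number is
	 * converted to a fixed virtual address.  Slot ptes are adjacent, so
	 * slot idx is reached as kmap_pte - idx (fixmap addresses descend as
	 * the index grows).
	 */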
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
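	/* Point this CPU's fixmap slot at the highmem page. */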
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

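	/*
	 * Lowmem pages were handed out via page_address() and never got a
	 * fixmap slot, so only addresses in the fixmap range need tearing
	 * down here.
	 */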
	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
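/*
 * The returned mapping is torn down with kunmap_atomic(), exactly as for
 * kmap_atomic(); e.g. (sketch, "pfn" assumed to be valid and owned by the
 * caller):
 *
 *	void *vaddr = kmap_atomic_pfn(pfn);
 *	...
 *	kunmap_atomic(vaddr);
 */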
void *kmap_atomic_pfn(unsigned long pfn)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
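	/* Map the raw pfn and flush any stale translation for this slot. */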
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

	return (void *)vaddr;
}

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
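
	/*
	 * Caching this pte at boot lets the atomic mapping paths above write
	 * fixmap entries directly instead of walking the page tables on
	 * every kmap_atomic() call.
	 */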
}