#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
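
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a typical kmap()/kunmap() pair copying out of a possibly-highmem
 * page.  The helper name is hypothetical; memcpy() assumes
 * <linux/string.h> is reachable through the includes above.
 */
static void __maybe_unused example_copy_from_page(struct page *page, void *dst)
{
	void *src = kmap(page);		/* may sleep, never from atomic context */

	memcpy(dst, src, PAGE_SIZE);
	kunmap(page);			/* drop the highmem mapping */
}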

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap:
 * no global lock is needed, and unlike kmap, no global TLB invalidation
 * has to be performed when the mapping pool wraps around.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in
		 * a hypervisor.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
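
/*
 * Usage sketch (editor's illustration): the public entry point is the
 * kunmap_atomic() macro from <linux/highmem.h>, which funnels into
 * __kunmap_atomic() above.  Nested atomic kmaps stack LIFO, so they
 * must be released in reverse order of mapping, with no sleeping in
 * between.  The helper name is hypothetical.
 */
static void __maybe_unused example_zero_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* disables pagefaults */

	memset(vaddr, 0, PAGE_SIZE);		/* short, non-sleeping work only */
	kunmap_atomic(vaddr);			/* clears the pte, pops the slot */
}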

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

	return (void *)vaddr;
}
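
/*
 * Usage sketch (editor's illustration): kmap_atomic_pfn() suits memory
 * that has no struct page, e.g. a region reserved at boot.  The pfn
 * and offset arguments are hypothetical, and u32 assumes
 * <linux/types.h>; teardown goes through kunmap_atomic() exactly as
 * for a normal atomic kmap.
 */
static u32 __maybe_unused example_peek_pfn(unsigned long pfn, unsigned long offset)
{
	void *vaddr = kmap_atomic_pfn(pfn);
	u32 val = *(u32 *)(vaddr + (offset & ~PAGE_MASK));

	kunmap_atomic(vaddr);
	return val;
}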

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	int idx;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
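
/*
 * Usage sketch (editor's illustration): kmap_atomic_to_page() resolves
 * a kernel virtual address back to its struct page, whether it is a
 * plain lowmem address or a live atomic kmap slot.  The helper name
 * is hypothetical.
 */
static unsigned long __maybe_unused example_pfn_of(void *vaddr)
{
	return page_to_pfn(kmap_atomic_to_page(vaddr));
}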

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
134