/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * Provides kernel-static versions of atomic kmap functions originally
 * found as inlines in include/asm-sparc/highmem.h.  These became
 * needed as kmap_atomic() and kunmap_atomic() started getting
 * called from within modules.
 * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
 * modules because they are loaded with btfixup-ped functions.
 */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * give a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text.  Actually, it's fine to use atomic kmaps,
 * provided you remember that they are atomic and must not sleep with
 * a kmap taken, much like a spinlock.  Non-atomic kmaps are shared by
 * all CPUs and are therefore precious, and establishing one requires
 * an IPI.  Atomic kmaps are per-CPU and lightweight, so we can have
 * NR_CPUS times as many of them.
 */
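
/*
 * For illustration only, a minimal usage sketch (not part of this file;
 * the helper name and parameters below are hypothetical): a caller maps
 * a possibly-highmem page, copies out of it without sleeping, and then
 * unmaps it.
 *
 *	static void copy_from_page(struct page *page, void *dst, size_t len)
 *	{
 *		void *src = kmap_atomic(page);	// fixmap slot or lowmem address
 *
 *		memcpy(dst, src, len);		// no sleeping between map/unmap
 *		kunmap_atomic(src);		// pairs with kmap_atomic()
 *	}
 */
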
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

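	/*
	 * Each CPU owns a private window of KM_TYPE_NR fixmap slots;
	 * kmap_atomic_idx_push() hands out the next free slot for this
	 * CPU, and the resulting index selects both the fixmap virtual
	 * address and the matching kmap_pte entry below.
	 */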
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

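	/*
	 * Lowmem pages were returned straight from page_address() in
	 * kmap_atomic(), so there is no fixmap slot to tear down; just
	 * undo the pagefault_disable() and return.
	 */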
	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else
		flush_cache_all();
#endif

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

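	/*
	 * Dispatch on where the address lives: below the SRMMU nocache
	 * region it is an ordinary lowmem mapping, below PKMAP_BASE it
	 * belongs to the SRMMU nocache pool, and anything else must be
	 * a fixmap (atomic kmap) slot.
	 */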
	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}