/*
 *  highmem.c: virtual kernel memory mappings for high memory
 *
 *  Provides kernel-static versions of atomic kmap functions originally
 *  found as inlines in include/asm-sparc/highmem.h.  These became
 *  needed as kmap_atomic() and kunmap_atomic() started getting
 *  called from within modules.
 *  -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 *  But kmap_atomic() and kunmap_atomic() cannot be inlined in
 *  modules because they are loaded with btfixup-ped functions.
 */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text.  Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and do not try to sleep
 * with a kmap taken, much like with a spinlock.  Non-atomic kmaps are
 * shared by all CPUs and so are precious, and establishing them
 * requires an IPI.  Atomic kmaps are lightweight and we may have
 * NCPUS more of them.
 */
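
/*
 * Illustrative sketch (editorial, not part of this file): the usual
 * calling pattern for the functions below, using one of the per-CPU
 * slot types from asm/kmap_types.h.  Anything that can sleep is
 * forbidden between the two calls, exactly as inside a spinlock:
 *
 *	void *vaddr = kmap_atomic(page, KM_USER0);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr, KM_USER0);
 */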
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);

/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
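
/*
 * Editorial summary (a sketch, not authoritative): the virtual-address
 * ranges kmap_atomic_to_page() above distinguishes between:
 *
 *	vaddr < SRMMU_NOCACHE_VADDR	- lowmem linear mapping,
 *					  handled by virt_to_page()
 *	vaddr < PKMAP_BASE		- SRMMU nocache pool, converted
 *					  via __nocache_pa()
 *	FIXADDR_START..FIXADDR_TOP	- per-CPU fixmap slots set up
 *					  by kmap_atomic() above
 */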