#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>

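/*
 * kmap() - map a highmem page into the kernel's virtual address space.
 * May sleep, so it is only usable from process context. Lowmem pages
 * are permanently mapped already, so for them the direct-mapped
 * address is returned without touching the kmap pool.
 */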
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

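/*
 * kunmap() - undo a mapping created by kmap(). Must not be called from
 * interrupt context. Lowmem pages never went through the kmap pool, so
 * there is nothing to release for them.
 */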
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
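
/*
 * Illustrative usage sketch (not part of this file): given a
 * struct page *page in process context, where sleeping is allowed:
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 */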

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

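	/*
	 * Each CPU owns KM_TYPE_NR consecutive fixmap slots; grab the
	 * next free slot for this CPU and wire the page into its pte.
	 */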
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
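
/*
 * Illustrative usage sketch (not part of this file): the mapping is
 * per-CPU, so the code between map and unmap must not sleep. Assumes
 * a struct page *page:
 *
 *	void *vaddr = kmap_atomic(page);
 *	clear_page(vaddr);
 *	kunmap_atomic(vaddr);
 */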

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
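
/*
 * Illustrative usage sketch (not part of this file), assuming a valid
 * unsigned long pfn for a frame that lacks a struct page:
 *
 *	void *vaddr = kmap_atomic_pfn(pfn);
 *	...read or write the page...
 *	kunmap_atomic(vaddr);
 */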

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

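	/*
	 * Fixmap indices grow downwards in the virtual address space,
	 * so FIX_KMAP_END is the lowest address of the kmap window and
	 * FIX_KMAP_BEGIN the highest.
	 */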
	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

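/*
 * kmap_atomic_to_page() - look up the struct page backing an address
 * returned by kmap_atomic(). Addresses below FIXADDR_START come from
 * the direct mapping and are translated with virt_to_page(); fixmap
 * addresses are resolved through the kmap pte array.
 */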
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);

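/*
 * set_highmem_pages_init() - walk every highmem zone at boot and hand
 * its pages to the buddy allocator via the active-region helper.
 */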
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before free_all_bootmem().
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
}