• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* MN10300 Virtual kernel memory mappings for high memory
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  * - Derived from include/asm-i386/highmem.h
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public Licence
9  * as published by the Free Software Foundation; either version
10  * 2 of the Licence, or (at your option) any later version.
11  */
12 #ifndef _ASM_HIGHMEM_H
13 #define _ASM_HIGHMEM_H
14 
15 #ifdef __KERNEL__
16 
17 #include <linux/init.h>
18 #include <linux/interrupt.h>
19 #include <linux/highmem.h>
20 #include <asm/kmap_types.h>
21 #include <asm/pgtable.h>
22 
23 /* undef for production */
24 #undef HIGHMEM_DEBUG
25 
26 /* declarations for highmem.c */
27 extern unsigned long highstart_pfn, highend_pfn;
28 
29 extern pte_t *kmap_pte;
30 extern pgprot_t kmap_prot;
31 extern pte_t *pkmap_page_table;
32 
33 extern void __init kmap_init(void);
34 
/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define PKMAP_BASE	0xfe000000UL
#define LAST_PKMAP	1024		/* number of pkmap slots; power of two */
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
/* Argument fully parenthesized so expression arguments expand correctly */
#define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
45 
46 extern unsigned long kmap_high(struct page *page);
47 extern void kunmap_high(struct page *page);
48 
kmap(struct page * page)49 static inline unsigned long kmap(struct page *page)
50 {
51 	if (in_interrupt())
52 		BUG();
53 	if (page < highmem_start_page)
54 		return page_address(page);
55 	return kmap_high(page);
56 }
57 
kunmap(struct page * page)58 static inline void kunmap(struct page *page)
59 {
60 	if (in_interrupt())
61 		BUG();
62 	if (page < highmem_start_page)
63 		return;
64 	kunmap_high(page);
65 }
66 
67 /*
68  * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
69  * gives a more generic (and caching) interface. But kmap_atomic can
70  * be used in IRQ contexts, so in some (very limited) cases we need
71  * it.
72  */
kmap_atomic(struct page * page)73 static inline void *kmap_atomic(struct page *page)
74 {
75 	unsigned long vaddr;
76 	int idx, type;
77 
78 	preempt_disable();
79 	pagefault_disable();
80 	if (page < highmem_start_page)
81 		return page_address(page);
82 
83 	type = kmap_atomic_idx_push();
84 	idx = type + KM_TYPE_NR * smp_processor_id();
85 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
86 #if HIGHMEM_DEBUG
87 	if (!pte_none(*(kmap_pte - idx)))
88 		BUG();
89 #endif
90 	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
91 	local_flush_tlb_one(vaddr);
92 
93 	return (void *)vaddr;
94 }
95 
__kunmap_atomic(unsigned long vaddr)96 static inline void __kunmap_atomic(unsigned long vaddr)
97 {
98 	int type;
99 
100 	if (vaddr < FIXADDR_START) { /* FIXME */
101 		pagefault_enable();
102 		preempt_enable();
103 		return;
104 	}
105 
106 	type = kmap_atomic_idx();
107 
108 #if HIGHMEM_DEBUG
109 	{
110 		unsigned int idx;
111 		idx = type + KM_TYPE_NR * smp_processor_id();
112 
113 		if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
114 			BUG();
115 
116 		/*
117 		 * force other mappings to Oops if they'll try to access
118 		 * this pte without first remap it
119 		 */
120 		pte_clear(kmap_pte - idx);
121 		local_flush_tlb_one(vaddr);
122 	}
123 #endif
124 
125 	kmap_atomic_idx_pop();
126 	pagefault_enable();
127 	preempt_enable();
128 }
129 #endif /* __KERNEL__ */
130 
131 #endif /* _ASM_HIGHMEM_H */
132