/* highmem.h: virtual kernel memory mappings for high memory
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from include/asm-i386/highmem.h
 *
 * See Documentation/frv/mmu-layout.txt for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/mem-layout.h>
#include <asm/spr-regs.h>
#include <asm/mb-regs.h>

#define NR_TLB_LINES		64	/* number of lines in the TLB */

#ifndef __ASSEMBLY__

#include <linux/interrupt.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

#ifdef CONFIG_DEBUG_HIGHMEM
#define HIGHMEM_DEBUG 1
#else
#define HIGHMEM_DEBUG 0
#endif

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

#define kmap_prot PAGE_KERNEL
#define kmap_pte ______kmap_pte_in_TLB
extern pte_t *pkmap_page_table;

#define flush_cache_kmaps()  do { } while (0)

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define LAST_PKMAP	PTRS_PER_PTE
#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)
#define PKMAP_NR(virt)	((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
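
/*
 * Illustrative note (added commentary, not part of the original header):
 * PKMAP_NR() and PKMAP_ADDR() are inverses over the pkmap window.  For a
 * page-aligned pkmap virtual address "virt",
 *
 *	unsigned long nr = PKMAP_NR(virt);	- slot index, 0..LAST_PKMAP-1
 *	unsigned long va = PKMAP_ADDR(nr);	- yields virt again
 *
 * kmap_high() hands out these slots and records the corresponding
 * mappings in pkmap_page_table.
 */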

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
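
/*
 * Usage sketch (added for illustration; "page" is a placeholder): kmap()
 * may sleep waiting for a free pkmap slot, so it is only valid in
 * process context:
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 * Lowmem pages are expected to come straight back via page_address();
 * only real highmem pages go through kmap_high()/kunmap_high().
 */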

extern struct page *kmap_atomic_to_page(void *ptr);

#endif /* !__ASSEMBLY__ */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * give a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
#define KMAP_ATOMIC_CACHE_DAMR		8
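
/*
 * Usage sketch (added for illustration; "page", "buf" and "len" are
 * placeholders): kmap_atomic() disables pagefaults for the duration of
 * the mapping, which is what makes it usable from IRQ context, and the
 * mapping must be dropped before anything that might sleep:
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(buf, vaddr, len);
 *	__kunmap_atomic(vaddr);
 *
 * Generic code would normally use the kunmap_atomic() wrapper from
 * <linux/highmem.h>, which ends up in the __kunmap_atomic() declared
 * below.
 */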

#ifndef __ASSEMBLY__

#define __kmap_atomic_primary(cached, paddr, ampr)						\
({												\
	unsigned long damlr, dampr;								\
												\
	dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V;		\
												\
	if (!cached)										\
		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory");			\
	else											\
		/* cache flush page attachment point */						\
		asm volatile("movgs %0,iampr"#ampr"\n"						\
			     "movgs %0,dampr"#ampr"\n"						\
			     :: "r"(dampr) : "memory"						\
			     );									\
												\
	asm("movsg damlr"#ampr",%0" : "=r"(damlr));						\
												\
	/*printk("DAMR"#ampr": PRIM sl=%d L=%08lx P=%08lx\n", type, damlr, dampr);*/		\
												\
	(void *) damlr;										\
})
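
/*
 * Added commentary: __kmap_atomic_primary() maps the page by loading
 * DAMPR<ampr> directly with the physical address plus the xAMPRx_*
 * attribute bits (a valid 16Kb window).  When "cached" is set, the
 * matching IAMPR<ampr> is loaded with the same value as well - the
 * original comment marks this as the cache-flush page attachment point.
 * The virtual address covered by the window is then read back from
 * DAMLR<ampr> and returned.
 */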

#define __kmap_atomic_secondary(slot, paddr)							  \
({												  \
	unsigned long damlr = KMAP_ATOMIC_SECONDARY_FRAME + (slot) * PAGE_SIZE;		  \
	unsigned long dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
												  \
	asm volatile("movgs %0,tplr \n"								  \
		     "movgs %1,tppr \n"								  \
		     "tlbpr %0,gr0,#2,#1"							  \
		     : : "r"(damlr), "r"(dampr) : "memory");					  \
												  \
	/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/			  \
												  \
	(void *) damlr;										  \
})
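
/*
 * Added commentary: __kmap_atomic_secondary() picks a fixed virtual
 * address in the KMAP_ATOMIC_SECONDARY_FRAME area (one page per slot)
 * and loads a TLB entry for it by writing TPLR/TPPR and issuing tlbpr;
 * __kunmap_atomic_secondary() further down issues tlbpr again to drop
 * that translation.
 */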

static inline void *kmap_atomic_primary(struct page *page)
{
	unsigned long paddr;

	pagefault_disable();
	paddr = page_to_phys(page);

	return __kmap_atomic_primary(1, paddr, 2);
}

#define __kunmap_atomic_primary(cached, ampr)				\
do {									\
	asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory");		\
	if (cached)							\
		asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory");	\
} while(0)

#define __kunmap_atomic_secondary(slot, vaddr)				\
do {									\
	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
} while(0)

static inline void kunmap_atomic_primary(void *kvaddr)
{
	__kunmap_atomic_primary(1, 2);
	pagefault_enable();
}
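
/*
 * Pairing sketch (added for illustration; "page" is a placeholder): a
 * primary atomic mapping must be released before anything that might
 * sleep, since pagefault_disable()/pagefault_enable() bracket it:
 *
 *	void *vaddr = kmap_atomic_primary(page);
 *	clear_page(vaddr);
 *	kunmap_atomic_primary(vaddr);
 *
 * Note that kmap_atomic_primary() always uses the cached variant on
 * DAMR2, so these mappings do not nest.
 */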

void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */