/**
 * @file IA64minstate.h
 *
 * @remark Copy of source code from linux kernel
 * @remark linux/arch/ia64/kernel/minstate.h
 *
 */

#include <linux/config.h>

#include "IA64entry.h"

/*
 * A couple of convenience macros that make writing and reading
 * SAVE_MIN and SAVE_REST easier.
 *
 * Each alias names the scratch register that carries the corresponding
 * saved resource between interruption entry and its store into pt_regs.
 */
#define rARPR		r31	/* saved predicate registers (pr) */
#define rCRIFS		r30	/* saved cr.ifs */
#define rCRIPSR		r29	/* saved cr.ipsr */
#define rCRIIP		r28	/* saved cr.iip */
#define rARRSC		r27	/* saved ar.rsc */
#define rARPFS		r26	/* saved ar.pfs */
#define rARUNAT		r25	/* saved ar.unat */
#define rARRNAT		r24	/* saved ar.rnat */
#define rARBSPSTORE	r23	/* saved ar.bspstore */
#define rKRBS		r22	/* base of the kernel register backing store */
#define rB6		r21	/* saved b6 */
#define rR1		r20	/* saved r1 */

/*
 * Here start the source dependent macros.
 */

/*
 * For ivt.s we want to access the stack virtually so we don't have
 * to disable translation on interrupts.
 *
 * Expanded with pKern/pUser already set by DO_SAVE_MIN (pKern == came
 * from kernel mode, pUser == came from user mode).  On exit, r1 points
 * at the pt_regs save area on the kernel memory stack and, for user
 * entries, the RSE has been switched to the kernel backing store.
 */
#define MINSTATE_START_SAVE_MIN_VIRT					\
	/* r1 = current (virtual) */					\
	dep r1=-1, r1, 61, 3;						\
	/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
(pUser)	mov ar.rsc=0;							\
	;;								\
	/* compute base of RBS */					\
(pUser)	addl rKRBS=IA64_RBS_OFFSET, r1;					\
(pUser)	mov rARRNAT=ar.rnat;						\
	/* get sp  */							\
(pKern) mov r1=sp;							\
	;;								\
	/* compute base of memory stack */				\
(pUser)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE, r1;			\
	/* save ar.bspstore */						\
(pUser)	mov rARBSPSTORE=ar.bspstore;					\
	;;								\
	/* if in kernel mode, use sp (r12) */				\
(pKern) addl r1=-IA64_PT_REGS_SIZE, r1;					\
	/* switch to kernel RBS */					\
(pUser)	mov ar.bspstore=rKRBS;						\
	;;								\
(pUser)	mov r18=ar.bsp;							\
	/* set eager mode, pl 0, little-endian, loadrs=0 */		\
(pUser)	mov ar.rsc=0x3;

/*
 * Tail of SAVE_MIN for the virtual-stack variant: convert `current'
 * (r13) to a kernel virtual address (r14 holds the region bits to OR
 * in — set up by DO_SAVE_MIN) and switch back to register bank 1.
 */
#define MINSTATE_END_SAVE_MIN_VIRT					\
	/* make `current' a kernel virtual address */			\
	or r13=r13, r14;						\
	/* switch back to bank 1 (must be last in insn group) */	\
	bsw.1;								\
	;;

/*
 * For mca_asm.S we want to access the stack physically since the state
 * is saved before we go virtual and don't want to destroy the iip or ipsr.
 *
 * Same contract as MINSTATE_START_SAVE_MIN_VIRT, but sp/r1 are turned
 * into physical addresses (region bits cleared) and kernel entries run
 * on ia64_init_stack.
 */
#define MINSTATE_START_SAVE_MIN_PHYS					\
(pKern) movl sp=ia64_init_stack+IA64_STK_OFFSET-IA64_PT_REGS_SIZE;	\
	/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
(pUser)	mov ar.rsc=0;							\
	/* compute base of register backing store */			\
(pUser)	addl rKRBS=IA64_RBS_OFFSET, r1;					\
	;;								\
(pUser)	mov rARRNAT=ar.rnat;						\
	/* compute physical addr of sp	*/				\
(pKern) dep r1=0, sp, 61, 3;						\
	/* compute base of memory stack */				\
(pUser)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE, r1;			\
	/* save ar.bspstore */						\
(pUser)	mov rARBSPSTORE=ar.bspstore;					\
	/* compute kernel virtual addr of RBS */			\
(pUser)	dep rKRBS=-1, rKRBS, 61, 3;					\
	;;								\
	/* if in kernel mode, use sp (r12) */				\
(pKern) addl r1=-IA64_PT_REGS_SIZE, r1;					\
	/* switch to kernel RBS */					\
(pUser)	mov ar.bspstore=rKRBS;						\
	;;								\
(pUser)	mov r18=ar.bsp;							\
	/* set eager mode, pl 0, little-endian, loadrs=0 */		\
(pUser)	mov ar.rsc=0x3;

/*
 * Tail of SAVE_MIN for the physical-stack variant: convert sp (r12)
 * and `current' (r13) to kernel virtual addresses by OR-ing in the
 * region bits held in r14 (set up by DO_SAVE_MIN).
 */
#define MINSTATE_END_SAVE_MIN_PHYS					\
	/* make sp a kernel virtual address */				\
	or r12=r12, r14;						\
	/* make `current' a kernel virtual address */			\
	or r13=r13, r14;						\
	;;

/*
 * The including source file selects the addressing mode by defining
 * MINSTATE_VIRT (ivt.s) or MINSTATE_PHYS (mca_asm.S) before including
 * this header.
 */
#ifdef MINSTATE_VIRT
# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_VIRT
# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_VIRT
#endif

#ifdef MINSTATE_PHYS
# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_PHYS
# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_PHYS
#endif

/*
 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state necessary that allows us to turn psr.ic back
 * on.
 *
 * Assumed state upon entry:
 *	psr.ic: off
 *	psr.dt: off
 *	r31:	contains saved predicates (pr)
 *
 * Upon exit, the state is as follows:
 *	psr.ic: off
 *	r2 = points to &pt_regs.r16
 *	r12 = kernel sp (kernel virtual address)
 *	r13 = points to current task_struct (kernel virtual address)
 *	p15 = TRUE if psr.i is set in cr.ipsr
 *	predicate registers (other than p2, p3, and p15), b6, r3, r8, r9,
 *		r10, r11, r14, r15: preserved
 *
 * Note that psr.ic is NOT turned on by this macro.  This is so that
 * we can pass interruption state as arguments to a handler.
 *
 * COVER, SAVE_IFS and EXTRA are instruction slots filled in by the
 * SAVE_MIN* wrappers below.
 */
#define DO_SAVE_MIN(COVER, SAVE_IFS, EXTRA)				\
	mov rARRSC=ar.rsc;						\
	mov rARPFS=ar.pfs;						\
	mov rR1=r1;							\
	mov rARUNAT=ar.unat;						\
	mov rCRIPSR=cr.ipsr;						\
	/* rB6 = branch reg 6 */			  		\
	mov rB6=b6;							\
	mov rCRIIP=cr.iip;						\
	/* r1 = current (physical) */			  		\
	mov r1=IA64_KR(CURRENT);					\
	COVER;								\
	;;								\
	invala;								\
	/* extract psr.cpl */				  		\
	extr.u r16=rCRIPSR, 32, 2;					\
	;;								\
	/* are we in kernel mode already? (psr.cpl==0) */ 		\
	cmp.eq pKern, pUser=r0, r16;					\
	/* switch from user to kernel RBS: */				\
	;;								\
	SAVE_IFS;							\
	MINSTATE_START_SAVE_MIN						\
	;;								\
	/* initialize first base pointer */	  			\
	mov r16=r1;							\
	/* initialize second base pointer */	  			\
	adds r17=8, r1;							\
	;;								\
	st8 [r16]=rCRIPSR, 16;	/* save cr.ipsr */			\
	st8 [r17]=rCRIIP, 16;	/* save cr.iip */			\
(pKern)	mov r18=r0;		/* make sure r18 isn't NaT */		\
	;;								\
	st8 [r16]=rCRIFS, 16;	/* save cr.ifs */			\
	st8 [r17]=rARUNAT, 16;	/* save ar.unat */			\
(pUser)	sub r18=r18, rKRBS;	/* r18=RSE.ndirty*8 */			\
	;;								\
	st8 [r16]=rARPFS, 16;	/* save ar.pfs */			\
	st8 [r17]=rARRSC, 16;	/* save ar.rsc */			\
	tbit.nz p15, p0=rCRIPSR, IA64_PSR_I_BIT				\
	;;			/* avoid RAW on r16 & r17 */		\
(pKern)	adds r16=16, r16;	/* skip over ar_rnat field */		\
(pKern)	adds r17=16, r17;	/* skip over ar_bspstore field */	\
(pUser)	st8 [r16]=rARRNAT, 16;	/* save ar.rnat */			\
(pUser)	st8 [r17]=rARBSPSTORE, 16;	/* save ar.bspstore */		\
	;;								\
	st8 [r16]=rARPR, 16;	/* save predicates */			\
	st8 [r17]=rB6, 16;	/* save b6 */				\
	/* compute ar.rsc to be used for "loadrs" */			\
	shl r18=r18, 16;						\
	;;								\
	st8 [r16]=r18, 16;	/* save ar.rsc value for "loadrs" */	\
	st8.spill [r17]=rR1, 16;	/* save original r1 */		\
	;;								\
.mem.offset 0, 0;	st8.spill [r16]=r2, 16;				\
.mem.offset 8, 0;	st8.spill [r17]=r3, 16;				\
	adds r2=IA64_PT_REGS_R16_OFFSET, r1;				\
	;;								\
.mem.offset 0, 0;		st8.spill [r16]=r12, 16;		\
.mem.offset 8, 0;		st8.spill [r17]=r13, 16;		\
	/* initialize pSys=0, pNonSys=1 */			  	\
	cmp.eq pNonSys, pSys=r0, r0					\
	;;								\
.mem.offset 0, 0;		st8.spill [r16]=r14, 16;		\
.mem.offset 8, 0;		st8.spill [r17]=r15, 16;		\
	/* r14 = region-7 bits, OR-ed into sp/current by MINSTATE_END_SAVE_MIN */ \
	dep r14=-1, r0, 61, 3;						\
	;;								\
.mem.offset 0, 0;		st8.spill [r16]=r8, 16;			\
.mem.offset 8, 0;		st8.spill [r17]=r9, 16;			\
	/* switch to kernel memory stack (with 16 bytes of scratch) */	\
	adds r12=-16, r1;						\
	;;								\
.mem.offset 0, 0;		st8.spill [r16]=r10, 16;		\
.mem.offset 8, 0;		st8.spill [r17]=r11, 16;		\
	mov r13=IA64_KR(CURRENT);	/* establish `current' */	\
	;;								\
	EXTRA;								\
	movl r1=__gp;		/* establish kernel global pointer */	\
	;;								\
	MINSTATE_END_SAVE_MIN

/*
 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).  This
 * macro guarantees to preserve all predicate registers, r8, r9, r10,
 * r11, r14, and r15.
 *
 * Assumed state upon entry:
 *	psr.ic: on
 *	psr.dt: on
 *	r2:	points to &pt_regs.r16
 *	r3:	points to &pt_regs.r17
 *
 * Stores alternate between the two base pointers (r2/r3) so pairs of
 * st8.spill can issue in the same instruction group; .mem.offset tells
 * the assembler the accesses don't overlap.
 */
#define SAVE_REST				\
.mem.offset 0, 0;	st8.spill [r2]=r16, 16;	\
.mem.offset 8, 0;	st8.spill [r3]=r17, 16;	\
	;;					\
.mem.offset 0, 0;	st8.spill [r2]=r18, 16;	\
.mem.offset 8, 0;	st8.spill [r3]=r19, 16;	\
	;;					\
	mov r16=ar.ccv;		/* M-unit */	\
	movl r18=FPSR_DEFAULT	/* L-unit */	\
	;;					\
	mov r17=ar.fpsr;	/* M-unit */	\
	mov ar.fpsr=r18;	/* M-unit */	\
	;;					\
.mem.offset 0, 0;	st8.spill [r2]=r20, 16;	\
.mem.offset 8, 0;	st8.spill [r3]=r21, 16;	\
	mov r18=b0;				\
	;;					\
.mem.offset 0, 0;	st8.spill [r2]=r22, 16;	\
.mem.offset 8, 0;	st8.spill [r3]=r23, 16;	\
	mov r19=b7;				\
	;;					\
.mem.offset 0, 0;	st8.spill [r2]=r24, 16;	\
.mem.offset 8, 0;	st8.spill [r3]=r25, 16;	\
	;;					\
.mem.offset 0, 0;	st8.spill [r2]=r26, 16;	\
.mem.offset 8, 0;	st8.spill [r3]=r27, 16;	\
	;;					\
.mem.offset 0, 0;	st8.spill [r2]=r28, 16;	\
.mem.offset 8, 0;	st8.spill [r3]=r29, 16;	\
	;;					\
.mem.offset 0, 0;	st8.spill [r2]=r30, 16;	\
.mem.offset 8, 0;	st8.spill [r3]=r31, 16;	\
	;;					\
	st8 [r2]=r16, 16;	/* ar.ccv */	\
	st8 [r3]=r17, 16;	/* ar.fpsr */	\
	;;					\
	st8 [r2]=r18, 16;	/* b0 */	\
	st8 [r3]=r19, 16+8;	/* b7 */	\
	;;					\
	stf.spill [r2]=f6, 32;			\
	stf.spill [r3]=f7, 32;			\
	;;					\
	stf.spill [r2]=f8, 32;			\
	stf.spill [r3]=f9, 32

/*
 * Convenience wrappers around DO_SAVE_MIN:
 *  - SAVE_MIN_WITH_COVER:     issue `cover' and save cr.ifs (interruption path).
 *  - SAVE_MIN_WITH_COVER_R19: as above, additionally copying r19 into r15.
 *  - SAVE_MIN:                no cover; record r0 instead of cr.ifs.
 */
#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19	\
	DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs, mov r15=r19)
#define SAVE_MIN		DO_SAVE_MIN(, mov rCRIFS=r0,)