/*
 * Register definitions for the Hexagon architecture
 */


#ifndef _ASM_REGISTERS_H
#define _ASM_REGISTERS_H

#ifndef __ASSEMBLY__

/*  See kernel/entry.S for further documentation.  */

/*
 * Entry code copies the event record out of guest registers into
 * this structure (which is on the stack).
 */

struct hvm_event_record {
	unsigned long vmel;     /* Event Linkage (return address) */
	unsigned long vmest;    /* Event context - pre-event SSR values */
	unsigned long vmpsp;    /* Previous stack pointer */
	unsigned long vmbadva;  /* Bad virtual address for addressing events */
};

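/*
 * Illustrative note (an addition, not from the original header): these
 * fields are filled in by the low-level entry code and are normally read
 * back through the pt_* accessors defined at the bottom of this file, e.g.
 * in a handler that has been handed the saved frame:
 *
 *	unsigned long fault_pc   = pt_elr(regs);    // hvmer.vmel
 *	unsigned long fault_addr = pt_badva(regs);  // hvmer.vmbadva
 */
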
struct pt_regs {
	long restart_r0;        /* R0 checkpoint for syscall restart */
	long syscall_nr;        /* Only used in system calls */
	union {
		struct {
			unsigned long usr;
			unsigned long preds;
		};
		long long int predsusr;
	};
	union {
		struct {
			unsigned long m0;
			unsigned long m1;
		};
		long long int m1m0;
	};
	union {
		struct {
			unsigned long sa1;
			unsigned long lc1;
		};
		long long int lc1sa1;
	};
	union {
		struct {
			unsigned long sa0;
			unsigned long lc0;
		};
		long long int lc0sa0;
	};
	union {
		struct {
			unsigned long ugp;
			unsigned long gp;
		};
		long long int gpugp;
	};
	union {
		struct {
			unsigned long cs0;
			unsigned long cs1;
		};
		long long int cs1cs0;
	};
	/*
	 * Be extremely careful with rearranging these, if at all.  Some code
	 * assumes the 32 registers exist exactly like this in memory;
	 * e.g. kernel/ptrace.c
	 * e.g. kernel/signal.c (restore_sigcontext)
	 */
	union {
		struct {
			unsigned long r00;
			unsigned long r01;
		};
		long long int r0100;
	};
	union {
		struct {
			unsigned long r02;
			unsigned long r03;
		};
		long long int r0302;
	};
	union {
		struct {
			unsigned long r04;
			unsigned long r05;
		};
		long long int r0504;
	};
	union {
		struct {
			unsigned long r06;
			unsigned long r07;
		};
		long long int r0706;
	};
	union {
		struct {
			unsigned long r08;
			unsigned long r09;
		};
		long long int r0908;
	};
	union {
		struct {
			unsigned long r10;
			unsigned long r11;
		};
		long long int r1110;
	};
	union {
		struct {
			unsigned long r12;
			unsigned long r13;
		};
		long long int r1312;
	};
	union {
		struct {
			unsigned long r14;
			unsigned long r15;
		};
		long long int r1514;
	};
	union {
		struct {
			unsigned long r16;
			unsigned long r17;
		};
		long long int r1716;
	};
	union {
		struct {
			unsigned long r18;
			unsigned long r19;
		};
		long long int r1918;
	};
	union {
		struct {
			unsigned long r20;
			unsigned long r21;
		};
		long long int r2120;
	};
	union {
		struct {
			unsigned long r22;
			unsigned long r23;
		};
		long long int r2322;
	};
	union {
		struct {
			unsigned long r24;
			unsigned long r25;
		};
		long long int r2524;
	};
	union {
		struct {
			unsigned long r26;
			unsigned long r27;
		};
		long long int r2726;
	};
	union {
		struct {
			unsigned long r28;
			unsigned long r29;
		};
		long long int r2928;
	};
	union {
		struct {
			unsigned long r30;
			unsigned long r31;
		};
		long long int r3130;
	};
	/* VM dispatch pushes event record onto stack - we can build on it */
	struct hvm_event_record hvmer;
};

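/*
 * Layout note (an illustrative addition, not from the original header): each
 * anonymous union above overlays a pair of 32-bit registers with one 64-bit
 * member, so low-level save/restore code can move a register pair with a
 * single 64-bit access while C code still addresses individual registers:
 *
 *	regs->r29	// stack pointer, as a 32-bit value
 *	regs->r2928	// the R29:R28 pair, as one 64-bit value
 *
 * As the comment inside the struct warns, ptrace and signal code also rely
 * on r00..r31 sitting contiguously in exactly this order.
 */
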
/*  Macros for convenient access to the saved values  */

/*
 * As of VM spec 0.5, these registers are set/retrieved via a VM call.
 * On the inbound side, we fetch the values at the entry points and store
 * them in the event record within pt_regs.  On the outbound side, at the
 * VM return (rte), we write the registers back.
 */

#define pt_elr(regs) ((regs)->hvmer.vmel)
#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val))
#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK))
#define user_mode(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0)
#define ints_enabled(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0)
#define pt_psp(regs) ((regs)->hvmer.vmpsp)
#define pt_badva(regs) ((regs)->hvmer.vmbadva)

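/*
 * Usage sketch (illustrative only; handle_trap() and report_fault() are
 * hypothetical stand-ins, not kernel functions): a handler given the saved
 * frame can combine the accessors above, with the HVM_VMEST_* masks and
 * shifts coming from the Hexagon VM definitions:
 *
 *	void handle_trap(struct pt_regs *regs)
 *	{
 *		if (user_mode(regs))
 *			report_fault(pt_cause(regs), pt_badva(regs),
 *				     pt_elr(regs));
 *	}
 */
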
#define pt_set_singlestep(regs) ((regs)->hvmer.vmest |= (1<<HVM_VMEST_SS_SFT))
#define pt_clr_singlestep(regs) ((regs)->hvmer.vmest &= ~(1<<HVM_VMEST_SS_SFT))

#define pt_set_rte_sp(regs, sp) do {\
	pt_psp(regs) = (regs)->r29 = (sp);\
	} while (0)

#define pt_set_kmode(regs) \
	(regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)

#define pt_set_usermode(regs) \
	(regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \
			    | (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)

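/*
 * A minimal sketch of how the state-setting macros combine when a new user
 * context is prepared (illustrative; start_user_frame() is a hypothetical
 * name, not part of the kernel):
 *
 *	static void start_user_frame(struct pt_regs *regs, unsigned long pc,
 *				     unsigned long sp)
 *	{
 *		memset(regs, 0, sizeof(*regs));
 *		pt_set_usermode(regs);		// user mode, interrupts enabled
 *		pt_set_elr(regs, pc);		// resume address for the VM return
 *		pt_set_rte_sp(regs, sp);	// sets both vmpsp and r29
 *	}
 */
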
#endif  /*  ifndef __ASSEMBLY__  */

#endif  /*  _ASM_REGISTERS_H  */