/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SWITCH_TO_H
#define __ASM_SWITCH_TO_H

#include <linux/thread_info.h>
#include <asm/ptrace.h>

extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);

/*
 * Check whether @fpc is a valid floating-point-control register value.
 *
 * The current FPC is saved with EFPC, then SFPC tries to load @fpc.
 * An invalid value raises a program exception; the EX_TABLE fixup at
 * label 0 (the instruction *after* the faulting SFPC — on s390 the
 * program-old PSW points past a suppressed instruction) branches to
 * label 1, skipping both the restore and the "rc = 0" instruction, so
 * the preloaded -EINVAL is returned. Since the faulting SFPC is
 * suppressed, the original FPC is still intact in that case. On
 * success the original FPC is restored and rc is set to 0.
 *
 * Returns 0 if @fpc is valid (or the machine has no IEEE FP facility,
 * hence no FPC register to validate), -EINVAL otherwise.
 */
static inline int test_fp_ctl(u32 fpc)
{
	u32 orig_fpc;
	int rc;

	if (!MACHINE_HAS_IEEE)
		return 0;

	asm volatile(
		"	efpc    %1\n"	/* orig_fpc = current FPC */
		"	sfpc	%2\n"	/* try to load the new value */
		"0:	sfpc	%1\n"	/* success: restore original FPC */
		"	la	%0,0\n"	/* rc = 0 */
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc), "=d" (orig_fpc)
		: "d" (fpc), "0" (-EINVAL));
	return rc;
}

/*
 * Store the current floating-point-control register to *fpc.
 * No-op when the machine lacks the IEEE floating-point facility
 * (there is no FPC register to store; *fpc is left unchanged).
 */
static inline void save_fp_ctl(u32 *fpc)
{
	if (!MACHINE_HAS_IEEE)
		return;

	asm volatile(
		"	stfpc	%0\n"
		: "+Q" (*fpc));
}

/*
 * Load the floating-point-control register from *fpc.
 *
 * An invalid value makes LFPC raise a program exception; the EX_TABLE
 * fixup (label 0 is the instruction after the faulting LFPC) branches
 * past the "rc = 0" instruction, so the preloaded -EINVAL is returned.
 *
 * Returns 0 on success or when the machine has no IEEE FP facility
 * (nothing to load), -EINVAL if *fpc is not a valid FPC value.
 */
static inline int restore_fp_ctl(u32 *fpc)
{
	int rc;

	if (!MACHINE_HAS_IEEE)
		return 0;

	asm volatile(
		"	lfpc	%1\n"	/* may fault on an invalid value */
		"0:	la	%0,0\n"	/* rc = 0, skipped on fault */
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
	return rc;
}

/*
 * Store the floating-point registers to the array at @fprs.
 *
 * Registers f0/f2/f4/f6 are stored unconditionally; the remaining
 * twelve registers are stored only when MACHINE_HAS_IEEE is set
 * (presumably machines without the IEEE facility only implement the
 * four basic FP registers — TODO confirm). Each register is stored
 * into its matching index so the save area layout is index == regno.
 */
static inline void save_fp_regs(freg_t *fprs)
{
	asm volatile("std 0,%0" : "=Q" (fprs[0]));
	asm volatile("std 2,%0" : "=Q" (fprs[2]));
	asm volatile("std 4,%0" : "=Q" (fprs[4]));
	asm volatile("std 6,%0" : "=Q" (fprs[6]));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile("std 1,%0" : "=Q" (fprs[1]));
	asm volatile("std 3,%0" : "=Q" (fprs[3]));
	asm volatile("std 5,%0" : "=Q" (fprs[5]));
	asm volatile("std 7,%0" : "=Q" (fprs[7]));
	asm volatile("std 8,%0" : "=Q" (fprs[8]));
	asm volatile("std 9,%0" : "=Q" (fprs[9]));
	asm volatile("std 10,%0" : "=Q" (fprs[10]));
	asm volatile("std 11,%0" : "=Q" (fprs[11]));
	asm volatile("std 12,%0" : "=Q" (fprs[12]));
	asm volatile("std 13,%0" : "=Q" (fprs[13]));
	asm volatile("std 14,%0" : "=Q" (fprs[14]));
	asm volatile("std 15,%0" : "=Q" (fprs[15]));
}

/*
 * Load the floating-point registers from the array at @fprs.
 *
 * Mirror image of save_fp_regs(): f0/f2/f4/f6 are loaded
 * unconditionally, the remaining twelve registers only when
 * MACHINE_HAS_IEEE is set. Index in @fprs corresponds to the
 * register number.
 */
static inline void restore_fp_regs(freg_t *fprs)
{
	asm volatile("ld 0,%0" : : "Q" (fprs[0]));
	asm volatile("ld 2,%0" : : "Q" (fprs[2]));
	asm volatile("ld 4,%0" : : "Q" (fprs[4]));
	asm volatile("ld 6,%0" : : "Q" (fprs[6]));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile("ld 1,%0" : : "Q" (fprs[1]));
	asm volatile("ld 3,%0" : : "Q" (fprs[3]));
	asm volatile("ld 5,%0" : : "Q" (fprs[5]));
	asm volatile("ld 7,%0" : : "Q" (fprs[7]));
	asm volatile("ld 8,%0" : : "Q" (fprs[8]));
	asm volatile("ld 9,%0" : : "Q" (fprs[9]));
	asm volatile("ld 10,%0" : : "Q" (fprs[10]));
	asm volatile("ld 11,%0" : : "Q" (fprs[11]));
	asm volatile("ld 12,%0" : : "Q" (fprs[12]));
	asm volatile("ld 13,%0" : : "Q" (fprs[13]));
	asm volatile("ld 14,%0" : : "Q" (fprs[14]));
	asm volatile("ld 15,%0" : : "Q" (fprs[15]));
}

/*
 * Store all __NUM_VXRS vector registers to the save area at @vxrs.
 *
 * The addrtype cast makes the whole save area a single "=Q" output
 * operand so the compiler knows every byte of it is written. The two
 * VSTM instructions are hand-encoded as .word sequences (presumably so
 * the code assembles with binutils that lack vector instruction
 * support — TODO confirm). Register 1 is used as the base address and
 * is declared clobbered.
 */
static inline void save_vx_regs(__vector128 *vxrs)
{
	typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;

	asm volatile(
		"	la	1,%0\n"			/* r1 = &vxrs[0] */
		"	.word	0xe70f,0x1000,0x003e\n"	/* vstm 0,15,0(1) */
		"	.word	0xe70f,0x1100,0x0c3e\n"	/* vstm 16,31,256(1) */
		: "=Q" (*(addrtype *) vxrs) : : "1");
}

/*
 * Store the vector registers even when the vector facility might be
 * disabled in control register 0.
 *
 * With interrupts off, the original CR0 is saved, CR0 bits 17 and 18
 * are set (presumably the AFP-register and vector enablement controls,
 * so the VSTM in save_vx_regs() does not trap — TODO confirm the
 * exact bit semantics), the registers are stored, and then the
 * original CR0 and interrupt state are restored.
 */
static inline void save_vx_regs_safe(__vector128 *vxrs)
{
	unsigned long cr0, flags;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_set_bit(0, 17);
	__ctl_set_bit(0, 18);
	save_vx_regs(vxrs);
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}

/*
 * Load all __NUM_VXRS vector registers from the save area at @vxrs.
 *
 * Mirror of save_vx_regs(): the addrtype cast passes the full area as
 * one "Q" input operand, and the two VLM instructions are hand-encoded
 * as .word sequences. Register 1 holds the base address and is
 * declared clobbered.
 */
static inline void restore_vx_regs(__vector128 *vxrs)
{
	typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;

	asm volatile(
		"	la	1,%0\n"			/* r1 = &vxrs[0] */
		"	.word	0xe70f,0x1000,0x0036\n"	/* vlm 0,15,0(1) */
		"	.word	0xe70f,0x1100,0x0c36\n"	/* vlm 16,31,256(1) */
		: : "Q" (*(addrtype *) vxrs) : "1");
}

save_fp_vx_regs(struct task_struct * task)141 static inline void save_fp_vx_regs(struct task_struct *task)
142 {
143 #ifdef CONFIG_64BIT
144 if (task->thread.vxrs)
145 save_vx_regs(task->thread.vxrs);
146 else
147 #endif
148 save_fp_regs(task->thread.fp_regs.fprs);
149 }
150
restore_fp_vx_regs(struct task_struct * task)151 static inline void restore_fp_vx_regs(struct task_struct *task)
152 {
153 #ifdef CONFIG_64BIT
154 if (task->thread.vxrs)
155 restore_vx_regs(task->thread.vxrs);
156 else
157 #endif
158 restore_fp_regs(task->thread.fp_regs.fprs);
159 }
160
/*
 * Store access registers a0-a15 to the array at @acrs.
 * The acrstype cast passes all NUM_ACRS words as one output operand so
 * the compiler knows the single STAM writes the whole array.
 */
static inline void save_access_regs(unsigned int *acrs)
{
	typedef struct { int _[NUM_ACRS]; } acrstype;

	asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
}

/*
 * Load access registers a0-a15 from the array at @acrs.
 * Mirror of save_access_regs(): the acrstype cast passes all NUM_ACRS
 * words as one input operand for the single LAM.
 */
static inline void restore_access_regs(unsigned int *acrs)
{
	typedef struct { int _[NUM_ACRS]; } acrstype;

	asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
}

/*
 * Context switch from @prev to @next.
 *
 * Saves @prev's FP control, FP/vector registers, access registers and
 * runtime-instrumentation control block, then loads the same state
 * (plus per-task control registers via update_cr_regs()) for @next,
 * and finally calls __switch_to() to swap the remaining register/stack
 * context. The save/restore steps are skipped for tasks with a NULL
 * ->mm (presumably kernel threads, which do not use this state —
 * TODO confirm).
 */
#define switch_to(prev,next,last) do {					\
	if (prev->mm) {							\
		save_fp_ctl(&prev->thread.fp_regs.fpc);			\
		save_fp_vx_regs(prev);					\
		save_access_regs(&prev->thread.acrs[0]);		\
		save_ri_cb(prev->thread.ri_cb);				\
	}								\
	if (next->mm) {							\
		update_cr_regs(next);					\
		restore_fp_ctl(&next->thread.fp_regs.fpc);		\
		restore_fp_vx_regs(next);				\
		restore_access_regs(&next->thread.acrs[0]);		\
		restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);	\
	}								\
	prev = __switch_to(prev,next);					\
} while (0)

#endif /* __ASM_SWITCH_TO_H */