• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2002 MontaVista Software Inc.
3  * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License as published by the
7  * Free Software Foundation;  either version 2 of the  License, or (at your
8  * option) any later version.
9  */
10 #ifndef _ASM_FPU_H
11 #define _ASM_FPU_H
12 
13 #include <linux/sched.h>
14 #include <linux/thread_info.h>
15 #include <linux/bitops.h>
16 
17 #include <asm/mipsregs.h>
18 #include <asm/cpu.h>
19 #include <asm/cpu-features.h>
20 #include <asm/hazards.h>
21 #include <asm/processor.h>
22 #include <asm/current.h>
23 #include <asm/msa.h>
24 
25 #ifdef CONFIG_MIPS_MT_FPAFF
26 #include <asm/mips_mt.h>
27 #endif
28 
/* Opaque forward declarations; only pointers to these are used here. */
struct sigcontext;
struct sigcontext32;

/* Initialise the soft-FP context used when no hardware FPU is owned. */
extern void fpu_emulator_init_fpu(struct task_struct *target);
/* Low-level helpers implemented in arch assembly/C elsewhere. */
extern int _init_fpu(void);
extern void _save_fp(struct task_struct *);
extern void _restore_fp(struct task_struct *);

/*
 * This macro is used only to obtain FIR from FPU and it seems
 * like a BUG in 34K with single FPU affinity to VPE0.
 */
#define __enable_fpu()                                                  \
do {									\
	set_c0_status(ST0_CU1);						\
	enable_fpu_hazard();						\
} while (0)

/* Drop FPU ownership for the current thread (TIF_USEDFPU cleared). */
#define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)
48 
__is_fpu_owner(void)49 static inline int __is_fpu_owner(void)
50 {
51 	return test_thread_flag(TIF_USEDFPU);
52 }
53 
is_fpu_owner(void)54 static inline int is_fpu_owner(void)
55 {
56 	return cpu_has_fpu && __is_fpu_owner();
57 }
58 
thread_fpu_flags_update(void)59 static inline void thread_fpu_flags_update(void)
60 {
61 	if (current->mm)
62 		change_thread_local_flags(LTIF_FPU_FR|LTIF_FPU_FRE,
63 			current->mm->context.thread_flags & (LTIF_FPU_FR|LTIF_FPU_FRE));
64 }
65 
/*
 * Take ownership of the hardware FPU for the current task: enable CP1
 * in c0_status and put the FPU into the register mode (FR, and FRE where
 * requested) demanded by the thread's local flags, keeping the saved
 * status word (KSTK_STATUS) in sync.
 *
 * Returns 0 on success, or SIGFPE when the hardware cannot provide the
 * requested FR/FRE combination (caller is expected to raise the signal
 * or fall back to emulation).
 */
static inline int __own_fpu(void)
{
	int ret = 0;

#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_CPU_MIPS64)
	u32 status;

	/* Refresh LTIF_FPU_FR/LTIF_FPU_FRE from the mm-wide settings. */
	thread_fpu_flags_update();

	if (!test_thread_local_flags(LTIF_FPU_FR)) {
		/* Thread wants FR=0: enable CP1 while trying to clear ST0_FR. */
		status = change_c0_status(ST0_CU1|ST0_FR,ST0_CU1);
		enable_fpu_hazard();
		if (read_c0_status() & ST0_FR) {
			/*
			 * FR stuck at 1.  With FRE the FR=0 view can still be
			 * provided; otherwise back out and fail.
			 */
			if (cpu_has_fre) {
				set_c0_config5(MIPS_CONF5_FRE);
				back_to_back_c0_hazard();
				KSTK_STATUS(current) |= ST0_CU1|ST0_FR;
			} else {
				/* Undo the CP1 enable before reporting failure. */
				write_c0_status(status & ~ST0_CU1);
				disable_fpu_hazard();
				return(SIGFPE);
			}
		} else {
			/* FR=0 took effect; honour an additional FRE request. */
			if (test_thread_local_flags(LTIF_FPU_FRE)) {
				if (cpu_has_fre) {
					set_c0_config5(MIPS_CONF5_FRE);
					back_to_back_c0_hazard();
				} else {
					write_c0_status(status & ~ST0_CU1);
					disable_fpu_hazard();
					return(SIGFPE);
				}
			}
			KSTK_STATUS(current) = (KSTK_STATUS(current) & ~ST0_FR) | ST0_CU1;
		}
	} else {
		/* Thread wants FR=1: set CU1 and FR together. */
		status = set_c0_status(ST0_CU1|ST0_FR);
		enable_fpu_hazard();
		if (!(read_c0_status() & ST0_FR)) {
			/* FR would not stick — hardware can't do FR=1. */
			write_c0_status(status & ~ST0_CU1);
			disable_fpu_hazard();
			return(SIGFPE);
		}
		if (cpu_has_fre) {
			/* Track the thread's FRE request exactly in CONFIG5. */
			if (test_thread_local_flags(LTIF_FPU_FRE)) {
				set_c0_config5(MIPS_CONF5_FRE);
				back_to_back_c0_hazard();
			} else {
				clear_c0_config5(MIPS_CONF5_FRE);
				back_to_back_c0_hazard();
			}
		} else if (test_thread_local_flags(LTIF_FPU_FRE)) {
			/* FRE requested but unsupported: back out and fail. */
			write_c0_status(status & ~ST0_CU1);
			disable_fpu_hazard();
			return(SIGFPE);
		}
		KSTK_STATUS(current) |= ST0_CU1|ST0_FR;
	}
#else
	thread_fpu_flags_update();

	if (test_thread_local_flags(LTIF_FPU_FR))
		return SIGFPE;  /* core has no 64bit FPU, so ... */

	set_c0_status(ST0_CU1);
	KSTK_STATUS(current) |= ST0_CU1;
	enable_fpu_hazard();
#endif
	/* From here on, this thread is the FPU owner. */
	set_thread_flag(TIF_USEDFPU);
	return ret;
}
137 
own_fpu_inatomic(int restore)138 static inline int own_fpu_inatomic(int restore)
139 {
140 	int ret = 0;
141 
142 	if (cpu_has_fpu && !__is_fpu_owner()) {
143 		ret =__own_fpu();
144 		if (restore && !ret)
145 			_restore_fp(current);
146 	}
147 	return ret;
148 }
149 
/*
 * Preemption-safe wrapper around own_fpu_inatomic(): acquire the FPU
 * for the current task, optionally restoring its saved FP context.
 */
static inline int own_fpu(int restore)
{
	int err;

	preempt_disable();
	err = own_fpu_inatomic(restore);
	preempt_enable();

	return err;
}
160 
/*
 * Read the live FPU control/status register (FCSR) from CP1.
 *
 * On MIPSR6 builds the eight condition-code bits are merged in from the
 * task's software copy (thread.fpu.fcr31) — presumably because R6
 * hardware no longer keeps them in FCSR; verify against the R6 FPU spec.
 */
static inline unsigned int fpu_get_fcr31(void)
{
	unsigned int cp1status = read_32bit_cp1_register(CP1_STATUS);

#ifdef CONFIG_CPU_MIPSR6
	cp1status |= (current->thread.fpu.fcr31 &
		(FPU_CSR_COND0|FPU_CSR_COND1|FPU_CSR_COND2|FPU_CSR_COND3|
		 FPU_CSR_COND4|FPU_CSR_COND5|FPU_CSR_COND6|FPU_CSR_COND7));
#endif
	return cp1status;
}
172 
/*
 * Give up FPU/MSA ownership for the current task; caller must have
 * preemption disabled.  When @save is non-zero the live register state
 * is written back to the task struct first.
 */
static inline void lose_fpu_inatomic(int save)
{
	if (is_msa_enabled()) {
		if (save) {
			/* MSA context includes the FP registers; FCSR is saved separately. */
			save_msa(current);
			current->thread.fpu.fcr31 = fpu_get_fcr31();
		}
		disable_msa();
		clear_thread_flag(TIF_USEDMSA);
	} else if (is_fpu_owner()) {
		if (save)
			_save_fp(current);
	}
	/* Disable CP1 in both the live and the saved status word. */
	clear_c0_status(ST0_CU1);
	disable_fpu_hazard();
	KSTK_STATUS(current) &= ~ST0_CU1;
	clear_thread_flag(TIF_USEDFPU);
}
191 
/*
 * Preemption-safe wrapper around lose_fpu_inatomic(): release FPU/MSA
 * ownership, optionally saving the live register state first.
 */
static inline void lose_fpu(int save)
{
	preempt_disable();
	lose_fpu_inatomic(save);
	preempt_enable();
}
198 
/*
 * Give the current task a freshly initialised FP context and mark it as
 * having used math.  Uses the hardware FPU when it can be acquired,
 * otherwise falls back to the emulator context.
 *
 * Returns 0, or the SIGFPE value from __own_fpu() when acquiring the
 * hardware FPU failed (the emulator context is still initialised).
 */
static inline int init_fpu(void)
{
	int ret = 0;

	preempt_disable();
	/* Note: intentional assignment in the condition. */
	if (cpu_has_fpu && !(ret = __own_fpu())) {
		if (cpu_has_fre) {
			/*
			 * FRE must be off while _init_fpu() writes the full
			 * registers; restore the previous CONFIG5 afterwards.
			 */
			unsigned int config5 = clear_c0_config5(MIPS_CONF5_FRE);
			back_to_back_c0_hazard();
			_init_fpu();
			write_c0_config5(config5);
			back_to_back_c0_hazard();
		} else
			_init_fpu();
	} else
		/* No FPU, or __own_fpu() failed: initialise the soft context. */
		fpu_emulator_init_fpu(current);

	preempt_enable();

	set_used_math();

	return ret;
}
222 
/*
 * Ensure @target has a valid FP context.  A task that has already used
 * math owns a context; otherwise initialise an emulator context and set
 * the used-math flag so that the fresh context (and any edits the
 * caller makes to it) survive.
 */
static inline void init_fp_ctx(struct task_struct *target)
{
	if (!used_math()) {
		fpu_emulator_init_fpu(target);
		set_used_math();
	}
}
238 
save_fp(struct task_struct * tsk)239 static inline void save_fp(struct task_struct *tsk)
240 {
241 	if (cpu_has_fpu)
242 		_save_fp(tsk);
243 }
244 
restore_fp(struct task_struct * tsk)245 static inline void restore_fp(struct task_struct *tsk)
246 {
247 	if (cpu_has_fpu)
248 		_restore_fp(tsk);
249 }
250 
get_fpu_regs(struct task_struct * tsk)251 static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
252 {
253 	if (tsk == current) {
254 		preempt_disable();
255 		if (is_fpu_owner())
256 			_save_fp(current);
257 		preempt_enable();
258 	}
259 
260 	return tsk->thread.fpu.fpr;
261 }
262 
263 #endif /* _ASM_FPU_H */
264