/* MN10300 IRQ flag handling
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#include <asm/cpu-regs.h>
/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */
#include <asm/smp.h>

/*
 * interrupt control
 * - "disabled": run in IM1/2
 *   - level 0 - kernel debugger
 *   - level 1 - virtual serial DMA (if present)
 *   - level 5 - normal interrupt priority
 *   - level 6 - timer interrupt
 * - "enabled":  run in IM7
 */
#define MN10300_CLI_LEVEL	(CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)

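/*
 * Worked example (illustrative, assuming CONFIG_LINUX_CLI_LEVEL is 2,
 * i.e. the IM2 case listed above): MN10300_CLI_LEVEL is then
 * 2 << EPSW_IM_SHIFT, so "disabled" still lets the kernel debugger
 * (level 0) and virtual serial DMA (level 1) interrupts through, while
 * normal interrupts (level 5) and the timer (level 6) stay masked until
 * IM is raised back to 7.
 */
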
#ifndef __ASSEMBLY__

static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile("mov epsw,%0" : "=d"(flags));
	return flags;
}

/*
 * "Disable" interrupts by dropping EPSW.IM to MN10300_CLI_LEVEL while
 * leaving EPSW.IE set (see the level list above).  As the comment above
 * arch_safe_halt() notes, the EPSW change takes three instruction
 * cycles to complete, hence the trailing nops.
 */
static inline void arch_local_irq_disable(void)
{
	asm volatile(
		"	and %0,epsw	\n"
		"	or %1,epsw	\n"
		"	nop		\n"
		"	nop		\n"
		"	nop		\n"
		:
		: "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL)
		: "memory");
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}

/*
 * We make sure arch_local_irq_enable() doesn't cause priority
 * inversion: the IM bits are taken from the per-CPU
 * __mn10300_irq_enabled_epsw[] value rather than being hard-coded to
 * IM7.
 */
extern unsigned long __mn10300_irq_enabled_epsw[];

static inline void arch_local_irq_enable(void)
{
	unsigned long tmp;
	int cpu = raw_smp_processor_id();

	asm volatile(
		"	mov	epsw,%0		\n"
		"	and	%1,%0		\n"
		"	or	%2,%0		\n"
		"	mov	%0,epsw		\n"
		: "=&d"(tmp)
		: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
		: "memory", "cc");
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(
		"	mov %0,epsw	\n"
		"	nop		\n"
		"	nop		\n"
		"	nop		\n"
		:
		: "d"(flags)
		: "memory", "cc");
}
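
/*
 * Illustrative usage sketch (a hypothetical helper, for illustration
 * only): the flags value returned by arch_local_irq_save() is simply
 * the previous EPSW, so a critical section is bracketed by
 * save/restore like this:
 */
static inline void __example_irqsave_section(unsigned long *counter)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* save EPSW, mask IRQs to the CLI level */
	(*counter)++;			/* ... critical section ... */
	arch_local_irq_restore(flags);	/* put the previous EPSW back */
}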

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

/*
 * Hook to save power by halting the CPU
 * - called from the idle loop
 * - must reenable interrupts (which takes three instruction cycles to complete)
 */
static inline void arch_safe_halt(void)
{
#ifdef CONFIG_SMP
	arch_local_irq_enable();
#else
	asm volatile(
		"	or	%0,epsw	\n"	/* reenable interrupts (IE, IM7) */
		"	nop		\n"	/* cover the EPSW update latency */
		"	nop		\n"
		"	bset	%2,(%1)	\n"	/* set CPUM_SLEEP to halt the CPU */
		:
		: "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
		: "cc");
#endif
}
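
/*
 * Illustrative sketch (hypothetical, for illustration only): an idle
 * loop as described above would typically mask interrupts, check for
 * pending work and then call arch_safe_halt(), which reenables
 * interrupts and, on !SMP, halts until the next interrupt arrives.
 */
static inline void __example_idle_once(void)
{
	arch_local_irq_disable();
	/* ... check for pending work (e.g. need_resched()) here ... */
	arch_safe_halt();		/* reenables IRQs, then halts */
}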

#define __sleep_cpu()				\
do {						\
	asm volatile(				\
		"	bset	%1,(%0)\n"	\
		"1:	btst	%1,(%0)\n"	\
		"	bne	1b\n"		\
		:				\
		: "i"(&CPUM), "i"(CPUM_SLEEP)	\
		: "cc"				\
		);				\
} while (0)

static inline void arch_local_cli(void)
{
	asm volatile(
		"	and	%0,epsw		\n"
		"	nop			\n"
		"	nop			\n"
		"	nop			\n"
		:
		: "i"(~EPSW_IE)
		: "memory"
		);
}

static inline unsigned long arch_local_cli_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_cli();
	return flags;
}

static inline void arch_local_sti(void)
{
	asm volatile(
		"	or	%0,epsw		\n"
		:
		: "i"(EPSW_IE)
		: "memory");
}

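/*
 * Note (presumed from the way the value is OR'ed straight into EPSW):
 * level is expected to be an already-shifted EPSW.IM field value, such
 * as MN10300_CLI_LEVEL above, rather than a raw 0-7 priority number.
 */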
static inline void arch_local_change_intr_mask_level(unsigned long level)
{
	asm volatile(
		"	and	%0,epsw		\n"
		"	or	%1,epsw		\n"
		:
		: "i"(~EPSW_IM), "i"(EPSW_IE | level)
		: "cc", "memory");
}

#else /* !__ASSEMBLY__ */

#define LOCAL_SAVE_FLAGS(reg)			\
	mov	epsw,reg

#define LOCAL_IRQ_DISABLE				\
	and	~EPSW_IM,epsw;				\
	or	EPSW_IE|MN10300_CLI_LEVEL,epsw;		\
	nop;						\
	nop;						\
	nop

#define LOCAL_IRQ_ENABLE		\
	or	EPSW_IE|EPSW_IM_7,epsw

#define LOCAL_IRQ_RESTORE(reg)	\
	mov	reg,epsw

#define LOCAL_CLI_SAVE(reg)	\
	mov	epsw,reg;	\
	and	~EPSW_IE,epsw;	\
	nop;			\
	nop;			\
	nop

#define LOCAL_CLI		\
	and	~EPSW_IE,epsw;	\
	nop;			\
	nop;			\
	nop

#define LOCAL_STI		\
	or	EPSW_IE,epsw

#define LOCAL_CHANGE_INTR_MASK_LEVEL(level)	\
	and	~EPSW_IM,epsw;			\
	or	EPSW_IE|(level),epsw

#endif /* __ASSEMBLY__ */
#endif /* _ASM_IRQFLAGS_H */