/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>

/*
 * Variant clock event timer support for SMTC on MIPS 34K, 1004K
 * or other MIPS MT cores.
 *
 * Notes on SMTC Support:
 *
 * SMTC has multiple microthread TCs pretending to be Linux CPUs.
 * But there's only one Count/Compare pair per VPE, and Compare
 * interrupts are taken opportunistically by available TCs
 * bound to the VPE with the Count register.  The new timer
 * framework provides for global broadcasts, but we really
 * want VPE-level multicasts for best behavior. So instead
 * of invoking the high-level clock-event broadcast code,
 * this version of SMTC support uses the historical SMTC
 * multicast mechanisms "under the hood", appearing to the
 * generic clock layer as if the interrupts are per-CPU.
 *
 * The approach taken here is to maintain a set of NR_CPUS
 * virtual timers, and track which "CPU" needs to be alerted
 * at each event.
 *
 * It's unlikely that we'll see a MIPS MT core with more than
 * 2 VPEs, but we *know* that we won't need to handle more
 * VPEs than we have "CPUs".  So NR_CPUS arrays of NR_CPUS
 * elements are always going to be overkill, but always going
 * to be enough.
 */

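/*
 * smtc_nexttime[vpe][cpu] holds the absolute Count value at which
 * virtual CPU "cpu" on VPE "vpe" next wants an event (0 = none);
 * smtc_nextinvpe[vpe] caches which CPU on that VPE is due soonest.
 */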
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];

/*
 * Timestamps stored are absolute values to be programmed
 * into the Count register.  Valid timestamps will never be zero.
 * If a zero Count value is actually calculated, it is converted
 * to a 1, which will introduce one or two CPU cycles of error
 * roughly once every four billion events, which at 1000 HZ means
 * about once every 50 days.  If that's actually a problem, one
 * could alternate squashing 0 to 1 and to -1.
 */

#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x) ((x) != 0L)
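/*
 * e.g. if reference + delta wraps to exactly 0, MAKEVALID() stores 1
 * instead, trading a cycle of accuracy for a usable "no event" marker.
 */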

/*
 * Time comparison is subtle, as it's really truncated
 * modular arithmetic.
 */

#define IS_SOONER(a, b, reference) \
    (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
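/*
 * Illustrative example (not from the original source): with
 * reference = 0xfffffff0, a = 0x00000010 lies 0x20 ticks ahead
 * (past the 32-bit wraparound) while b = 0xfffffff8 lies only
 * 0x08 ticks ahead, so IS_SOONER(b, a, reference) holds even
 * though b > a numerically.
 */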

/*
 * CATCHUP_INCREMENT, used when the programmed Compare value falls
 * behind the Count register.  Could be an increasing function
 * instead of a constant.
 */

#define CATCHUP_INCREMENT 64

static int mips_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int mtflags;
	unsigned long timestamp, reference, previous;
	unsigned long nextcomp = 0L;
	int vpe = current_cpu_data.vpe_id;
	int cpu = smp_processor_id();
	local_irq_save(flags);
	mtflags = dmt();

	/*
	 * Maintain the per-TC virtual timer
	 * and program the per-VPE shared Compare register
	 * as appropriate here...
	 */
	reference = (unsigned long)read_c0_count();
	timestamp = MAKEVALID(reference + delta);
	/*
	 * To really model the clock, we have to catch the case
	 * where the current next-in-VPE timestamp is the old
	 * timestamp for the calling CPU, but the new value is
	 * in fact later.  In that case, we have to do a full
	 * scan and discover the new next-in-VPE CPU id and
	 * timestamp.
	 */
	previous = smtc_nexttime[vpe][cpu];
	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
	    && IS_SOONER(previous, timestamp, reference)) {
		int i;
		int soonest = cpu;

		/*
		 * Update timestamp array here, so that new
		 * value gets considered along with those of
		 * other virtual CPUs on the VPE.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
		for_each_online_cpu(i) {
			if (ISVALID(smtc_nexttime[vpe][i])
			    && IS_SOONER(smtc_nexttime[vpe][i],
				smtc_nexttime[vpe][soonest], reference)) {
				    soonest = i;
			}
		}
		smtc_nextinvpe[vpe] = soonest;
		nextcomp = smtc_nexttime[vpe][soonest];
	/*
	 * Otherwise, we don't have to process the whole array;
	 * we just have to see if the event horizon has gotten closer.
	 */
	} else {
		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
		    IS_SOONER(timestamp,
			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
			    smtc_nextinvpe[vpe] = cpu;
			    nextcomp = timestamp;
		}
		/*
		 * Since next-in-VPE may be the same as the executing
		 * virtual CPU, we update the array *after* checking
		 * its value.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
	}

	/*
	 * It may be that, in fact, we don't need to update Compare,
	 * but if we do, we want to make sure we didn't fall into
	 * a crack just behind Count.
	 */
	if (ISVALID(nextcomp)) {
		write_c0_compare(nextcomp);
		ehb();
		/*
		 * We never return an error, we just make sure
		 * that we trigger the handlers as quickly as
		 * we can if we fell behind.
		 */
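		/*
		 * An unsigned difference greater than LONG_MAX means
		 * nextcomp has already fallen behind Count, so we keep
		 * nudging Compare forward until it lands in the future.
		 */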
		while ((nextcomp - (unsigned long)read_c0_count())
			> (unsigned long)LONG_MAX) {
			nextcomp += CATCHUP_INCREMENT;
			write_c0_compare(nextcomp);
			ehb();
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	return 0;
}


void smtc_distribute_timer(int vpe)
{
	unsigned long flags;
	unsigned int mtflags;
	int cpu;
	struct clock_event_device *cd;
	unsigned long nextstamp;
	unsigned long reference;

repeat:
	nextstamp = 0L;
	for_each_online_cpu(cpu) {
	    /*
	     * Find virtual CPUs within the current VPE that have
	     * unserviced timer requests whose time is now past.
	     */
	    local_irq_save(flags);
	    mtflags = dmt();
	    if (cpu_data[cpu].vpe_id == vpe &&
		ISVALID(smtc_nexttime[vpe][cpu])) {
		reference = (unsigned long)read_c0_count();
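		/*
		 * An unsigned difference greater than LONG_MAX means the
		 * stored timestamp is already behind Count, i.e. due.
		 */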
		if ((smtc_nexttime[vpe][cpu] - reference)
			 > (unsigned long)LONG_MAX) {
			    smtc_nexttime[vpe][cpu] = 0L;
			    emt(mtflags);
			    local_irq_restore(flags);
			    /*
			     * We don't send IPIs to ourselves.
			     */
			    if (cpu != smp_processor_id()) {
				smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
			    } else {
				cd = &per_cpu(mips_clockevent_device, cpu);
				cd->event_handler(cd);
			    }
		} else {
			/* Local to VPE, but valid time not yet reached. */
			if (!ISVALID(nextstamp) ||
			    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
			    reference)) {
				smtc_nextinvpe[vpe] = cpu;
				nextstamp = smtc_nexttime[vpe][cpu];
			}
			emt(mtflags);
			local_irq_restore(flags);
		}
	    } else {
		emt(mtflags);
		local_irq_restore(flags);
	    }
	}
	/* Reprogram for interrupt at next soonest timestamp for VPE */
	if (ISVALID(nextstamp)) {
		write_c0_compare(nextstamp);
		ehb();
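		/*
		 * If the soonest timestamp slipped into the past while we
		 * were programming Compare, rescan from the top.
		 */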
		if ((nextstamp - (unsigned long)read_c0_count())
			> (unsigned long)LONG_MAX)
				goto repeat;
	}
}


irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
	handle_perf_irq(1);

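	/* Cause.TI (bit 30) flags a pending Count/Compare interrupt. */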
	if (read_c0_cause() & (1 << 30)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		smtc_distribute_timer(cpu_data[cpu].vpe_id);
	}
	return IRQ_HANDLED;
}


int __cpuinit smtc_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;
	int i;
	int j;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;
	if (cpu == 0) {
		for (i = 0; i < num_possible_cpus(); i++) {
			smtc_nextinvpe[i] = 0;
			for (j = 0; j < num_possible_cpus(); j++)
				smtc_nexttime[i][j] = 0L;
		}
		/*
		 * SMTC also can't have the usability test
		 * run by secondary TCs once Compare is in use.
		 */
		if (!c0_compare_int_usable())
			return -ENXIO;
	}

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the clock scaling factors and the min/max delta */
	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
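	/*
	 * With shift = 32, div_sc() yields mult = (mips_freq << 32) /
	 * NSEC_PER_SEC, so the clockevents core converts nanoseconds to
	 * Count ticks as roughly (ns * mult) >> shift, i.e. ns *
	 * mips_freq / NSEC_PER_SEC; clockevent_delta2ns() inverts this,
	 * bounding events between 0x300 and 0x7fffffff ticks.
	 */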

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_clock_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	/*
	 * On SMTC we only want to do the data structure
	 * initialization and IRQ setup once.
	 */
	if (cpu)
		return 0;
	/*
	 * And we need the hwmask associated with the c0_compare
	 * vector to be initialized.
	 */
	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}