1 /*
2 * Copyright (c) 2012-2015 Travis Geiselbrecht
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <assert.h>
24 #include <bits.h>
25 #include <err.h>
26 #include <sys/types.h>
27 #include <debug.h>
28 #include <dev/interrupt/arm_gic.h>
29 #include <inttypes.h>
30 #include <reg.h>
31 #include <kernel/thread.h>
32 #include <kernel/debug.h>
33 #include <kernel/vm.h>
34 #include <lk/init.h>
35 #include <lk/macros.h>
36 #include <platform/interrupts.h>
37 #include <arch/ops.h>
38 #include <platform/gic.h>
39 #include <trace.h>
40 #include <inttypes.h>
41 #if WITH_LIB_SM
42 #include <lib/sm.h>
43 #include <lib/sm/sm_err.h>
44 #endif
45
46 #include "arm_gic_common.h"
47
48 #if GIC_VERSION > 2
49 #include "gic_v3.h"
50 #endif
51
52 #define LOCAL_TRACE 0
53
54 #if ARCH_ARM
55 #define iframe arm_iframe
56 #define IFRAME_PC(frame) ((frame)->pc)
57 #endif
58 #if ARCH_ARM64
59 #define iframe arm64_iframe_short
60 #define IFRAME_PC(frame) ((frame)->elr)
61 #endif
62
/* Implemented below; forward-declared so earlier code can reference them. */
void platform_fiq(struct iframe *frame);
static status_t arm_gic_set_secure_locked(u_int irq, bool secure);
static void gic_set_enable(uint vector, bool enable);
static void arm_gic_init_hw(void);

/* Protects distributor registers and the shadow register copies below. */
static spin_lock_t gicd_lock;
#if WITH_LIB_SM
/* With lib/sm, fiqs are also used for interrupts, so mask both. */
#define GICD_LOCK_FLAGS SPIN_LOCK_FLAG_IRQ_FIQ
#else
#define GICD_LOCK_FLAGS SPIN_LOCK_FLAG_INTERRUPTS
#endif
#define GIC_MAX_PER_CPU_INT 32 /* vectors 0-31 (SGI/PPI) are banked per cpu */
#define GIC_MAX_SGI_INT 16 /* vectors 0-15 are software-generated interrupts */

#if ARM_GIC_USE_DOORBELL_NS_IRQ
/* Set once the NS side has requested the doorbell irq (see sm_intc_fiq_enter). */
static bool doorbell_enabled;
#endif

struct arm_gic arm_gics[NUM_ARM_GICS];
82
arm_gic_check_init(int irq)83 static bool arm_gic_check_init(int irq)
84 {
85 /* check if we have a vaddr for gicd, both gicv2 and gicv3/4 use this */
86 if (!arm_gics[0].gicd_vaddr) {
87 TRACEF("change to interrupt %d ignored before init\n", irq);
88 return false;
89 }
90 return true;
91 }
92
#if WITH_LIB_SM
/* Set when the non-secure OS boots; interrupt config is frozen after that. */
static bool arm_gic_non_secure_interrupts_frozen;

/*
 * With lib/sm, interrupt configuration changes are additionally refused
 * once the non-secure side has started (see smc_intc_get_next_irq).
 */
static bool arm_gic_interrupt_change_allowed(int irq)
{
    if (!arm_gic_non_secure_interrupts_frozen)
        return arm_gic_check_init(irq);

    TRACEF("change to interrupt %d ignored after booting ns\n", irq);
    return false;
}
#else
/* Without lib/sm, only require that the GIC has been initialized. */
static bool arm_gic_interrupt_change_allowed(int irq)
{
    return arm_gic_check_init(irq);
}
#endif
110
/* Registered handler and its opaque argument for one interrupt vector. */
struct int_handler_struct {
    int_handler handler;
    void *arg;
};

/* Vectors below GIC_MAX_PER_CPU_INT are banked per cpu; the rest are shared. */
static struct int_handler_struct int_handler_table_per_cpu[GIC_MAX_PER_CPU_INT][SMP_MAX_CPUS];
static struct int_handler_struct int_handler_table_shared[MAX_INT-GIC_MAX_PER_CPU_INT];
118
/*
 * Return the handler slot for @vector. Per-cpu vectors (SGI/PPI) index the
 * banked table by @cpu; shared vectors (SPI) use the single shared table.
 */
static struct int_handler_struct *get_int_handler(unsigned int vector, uint cpu)
{
    if (vector >= GIC_MAX_PER_CPU_INT)
        return &int_handler_table_shared[vector - GIC_MAX_PER_CPU_INT];

    return &int_handler_table_per_cpu[vector][cpu];
}
126
#if ARM_GIC_USE_DOORBELL_NS_IRQ
/* Forward declaration; used by register_int_handler below. */
static status_t arm_gic_set_priority_locked(u_int irq, uint8_t priority);
#endif
130
/*
 * Register @handler/@arg for @vector on the calling cpu.
 *
 * Panics if @vector is out of range. The change is skipped (with only a
 * trace message) if the GIC is not yet initialized or, with lib/sm, after
 * the non-secure OS has booted and frozen interrupt configuration.
 * Registered SGIs are also enabled immediately (see comment below).
 */
void register_int_handler(unsigned int vector, int_handler handler, void *arg)
{
    struct int_handler_struct *h;
    uint cpu = arch_curr_cpu_num();

    spin_lock_saved_state_t state;

    if (vector >= MAX_INT)
        panic("register_int_handler: vector out of range %d\n", vector);

    spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);

    if (arm_gic_interrupt_change_allowed(vector)) {
#if GIC_VERSION > 2
        arm_gicv3_configure_irq_locked(cpu, vector);
#endif
        h = get_int_handler(vector, cpu);
        h->handler = handler;
        h->arg = arg;
#if ARM_GIC_USE_DOORBELL_NS_IRQ
        /*
         * Use lowest priority Linux does not mask to allow masking the entire
         * group while still allowing other interrupts to be delivered.
         */
        arm_gic_set_priority_locked(vector, 0xf7);
#endif

        /*
         * For GICv3, SGIs are maskable, and on GICv2, whether they are
         * maskable is implementation defined. As a result, the caller cannot
         * rely on them being maskable, so we enable all registered SGIs as if
         * they were non-maskable.
         */
        if (vector < GIC_MAX_SGI_INT) {
            gic_set_enable(vector, true);
        }
    }

    spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
}
171
/* Number of registers (bit_per_reg fields each) needed to cover MAX_INT. */
#define GIC_REG_COUNT(bit_per_reg) DIV_ROUND_UP(MAX_INT, (bit_per_reg))
/*
 * Define a shadow copy of a distributor register array, with entries from
 * init_from onward preset to init_val (GNU range designated initializer).
 */
#define DEFINE_GIC_SHADOW_REG(name, bit_per_reg, init_val, init_from) \
    uint32_t (name)[GIC_REG_COUNT(bit_per_reg)] = { \
        [(init_from / bit_per_reg) ... \
         (GIC_REG_COUNT(bit_per_reg) - 1)] = (init_val) \
    }

#if WITH_LIB_SM
/* Interrupt group shadow; everything defaults to group 1 (non-secure). */
static DEFINE_GIC_SHADOW_REG(gicd_igroupr, 32, ~0U, 0);
#endif
/* Target-cpu shadow; SPIs (from irq 32) default to cpu 0. */
static DEFINE_GIC_SHADOW_REG(gicd_itargetsr, 4, 0x01010101, 32);
183
/*
 * Enable or disable delivery of @vector.
 *
 * On GICv3/v4, vectors 0-31 (SGI/PPI) are controlled through the calling
 * cpu's redistributor; everything else goes through the distributor. A
 * GICv3 disable additionally waits for the register write to complete so
 * the interrupt cannot be delivered after this returns.
 */
static void gic_set_enable(uint vector, bool enable)
{
    int reg = vector / 32;
    /* was 1ULL: the shifted value is stored in a uint32_t, so the wider
     * type only obscured the (harmless) truncation; 1U is sufficient since
     * the shift amount is always < 32 */
    uint32_t mask = 1U << (vector % 32);

#if GIC_VERSION > 2
    if (reg == 0) {
        uint32_t cpu = arch_curr_cpu_num();

        /* On GICv3/v4 these are on GICR */
        if (enable)
            GICRREG_WRITE(0, cpu, GICR_ISENABLER0, mask);
        else
            GICRREG_WRITE(0, cpu, GICR_ICENABLER0, mask);
        return;
    }
#endif
    if (enable)
        GICDREG_WRITE(0, GICD_ISENABLER(reg), mask);
    else {
        GICDREG_WRITE(0, GICD_ICENABLER(reg), mask);

#if GIC_VERSION > 2
        /* for GIC V3, make sure write is complete */
        arm_gicv3_wait_for_write_complete();
#endif
    }
}
212
/*
 * Per-cpu GIC setup: enable the cpu interface (GICv2) or the
 * redistributor / system-register interface (GICv3/v4) for this cpu.
 * @level is the LK init level and is unused.
 */
static void arm_gic_init_percpu(uint level)
{
#if GIC_VERSION > 2
    /* GICv3/v4 */
    arm_gicv3_init_percpu();
#else
    /* GICv2 */
#if WITH_LIB_SM
    GICCREG_WRITE(0, GICC_CTLR, 0xb); // enable GIC0 and select fiq mode for secure
    GICDREG_WRITE(0, GICD_IGROUPR(0), ~0U); /* GICD_IGROUPR0 is banked */
#else
    GICCREG_WRITE(0, GICC_CTLR, 1); // enable GIC0
#endif
    GICCREG_WRITE(0, GICC_PMR, 0xFF); // unmask interrupts at all priority levels
#endif /* GIC_VERSION > 2 */
}

/* Run on each secondary cpu early in platform bring-up. */
LK_INIT_HOOK_FLAGS(arm_gic_init_percpu,
                   arm_gic_init_percpu,
                   LK_INIT_LEVEL_PLATFORM_EARLY, LK_INIT_FLAG_SECONDARY_CPUS);
233
/*
 * Per-cpu suspend hook. On GICv3/v4 hands the calling cpu off to
 * arm_gicv3_suspend_cpu (presumably saving per-cpu GIC state — see
 * gic_v3 for details); no-op on GICv2. @level is unused.
 */
static void arm_gic_suspend_cpu(uint level)
{
#if GIC_VERSION > 2
    arm_gicv3_suspend_cpu(arch_curr_cpu_num());
#endif
}

/* Runs on each cpu as it is taken offline. */
LK_INIT_HOOK_FLAGS(arm_gic_suspend_cpu, arm_gic_suspend_cpu,
                   LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_OFF);
243
/*
 * Per-cpu resume hook. If the distributor is off (GICD_CTLR enable bits
 * clear — both group enables on GICv3, the single enable on GICv2), the
 * whole GIC lost state, so redo the full init; otherwise only the per-cpu
 * state. On GICv3, also reconfigure routing for every vector that has a
 * registered handler (all vectors after a full resume, otherwise just the
 * banked per-cpu ones) and restore redistributor state.
 */
static void arm_gic_resume_cpu(uint level)
{
    spin_lock_saved_state_t state;
    __UNUSED bool resume_gicd = false;

    spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);

#if GIC_VERSION > 2
    if (!(GICDREG_READ(0, GICD_CTLR) & 5)) {
#else
    if (!(GICDREG_READ(0, GICD_CTLR) & 1)) {
#endif
        /* fixed typo in log message: "distibutor" -> "distributor" */
        dprintf(SPEW, "%s: distributor is off, calling arm_gic_init instead\n", __func__);
        arm_gic_init_hw();
        resume_gicd = true;
    } else {
        arm_gic_init_percpu(0);
    }

#if GIC_VERSION > 2
    {
        uint cpu = arch_curr_cpu_num();
        uint max_irq = resume_gicd ? MAX_INT : GIC_MAX_PER_CPU_INT;

        for (uint v = 0; v < max_irq; v++) {
            struct int_handler_struct *h = get_int_handler(v, cpu);
            if (h->handler) {
                arm_gicv3_configure_irq_locked(cpu, v);
            }
        }
        arm_gicv3_resume_cpu_locked(cpu, resume_gicd);
    }
#endif
    spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
}

/* Runs on each cpu as it comes back from suspend. */
LK_INIT_HOOK_FLAGS(arm_gic_resume_cpu, arm_gic_resume_cpu,
                   LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_RESUME);
282
283 static int arm_gic_max_cpu(void)
284 {
285 return (GICDREG_READ(0, GICD_TYPER) >> 5) & 0x7;
286 }
287
/*
 * One-time distributor initialization. On GICv2: disable and clear all
 * pending interrupts, route SPIs to cpu 0 (multi-cpu parts only), enable
 * the distributor, and — with lib/sm — mark all unclaimed interrupts
 * non-secure. GICv3/v4 setup is delegated to gic_v3. Finishes with the
 * per-cpu init for the calling (boot) cpu.
 */
static void arm_gic_init_hw(void)
{
#if GIC_VERSION > 2
    /* GICv3/v4 */
    arm_gicv3_init();
#else
    int i;

    /* mask and clear any pending state for every interrupt */
    for (i = 0; i < MAX_INT; i+= 32) {
        GICDREG_WRITE(0, GICD_ICENABLER(i / 32), ~0U);
        GICDREG_WRITE(0, GICD_ICPENDR(i / 32), ~0U);
    }

    if (arm_gic_max_cpu() > 0) {
        /* Set external interrupts to target cpu 0 */
        for (i = 32; i < MAX_INT; i += 4) {
            GICDREG_WRITE(0, GICD_ITARGETSR(i / 4), gicd_itargetsr[i / 4]);
        }
    }

    GICDREG_WRITE(0, GICD_CTLR, 1); // enable GIC0
#if WITH_LIB_SM
    GICDREG_WRITE(0, GICD_CTLR, 3); // enable GIC0 ns interrupts
    /*
     * Iterate through all IRQs and set them to non-secure
     * mode. This will allow the non-secure side to handle
     * all the interrupts we don't explicitly claim.
     */
    for (i = 32; i < MAX_INT; i += 32) {
        u_int reg = i / 32;
        GICDREG_WRITE(0, GICD_IGROUPR(reg), gicd_igroupr[reg]);
    }
#endif
#endif /* GIC_VERSION > 2 */
    arm_gic_init_percpu(0);
}
324
/*
 * Initialize the GIC using the platform's compile-time GICBASE addresses.
 * Platforms that discover the GIC at runtime must call arm_gic_init_map
 * instead; calling this without GICBASE defined is a panic.
 */
void arm_gic_init(void) {
#ifdef GICBASE
    arm_gics[0].gicd_vaddr = GICBASE(0) + GICD_OFFSET;
    arm_gics[0].gicd_size = GICD_MIN_SIZE;
#if GIC_VERSION > 2
    /* GICv3/v4: redistributor frames, one GICR_CPU_OFFSET stride per cpu */
    arm_gics[0].gicr_vaddr = GICBASE(0) + GICR_OFFSET;
    arm_gics[0].gicr_size = GICR_CPU_OFFSET(SMP_MAX_CPUS - 1) + GICR_MIN_SIZE;
#else /* GIC_VERSION > 2 */
    /* GICv2: memory-mapped cpu interface */
    arm_gics[0].gicc_vaddr = GICBASE(0) + GICC_OFFSET;
    arm_gics[0].gicc_size = GICC_MIN_SIZE;
#endif /* GIC_VERSION > 2 */
#else
    /* Platforms should define GICBASE if they want to call this */
    panic("%s: GICBASE not defined\n", __func__);
#endif /* GICBASE */

    arm_gic_init_hw();
}
343
344 static void arm_map_regs(const char* name,
345 vaddr_t* vaddr,
346 paddr_t paddr,
347 size_t size) {
348 status_t ret;
349 void* vaddrp = (void*)vaddr;
350
351 if (!size) {
352 return;
353 }
354
355 ret = vmm_alloc_physical(vmm_get_kernel_aspace(), "gic", size, &vaddrp, 0,
356 paddr, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE |
357 ARCH_MMU_FLAG_PERM_NO_EXECUTE);
358 if (ret) {
359 panic("%s: failed %d\n", __func__, ret);
360 }
361
362 *vaddr = (vaddr_t)vaddrp;
363 }
364
/*
 * Initialize the GIC from runtime-discovered physical addresses supplied
 * in @init_info, mapping each register bank into the kernel address
 * space. Panics if any provided region is smaller than the architectural
 * minimum for that bank.
 */
void arm_gic_init_map(struct arm_gic_init_info* init_info)
{
    if (init_info->gicd_size < GICD_MIN_SIZE) {
        panic("%s: gicd mapping too small %zu\n", __func__,
              init_info->gicd_size);
    }
    arm_map_regs("gicd", &arm_gics[0].gicd_vaddr, init_info->gicd_paddr,
                 init_info->gicd_size);
    arm_gics[0].gicd_size = init_info->gicd_size;

#if GIC_VERSION > 2
    /* need one redistributor frame per possible cpu */
    if (init_info->gicr_size < GICR_CPU_OFFSET(SMP_MAX_CPUS - 1) + GICR_MIN_SIZE) {
        panic("%s: gicr mapping too small %zu\n", __func__,
              init_info->gicr_size);
    }
    arm_map_regs("gicr", &arm_gics[0].gicr_vaddr, init_info->gicr_paddr,
                 init_info->gicr_size);
    arm_gics[0].gicr_size = init_info->gicr_size;
#else /* GIC_VERSION > 2 */
    if (init_info->gicc_size < GICC_MIN_SIZE) {
        panic("%s: gicc mapping too small %zu\n", __func__,
              init_info->gicc_size);
    }
    arm_map_regs("gicc", &arm_gics[0].gicc_vaddr, init_info->gicc_paddr,
                 init_info->gicc_size);
    arm_gics[0].gicc_size = init_info->gicc_size;
#endif /* GIC_VERSION > 2 */

    arm_gic_init_hw();
}
395
396 static status_t arm_gic_set_secure_locked(u_int irq, bool secure)
397 {
398 #if WITH_LIB_SM
399 int reg = irq / 32;
400 uint32_t mask = 1ULL << (irq % 32);
401
402 if (irq >= MAX_INT)
403 return ERR_INVALID_ARGS;
404
405 if (secure)
406 GICDREG_WRITE(0, GICD_IGROUPR(reg), (gicd_igroupr[reg] &= ~mask));
407 else
408 GICDREG_WRITE(0, GICD_IGROUPR(reg), (gicd_igroupr[reg] |= mask));
409 LTRACEF("irq %d, secure %d, GICD_IGROUP%d = %x\n",
410 irq, secure, reg, GICDREG_READ(0, GICD_IGROUPR(reg)));
411 #endif
412 return NO_ERROR;
413 }
414
415 static status_t arm_gic_set_target_locked(u_int irq, u_int cpu_mask, u_int enable_mask)
416 {
417 u_int reg = irq / 4;
418 u_int shift = 8 * (irq % 4);
419 u_int old_val;
420 u_int new_val;
421
422 cpu_mask = (cpu_mask & 0xff) << shift;
423 enable_mask = (enable_mask << shift) & cpu_mask;
424
425 old_val = GICDREG_READ(0, GICD_ITARGETSR(reg));
426 new_val = (gicd_itargetsr[reg] & ~cpu_mask) | enable_mask;
427 GICDREG_WRITE(0, GICD_ITARGETSR(reg), (gicd_itargetsr[reg] = new_val));
428 LTRACEF("irq %i, GICD_ITARGETSR%d %x => %x (got %x)\n",
429 irq, reg, old_val, new_val, GICDREG_READ(0, GICD_ITARGETSR(reg)));
430
431 return NO_ERROR;
432 }
433
434 static status_t arm_gic_get_priority(u_int irq)
435 {
436 u_int reg = irq / 4;
437 u_int shift = 8 * (irq % 4);
438 return (GICDREG_READ(0, GICD_IPRIORITYR(reg)) >> shift) & 0xff;
439 }
440
/*
 * Program the 8-bit @priority for @irq. On GICv3, vectors below 32 live
 * in the calling cpu's redistributor IPRIORITY registers; all other
 * vectors (and everything on GICv2) are in the distributor. Always
 * returns 0. Caller must hold gicd_lock.
 */
static status_t arm_gic_set_priority_locked(u_int irq, uint8_t priority)
{
    u_int reg = irq / 4;
    u_int shift = 8 * (irq % 4);
    u_int mask = 0xffU << shift;
    uint32_t regval;

#if GIC_VERSION > 2
    if (irq < 32) {
        uint cpu = arch_curr_cpu_num();

        /* On GICv3 IPRIORITY registers are on redistributor */
        regval = GICRREG_READ(0, cpu, GICR_IPRIORITYR(reg));
        LTRACEF("irq %i, cpu %d: old GICR_IPRIORITYR%d = %x\n", irq, cpu, reg,
                regval);
        regval = (regval & ~mask) | ((uint32_t)priority << shift);
        GICRREG_WRITE(0, cpu, GICR_IPRIORITYR(reg), regval);
        LTRACEF("irq %i, cpu %d, new GICD_IPRIORITYR%d = %x, req %x\n",
                irq, cpu, reg, GICDREG_READ(0, GICD_IPRIORITYR(reg)), regval);
        return 0;
    }
#endif

    /* read-modify-write the byte for this irq within the 4-irq register */
    regval = GICDREG_READ(0, GICD_IPRIORITYR(reg));
    LTRACEF("irq %i, old GICD_IPRIORITYR%d = %x\n", irq, reg, regval);
    regval = (regval & ~mask) | ((uint32_t)priority << shift);
    GICDREG_WRITE(0, GICD_IPRIORITYR(reg), regval);
    LTRACEF("irq %i, new GICD_IPRIORITYR%d = %x, req %x\n",
            irq, reg, GICDREG_READ(0, GICD_IPRIORITYR(reg)), regval);

    return 0;
}
473
/*
 * Send software-generated interrupt @irq (must be < 16) to the cpus in
 * @cpu_mask. On GICv2 the target filter and security bit from @flags are
 * encoded into a single GICD_SGIR write. NOTE(review): on GICv3 @flags is
 * currently ignored and @cpu_mask is expanded into one SGI-register write
 * per selected cpu — confirm this is intentional for all callers.
 */
status_t arm_gic_sgi(u_int irq, u_int flags, u_int cpu_mask)
{
    if (irq >= 16) {
        return ERR_INVALID_ARGS;
    }

#if GIC_VERSION > 2
    for (size_t cpu = 0; cpu < SMP_MAX_CPUS; cpu++) {
        if (!((cpu_mask >> cpu) & 1)) {
            continue;
        }

        uint64_t val = arm_gicv3_sgir_val(irq, cpu);

        GICCREG_WRITE(0, GICC_PRIMARY_SGIR, val);
    }

#else /* else GIC_VERSION > 2 */

    u_int val =
        ((flags & ARM_GIC_SGI_FLAG_TARGET_FILTER_MASK) << 24) |
        ((cpu_mask & 0xff) << 16) |
        ((flags & ARM_GIC_SGI_FLAG_NS) ? (1U << 15) : 0) |
        (irq & 0xf);

    LTRACEF("GICD_SGIR: %x\n", val);

    GICDREG_WRITE(0, GICD_SGIR, val);

#endif /* else GIC_VERSION > 2 */

    return NO_ERROR;
}
507
508 status_t mask_interrupt(unsigned int vector)
509 {
510 if (vector >= MAX_INT)
511 return ERR_INVALID_ARGS;
512
513 if (arm_gic_interrupt_change_allowed(vector))
514 gic_set_enable(vector, false);
515
516 return NO_ERROR;
517 }
518
519 status_t unmask_interrupt(unsigned int vector)
520 {
521 if (vector >= MAX_INT)
522 return ERR_INVALID_ARGS;
523
524 if (arm_gic_interrupt_change_allowed(vector))
525 gic_set_enable(vector, true);
526
527 return NO_ERROR;
528 }
529
/*
 * Common irq dispatch: acknowledge the highest-priority pending
 * interrupt, run its registered handler (if any), then signal
 * end-of-interrupt. Returns the handler's reschedule decision, or
 * INT_NO_RESCHEDULE for spurious interrupts.
 */
static
enum handler_return __platform_irq(struct iframe *frame)
{
    // get the current vector
    uint32_t iar = GICCREG_READ(0, GICC_PRIMARY_IAR);
    unsigned int vector = iar & 0x3ff;

    /* interrupt ids 0x3fe and 0x3ff are special (no normal interrupt) */
    if (vector >= 0x3fe) {
#if WITH_LIB_SM && ARM_GIC_USE_DOORBELL_NS_IRQ
        // spurious or non-secure interrupt
        return sm_handle_irq();
#else
        // spurious
        return INT_NO_RESCHEDULE;
#endif
    }

    THREAD_STATS_INC(interrupts);
    KEVLOG_IRQ_ENTER(vector);

    uint cpu = arch_curr_cpu_num();

    LTRACEF_LEVEL(2, "iar 0x%x cpu %u currthread %p vector %d pc 0x%" PRIxPTR "\n", iar, cpu,
                  get_current_thread(), vector, (uintptr_t)IFRAME_PC(frame));

    // deliver the interrupt
    enum handler_return ret;

    ret = INT_NO_RESCHEDULE;
    struct int_handler_struct *handler = get_int_handler(vector, cpu);
    if (handler->handler)
        ret = handler->handler(handler->arg);

    /* write the full iar value back (not just the vector) to complete it */
    GICCREG_WRITE(0, GICC_PRIMARY_EOIR, iar);

    LTRACEF_LEVEL(2, "cpu %u exit %d\n", cpu, ret);

    KEVLOG_IRQ_EXIT(vector);

    return ret;
}
571
/*
 * Platform irq entry point. Without lib/sm (or with the doorbell scheme)
 * this is just __platform_irq. With lib/sm, peek at the highest-priority
 * pending interrupt first: if it is one of ours, acknowledge and handle
 * it here; otherwise hand control to the non-secure side via
 * sm_handle_irq.
 */
enum handler_return platform_irq(struct iframe *frame)
{
#if WITH_LIB_SM && !ARM_GIC_USE_DOORBELL_NS_IRQ
    /* peek without acknowledging */
    uint32_t ahppir = GICCREG_READ(0, GICC_PRIMARY_HPPIR);
    uint32_t pending_irq = ahppir & 0x3ff;
    struct int_handler_struct *h;
    uint cpu = arch_curr_cpu_num();

#if ARM_MERGE_FIQ_IRQ
    {
        /* a pending secure (fiq) interrupt takes precedence */
        uint32_t hppir = GICCREG_READ(0, GICC_HPPIR);
        uint32_t pending_fiq = hppir & 0x3ff;
        if (pending_fiq < MAX_INT) {
            platform_fiq(frame);
            return INT_NO_RESCHEDULE;
        }
    }
#endif

    LTRACEF("ahppir %d\n", ahppir);
    if (pending_irq < MAX_INT && get_int_handler(pending_irq, cpu)->handler) {
        enum handler_return ret = 0;
        uint32_t irq;
        uint8_t old_priority;
        spin_lock_saved_state_t state;

        spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);

        /* Temporarily raise the priority of the interrupt we want to
         * handle so another interrupt does not take its place before
         * we can acknowledge it.
         */
        old_priority = arm_gic_get_priority(pending_irq);
        arm_gic_set_priority_locked(pending_irq, 0);
        DSB;
        irq = GICCREG_READ(0, GICC_PRIMARY_IAR) & 0x3ff;
        arm_gic_set_priority_locked(pending_irq, old_priority);

        spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);

        LTRACEF("irq %d\n", irq);
        /* the acknowledged irq may still differ from the one we peeked */
        if (irq < MAX_INT && (h = get_int_handler(pending_irq, cpu))->handler)
            ret = h->handler(h->arg);
        else
            TRACEF("unexpected irq %d != %d may get lost\n", irq, pending_irq);
        GICCREG_WRITE(0, GICC_PRIMARY_EOIR, irq);
        return ret;
    }
    /* not ours: let the non-secure OS take it */
    return sm_handle_irq();
#else
    return __platform_irq(frame);
#endif
}
625
/*
 * Platform fiq entry point: with lib/sm, forward to the secure monitor;
 * otherwise fiqs are unexpected and fatal.
 */
void platform_fiq(struct iframe *frame)
{
#if WITH_LIB_SM
    sm_handle_fiq();
#else
    PANIC_UNIMPLEMENTED;
#endif
}
634
635 #if WITH_LIB_SM
636 static status_t arm_gic_get_next_irq_locked(u_int min_irq, uint type)
637 {
638 #if ARM_GIC_USE_DOORBELL_NS_IRQ
639 if (type == TRUSTY_IRQ_TYPE_DOORBELL && min_irq <= ARM_GIC_DOORBELL_IRQ) {
640 doorbell_enabled = true;
641 return ARM_GIC_DOORBELL_IRQ;
642 }
643 #else
644 u_int irq;
645 u_int max_irq = type == TRUSTY_IRQ_TYPE_PER_CPU ? GIC_MAX_PER_CPU_INT :
646 type == TRUSTY_IRQ_TYPE_NORMAL ? MAX_INT : 0;
647 uint cpu = arch_curr_cpu_num();
648
649 if (type == TRUSTY_IRQ_TYPE_NORMAL && min_irq < GIC_MAX_PER_CPU_INT)
650 min_irq = GIC_MAX_PER_CPU_INT;
651
652 for (irq = min_irq; irq < max_irq; irq++)
653 if (get_int_handler(irq, cpu)->handler)
654 return irq;
655 #endif
656
657 return SM_ERR_END_OF_INPUT;
658 }
659
/*
 * SMC handler: non-secure side enumerates the secure irqs it must
 * forward. args->params[0] is the minimum irq, params[1] the irq type.
 * First call (without the doorbell scheme) freezes further interrupt
 * configuration changes.
 */
long smc_intc_get_next_irq(struct smc32_args *args)
{
    status_t ret;
    spin_lock_saved_state_t state;

    spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);

#if !ARM_GIC_USE_DOORBELL_NS_IRQ
    arm_gic_non_secure_interrupts_frozen = true;
#endif
    ret = arm_gic_get_next_irq_locked(args->params[0], args->params[1]);
    LTRACEF("min_irq %d, per_cpu %d, ret %d\n",
            args->params[0], args->params[1], ret);

    spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);

    return ret;
}
678
/*
 * Re-enable secure Group 1 interrupts after returning from the
 * non-secure side (doorbell scheme only; no-op otherwise).
 */
void sm_intc_enable_interrupts(void)
{
#if ARM_GIC_USE_DOORBELL_NS_IRQ
    GICCREG_WRITE(0, icc_igrpen1_el1, 1); /* Enable secure Group 1 */
    DSB;
#endif
}
686
/*
 * Called on fiq entry from the secure monitor. Acknowledge the pending
 * group 0 interrupt; ids >= 1020 are special (spurious/other group). With
 * the doorbell scheme a special id means the fiq was really a group 1
 * interrupt: mask secure group 1 and ring the NS doorbell (if armed) so
 * the non-secure side handles it. Returns ERR_NO_MSG for special ids,
 * ERR_NOT_READY after completing a real (but disabled) fiq.
 */
status_t sm_intc_fiq_enter(void)
{
    u_int cpu = arch_curr_cpu_num();
#if GIC_VERSION > 2
    u_int irq = GICCREG_READ(0, icc_iar0_el1) & 0x3ff;
#else
    u_int irq = GICCREG_READ(0, GICC_IAR) & 0x3ff;
#endif

    LTRACEF("cpu %d, irq %i\n", cpu, irq);

    if (irq >= 1020) {
#if ARM_GIC_USE_DOORBELL_NS_IRQ
        uint64_t val = arm_gicv3_sgir_val(ARM_GIC_DOORBELL_IRQ, cpu);

        GICCREG_WRITE(0, icc_igrpen1_el1, 0); /* Disable secure Group 1 */
        DSB;

        if (doorbell_enabled) {
            LTRACEF("GICD_SGIR: %" PRIx64 "\n", val);
            GICCREG_WRITE(0, icc_asgi1r_el1, val);
        }
#else
        LTRACEF("spurious fiq: cpu %d, new %d\n", cpu, irq);
#endif
        return ERR_NO_MSG;
    }

    /* complete the interrupt; it was not expected to be enabled */
#if GIC_VERSION > 2
    GICCREG_WRITE(0, icc_eoir0_el1, irq);
#else
    GICCREG_WRITE(0, GICC_EOIR, irq);
#endif

    dprintf(INFO, "got disabled fiq: cpu %d, new %d\n", cpu, irq);
    return ERR_NOT_READY;
}
724 #endif
725