1/*
2 * Set up the interrupt priorities
3 *
4 * Copyright  2004-2009 Analog Devices Inc.
5 *                 2003 Bas Vermeulen <bas@buyways.nl>
6 *                 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
7 *            2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca>
8 *                 1999 D. Jeff Dionne <jeff@uclinux.org>
9 *                 1996 Roman Zippel
10 *
11 * Licensed under the GPL-2
12 */
13
14#include <linux/module.h>
15#include <linux/kernel_stat.h>
16#include <linux/seq_file.h>
17#include <linux/irq.h>
18#include <linux/sched.h>
19#include <linux/syscore_ops.h>
20#include <asm/delay.h>
21#ifdef CONFIG_IPIPE
22#include <linux/ipipe.h>
23#endif
24#include <asm/traps.h>
25#include <asm/blackfin.h>
26#include <asm/gpio.h>
27#include <asm/irq_handler.h>
28#include <asm/dpmc.h>
29#include <asm/traps.h>
30
31/*
32 * NOTES:
33 * - we have separated the physical Hardware interrupt from the
34 * levels that the LINUX kernel sees (see the description in irq.h)
35 * -
36 */
37
#ifndef CONFIG_SMP
/* Software copy of the core IMASK enable bits; kept in sync by
 * bfin_core_mask_irq()/bfin_core_unmask_irq() below.
 *
 * Initialize this to an actual value to force it into the .data
 * section so that we know it is properly initialized at entry into
 * the kernel but before bss is initialized to zero (which is where
 * it would live otherwise).  The 0x1f magic represents the IRQs we
 * cannot actually mask out in hardware.
 */
unsigned long bfin_irq_flags = 0x1f;
EXPORT_SYMBOL(bfin_irq_flags);
#endif
48
#ifdef CONFIG_PM
unsigned long bfin_sic_iwr[3];	/* Up to 3 SIC_IWRx registers */
/* VR_CTL wakeup enable bits (WAKE/CANWE/USBWE/ROTWE) accumulated by
 * bfin_internal_set_wake(); presumably consumed by the PM suspend
 * path — not referenced elsewhere in this file.
 */
unsigned vr_wakeup;
#endif
53
#ifndef SEC_GCTL
/* One entry per peripheral interrupt; filled by search_IAR() in
 * ascending IVG-priority order.
 */
static struct ivgx {
	/* irq number for request_irq, available in mach-bf5xx/irq.h */
	unsigned int irqno;
	/* corresponding bit in the SIC_ISR register */
	unsigned int isrflag;
} ivg_table[NR_PERI_INTS];

/* Per-IVG (IVG7..IVG13) slice of ivg_table, used by vec_to_irq(). */
static struct ivg_slice {
	/* position of first irq in ivg_table for given ivg */
	struct ivgx *ifirst;
	struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];
67
68
69/*
70 * Search SIC_IAR and fill tables with the irqvalues
71 * and their positions in the SIC_ISR register.
72 */
73static void __init search_IAR(void)
74{
75	unsigned ivg, irq_pos = 0;
76	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
77		int irqN;
78
79		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
80
81		for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
82			int irqn;
83			u32 iar =
84				bfin_read32((unsigned long *)SIC_IAR0 +
85#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
86	defined(CONFIG_BF538) || defined(CONFIG_BF539)
87				((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
88#else
89				(irqN >> 3)
90#endif
91				);
92			for (irqn = irqN; irqn < irqN + 4; ++irqn) {
93				int iar_shift = (irqn & 7) * 4;
94				if (ivg == (0xf & (iar >> iar_shift))) {
95					ivg_table[irq_pos].irqno = IVG7 + irqn;
96					ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
97					ivg7_13[ivg].istop++;
98					irq_pos++;
99				}
100			}
101		}
102	}
103}
104#endif
105
106/*
107 * This is for core internal IRQs
108 */
void bfin_ack_noop(struct irq_data *d)
{
	/* Intentionally empty: used for IRQs that need no explicit
	 * acknowledge (core internal IRQs).
	 */
}
113
114static void bfin_core_mask_irq(struct irq_data *d)
115{
116	bfin_irq_flags &= ~(1 << d->irq);
117	if (!hard_irqs_disabled())
118		hard_local_irq_enable();
119}
120
121static void bfin_core_unmask_irq(struct irq_data *d)
122{
123	bfin_irq_flags |= 1 << d->irq;
124	/*
125	 * If interrupts are enabled, IMASK must contain the same value
126	 * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
127	 * are currently disabled we need not do anything; one of the
128	 * callers will take care of setting IMASK to the proper value
129	 * when reenabling interrupts.
130	 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
131	 * what we need.
132	 */
133	if (!hard_irqs_disabled())
134		hard_local_irq_enable();
135	return;
136}
137
138#ifndef SEC_GCTL
/* Mask a peripheral (system) interrupt in the SIC controller. */
void bfin_internal_mask_irq(unsigned int irq)
{
	unsigned long flags = hard_local_irq_save();
#ifdef SIC_IMASK0
	/* Parts with multiple IMASK banks: 32 interrupts per bank. */
	unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
	unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
			~(1 << mask_bit));
# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
	/* Mirror the mask on the second core's SIC (SICB). */
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
			~(1 << mask_bit));
# endif
#else
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
			~(1 << BFIN_SYSIRQ(irq)));
#endif /* end of SIC_IMASK0 */
	hard_local_irq_restore(flags);
}
157
158static void bfin_internal_mask_irq_chip(struct irq_data *d)
159{
160	bfin_internal_mask_irq(d->irq);
161}
162
#ifdef CONFIG_SMP
/* Unmask a system IRQ on the core(s) selected by @affinity. */
void bfin_internal_unmask_irq_affinity(unsigned int irq,
		const struct cpumask *affinity)
#else
/* Unmask a system IRQ in the SIC controller. */
void bfin_internal_unmask_irq(unsigned int irq)
#endif
{
	unsigned long flags = hard_local_irq_save();

#ifdef SIC_IMASK0
	unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
	unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
# ifdef CONFIG_SMP
	/* Core 0 routes through SIC_IMASK... */
	if (cpumask_test_cpu(0, affinity))
# endif
		bfin_write_SIC_IMASK(mask_bank,
				bfin_read_SIC_IMASK(mask_bank) |
				(1 << mask_bit));
# ifdef CONFIG_SMP
	/* ...core 1 through its own SICB_IMASK bank. */
	if (cpumask_test_cpu(1, affinity))
		bfin_write_SICB_IMASK(mask_bank,
				bfin_read_SICB_IMASK(mask_bank) |
				(1 << mask_bit));
# endif
#else
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
			(1 << BFIN_SYSIRQ(irq)));
#endif
	hard_local_irq_restore(flags);
}
193
#ifdef CONFIG_SMP
/* irq_chip adapter: unmask on the cores in the irq's affinity mask. */
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
}

/* Retarget an irq: mask everywhere, then unmask on the new core set. */
static int bfin_internal_set_affinity(struct irq_data *d,
				      const struct cpumask *mask, bool force)
{
	bfin_internal_mask_irq(d->irq);
	bfin_internal_unmask_irq_affinity(d->irq, mask);

	return 0;
}
#else
/* irq_chip adapter for the UP variant. */
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq(d->irq);
}
#endif
214
215#if defined(CONFIG_PM)
216int bfin_internal_set_wake(unsigned int irq, unsigned int state)
217{
218	u32 bank, bit, wakeup = 0;
219	unsigned long flags;
220	bank = BFIN_SYSIRQ(irq) / 32;
221	bit = BFIN_SYSIRQ(irq) % 32;
222
223	switch (irq) {
224#ifdef IRQ_RTC
225	case IRQ_RTC:
226	wakeup |= WAKE;
227	break;
228#endif
229#ifdef IRQ_CAN0_RX
230	case IRQ_CAN0_RX:
231	wakeup |= CANWE;
232	break;
233#endif
234#ifdef IRQ_CAN1_RX
235	case IRQ_CAN1_RX:
236	wakeup |= CANWE;
237	break;
238#endif
239#ifdef IRQ_USB_INT0
240	case IRQ_USB_INT0:
241	wakeup |= USBWE;
242	break;
243#endif
244#ifdef CONFIG_BF54x
245	case IRQ_CNT:
246	wakeup |= ROTWE;
247	break;
248#endif
249	default:
250	break;
251	}
252
253	flags = hard_local_irq_save();
254
255	if (state) {
256		bfin_sic_iwr[bank] |= (1 << bit);
257		vr_wakeup  |= wakeup;
258
259	} else {
260		bfin_sic_iwr[bank] &= ~(1 << bit);
261		vr_wakeup  &= ~wakeup;
262	}
263
264	hard_local_irq_restore(flags);
265
266	return 0;
267}
268
/* irq_chip adapter around bfin_internal_set_wake(). */
static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
{
	return bfin_internal_set_wake(d->irq, state);
}
#else
/* No PM support: wake configuration is a no-op. */
inline int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	return 0;
}
# define bfin_internal_set_wake_chip NULL
#endif
280
281#else /* SEC_GCTL */
282static void bfin_sec_preflow_handler(struct irq_data *d)
283{
284	unsigned long flags = hard_local_irq_save();
285	unsigned int sid = BFIN_SYSIRQ(d->irq);
286
287	bfin_write_SEC_SCI(0, SEC_CSID, sid);
288
289	hard_local_irq_restore(flags);
290}
291
292static void bfin_sec_mask_ack_irq(struct irq_data *d)
293{
294	unsigned long flags = hard_local_irq_save();
295	unsigned int sid = BFIN_SYSIRQ(d->irq);
296
297	bfin_write_SEC_SCI(0, SEC_CSID, sid);
298
299	hard_local_irq_restore(flags);
300}
301
302static void bfin_sec_unmask_irq(struct irq_data *d)
303{
304	unsigned long flags = hard_local_irq_save();
305	unsigned int sid = BFIN_SYSIRQ(d->irq);
306
307	bfin_write32(SEC_END, sid);
308
309	hard_local_irq_restore(flags);
310}
311
312static void bfin_sec_enable_ssi(unsigned int sid)
313{
314	unsigned long flags = hard_local_irq_save();
315	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
316
317	reg_sctl |= SEC_SCTL_SRC_EN;
318	bfin_write_SEC_SCTL(sid, reg_sctl);
319
320	hard_local_irq_restore(flags);
321}
322
323static void bfin_sec_disable_ssi(unsigned int sid)
324{
325	unsigned long flags = hard_local_irq_save();
326	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
327
328	reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN);
329	bfin_write_SEC_SCTL(sid, reg_sctl);
330
331	hard_local_irq_restore(flags);
332}
333
334static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid)
335{
336	unsigned long flags = hard_local_irq_save();
337	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
338
339	reg_sctl &= ((uint32_t)~SEC_SCTL_CTG);
340	bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG));
341
342	hard_local_irq_restore(flags);
343}
344
345static void bfin_sec_enable_sci(unsigned int sid)
346{
347	unsigned long flags = hard_local_irq_save();
348	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
349
350	if (sid == BFIN_SYSIRQ(IRQ_WATCH0))
351		reg_sctl |= SEC_SCTL_FAULT_EN;
352	else
353		reg_sctl |= SEC_SCTL_INT_EN;
354	bfin_write_SEC_SCTL(sid, reg_sctl);
355
356	hard_local_irq_restore(flags);
357}
358
359static void bfin_sec_disable_sci(unsigned int sid)
360{
361	unsigned long flags = hard_local_irq_save();
362	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
363
364	reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN);
365	bfin_write_SEC_SCTL(sid, reg_sctl);
366
367	hard_local_irq_restore(flags);
368}
369
370static void bfin_sec_enable(struct irq_data *d)
371{
372	unsigned long flags = hard_local_irq_save();
373	unsigned int sid = BFIN_SYSIRQ(d->irq);
374
375	bfin_sec_enable_sci(sid);
376	bfin_sec_enable_ssi(sid);
377
378	hard_local_irq_restore(flags);
379}
380
381static void bfin_sec_disable(struct irq_data *d)
382{
383	unsigned long flags = hard_local_irq_save();
384	unsigned int sid = BFIN_SYSIRQ(d->irq);
385
386	bfin_sec_disable_sci(sid);
387	bfin_sec_disable_ssi(sid);
388
389	hard_local_irq_restore(flags);
390}
391
392static void bfin_sec_set_priority(unsigned int sec_int_levels, u8 *sec_int_priority)
393{
394	unsigned long flags = hard_local_irq_save();
395	uint32_t reg_sctl;
396	int i;
397
398	bfin_write_SEC_SCI(0, SEC_CPLVL, sec_int_levels);
399
400	for (i = 0; i < SYS_IRQS - BFIN_IRQ(0); i++) {
401		reg_sctl = bfin_read_SEC_SCTL(i) & ~SEC_SCTL_PRIO;
402		reg_sctl |= sec_int_priority[i] << SEC_SCTL_PRIO_OFFSET;
403		bfin_write_SEC_SCTL(i, reg_sctl);
404	}
405
406	hard_local_irq_restore(flags);
407}
408
409void bfin_sec_raise_irq(unsigned int irq)
410{
411	unsigned long flags = hard_local_irq_save();
412	unsigned int sid = BFIN_SYSIRQ(irq);
413
414	bfin_write32(SEC_RAISE, sid);
415
416	hard_local_irq_restore(flags);
417}
418
/* Set up the software-driven interrupt sources (SIDs 34-37): even SIDs
 * target core 0, odd SIDs core 1.  Only the odd (core 1) sources are
 * enabled here, matching the original bring-up sequence.
 */
static void init_software_driven_irq(void)
{
	unsigned int sid;

	for (sid = 34; sid <= 37; sid++) {
		bfin_sec_set_ssi_coreid(sid, sid & 1);
		if (sid & 1) {
			bfin_sec_enable_sci(sid);
			bfin_sec_enable_ssi(sid);
		}
	}
}
431
/* SEC fault-interface (SFI) error: currently no recovery action. */
void handle_sec_sfi_fault(uint32_t gstat)
{

}
436
437void handle_sec_sci_fault(uint32_t gstat)
438{
439	uint32_t core_id;
440	uint32_t cstat;
441
442	core_id = gstat & SEC_GSTAT_SCI;
443	cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT);
444	if (cstat & SEC_CSTAT_ERR) {
445		switch (cstat & SEC_CSTAT_ERRC) {
446		case SEC_CSTAT_ACKERR:
447			printk(KERN_DEBUG "sec ack err\n");
448			break;
449		default:
450			printk(KERN_DEBUG "sec sci unknown err\n");
451		}
452	}
453
454}
455
/* SEC source-interface (SSI) error: read the offending source's status.
 * NOTE(review): sstat is read but otherwise unused — the read itself may
 * be required (status registers are often read/clear-on-read); confirm
 * against the SEC hardware reference before removing it.
 */
void handle_sec_ssi_fault(uint32_t gstat)
{
	uint32_t sid;
	uint32_t sstat;

	sid = gstat & SEC_GSTAT_SID;
	sstat = bfin_read_SEC_SSTAT(sid);

}
465
466void handle_sec_fault(uint32_t sec_gstat)
467{
468	if (sec_gstat & SEC_GSTAT_ERR) {
469
470		switch (sec_gstat & SEC_GSTAT_ERRC) {
471		case 0:
472			handle_sec_sfi_fault(sec_gstat);
473			break;
474		case SEC_GSTAT_SCIERR:
475			handle_sec_sci_fault(sec_gstat);
476			break;
477		case SEC_GSTAT_SSIERR:
478			handle_sec_ssi_fault(sec_gstat);
479			break;
480		}
481
482
483	}
484}
485
/* Shared irqaction for the fatal-fault IRQs; its handler is assigned in
 * init_arch_irq() before setup_irq().
 */
static struct irqaction bfin_fault_irq = {
	.name = "Blackfin fault",
};
489
/* Handler for the fatal fault interrupts (double fault, hardware error,
 * L1 parity NMI, SEC error).  All paths except IRQ_SEC_ERR panic.
 */
static irqreturn_t bfin_fault_routine(int irq, void *data)
{
	struct pt_regs *fp = get_irq_regs();

	switch (irq) {
	case IRQ_C0_DBL_FAULT:
		double_fault_c(fp);
		break;
	case IRQ_C0_HW_ERR:
		/* Dump as much state as possible before panicking. */
		dump_bfin_process(fp);
		dump_bfin_mem(fp);
		show_regs(fp);
		printk(KERN_NOTICE "Kernel Stack\n");
		show_stack(current, NULL);
		print_modules();
		panic("Core 0 hardware error");
		break;
	case IRQ_C0_NMI_L1_PARITY_ERR:
		panic("Core 0 NMI L1 parity error");
		break;
	case IRQ_SEC_ERR:
		pr_err("SEC error\n");
		handle_sec_fault(bfin_read32(SEC_GSTAT));
		break;
	default:
		panic("Unknown fault %d", irq);
	}

	return IRQ_HANDLED;
}
520#endif /* SEC_GCTL */
521
/* irq_chip for the core (CEC) interrupts up to IRQ_CORETMR. */
static struct irq_chip bfin_core_irqchip = {
	.name = "CORE",
	.irq_mask = bfin_core_mask_irq,
	.irq_unmask = bfin_core_unmask_irq,
};
527
#ifndef SEC_GCTL
/* irq_chip for peripheral interrupts routed through the SIC. */
static struct irq_chip bfin_internal_irqchip = {
	.name = "INTN",
	.irq_mask = bfin_internal_mask_irq_chip,
	.irq_unmask = bfin_internal_unmask_irq_chip,
	.irq_disable = bfin_internal_mask_irq_chip,
	.irq_enable = bfin_internal_unmask_irq_chip,
#ifdef CONFIG_SMP
	.irq_set_affinity = bfin_internal_set_affinity,
#endif
	.irq_set_wake = bfin_internal_set_wake_chip,
};
#else
/* irq_chip for peripheral interrupts routed through the SEC. */
static struct irq_chip bfin_sec_irqchip = {
	.name = "SEC",
	.irq_mask_ack = bfin_sec_mask_ack_irq,
	.irq_mask = bfin_sec_mask_ack_irq,
	.irq_unmask = bfin_sec_unmask_irq,
	.irq_eoi = bfin_sec_unmask_irq,
	.irq_disable = bfin_sec_disable,
	.irq_enable = bfin_sec_enable,
};
#endif
551
/* Deliver a decoded irq number either through the I-pipe (when enabled)
 * or straight into the generic irq layer.
 */
void bfin_handle_irq(unsigned irq)
{
#ifdef CONFIG_IPIPE
	struct pt_regs regs;    /* Contents not used. */
	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, &regs);
	ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
	generic_handle_irq(irq);
#endif  /* !CONFIG_IPIPE */
}
563
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
/* Software mask of enabled MAC status interrupts, one bit per irq
 * relative to IRQ_MAC_PHYINT.
 */
static int mac_stat_int_mask;
566
/* Acknowledge a MAC status interrupt by clearing its sticky source
 * bits.  The registers are presumably write-1-to-clear (the writes echo
 * back the currently-set bits) — confirm against the EMAC reference.
 */
static void bfin_mac_status_ack_irq(unsigned int irq)
{
	switch (irq) {
	case IRQ_MAC_MMCINT:
		bfin_write_EMAC_MMC_TIRQS(
			bfin_read_EMAC_MMC_TIRQE() &
			bfin_read_EMAC_MMC_TIRQS());
		bfin_write_EMAC_MMC_RIRQS(
			bfin_read_EMAC_MMC_RIRQE() &
			bfin_read_EMAC_MMC_RIRQS());
		break;
	case IRQ_MAC_RXFSINT:
		bfin_write_EMAC_RX_STKY(
			bfin_read_EMAC_RX_IRQE() &
			bfin_read_EMAC_RX_STKY());
		break;
	case IRQ_MAC_TXFSINT:
		bfin_write_EMAC_TX_STKY(
			bfin_read_EMAC_TX_IRQE() &
			bfin_read_EMAC_TX_STKY());
		break;
	case IRQ_MAC_WAKEDET:
		 bfin_write_EMAC_WKUP_CTL(
			bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
		break;
	default:
		/* These bits are W1C */
		bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
		break;
	}
}
598
/* Mask a MAC status interrupt: clear its software mask bit, then turn
 * off the hardware source (PHYIE on BF537, or the whole shared
 * IRQ_MAC_ERROR line elsewhere once no sources remain enabled).
 */
static void bfin_mac_status_mask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
#ifdef BF537_FAMILY
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
		break;
	default:
		break;
	}
#else
	if (!mac_stat_int_mask)
		bfin_internal_mask_irq(IRQ_MAC_ERROR);
#endif
	bfin_mac_status_ack_irq(irq);
}
618
/* Unmask a MAC status interrupt.  Note the mirror-image ordering vs.
 * the mask path: hardware is enabled first, the software mask bit set
 * last.
 */
static void bfin_mac_status_unmask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

#ifdef BF537_FAMILY
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
		break;
	default:
		break;
	}
#else
	/* First enabled source re-enables the shared error irq. */
	if (!mac_stat_int_mask)
		bfin_internal_unmask_irq(IRQ_MAC_ERROR);
#endif
	mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
}
637
#ifdef CONFIG_PM
/* Wake configuration is delegated to the underlying shared error irq. */
int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
{
#ifdef BF537_FAMILY
	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
#else
	return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
#endif
}
#else
# define bfin_mac_status_set_wake NULL
#endif
650
/* irq_chip for the demultiplexed MAC status interrupts. */
static struct irq_chip bfin_mac_status_irqchip = {
	.name = "MACST",
	.irq_mask = bfin_mac_status_mask_irq,
	.irq_unmask = bfin_mac_status_unmask_irq,
	.irq_set_wake = bfin_mac_status_set_wake,
};
657
658void bfin_demux_mac_status_irq(unsigned int int_err_irq,
659			       struct irq_desc *inta_desc)
660{
661	int i, irq = 0;
662	u32 status = bfin_read_EMAC_SYSTAT();
663
664	for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
665		if (status & (1L << i)) {
666			irq = IRQ_MAC_PHYINT + i;
667			break;
668		}
669
670	if (irq) {
671		if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
672			bfin_handle_irq(irq);
673		} else {
674			bfin_mac_status_ack_irq(irq);
675			pr_debug("IRQ %d:"
676					" MASKED MAC ERROR INTERRUPT ASSERTED\n",
677					irq);
678		}
679	} else
680		printk(KERN_ERR
681				"%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
682				" INTERRUPT ASSERTED BUT NO SOURCE FOUND"
683				"(EMAC_SYSTAT=0x%X)\n",
684				__func__, __FILE__, __LINE__, status);
685}
686#endif
687
/* Install a flow handler for @irq; under I-pipe everything is forced to
 * handle_level_irq.
 */
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
	handle = handle_level_irq;
#endif
	__irq_set_handler_locked(irq, handle);
}
695
#ifdef CONFIG_GPIO_ADI

/* One bit per GPIO: set while the pin is claimed for interrupt use. */
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
699
700static void bfin_gpio_ack_irq(struct irq_data *d)
701{
702	/* AFAIK ack_irq in case mask_ack is provided
703	 * get's only called for edge sense irqs
704	 */
705	set_gpio_data(irq_to_gpio(d->irq), 0);
706}
707
708static void bfin_gpio_mask_ack_irq(struct irq_data *d)
709{
710	unsigned int irq = d->irq;
711	u32 gpionr = irq_to_gpio(irq);
712
713	if (!irqd_is_level_type(d))
714		set_gpio_data(gpionr, 0);
715
716	set_gpio_maska(gpionr, 0);
717}
718
719static void bfin_gpio_mask_irq(struct irq_data *d)
720{
721	set_gpio_maska(irq_to_gpio(d->irq), 0);
722}
723
724static void bfin_gpio_unmask_irq(struct irq_data *d)
725{
726	set_gpio_maska(irq_to_gpio(d->irq), 1);
727}
728
729static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
730{
731	u32 gpionr = irq_to_gpio(d->irq);
732
733	if (__test_and_set_bit(gpionr, gpio_enabled))
734		bfin_gpio_irq_prepare(gpionr);
735
736	bfin_gpio_unmask_irq(d);
737
738	return 0;
739}
740
741static void bfin_gpio_irq_shutdown(struct irq_data *d)
742{
743	u32 gpionr = irq_to_gpio(d->irq);
744
745	bfin_gpio_mask_irq(d);
746	__clear_bit(gpionr, gpio_enabled);
747	bfin_gpio_irq_free(gpionr);
748}
749
/* Configure the trigger type (edge/level, polarity) of a GPIO irq and
 * pick the matching flow handler.  Returns 0 or a negative errno from
 * bfin_gpio_irq_request().
 */
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = d->irq;
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {

		/* Claim the pin for irq use before touching registers. */
		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);

	} else {
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	/* Disable input while reprogramming edge/polarity below. */
	set_gpio_inen(gpionr, 0);
	set_gpio_dir(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_gpio_both(gpionr, 1);
	else
		set_gpio_both(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
	else
		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		set_gpio_edge(gpionr, 1);
		set_gpio_inen(gpionr, 1);
		/* Clear any stale latched edge before re-enabling. */
		set_gpio_data(gpionr, 0);

	} else {
		set_gpio_edge(gpionr, 0);
		set_gpio_inen(gpionr, 1);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		bfin_set_irq_handler(irq, handle_edge_irq);
	else
		bfin_set_irq_handler(irq, handle_level_irq);

	return 0;
}
811
/* Walk one GPIO port: dispatch every pending, unmasked pin starting at
 * the irq that maps to the port's first GPIO.
 */
static void bfin_demux_gpio_block(unsigned int irq)
{
	unsigned int gpio = irq_to_gpio(irq);
	unsigned int pending = get_gpiop_data(gpio) & get_gpiop_maska(gpio);

	for (; pending; pending >>= 1, irq++) {
		if (pending & 1)
			bfin_handle_irq(irq);
	}
}
826
/* Chained handler for the shared port-A interrupt lines: map the parent
 * irq to the first GPIO irq of the affected port(s) and demux it.  The
 * port layout is family-specific, hence the config cases.
 */
void bfin_demux_gpio_irq(unsigned int inta_irq,
			struct irq_desc *desc)
{
	unsigned int irq;

	switch (inta_irq) {
#if defined(BF537_FAMILY)
	case IRQ_PF_INTA_PG_INTA:
		/* PF and PG share this line: scan PF here, PG below. */
		bfin_demux_gpio_block(IRQ_PF0);
		irq = IRQ_PG0;
		break;
	case IRQ_PH_INTA_MAC_RX:
		irq = IRQ_PH0;
		break;
#elif defined(BF533_FAMILY)
	case IRQ_PROG_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(BF538_FAMILY)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PORTG_INTA:
		irq = IRQ_PG0;
		break;
	case IRQ_PORTH_INTA:
		irq = IRQ_PH0;
		break;
#elif defined(CONFIG_BF561)
	case IRQ_PROG0_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PROG1_INTA:
		irq = IRQ_PF16;
		break;
	case IRQ_PROG2_INTA:
		irq = IRQ_PF32;
		break;
#endif
	default:
		BUG();
		return;
	}

	bfin_demux_gpio_block(irq);
}
877
#ifdef CONFIG_PM

/* Delegate wake enable/disable to the GPIO PM layer. */
static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
	return bfin_gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
}

#else

# define bfin_gpio_set_wake NULL

#endif
890
/* irq_chip for GPIO-pin interrupts (ADI GPIO block). */
static struct irq_chip bfin_gpio_irqchip = {
	.name = "GPIO",
	.irq_ack = bfin_gpio_ack_irq,
	.irq_mask = bfin_gpio_mask_irq,
	.irq_mask_ack = bfin_gpio_mask_ack_irq,
	.irq_unmask = bfin_gpio_unmask_irq,
	.irq_disable = bfin_gpio_mask_irq,
	.irq_enable = bfin_gpio_unmask_irq,
	.irq_set_type = bfin_gpio_irq_type,
	.irq_startup = bfin_gpio_irq_startup,
	.irq_shutdown = bfin_gpio_irq_shutdown,
	.irq_set_wake = bfin_gpio_set_wake,
};
904
905#endif
906
907#ifdef CONFIG_PM
908
#ifdef SEC_GCTL
/* Saved SEC_SCTL values for the PINT sources, preserved over suspend. */
static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS];
911
912static int sec_suspend(void)
913{
914	u32 bank;
915
916	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
917		save_pint_sec_ctl[bank] = bfin_read_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0));
918	return 0;
919}
920
/* syscore resume hook: reset and re-enable the SEC, then restore the
 * PINT SEC_SCTL snapshot taken by sec_suspend().  The sequence (reset,
 * settle delay, global enable, core enable) mirrors init_arch_irq().
 */
static void sec_resume(void)
{
	u32 bank;

	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
	udelay(100);
	bfin_write_SEC_GCTL(SEC_GCTL_EN);
	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);

	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
		bfin_write_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0), save_pint_sec_ctl[bank]);
}
933
/* Registered from init_arch_irq() so SEC state survives suspend/resume. */
static struct syscore_ops sec_pm_syscore_ops = {
	.suspend = sec_suspend,
	.resume = sec_resume,
};
938#endif
939
940#endif
941
/* Install the core event vector table (EVT2..EVT15).  Must run before
 * interrupts are enabled.
 */
void init_exception_vectors(void)
{
	/* cannot program in software:
	 * evt0 - emulation (jtag)
	 * evt1 - reset
	 */
	bfin_write_EVT2(evt_nmi);
	bfin_write_EVT3(trap);
	bfin_write_EVT5(evt_ivhw);
	bfin_write_EVT6(evt_timer);
	bfin_write_EVT7(evt_evt7);
	bfin_write_EVT8(evt_evt8);
	bfin_write_EVT9(evt_evt9);
	bfin_write_EVT10(evt_evt10);
	bfin_write_EVT11(evt_evt11);
	bfin_write_EVT12(evt_evt12);
	bfin_write_EVT13(evt_evt13);
	bfin_write_EVT14(evt_evt14);
	bfin_write_EVT15(evt_system_call);
	CSYNC();
}
963
964#ifndef SEC_GCTL
965/*
966 * This function should be called during kernel startup to initialize
967 * the BFin IRQ handling routines.
968 */
969
970int __init init_arch_irq(void)
971{
972	int irq;
973	unsigned long ilat = 0;
974
975	/*  Disable all the peripheral intrs  - page 4-29 HW Ref manual */
976#ifdef SIC_IMASK0
977	bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
978	bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
979# ifdef SIC_IMASK2
980	bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
981# endif
982# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
983	bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
984	bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
985# endif
986#else
987	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
988#endif
989
990	local_irq_disable();
991
992	for (irq = 0; irq <= SYS_IRQS; irq++) {
993		if (irq <= IRQ_CORETMR)
994			irq_set_chip(irq, &bfin_core_irqchip);
995		else
996			irq_set_chip(irq, &bfin_internal_irqchip);
997
998		switch (irq) {
999#if !BFIN_GPIO_PINT
1000#if defined(BF537_FAMILY)
1001		case IRQ_PH_INTA_MAC_RX:
1002		case IRQ_PF_INTA_PG_INTA:
1003#elif defined(BF533_FAMILY)
1004		case IRQ_PROG_INTA:
1005#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1006		case IRQ_PORTF_INTA:
1007		case IRQ_PORTG_INTA:
1008		case IRQ_PORTH_INTA:
1009#elif defined(CONFIG_BF561)
1010		case IRQ_PROG0_INTA:
1011		case IRQ_PROG1_INTA:
1012		case IRQ_PROG2_INTA:
1013#elif defined(BF538_FAMILY)
1014		case IRQ_PORTF_INTA:
1015#endif
1016			irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1017			break;
1018#endif
1019#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1020		case IRQ_MAC_ERROR:
1021			irq_set_chained_handler(irq,
1022						bfin_demux_mac_status_irq);
1023			break;
1024#endif
1025#if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1026		case IRQ_SUPPLE_0:
1027		case IRQ_SUPPLE_1:
1028			irq_set_handler(irq, handle_percpu_irq);
1029			break;
1030#endif
1031
1032#ifdef CONFIG_TICKSOURCE_CORETMR
1033		case IRQ_CORETMR:
1034# ifdef CONFIG_SMP
1035			irq_set_handler(irq, handle_percpu_irq);
1036# else
1037			irq_set_handler(irq, handle_simple_irq);
1038# endif
1039			break;
1040#endif
1041
1042#ifdef CONFIG_TICKSOURCE_GPTMR0
1043		case IRQ_TIMER0:
1044			irq_set_handler(irq, handle_simple_irq);
1045			break;
1046#endif
1047
1048		default:
1049#ifdef CONFIG_IPIPE
1050			irq_set_handler(irq, handle_level_irq);
1051#else
1052			irq_set_handler(irq, handle_simple_irq);
1053#endif
1054			break;
1055		}
1056	}
1057
1058	init_mach_irq();
1059
1060#if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
1061	for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
1062		irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
1063					 handle_level_irq);
1064#endif
1065	/* if configured as edge, then will be changed to do_edge_IRQ */
1066#ifdef CONFIG_GPIO_ADI
1067	for (irq = GPIO_IRQ_BASE;
1068		irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1069		irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1070					 handle_level_irq);
1071#endif
1072	bfin_write_IMASK(0);
1073	CSYNC();
1074	ilat = bfin_read_ILAT();
1075	CSYNC();
1076	bfin_write_ILAT(ilat);
1077	CSYNC();
1078
1079	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1080	/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
1081	 * local_irq_enable()
1082	 */
1083	program_IAR();
1084	/* Therefore it's better to setup IARs before interrupts enabled */
1085	search_IAR();
1086
1087	/* Enable interrupts IVG7-15 */
1088	bfin_irq_flags |= IMASK_IVG15 |
1089		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1090		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1091
1092
1093	/* This implicitly covers ANOMALY_05000171
1094	 * Boot-ROM code modifies SICA_IWRx wakeup registers
1095	 */
1096#ifdef SIC_IWR0
1097	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
1098# ifdef SIC_IWR1
1099	/* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
1100	 * will screw up the bootrom as it relies on MDMA0/1 waking it
1101	 * up from IDLE instructions.  See this report for more info:
1102	 * http://blackfin.uclinux.org/gf/tracker/4323
1103	 */
1104	if (ANOMALY_05000435)
1105		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
1106	else
1107		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
1108# endif
1109# ifdef SIC_IWR2
1110	bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
1111# endif
1112#else
1113	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
1114#endif
1115	return 0;
1116}
1117
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/* Map a core event vector to its pending system irq number using the
 * ivg7_13 slices built by search_IAR().  Returns -1 when no pending
 * source is found (spurious).
 */
static int vec_to_irq(int vec)
{
	struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
	struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
	unsigned long sic_status[3];
	if (likely(vec == EVT_IVTMR_P))
		return IRQ_CORETMR;
#ifdef SIC_ISR
	sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
#else
	if (smp_processor_id()) {
# ifdef SICB_ISR0
		/* This will be optimized out in UP mode. */
		sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
		sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
# endif
	} else {
		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
	}
#endif
#ifdef SIC_ISR2
	sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
#endif

	/* First pending+unmasked entry in this vector's slice wins. */
	for (;; ivg++) {
		if (ivg >= ivg_stop)
			return -1;
#ifdef SIC_ISR
		if (sic_status[0] & ivg->isrflag)
#else
		if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
#endif
			return ivg->irqno;
	}
}
1157
1158#else /* SEC_GCTL */
1159
1160/*
1161 * This function should be called during kernel startup to initialize
1162 * the BFin IRQ handling routines.
1163 */
1164
1165int __init init_arch_irq(void)
1166{
1167	int irq;
1168	unsigned long ilat = 0;
1169
1170	bfin_write_SEC_GCTL(SEC_GCTL_RESET);
1171
1172	local_irq_disable();
1173
1174	for (irq = 0; irq <= SYS_IRQS; irq++) {
1175		if (irq <= IRQ_CORETMR) {
1176			irq_set_chip_and_handler(irq, &bfin_core_irqchip,
1177				handle_simple_irq);
1178#if defined(CONFIG_TICKSOURCE_CORETMR) && defined(CONFIG_SMP)
1179			if (irq == IRQ_CORETMR)
1180				irq_set_handler(irq, handle_percpu_irq);
1181#endif
1182		} else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
1183			irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
1184				handle_percpu_irq);
1185		} else {
1186			irq_set_chip(irq, &bfin_sec_irqchip);
1187			irq_set_handler(irq, handle_fasteoi_irq);
1188			__irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
1189		}
1190	}
1191
1192	bfin_write_IMASK(0);
1193	CSYNC();
1194	ilat = bfin_read_ILAT();
1195	CSYNC();
1196	bfin_write_ILAT(ilat);
1197	CSYNC();
1198
1199	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1200
1201	bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
1202
1203	/* Enable interrupts IVG7-15 */
1204	bfin_irq_flags |= IMASK_IVG15 |
1205	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1206	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1207
1208
1209	bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
1210	bfin_sec_enable_sci(BFIN_SYSIRQ(IRQ_WATCH0));
1211	bfin_sec_enable_ssi(BFIN_SYSIRQ(IRQ_WATCH0));
1212	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
1213	udelay(100);
1214	bfin_write_SEC_GCTL(SEC_GCTL_EN);
1215	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1216	bfin_write_SEC_SCI(1, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1217
1218	init_software_driven_irq();
1219
1220#ifdef CONFIG_PM
1221	register_syscore_ops(&sec_pm_syscore_ops);
1222#endif
1223
1224	bfin_fault_irq.handler = bfin_fault_routine;
1225#ifdef CONFIG_L1_PARITY_CHECK
1226	setup_irq(IRQ_C0_NMI_L1_PARITY_ERR, &bfin_fault_irq);
1227#endif
1228	setup_irq(IRQ_C0_DBL_FAULT, &bfin_fault_irq);
1229	setup_irq(IRQ_SEC_ERR, &bfin_fault_irq);
1230
1231	return 0;
1232}
1233
1234#ifdef CONFIG_DO_IRQ_L1
1235__attribute__((l1_text))
1236#endif
1237static int vec_to_irq(int vec)
1238{
1239	if (likely(vec == EVT_IVTMR_P))
1240		return IRQ_CORETMR;
1241
1242	return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
1243}
1244#endif  /* SEC_GCTL */
1245
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/* Low-level interrupt entry: map the event vector to an irq and hand
 * it to the generic kernel dispatcher.  Vectors that do not resolve
 * to a valid irq (-1) are silently dropped.
 */
void do_irq(int vec, struct pt_regs *fp)
{
	int irq = vec_to_irq(vec);

	if (irq != -1)
		asm_do_IRQ(irq, fp);
}
1256
1257#ifdef CONFIG_IPIPE
1258
1259int __ipipe_get_irq_priority(unsigned irq)
1260{
1261	int ient, prio;
1262
1263	if (irq <= IRQ_CORETMR)
1264		return irq;
1265
1266#ifdef SEC_GCTL
1267	if (irq >= BFIN_IRQ(0))
1268		return IVG11;
1269#else
1270	for (ient = 0; ient < NR_PERI_INTS; ient++) {
1271		struct ivgx *ivg = ivg_table + ient;
1272		if (ivg->irqno == irq) {
1273			for (prio = 0; prio <= IVG13-IVG7; prio++) {
1274				if (ivg7_13[prio].ifirst <= ivg &&
1275				    ivg7_13[prio].istop > ivg)
1276					return IVG7 + prio;
1277			}
1278		}
1279	}
1280#endif
1281
1282	return IVG15;
1283}
1284
/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * I-pipe aware interrupt entry: log the incoming interrupt into the
 * interrupt pipeline instead of handling it immediately at the
 * current core priority.
 *
 * Returns non-zero when the caller should synchronize the root
 * stage's pending interrupt log (done from EVT15); 0 when nothing
 * more is to be done at this level.
 */
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain = __ipipe_current_domain;
	int irq, s = 0;

	irq = vec_to_irq(vec);
	if (irq == -1)
		return 0;

	if (irq == IRQ_SYSTMR) {
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
		bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
		/* This is basically what we need from the register frame. */
		__this_cpu_write(__ipipe_tick_regs.ipend, regs->ipend);
		__this_cpu_write(__ipipe_tick_regs.pc, regs->pc);
		/* NOTE(review): bit 4 of the saved IPEND appears to flag
		 * "interrupted the root domain" for the tick emulation
		 * code — confirm against the I-pipe timer handling. */
		if (this_domain != ipipe_root_domain)
			__this_cpu_and(__ipipe_tick_regs.ipend, ~0x10);
		else
			__this_cpu_or(__ipipe_tick_regs.ipend, 0x10);
	}

	/*
	 * We don't want Linux interrupt handlers to run at the
	 * current core priority level (i.e. < EVT15), since this
	 * might delay other interrupts handled by a high priority
	 * domain. Here is what we do instead:
	 *
	 * - we raise the SYNCDEFER bit to prevent
	 * __ipipe_handle_irq() to sync the pipeline for the root
	 * stage for the incoming interrupt. Upon return, that IRQ is
	 * pending in the interrupt log.
	 *
	 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
	 * that _schedule_and_signal_from_int will eventually sync the
	 * pipeline from EVT15.
	 */
	if (this_domain == ipipe_root_domain) {
		/* s remembers whether SYNCDEFER was already set on
		 * entry, so only the outermost invocation clears it
		 * again below. */
		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
		barrier();
	}

	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, regs);
	ipipe_trace_irq_exit(irq);

	if (user_mode(regs) &&
	    !ipipe_test_foreign_stack() &&
	    (current->ipipe_flags & PF_EVTRET) != 0) {
		/*
		 * Testing for user_regs() does NOT fully eliminate
		 * foreign stack contexts, because of the forged
		 * interrupt returns we do through
		 * __ipipe_call_irqtail. In that case, we might have
		 * preempted a foreign stack context in a high
		 * priority domain, with a single interrupt level now
		 * pending after the irqtail unwinding is done. In
		 * which case user_mode() is now true, and the event
		 * gets dispatched spuriously.
		 */
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (this_domain == ipipe_root_domain) {
		set_thread_flag(TIF_IRQ_SYNC);
		if (!s) {
			__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
			/* Ask the caller to sync the root log now,
			 * unless the root stage is stalled. */
			return !test_bit(IPIPE_STALL_FLAG, &p->status);
		}
	}

	return 0;
}
1364
1365#endif /* CONFIG_IPIPE */
1366