1 /*
2  *  Atheros AR71xx/AR724x/AR913x specific interrupt handling
3  *
4  *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
5  *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
6  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
7  *
8  *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
9  *
10  *  This program is free software; you can redistribute it and/or modify it
11  *  under the terms of the GNU General Public License version 2 as published
12  *  by the Free Software Foundation.
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/irqchip.h>
19 #include <linux/of_irq.h>
20 
21 #include <asm/irq_cpu.h>
22 #include <asm/mipsregs.h>
23 
24 #include <asm/mach-ath79/ath79.h>
25 #include <asm/mach-ath79/ar71xx_regs.h>
26 #include "common.h"
27 #include "machtypes.h"
28 
ath79_misc_irq_handler(struct irq_desc * desc)29 static void ath79_misc_irq_handler(struct irq_desc *desc)
30 {
31 	void __iomem *base = ath79_reset_base;
32 	u32 pending;
33 
34 	pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
35 		  __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
36 
37 	if (!pending) {
38 		spurious_interrupt();
39 		return;
40 	}
41 
42 	while (pending) {
43 		int bit = __ffs(pending);
44 
45 		generic_handle_irq(ATH79_MISC_IRQ(bit));
46 		pending &= ~BIT(bit);
47 	}
48 }
49 
ar71xx_misc_irq_unmask(struct irq_data * d)50 static void ar71xx_misc_irq_unmask(struct irq_data *d)
51 {
52 	unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
53 	void __iomem *base = ath79_reset_base;
54 	u32 t;
55 
56 	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
57 	__raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
58 
59 	/* flush write */
60 	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
61 }
62 
ar71xx_misc_irq_mask(struct irq_data * d)63 static void ar71xx_misc_irq_mask(struct irq_data *d)
64 {
65 	unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
66 	void __iomem *base = ath79_reset_base;
67 	u32 t;
68 
69 	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
70 	__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
71 
72 	/* flush write */
73 	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
74 }
75 
ar724x_misc_irq_ack(struct irq_data * d)76 static void ar724x_misc_irq_ack(struct irq_data *d)
77 {
78 	unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
79 	void __iomem *base = ath79_reset_base;
80 	u32 t;
81 
82 	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
83 	__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
84 
85 	/* flush write */
86 	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
87 }
88 
/*
 * irq_chip shared by all MISC sources. The ack callback is filled in
 * at init time: irq_mask_ack on AR71xx/AR913x, irq_ack on the
 * AR724x-style SoCs (see ath79_misc_irq_init() and the OF init paths).
 */
static struct irq_chip ath79_misc_irq_chip = {
	.name		= "MISC",
	.irq_unmask	= ar71xx_misc_irq_unmask,
	.irq_mask	= ar71xx_misc_irq_mask,
};
94 
ath79_misc_irq_init(void)95 static void __init ath79_misc_irq_init(void)
96 {
97 	void __iomem *base = ath79_reset_base;
98 	int i;
99 
100 	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
101 	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
102 
103 	if (soc_is_ar71xx() || soc_is_ar913x())
104 		ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
105 	else if (soc_is_ar724x() ||
106 		 soc_is_ar933x() ||
107 		 soc_is_ar934x() ||
108 		 soc_is_qca955x())
109 		ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
110 	else
111 		BUG();
112 
113 	for (i = ATH79_MISC_IRQ_BASE;
114 	     i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) {
115 		irq_set_chip_and_handler(i, &ath79_misc_irq_chip,
116 					 handle_level_irq);
117 	}
118 
119 	irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
120 }
121 
ar934x_ip2_irq_dispatch(struct irq_desc * desc)122 static void ar934x_ip2_irq_dispatch(struct irq_desc *desc)
123 {
124 	u32 status;
125 
126 	status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
127 
128 	if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
129 		ath79_ddr_wb_flush(3);
130 		generic_handle_irq(ATH79_IP2_IRQ(0));
131 	} else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
132 		ath79_ddr_wb_flush(4);
133 		generic_handle_irq(ATH79_IP2_IRQ(1));
134 	} else {
135 		spurious_interrupt();
136 	}
137 }
138 
ar934x_ip2_irq_init(void)139 static void ar934x_ip2_irq_init(void)
140 {
141 	int i;
142 
143 	for (i = ATH79_IP2_IRQ_BASE;
144 	     i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
145 		irq_set_chip_and_handler(i, &dummy_irq_chip,
146 					 handle_level_irq);
147 
148 	irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
149 }
150 
qca955x_ip2_irq_dispatch(struct irq_desc * desc)151 static void qca955x_ip2_irq_dispatch(struct irq_desc *desc)
152 {
153 	u32 status;
154 
155 	status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
156 	status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;
157 
158 	if (status == 0) {
159 		spurious_interrupt();
160 		return;
161 	}
162 
163 	if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
164 		/* TODO: flush DDR? */
165 		generic_handle_irq(ATH79_IP2_IRQ(0));
166 	}
167 
168 	if (status & QCA955X_EXT_INT_WMAC_ALL) {
169 		/* TODO: flush DDR? */
170 		generic_handle_irq(ATH79_IP2_IRQ(1));
171 	}
172 }
173 
qca955x_ip3_irq_dispatch(struct irq_desc * desc)174 static void qca955x_ip3_irq_dispatch(struct irq_desc *desc)
175 {
176 	u32 status;
177 
178 	status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
179 	status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
180 		  QCA955X_EXT_INT_USB1 |
181 		  QCA955X_EXT_INT_USB2;
182 
183 	if (status == 0) {
184 		spurious_interrupt();
185 		return;
186 	}
187 
188 	if (status & QCA955X_EXT_INT_USB1) {
189 		/* TODO: flush DDR? */
190 		generic_handle_irq(ATH79_IP3_IRQ(0));
191 	}
192 
193 	if (status & QCA955X_EXT_INT_USB2) {
194 		/* TODO: flush DDR? */
195 		generic_handle_irq(ATH79_IP3_IRQ(1));
196 	}
197 
198 	if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) {
199 		/* TODO: flush DDR? */
200 		generic_handle_irq(ATH79_IP3_IRQ(2));
201 	}
202 }
203 
qca955x_irq_init(void)204 static void qca955x_irq_init(void)
205 {
206 	int i;
207 
208 	for (i = ATH79_IP2_IRQ_BASE;
209 	     i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
210 		irq_set_chip_and_handler(i, &dummy_irq_chip,
211 					 handle_level_irq);
212 
213 	irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch);
214 
215 	for (i = ATH79_IP3_IRQ_BASE;
216 	     i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++)
217 		irq_set_chip_and_handler(i, &dummy_irq_chip,
218 					 handle_level_irq);
219 
220 	irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
221 }
222 
223 /*
224  * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
225  * these devices typically allocate coherent DMA memory, however the
226  * DMA controller may still have some unsynchronized data in the FIFO.
227  * Issue a flush in the handlers to ensure that the driver sees
228  * the update.
229  *
 * This array maps the interrupt lines to the DDR write buffer channels.
231  */
232 
/* Per-CPU-IRQ-line DDR write buffer channel. -1 (stored as UINT_MAX
 * in this unsigned array; the comparison in plat_irq_dispatch() relies
 * on the same implicit conversion) means "no flush for this line". */
static unsigned irq_wb_chan[8] = {
	-1, -1, -1, -1, -1, -1, -1, -1,
};
236 
plat_irq_dispatch(void)237 asmlinkage void plat_irq_dispatch(void)
238 {
239 	unsigned long pending;
240 	int irq;
241 
242 	pending = read_c0_status() & read_c0_cause() & ST0_IM;
243 
244 	if (!pending) {
245 		spurious_interrupt();
246 		return;
247 	}
248 
249 	pending >>= CAUSEB_IP;
250 	while (pending) {
251 		irq = fls(pending) - 1;
252 		if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
253 			ath79_ddr_wb_flush(irq_wb_chan[irq]);
254 		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
255 		pending &= ~BIT(irq);
256 	}
257 }
258 
259 #ifdef CONFIG_IRQCHIP
/* irq_domain .map callback: bind each mapped MISC IRQ to the shared
 * chip with level-type flow handling. */
static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
	return 0;
}
265 
/* One-cell DT interrupt specifiers, mapped through misc_map(). */
static const struct irq_domain_ops misc_irq_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = misc_map,
};
270 
ath79_misc_intc_of_init(struct device_node * node,struct device_node * parent)271 static int __init ath79_misc_intc_of_init(
272 	struct device_node *node, struct device_node *parent)
273 {
274 	void __iomem *base = ath79_reset_base;
275 	struct irq_domain *domain;
276 	int irq;
277 
278 	irq = irq_of_parse_and_map(node, 0);
279 	if (!irq)
280 		panic("Failed to get MISC IRQ");
281 
282 	domain = irq_domain_add_legacy(node, ATH79_MISC_IRQ_COUNT,
283 			ATH79_MISC_IRQ_BASE, 0, &misc_irq_domain_ops, NULL);
284 	if (!domain)
285 		panic("Failed to add MISC irqdomain");
286 
287 	/* Disable and clear all interrupts */
288 	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
289 	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
290 
291 
292 	irq_set_chained_handler(irq, ath79_misc_irq_handler);
293 
294 	return 0;
295 }
296 
/* AR7100 flavour: ack by masking the source (the mask callback is
 * installed as irq_mask_ack), then run the common MISC init. */
static int __init ar7100_misc_intc_of_init(
	struct device_node *node, struct device_node *parent)
{
	ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
	return ath79_misc_intc_of_init(node, parent);
}

IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
		ar7100_misc_intc_of_init);
306 
/* AR7240 flavour: ack by clearing the latched status bit (dedicated
 * irq_ack callback), then run the common MISC init. */
static int __init ar7240_misc_intc_of_init(
	struct device_node *node, struct device_node *parent)
{
	ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
	return ath79_misc_intc_of_init(node, parent);
}

IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
		ar7240_misc_intc_of_init);
316 
ar79_cpu_intc_of_init(struct device_node * node,struct device_node * parent)317 static int __init ar79_cpu_intc_of_init(
318 	struct device_node *node, struct device_node *parent)
319 {
320 	int err, i, count;
321 
322 	/* Fill the irq_wb_chan table */
323 	count = of_count_phandle_with_args(
324 		node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
325 
326 	for (i = 0; i < count; i++) {
327 		struct of_phandle_args args;
328 		u32 irq = i;
329 
330 		of_property_read_u32_index(
331 			node, "qca,ddr-wb-channel-interrupts", i, &irq);
332 		if (irq >= ARRAY_SIZE(irq_wb_chan))
333 			continue;
334 
335 		err = of_parse_phandle_with_args(
336 			node, "qca,ddr-wb-channels",
337 			"#qca,ddr-wb-channel-cells",
338 			i, &args);
339 		if (err)
340 			return err;
341 
342 		irq_wb_chan[irq] = args.args[0];
343 		pr_info("IRQ: Set flush channel of IRQ%d to %d\n",
344 			irq, args.args[0]);
345 	}
346 
347 	return mips_cpu_irq_of_init(node, parent);
348 }
349 IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
350 		ar79_cpu_intc_of_init);
351 
352 #endif
353 
arch_init_irq(void)354 void __init arch_init_irq(void)
355 {
356 	if (mips_machtype == ATH79_MACH_GENERIC_OF) {
357 		irqchip_init();
358 		return;
359 	}
360 
361 	if (soc_is_ar71xx() || soc_is_ar724x() ||
362 	    soc_is_ar913x() || soc_is_ar933x()) {
363 		irq_wb_chan[2] = 3;
364 		irq_wb_chan[3] = 2;
365 	} else if (soc_is_ar934x()) {
366 		irq_wb_chan[3] = 2;
367 	}
368 
369 	mips_cpu_irq_init();
370 	ath79_misc_irq_init();
371 
372 	if (soc_is_ar934x())
373 		ar934x_ip2_irq_init();
374 	else if (soc_is_qca955x())
375 		qca955x_irq_init();
376 }
377