/*
 *  linux/arch/arm/plat-pxa/gpio.c
 *
 *  Generic PXA GPIO handling
 *
 *  Author:	Nicolas Pitre
 *  Created:	Jun 15, 2001
 *  Copyright:	MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>

/*
 * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
 * one set of registers. The register offsets are organized below:
 *
 *           GPLR    GPDR    GPSR    GPCR    GRER    GFER    GEDR
 * BANK 0 - 0x0000  0x000C  0x0018  0x0024  0x0030  0x003C  0x0048
 * BANK 1 - 0x0004  0x0010  0x001C  0x0028  0x0034  0x0040  0x004C
 * BANK 2 - 0x0008  0x0014  0x0020  0x002C  0x0038  0x0044  0x0050
 *
 * BANK 3 - 0x0100  0x010C  0x0118  0x0124  0x0130  0x013C  0x0148
 * BANK 4 - 0x0104  0x0110  0x011C  0x0128  0x0134  0x0140  0x014C
 * BANK 5 - 0x0108  0x0114  0x0120  0x012C  0x0138  0x0144  0x0150
 *
 * BANK 6 - 0x0200  0x020C  0x0218  0x0224  0x0230  0x023C  0x0248
 *
 * NOTE:
 *   BANK 3 is only available on PXA27x and later processors.
 *   BANK 4 and 5 are only available on PXA935 and PXA1928.
 *   BANK 6 is only available on PXA1928.
 */

#define GPLR_OFFSET	0x00
#define GPDR_OFFSET	0x0C
#define GPSR_OFFSET	0x18
#define GPCR_OFFSET	0x24
#define GRER_OFFSET	0x30
#define GFER_OFFSET	0x3C
#define GEDR_OFFSET	0x48
#define GAFR_OFFSET	0x54
#define ED_MASK_OFFSET	0x9C	/* GPIO edge detection for AP side */

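/*
 * Byte offset of a bank's register window (see the bank/offset table
 * above): banks 0-2, 3-5 and 6 sit in 0x100-sized blocks, with a
 * 4-byte stride between banks inside each block.
 */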
#define BANK_OFF(n)	((((n) / 3) << 8) + (((n) % 3) << 2))

int pxa_last_gpio;
static int irq_base;

#ifdef CONFIG_OF
static struct irq_domain *domain;
static struct device_node *pxa_gpio_of_node;
#endif

struct pxa_gpio_chip {
	struct gpio_chip chip;
	void __iomem	*regbase;
	char label[10];

	unsigned long	irq_mask;
	unsigned long	irq_edge_rise;
	unsigned long	irq_edge_fall;
	int (*set_wake)(unsigned int gpio, unsigned int on);

#ifdef CONFIG_PM
	unsigned long	saved_gplr;
	unsigned long	saved_gpdr;
	unsigned long	saved_grer;
	unsigned long	saved_gfer;
#endif
};

enum pxa_gpio_type {
	PXA25X_GPIO = 0,
	PXA26X_GPIO,
	PXA27X_GPIO,
	PXA3XX_GPIO,
	PXA93X_GPIO,
	MMP_GPIO = 0x10,
	MMP2_GPIO,
	PXA1928_GPIO,
};

struct pxa_gpio_id {
	enum pxa_gpio_type	type;
	int			gpio_nums;
};

static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chips;
static enum pxa_gpio_type gpio_type;
static void __iomem *gpio_reg_base;

static struct pxa_gpio_id pxa25x_id = {
	.type		= PXA25X_GPIO,
	.gpio_nums	= 85,
};

static struct pxa_gpio_id pxa26x_id = {
	.type		= PXA26X_GPIO,
	.gpio_nums	= 90,
};

static struct pxa_gpio_id pxa27x_id = {
	.type		= PXA27X_GPIO,
	.gpio_nums	= 121,
};

static struct pxa_gpio_id pxa3xx_id = {
	.type		= PXA3XX_GPIO,
	.gpio_nums	= 128,
};

static struct pxa_gpio_id pxa93x_id = {
	.type		= PXA93X_GPIO,
	.gpio_nums	= 192,
};

static struct pxa_gpio_id mmp_id = {
	.type		= MMP_GPIO,
	.gpio_nums	= 128,
};

static struct pxa_gpio_id mmp2_id = {
	.type		= MMP2_GPIO,
	.gpio_nums	= 192,
};

static struct pxa_gpio_id pxa1928_id = {
	.type		= PXA1928_GPIO,
	.gpio_nums	= 224,
};

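/*
 * Iterate over all GPIO banks: 'i' is the first GPIO number of the
 * bank and 'c' points to the bank's pxa_gpio_chip.
 */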
#define for_each_gpio_chip(i, c)			\
	for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)

static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
{
	return container_of(c, struct pxa_gpio_chip, chip)->regbase;
}

static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
{
	return &pxa_gpio_chips[gpio_to_bank(gpio)];
}

static inline int gpio_is_pxa_type(int type)
{
	return (type & MMP_GPIO) == 0;
}

static inline int gpio_is_mmp_type(int type)
{
	return (type & MMP_GPIO) != 0;
}

/* GPIO86/87/88/89 on PXA26x have their direction bits in PXA_GPDR(2) inverted,
 * as well as their Alternate Function value being '1' for GPIO in GAFRx.
 */
static inline int __gpio_is_inverted(int gpio)
{
	if ((gpio_type == PXA26X_GPIO) && (gpio > 85))
		return 1;
	return 0;
}

/*
 * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
 * function of a GPIO, and GPDRx cannot be altered once configured. It
 * is attributed as "occupied" here (I know this terminology isn't
 * accurate, you are welcome to propose a better one :-)
 */
static inline int __gpio_is_occupied(unsigned gpio)
{
	struct pxa_gpio_chip *pxachip;
	void __iomem *base;
	unsigned long gafr = 0, gpdr = 0;
	int ret, af = 0, dir = 0;

	pxachip = gpio_to_pxachip(gpio);
	base = gpio_chip_base(&pxachip->chip);
	gpdr = readl_relaxed(base + GPDR_OFFSET);

	switch (gpio_type) {
	case PXA25X_GPIO:
	case PXA26X_GPIO:
	case PXA27X_GPIO:
		gafr = readl_relaxed(base + GAFR_OFFSET);
		af = (gafr >> ((gpio & 0xf) * 2)) & 0x3;
		dir = gpdr & GPIO_bit(gpio);

		if (__gpio_is_inverted(gpio))
			ret = (af != 1) || (dir == 0);
		else
			ret = (af != 0) || (dir != 0);
		break;
	default:
		ret = gpdr & GPIO_bit(gpio);
		break;
	}
	return ret;
}

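/*
 * GPIO IRQs are allocated as one linear range starting at irq_base,
 * so GPIO<->IRQ conversion is a simple offset in both directions.
 */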
static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	return chip->base + offset + irq_base;
}

int pxa_irq_to_gpio(int irq)
{
	return irq - irq_base;
}

static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	void __iomem *base = gpio_chip_base(chip);
	uint32_t value, mask = 1 << offset;
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);

	value = readl_relaxed(base + GPDR_OFFSET);
	if (__gpio_is_inverted(chip->base + offset))
		value |= mask;
	else
		value &= ~mask;
	writel_relaxed(value, base + GPDR_OFFSET);

	spin_unlock_irqrestore(&gpio_lock, flags);
	return 0;
}

static int pxa_gpio_direction_output(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	void __iomem *base = gpio_chip_base(chip);
	uint32_t tmp, mask = 1 << offset;
	unsigned long flags;

	writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));

	spin_lock_irqsave(&gpio_lock, flags);

	tmp = readl_relaxed(base + GPDR_OFFSET);
	if (__gpio_is_inverted(chip->base + offset))
		tmp &= ~mask;
	else
		tmp |= mask;
	writel_relaxed(tmp, base + GPDR_OFFSET);

	spin_unlock_irqrestore(&gpio_lock, flags);
	return 0;
}

static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	u32 gplr = readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET);
	return !!(gplr & (1 << offset));
}

static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	writel_relaxed(1 << offset, gpio_chip_base(chip) +
				(value ? GPSR_OFFSET : GPCR_OFFSET));
}

#ifdef CONFIG_OF_GPIO
static int pxa_gpio_of_xlate(struct gpio_chip *gc,
			     const struct of_phandle_args *gpiospec,
			     u32 *flags)
{
	if (gpiospec->args[0] > pxa_last_gpio)
		return -EINVAL;

	if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[1];

	return gpiospec->args[0] % 32;
}
#endif

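/*
 * Register one gpio_chip per 32-GPIO bank, pointing each at its
 * register window and wiring up the gpiolib callbacks above.
 */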
static int pxa_init_gpio_chip(int gpio_end,
					int (*set_wake)(unsigned int, unsigned int))
{
	int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
	struct pxa_gpio_chip *chips;

	chips = kzalloc(nbanks * sizeof(struct pxa_gpio_chip), GFP_KERNEL);
	if (chips == NULL) {
		pr_err("%s: failed to allocate GPIO chips\n", __func__);
		return -ENOMEM;
	}

	for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
		struct gpio_chip *c = &chips[i].chip;

		sprintf(chips[i].label, "gpio-%d", i);
		chips[i].regbase = gpio_reg_base + BANK_OFF(i);
		chips[i].set_wake = set_wake;

		c->base  = gpio;
		c->label = chips[i].label;

		c->direction_input  = pxa_gpio_direction_input;
		c->direction_output = pxa_gpio_direction_output;
		c->get = pxa_gpio_get;
		c->set = pxa_gpio_set;
		c->to_irq = pxa_gpio_to_irq;
#ifdef CONFIG_OF_GPIO
		c->of_node = pxa_gpio_of_node;
		c->of_xlate = pxa_gpio_of_xlate;
		c->of_gpio_n_cells = 2;
#endif

		/* number of GPIOs on last bank may be less than 32 */
		c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
		gpiochip_add(c);
	}
	pxa_gpio_chips = chips;
	return 0;
}

/* Update the GRERx and GFERx edge detection bits, but only for those
 * GPIOs whose bits are set in c->irq_mask
 */
static inline void update_edge_detect(struct pxa_gpio_chip *c)
{
	uint32_t grer, gfer;

	grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
	gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
	grer |= c->irq_edge_rise & c->irq_mask;
	gfer |= c->irq_edge_fall & c->irq_mask;
	writel_relaxed(grer, c->regbase + GRER_OFFSET);
	writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

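/*
 * Configure edge detection for a GPIO IRQ. IRQ_TYPE_PROBE requests
 * both edges, but only on GPIOs that are neither already claimed for
 * edge detection nor configured as alternate function/output.
 */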
static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	struct pxa_gpio_chip *c;
	int gpio = pxa_irq_to_gpio(d->irq);
	unsigned long gpdr, mask = GPIO_bit(gpio);

	c = gpio_to_pxachip(gpio);

	if (type == IRQ_TYPE_PROBE) {
		/* Don't mess with enabled GPIOs using preconfigured edges or
		 * GPIOs set to alternate function or to output during probe
		 */
		if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
			return 0;

		if (__gpio_is_occupied(gpio))
			return 0;

		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);

	if (__gpio_is_inverted(gpio))
		writel_relaxed(gpdr | mask,  c->regbase + GPDR_OFFSET);
	else
		writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);

	if (type & IRQ_TYPE_EDGE_RISING)
		c->irq_edge_rise |= mask;
	else
		c->irq_edge_rise &= ~mask;

	if (type & IRQ_TYPE_EDGE_FALLING)
		c->irq_edge_fall |= mask;
	else
		c->irq_edge_fall &= ~mask;

	update_edge_detect(c);

	pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
		((type & IRQ_TYPE_EDGE_RISING)  ? " rising"  : ""),
		((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
	return 0;
}

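/*
 * Chained handler for the muxed GPIO interrupt: keep scanning all
 * banks until no masked-in GEDR bits remain pending, dispatching each
 * pending bit to its per-GPIO virtual IRQ.
 */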
static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct pxa_gpio_chip *c;
	int loop, gpio, gpio_base, n;
	unsigned long gedr;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	do {
		loop = 0;
		for_each_gpio_chip(gpio, c) {
			gpio_base = c->chip.base;

			gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
			gedr = gedr & c->irq_mask;
			writel_relaxed(gedr, c->regbase + GEDR_OFFSET);

			for_each_set_bit(n, &gedr, BITS_PER_LONG) {
				loop = 1;

				generic_handle_irq(gpio_to_irq(gpio_base + n));
			}
		}
	} while (loop);

	chained_irq_exit(chip, desc);
}

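/* Acknowledge an edge interrupt by writing its bit back to GEDR to clear it. */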
static void pxa_ack_muxed_gpio(struct irq_data *d)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

	writel_relaxed(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
}

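/*
 * Mask a GPIO IRQ by clearing its rise/fall detect bits in hardware;
 * the requested edges stay cached in irq_edge_rise/irq_edge_fall so
 * pxa_unmask_muxed_gpio() can restore them via update_edge_detect().
 */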
static void pxa_mask_muxed_gpio(struct irq_data *d)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
	uint32_t grer, gfer;

	c->irq_mask &= ~GPIO_bit(gpio);

	grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
	gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
	writel_relaxed(grer, c->regbase + GRER_OFFSET);
	writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

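/* Delegate wake-up configuration to the platform callback, if one was provided. */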
static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

	if (c->set_wake)
		return c->set_wake(gpio, on);
	else
		return 0;
}

static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

	c->irq_mask |= GPIO_bit(gpio);
	update_edge_detect(c);
}

static struct irq_chip pxa_muxed_gpio_chip = {
	.name		= "GPIO",
	.irq_ack	= pxa_ack_muxed_gpio,
	.irq_mask	= pxa_mask_muxed_gpio,
	.irq_unmask	= pxa_unmask_muxed_gpio,
	.irq_set_type	= pxa_gpio_irq_type,
	.irq_set_wake	= pxa_gpio_set_wake,
};

static int pxa_gpio_nums(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	struct pxa_gpio_id *pxa_id = (struct pxa_gpio_id *)id->driver_data;
	int count = 0;

	switch (pxa_id->type) {
	case PXA25X_GPIO:
	case PXA26X_GPIO:
	case PXA27X_GPIO:
	case PXA3XX_GPIO:
	case PXA93X_GPIO:
	case MMP_GPIO:
	case MMP2_GPIO:
	case PXA1928_GPIO:
		gpio_type = pxa_id->type;
		count = pxa_id->gpio_nums - 1;
		break;
	default:
		count = -EINVAL;
		break;
	}
	return count;
}

#ifdef CONFIG_OF
static const struct of_device_id pxa_gpio_dt_ids[] = {
	{ .compatible = "intel,pxa25x-gpio",	.data = &pxa25x_id, },
	{ .compatible = "intel,pxa26x-gpio",	.data = &pxa26x_id, },
	{ .compatible = "intel,pxa27x-gpio",	.data = &pxa27x_id, },
	{ .compatible = "intel,pxa3xx-gpio",	.data = &pxa3xx_id, },
	{ .compatible = "marvell,pxa93x-gpio",	.data = &pxa93x_id, },
	{ .compatible = "marvell,mmp-gpio",	.data = &mmp_id, },
	{ .compatible = "marvell,mmp2-gpio",	.data = &mmp2_id, },
	{ .compatible = "marvell,pxa1928-gpio",	.data = &pxa1928_id, },
	{}
};

static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
				 handle_edge_irq);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	return 0;
}

const struct irq_domain_ops pxa_irq_domain_ops = {
	.map	= pxa_irq_domain_map,
	.xlate	= irq_domain_xlate_twocell,
};

static int pxa_gpio_probe_dt(struct platform_device *pdev)
{
	int ret = 0, nr_gpios;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
				of_match_device(pxa_gpio_dt_ids, &pdev->dev);
	const struct pxa_gpio_id *gpio_id;

	if (!of_id || !of_id->data) {
		dev_err(&pdev->dev, "Failed to find gpio controller\n");
		return -EFAULT;
	}
	gpio_id = of_id->data;
	gpio_type = gpio_id->type;

	nr_gpios = gpio_id->gpio_nums;
	pxa_last_gpio = nr_gpios - 1;

	irq_base = irq_alloc_descs(-1, 0, nr_gpios, 0);
	if (irq_base < 0) {
		dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
		ret = irq_base;
		goto err;
	}
	domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0,
				       &pxa_irq_domain_ops, NULL);
	pxa_gpio_of_node = np;
	return 0;
err:
	iounmap(gpio_reg_base);
	return ret;
}
#else
#define pxa_gpio_probe_dt(pdev)		(-1)
#endif

static int pxa_gpio_probe(struct platform_device *pdev)
{
	struct pxa_gpio_chip *c;
	struct resource *res;
	struct clk *clk;
	struct pxa_gpio_platform_data *info;
	int gpio, irq, ret, use_of = 0;
	int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;

	info = dev_get_platdata(&pdev->dev);
	if (info) {
		irq_base = info->irq_base;
		if (irq_base <= 0)
			return -EINVAL;
		pxa_last_gpio = pxa_gpio_nums(pdev);
	} else {
		irq_base = 0;
		use_of = 1;
		ret = pxa_gpio_probe_dt(pdev);
		if (ret < 0)
			return -EINVAL;
	}

	if (!pxa_last_gpio)
		return -EINVAL;

	irq0 = platform_get_irq_byname(pdev, "gpio0");
	irq1 = platform_get_irq_byname(pdev, "gpio1");
	irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
	if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
		|| (irq_mux <= 0))
		return -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	gpio_reg_base = ioremap(res->start, resource_size(res));
	if (!gpio_reg_base)
		return -EINVAL;

	if (irq0 > 0)
		gpio_offset = 2;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
			PTR_ERR(clk));
		iounmap(gpio_reg_base);
		return PTR_ERR(clk);
	}
	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		iounmap(gpio_reg_base);
		return ret;
	}

	/* Initialize GPIO chips */
	pxa_init_gpio_chip(pxa_last_gpio, info ? info->gpio_set_wake : NULL);

	/* clear all GPIO edge detects */
	for_each_gpio_chip(gpio, c) {
		writel_relaxed(0, c->regbase + GFER_OFFSET);
		writel_relaxed(0, c->regbase + GRER_OFFSET);
		writel_relaxed(~0, c->regbase + GEDR_OFFSET);
		/* unmask GPIO edge detect for AP side */
		if (gpio_is_mmp_type(gpio_type))
			writel_relaxed(~0, c->regbase + ED_MASK_OFFSET);
	}

	if (!use_of) {
		if (irq0 > 0) {
			irq = gpio_to_irq(0);
			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
						 handle_edge_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
		if (irq1 > 0) {
			irq = gpio_to_irq(1);
			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
						 handle_edge_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}

		for (irq  = gpio_to_irq(gpio_offset);
			irq <= gpio_to_irq(pxa_last_gpio); irq++) {
			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
						 handle_edge_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
	}

	if (irq0 > 0)
		irq_set_chained_handler(irq0, pxa_gpio_demux_handler);
	if (irq1 > 0)
		irq_set_chained_handler(irq1, pxa_gpio_demux_handler);

	irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler);
	return 0;
}

static const struct platform_device_id gpio_id_table[] = {
	{ "pxa25x-gpio",	(unsigned long)&pxa25x_id },
	{ "pxa26x-gpio",	(unsigned long)&pxa26x_id },
	{ "pxa27x-gpio",	(unsigned long)&pxa27x_id },
	{ "pxa3xx-gpio",	(unsigned long)&pxa3xx_id },
	{ "pxa93x-gpio",	(unsigned long)&pxa93x_id },
	{ "mmp-gpio",		(unsigned long)&mmp_id },
	{ "mmp2-gpio",		(unsigned long)&mmp2_id },
	{ "pxa1928-gpio",	(unsigned long)&pxa1928_id },
	{ },
};

static struct platform_driver pxa_gpio_driver = {
	.probe		= pxa_gpio_probe,
	.driver		= {
		.name	= "pxa-gpio",
		.of_match_table = of_match_ptr(pxa_gpio_dt_ids),
	},
	.id_table	= gpio_id_table,
};

static int __init pxa_gpio_init(void)
{
	return platform_driver_register(&pxa_gpio_driver);
}
postcore_initcall(pxa_gpio_init);

#ifdef CONFIG_PM
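/*
 * Save GPIO level, direction and edge-detect settings across a system
 * suspend, clearing GEDR so that stale events are not replayed on resume.
 */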
static int pxa_gpio_suspend(void)
{
	struct pxa_gpio_chip *c;
	int gpio;

	for_each_gpio_chip(gpio, c) {
		c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
		c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
		c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
		c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);

		/* Clear GPIO transition detect bits */
		writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
	}
	return 0;
}

static void pxa_gpio_resume(void)
{
	struct pxa_gpio_chip *c;
	int gpio;

	for_each_gpio_chip(gpio, c) {
		/* restore level with set/clear */
		writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
		writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);

		writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
		writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
		writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
	}
}
#else
#define pxa_gpio_suspend	NULL
#define pxa_gpio_resume		NULL
#endif

struct syscore_ops pxa_gpio_syscore_ops = {
	.suspend	= pxa_gpio_suspend,
	.resume		= pxa_gpio_resume,
};

static int __init pxa_gpio_sysinit(void)
{
	register_syscore_ops(&pxa_gpio_syscore_ops);
	return 0;
}
postcore_initcall(pxa_gpio_sysinit);