/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gpio.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>

#define OFF_MODE	1

static LIST_HEAD(omap_gpio_list);

struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	struct list_head node;
	void __iomem *base;
	u16 irq;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	spinlock_t lock;
	struct gpio_chip chip;
	struct clk *dbck;
	u32 mod_usage;
	u32 irq_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	struct device *dev;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	bool context_valid;
	int stride;
	u32 width;
	int context_loss_count;
	int power_mode;
	bool workaround_enabled;

	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;
};

#define GPIO_MOD_CTRL_BIT	BIT(0)

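/*
 * mod_usage tracks lines claimed through gpiolib (omap_gpio_request()/
 * omap_gpio_free()) and irq_usage tracks lines used as interrupts;
 * BANK_USED() is what gates the bank's runtime PM get/put calls.
 */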
#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))

static void omap_gpio_unmask_irq(struct irq_data *d);

static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
	return container_of(chip, struct gpio_bank, chip);
}

static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = readl_relaxed(reg);
	if (is_input)
		l |= BIT(gpio);
	else
		l &= ~(BIT(gpio));
	writel_relaxed(l, reg);
	bank->context.oe = l;
}

/* set data out value using the dedicated set/clear registers */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}

/* set data out value using mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = BIT(offset);
	u32 l;

	l = readl_relaxed(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	writel_relaxed(l, reg);
	bank->context.dataout = l;
}
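
/*
 * Banks with dedicated SETDATAOUT/CLEARDATAOUT registers use
 * omap_set_gpio_dataout_reg(); the others fall back to a read-modify-write
 * of the single DATAOUT register via omap_set_gpio_dataout_mask(). The
 * choice is made once per bank in omap_gpio_probe() (bank->set_dataout).
 */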

static int omap_get_gpio_datain(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return (readl_relaxed(reg) & (BIT(offset))) != 0;
}

static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & (BIT(offset))) != 0;
}

static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
	int l = readl_relaxed(base + reg);

	if (set)
		l |= mask;
	else
		l &= ~mask;

	writel_relaxed(l, base + reg);
}

static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_prepare_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			     bank->base + bank->regs->debounce_en);
	}
}

static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting its clock. If debounce is
		 * enabled but the clock is not, the GPIO module seems to be
		 * unable to detect events and generate interrupts, at least
		 * on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable_unprepare(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use, in microseconds
 *
 * OMAP's debounce time is programmed in 31 us steps, so the requested
 * value is converted to those units and clamped to the register range.
 */
static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				    unsigned debounce)
{
	void __iomem		*reg;
	u32			val;
	u32			l;

	if (!bank->dbck_flag)
		return;

	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;

	l = BIT(offset);

	clk_prepare_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	writel_relaxed(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = readl_relaxed(reg);

	if (debounce)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	writel_relaxed(val, reg);
	clk_disable_unprepare(bank->dbck);
	/*
	 * Enable the debounce clock for this module.
	 * This call is mandatory because when omap_gpio_request() calls
	 * *_runtime_get_sync(), omap_gpio_dbck_enable() in the runtime
	 * callback fails to turn on dbck, since dbck_enable_mask is not
	 * yet initialized at that point. Therefore we have to enable dbck
	 * here.
	 */
	omap_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}
}
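
/*
 * A rough sketch of how a consumer reaches the code above through the
 * legacy gpiolib API (the GPIO number, label and 100 us value are only
 * illustrative): gpio_set_debounce() ends up in omap_gpio_debounce()
 * below, which calls omap2_set_gpio_debounce(). A request of 100 us is
 * programmed as 100 / 0x1f - 1 = 2, i.e. roughly 93 us in the hardware's
 * (value + 1) * 31 us encoding.
 */
#if 0	/* illustrative sketch only, not part of the driver */
static int example_debounced_button(unsigned gpio)
{
	int ret;

	/* claim the line as an input */
	ret = gpio_request_one(gpio, GPIOF_IN, "button");
	if (ret)
		return ret;

	/* ask for ~100 us of debounce, rounded to the 31 us steps above */
	return gpio_set_debounce(gpio, 100);
}
#endif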

/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this function
 * if this is the only gpio in the bank using debounce.
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		     bank->base + bank->regs->debounce_en);

	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			     bank->regs->debounce);
		clk_disable_unprepare(bank->dbck);
		bank->dbck_enabled = false;
	}
}

static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
						unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);
	omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		      trigger & IRQ_TYPE_EDGE_RISING);
	omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		      trigger & IRQ_TYPE_EDGE_FALLING);

	bank->context.leveldetect0 =
			readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			readl_relaxed(bank->base + bank->regs->fallingdetect);

	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	/* This part must always be executed for OMAP34xx and OMAP44xx */
	if (!bank->regs->irqctrl) {
		/* On omap24xx, proceed only if this is a non-wakeup GPIO */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ after
		 * resume if the input level changes, to avoid losing IRQs
		 * during PER RET/OFF mode.
		 * Applies to omap2 non-wakeup gpios and all omap3 gpios.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	bank->level_mask =
		readl_relaxed(bank->base + bank->regs->leveldetect0) |
		readl_relaxed(bank->base + bank->regs->leveldetect1);
}
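
/*
 * level_mask computed above is consulted by omap_gpio_irq_handler() and
 * omap_gpio_unmask_irq() to treat level-triggered lines differently from
 * edge-triggered ones.
 */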

#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once.  For all other chips, this function is a noop.
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	l = readl_relaxed(reg);
	if ((l >> gpio) & 1)
		l &= ~(BIT(gpio));
	else
		l |= BIT(gpio);

	writel_relaxed(l, reg);
}
#else
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif

static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
		writel_relaxed(l, reg);
	}
	return 0;
}

static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *base = bank->base;

	if (bank->regs->wkup_en &&
	    !LINE_USED(bank->mod_usage, offset) &&
	    !LINE_USED(bank->irq_usage, offset)) {
		/* Disable wake-up during idle for dynamic tick */
		omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
		bank->context.wake_en =
			readl_relaxed(bank->base + bank->regs->wkup_en);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}

static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}

static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (!BANK_USED(bank))
		pm_runtime_get_sync(bank->dev);

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
		(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	omap_gpio_init_irq(bank, offset);
	if (!omap_gpio_is_input(bank, offset)) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return retval;
}

static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}

static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 |= gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = readl_relaxed(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
		bank->context.irqenable1 = l;
	}

	writel_relaxed(l, reg);
}

static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 &= ~gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = readl_relaxed(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
		bank->context.irqenable1 = l;
	}

	writel_relaxed(l, reg);
}

static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	if (enable)
		omap_enable_gpio_irqbank(bank, BIT(offset));
	else
		omap_disable_gpio_irqbank(bank, BIT(offset));
}

/*
 * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
 * The 1510 does not seem to have a wake-up register. If JTAG is connected
 * to the target, the system will always wake up on GPIO events. While the
 * system is running, all registered GPIO interrupts need to have wake-up
 * enabled. When the system is suspended, only selected GPIO interrupts need
 * to have wake-up enabled.
 */
static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
				int enable)
{
	u32 gpio_bit = BIT(offset);
	unsigned long flags;

	if (bank->non_wakeup_gpios & gpio_bit) {
		dev_err(bank->dev,
			"Unable to modify wakeup on non-wakeup GPIO%d\n",
			offset);
		return -EINVAL;
	}

	spin_lock_irqsave(&bank->lock, flags);
	if (enable)
		bank->context.wake_en |= gpio_bit;
	else
		bank->context.wake_en &= ~gpio_bit;

	writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_reset_gpio(struct gpio_bank *bank, unsigned offset)
{
	omap_set_gpio_direction(bank, offset, 1);
	omap_set_gpio_irqenable(bank, offset, 0);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_clear_gpio_debounce(bank, offset);
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;

	return omap_set_gpio_wakeup(bank, offset, enable);
}
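
/*
 * Sketch of the driver-side usage referred to in the comment above (the IRQ
 * number, name and handler are illustrative): a client requests the GPIO
 * interrupt as usual and then calls enable_irq_wake(), which reaches this
 * driver through the irq_set_wake hook (omap_gpio_wake_enable) and finally
 * omap_set_gpio_wakeup().
 */
#if 0	/* illustrative sketch only, not part of the driver */
static int example_wake_source(struct device *dev, int irq,
			       irq_handler_t handler)
{
	int ret;

	ret = devm_request_irq(dev, irq, handler, IRQF_TRIGGER_FALLING,
			       "wake-button", dev);
	if (ret)
		return ret;

	/* mark the line as a wakeup source while the system is suspended */
	return enable_irq_wake(irq);
}
#endif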

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_get_sync(bank->dev);

	spin_lock_irqsave(&bank->lock, flags);
	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type(). Only do this if the IRQ line has
	 * not already been requested.
	 */
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
		omap_enable_gpio_module(bank, offset);
	}
	bank->mod_usage |= BIT(offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
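
/*
 * Note the runtime PM pairing: the pm_runtime_get_sync() taken above when
 * the first line of an otherwise unused bank is requested is balanced by
 * the pm_runtime_put() in omap_gpio_free() below, once the last line of
 * the bank has been released.
 */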

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	omap_disable_gpio_module(bank, offset);
	omap_reset_gpio(bank, offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_put(bank->dev);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int bit;
	struct gpio_bank *bank;
	int unmasked = 0;
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	struct gpio_chip *chip = irq_get_handler_data(irq);

	chained_irq_enter(irqchip, desc);

	bank = container_of(chip, struct gpio_bank, chip);
	isr_reg = bank->base + bank->regs->irqstatus;
	pm_runtime_get_sync(bank->dev);

	if (WARN_ON(!isr_reg))
		goto exit;

	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr_saved = isr = readl_relaxed(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/* clear edge sensitive interrupts before the handler(s) are
		called so that we don't miss any interrupts that occur while
		they are executing */
		omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if only edge sensitive GPIO pin interrupts are configured,
		we can unmask the GPIO bank interrupt immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(irqchip, desc);
		}

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time.  If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
							    bit));
		}
	}
	/* if the bank has any level sensitive GPIO pin interrupts
	configured, we must unmask the bank interrupt only after the
	handler(s) have run, in order to avoid a spurious bank
	interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(irqchip, desc);
	pm_runtime_put(bank->dev);
}

static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (!BANK_USED(bank))
		pm_runtime_get_sync(bank->dev);

	spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_init_irq(bank, offset);
	spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
}

static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_disable_gpio_module(bank, offset);
	omap_reset_gpio(bank, offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last IRQ to be freed in the bank,
	 * disable the bank module.
	 */
	if (!BANK_USED(bank))
		pm_runtime_put(bank->dev);
}

static void omap_gpio_ack_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;

	omap_clear_gpio_irqstatus(bank, offset);
}

static void omap_gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_irqenable(bank, offset, 0);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		omap_set_gpio_triggering(bank, offset, trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & BIT(offset)) {
		omap_set_gpio_irqenable(bank, offset, 0);
		omap_clear_gpio_irqstatus(bank, offset);
	}

	omap_set_gpio_irqenable(bank, offset, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(bank->context.wake_en, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;
	void __iomem *reg;
	int dir;

	bank = container_of(chip, struct gpio_bank, chip);
	reg = bank->base + bank->regs->direction;
	spin_lock_irqsave(&bank->lock, flags);
	dir = !!(readl_relaxed(reg) & BIT(offset));
	spin_unlock_irqrestore(&bank->lock, flags);
	return dir;
}

static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;

	bank = container_of(chip, struct gpio_bank, chip);

	if (omap_gpio_is_input(bank, offset))
		return omap_get_gpio_datain(bank, offset);
	else
		return omap_get_gpio_dataout(bank, offset);
}

static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);

	spin_lock_irqsave(&bank->lock, flags);
	omap2_set_gpio_debounce(bank, offset, debounce);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	omap_gpio_rmw(base, bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base, bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);

	bank->dbck = clk_get(bank->dev, "dbclk");
	if (IS_ERR(bank->dbck))
		dev_err(bank->dev, "Could not get gpio dbck\n");
}

static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
	static int gpio;
	int irq_base = 0;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_debounce = omap_gpio_debounce;
	bank->chip.set = omap_gpio_set;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	ret = gpiochip_add(&bank->chip);
	if (ret) {
		dev_err(bank->dev, "Could not register gpio chip %d\n", ret);
		return ret;
	}

#ifdef CONFIG_ARCH_OMAP1
	/*
	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
	 * irq_alloc_descs() since a base IRQ offset will no longer be needed.
	 */
	irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
	if (irq_base < 0) {
		dev_err(bank->dev, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}
#endif

	/* MPUIO is a bit different, reading IRQ status clears it */
	if (bank->is_mpuio) {
		irqc->irq_ack = dummy_irq_chip.irq_ack;
		irqc->irq_mask = irq_gc_mask_set_bit;
		irqc->irq_unmask = irq_gc_mask_clr_bit;
		if (!bank->regs->wkup_en)
			irqc->irq_set_wake = NULL;
	}

	ret = gpiochip_irqchip_add(&bank->chip, irqc,
				   irq_base, omap_gpio_irq_handler,
				   IRQ_TYPE_NONE);

	if (ret) {
		dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret);
		gpiochip_remove(&bank->chip);
		return -ENODEV;
	}

	gpiochip_set_chained_irqchip(&bank->chip, irqc,
				     bank->irq, omap_gpio_irq_handler);

	return 0;
}

static const struct of_device_id omap_gpio_match[];

static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	const struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
	struct irq_chip *irqc;
	int ret;

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	pdata = match ? match->data : dev_get_platdata(dev);
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(dev, "Memory alloc failed\n");
		return -ENOMEM;
	}

	irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
	if (!irqc)
		return -ENOMEM;

	irqc->irq_startup = omap_gpio_irq_startup;
	irqc->irq_shutdown = omap_gpio_irq_shutdown;
	irqc->irq_ack = omap_gpio_ack_irq;
	irqc->irq_mask = omap_gpio_mask_irq;
	irqc->irq_unmask = omap_gpio_unmask_irq;
	irqc->irq_set_type = omap_gpio_irq_type;
	irqc->irq_set_wake = omap_gpio_wake_enable;
	irqc->name = dev_name(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -ENODEV;
	}

	bank->irq = res->start;
	bank->dev = dev;
	bank->chip.dev = dev;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif
	if (node) {
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = omap_set_gpio_dataout_reg;
	else
		bank->set_dataout = omap_set_gpio_dataout_mask;

	spin_lock_init(&bank->lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bank->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(bank->base))
		return PTR_ERR(bank->base);

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(bank->dev);
	pm_runtime_irq_safe(bank->dev);
	pm_runtime_get_sync(bank->dev);

	if (bank->is_mpuio)
		omap_mpuio_init(bank);

	omap_gpio_mod_init(bank);

	ret = omap_gpio_chip_init(bank, irqc);
	if (ret)
		return ret;

	omap_gpio_show_rev(bank);

	pm_runtime_put(bank->dev);

	list_add_tail(&bank->node, &omap_gpio_list);

	return 0;
}

#ifdef CONFIG_ARCH_OMAP2PLUS

#if defined(CONFIG_PM)
static void omap_gpio_restore_context(struct gpio_bank *bank);

static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;
	u32 wake_low, wake_hi;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * Only edges can generate a wakeup event to the PRCM.
	 *
	 * Therefore, ensure any wake-up capable GPIOs have
	 * edge-detection enabled before going idle to ensure a wakeup
	 * to the PRCM is generated on a GPIO transition. (c.f. 34xx
	 * NDA TRM 25.5.3.1)
	 *
	 * The normal values will be restored upon ->runtime_resume()
	 * by writing back the values saved in bank->context.
	 */
	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
	if (wake_low)
		writel_relaxed(wake_low | bank->context.fallingdetect,
			     bank->base + bank->regs->fallingdetect);
	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
	if (wake_hi)
		writel_relaxed(wake_hi | bank->context.risingdetect,
			     bank->base + bank->regs->risingdetect);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	bank->saved_datain = readl_relaxed(bank->base +
						bank->regs->datain);
	l1 = bank->context.fallingdetect;
	l2 = bank->context.risingdetect;

	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
	writel_relaxed(l2, bank->base + bank->regs->risingdetect);

	bank->workaround_enabled = true;

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	omap_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_init_context(struct gpio_bank *p);

static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);
	}

	omap_gpio_dbck_enable(bank);

	/*
	 * In ->runtime_suspend(), level-triggered, wakeup-enabled
	 * GPIOs were set to edge trigger also in order to be able to
	 * generate a PRCM wakeup.  Here we restore the
	 * pre-runtime_suspend() values for edge triggering.
	 */
	writel_relaxed(bank->context.fallingdetect,
		     bank->base + bank->regs->fallingdetect);
	writel_relaxed(bank->context.risingdetect,
		     bank->base + bank->regs->risingdetect);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(bank->dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				spin_unlock_irqrestore(&bank->lock, flags);
				return 0;
			}
		}
	}

	if (!bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
					 ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
						bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
						bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
						bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
						bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM */

void omap2_gpio_prepare_for_idle(int pwr_mode)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!BANK_USED(bank) || !bank->loses_context)
			continue;

		bank->power_mode = pwr_mode;

		pm_runtime_put_sync_suspend(bank->dev);
	}
}

void omap2_gpio_resume_after_idle(void)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!BANK_USED(bank) || !bank->loses_context)
			continue;

		pm_runtime_get_sync(bank->dev);
	}
}
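
/*
 * A sketch of the expected calling sequence from the OMAP2+ platform PM
 * code around a PER idle transition (the surrounding code is assumed, not
 * defined here):
 *
 *	omap2_gpio_prepare_for_idle(OFF_MODE);	(or 0 if PER stays on)
 *	... enter the low-power state ...
 *	omap2_gpio_resume_after_idle();
 *
 * prepare_for_idle() drops the runtime PM reference so that
 * omap_gpio_runtime_suspend() runs, and resume_after_idle() takes it back,
 * which triggers omap_gpio_runtime_resume() and the context restore.
 */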

#if defined(CONFIG_PM)
static void omap_gpio_init_context(struct gpio_bank *p)
{
	struct omap_gpio_reg_offs *regs = p->regs;
	void __iomem *base = p->base;

	p->context.ctrl		= readl_relaxed(base + regs->ctrl);
	p->context.oe		= readl_relaxed(base + regs->direction);
	p->context.wake_en	= readl_relaxed(base + regs->wkup_en);
	p->context.leveldetect0	= readl_relaxed(base + regs->leveldetect0);
	p->context.leveldetect1	= readl_relaxed(base + regs->leveldetect1);
	p->context.risingdetect	= readl_relaxed(base + regs->risingdetect);
	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
	p->context.irqenable1	= readl_relaxed(base + regs->irqenable);
	p->context.irqenable2	= readl_relaxed(base + regs->irqenable2);

	if (regs->set_dataout && p->regs->clr_dataout)
		p->context.dataout = readl_relaxed(base + regs->set_dataout);
	else
		p->context.dataout = readl_relaxed(base + regs->dataout);

	p->context_valid = true;
}

static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	writel_relaxed(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
	writel_relaxed(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		writel_relaxed(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		writel_relaxed(bank->context.dataout,
				bank->base + bank->regs->dataout);
	writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);

	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, bank->base +
					bank->regs->debounce);
		writel_relaxed(bank->context.debounce_en,
					bank->base + bank->regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	writel_relaxed(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
#endif /* CONFIG_PM */
#else
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
static inline void omap_gpio_init_context(struct gpio_bank *p) {}
#endif

static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};

#if defined(CONFIG_OF)
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};

static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
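
/*
 * A minimal sketch of a matching device tree node (the label, address and
 * interrupt specifier are illustrative, loosely following the OMAP4 case):
 *
 *	gpio1: gpio@4a310000 {
 *		compatible = "ti,omap4-gpio";
 *		reg = <0x4a310000 0x200>;
 *		interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		ti,gpio-always-on;	<- optional, bank never loses context
 *	};
 */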
#endif

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};

/*
 * The gpio driver registration needs to happen before the
 * machine_init functions access the gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);