/*
 * Marvell MMC/SD/SDIO driver
 *
 * Authors: Maen Suleiman, Nicolas Pitre
 * Copyright (C) 2008-2009 Marvell Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

#include <asm/sizes.h>
#include <asm/unaligned.h>
#include <linux/platform_data/mmc-mvsdio.h>

#include "mvsdio.h"

#define DRIVER_NAME	"mvsdio"

static int maxfreq;
static int nodma;

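/* Per-controller driver state, embedded in struct mmc_host and reached via mmc_priv(). */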
struct mvsd_host {
	void __iomem *base;
	struct mmc_request *mrq;
	spinlock_t lock;
	unsigned int xfer_mode;
	unsigned int intr_en;
	unsigned int ctrl;
	unsigned int pio_size;
	void *pio_ptr;
	unsigned int sg_frags;
	unsigned int ns_per_clk;
	unsigned int clock;
	unsigned int base_clock;
	struct timer_list timer;
	struct mmc_host *mmc;
	struct device *dev;
	struct clk *clk;
};

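/*
 * Register accessors.  Both macros rely on a local "iobase" variable
 * (the host's MMIO base) being in scope at every call site.
 */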
#define mvsd_write(offs, val)	writel(val, iobase + (offs))
#define mvsd_read(offs)		readl(iobase + (offs))

static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
{
	void __iomem *iobase = host->base;
	unsigned int tmout;
	int tmout_index;

	/*
	 * Hardware weirdness.  With some "unusual" data block sizes
	 * (such as with the SWITCH command), the FIFO_EMPTY bit of the
	 * HW_STATE register sometimes stays clear for a while after the
	 * XFER_DONE interrupt has been raised.  If another data transfer
	 * is started before the bit eventually settles (which it does by
	 * itself), the new transfer simply fails with a timeout.
	 */
	if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
		unsigned long t = jiffies + HZ;
		unsigned int hw_state,  count = 0;
		do {
			hw_state = mvsd_read(MVSD_HW_STATE);
			if (time_after(jiffies, t)) {
				dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
				break;
			}
			count++;
		} while (!(hw_state & (1 << 13)));
		dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
				   "(hw=0x%04x, count=%d, jiffies=%ld)\n",
				   hw_state, count, jiffies - (t - HZ));
	}

	/* If timeout=0 then maximum timeout index is used. */
	tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
	tmout += data->timeout_clks;
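	/*
	 * fls(tmout - 1) is ceil(log2(tmout)): the controller's timeout
	 * field appears to select a power-of-two number of clocks, so pick
	 * the smallest index whose range still covers the requested count.
	 */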
	tmout_index = fls(tmout - 1) - 12;
	if (tmout_index < 0)
		tmout_index = 0;
	if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX)
		tmout_index = MVSD_HOST_CTRL_TMOUT_MAX;

	dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n",
		(data->flags & MMC_DATA_READ) ? "read" : "write",
		(u32)sg_virt(data->sg), data->blocks, data->blksz,
		tmout, tmout_index);

	host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK;
	host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index);
	mvsd_write(MVSD_HOST_CTRL, host->ctrl);
	mvsd_write(MVSD_BLK_COUNT, data->blocks);
	mvsd_write(MVSD_BLK_SIZE, data->blksz);

	if (nodma || (data->blksz | data->sg->offset) & 3 ||
	    ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) {
		/*
		 * We cannot do DMA on a buffer whose offset or size
		 * is not aligned on a 4-byte boundary.
		 *
		 * It also appears that host-to-card DMA can corrupt
		 * data when the buffer is not aligned on a 64-byte
		 * boundary.
		 */
		host->pio_size = data->blocks * data->blksz;
		host->pio_ptr = sg_virt(data->sg);
		if (!nodma)
			dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
				host->pio_ptr, host->pio_size);
		return 1;
	} else {
		dma_addr_t phys_addr;
		int dma_dir = (data->flags & MMC_DATA_READ) ?
			DMA_FROM_DEVICE : DMA_TO_DEVICE;
		host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg,
					    data->sg_len, dma_dir);
		phys_addr = sg_dma_address(data->sg);
		mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
		mvsd_write(MVSD_SYS_ADDR_HI,  (u32)phys_addr >> 16);
		return 0;
	}
}

static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	struct mmc_command *cmd = mrq->cmd;
	u32 cmdreg = 0, xfer = 0, intr = 0;
	unsigned long flags;

	BUG_ON(host->mrq != NULL);
	host->mrq = mrq;

	dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n",
		cmd->opcode, mvsd_read(MVSD_HW_STATE));

	cmdreg = MVSD_CMD_INDEX(cmd->opcode);

	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= MVSD_CMD_RSP_48BUSY;
	else if (cmd->flags & MMC_RSP_136)
		cmdreg |= MVSD_CMD_RSP_136;
	else if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= MVSD_CMD_RSP_48;
	else
		cmdreg |= MVSD_CMD_RSP_NONE;

	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= MVSD_CMD_CHECK_CMDCRC;

	if (cmd->flags & MMC_RSP_OPCODE)
		cmdreg |= MVSD_CMD_INDX_CHECK;

	if (cmd->flags & MMC_RSP_PRESENT) {
		cmdreg |= MVSD_UNEXPECTED_RESP;
		intr |= MVSD_NOR_UNEXP_RSP;
	}

	if (mrq->data) {
		struct mmc_data *data = mrq->data;
		int pio;

		cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16;
		xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN;
		if (data->flags & MMC_DATA_READ)
			xfer |= MVSD_XFER_MODE_TO_HOST;

		pio = mvsd_setup_data(host, data);
		if (pio) {
			xfer |= MVSD_XFER_MODE_PIO;
			/* PIO section of mvsd_irq has comments on those bits */
			if (data->flags & MMC_DATA_WRITE)
				intr |= MVSD_NOR_TX_AVAIL;
			else if (host->pio_size > 32)
				intr |= MVSD_NOR_RX_FIFO_8W;
			else
				intr |= MVSD_NOR_RX_READY;
		}

		if (data->stop) {
			struct mmc_command *stop = data->stop;
			u32 cmd12reg = 0;

			mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff);
			mvsd_write(MVSD_AUTOCMD12_ARG_HI,  stop->arg >> 16);

			if (stop->flags & MMC_RSP_BUSY)
				cmd12reg |= MVSD_AUTOCMD12_BUSY;
			if (stop->flags & MMC_RSP_OPCODE)
				cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK;
			cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode);
			mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg);

			xfer |= MVSD_XFER_MODE_AUTO_CMD12;
			intr |= MVSD_NOR_AUTOCMD12_DONE;
		} else {
			intr |= MVSD_NOR_XFER_DONE;
		}
	} else {
		intr |= MVSD_NOR_CMD_DONE;
	}

	mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff);
	mvsd_write(MVSD_ARG_HI,  cmd->arg >> 16);

	spin_lock_irqsave(&host->lock, flags);

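	/* Keep only the SDIO card-interrupt-check bit; the rest is per-request. */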
	host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
	host->xfer_mode |= xfer;
	mvsd_write(MVSD_XFER_MODE, host->xfer_mode);

	mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_CMD, cmdreg);

	host->intr_en &= MVSD_NOR_CARD_INT;
	host->intr_en |= intr | MVSD_NOR_ERROR;
	mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
	mvsd_write(MVSD_ERR_INTR_EN, 0xffff);

	mod_timer(&host->timer, jiffies + 5 * HZ);

	spin_unlock_irqrestore(&host->lock, flags);
}

static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd,
			   u32 err_status)
{
	void __iomem *iobase = host->base;

	if (cmd->flags & MMC_RSP_136) {
		unsigned int response[8], i;
		for (i = 0; i < 8; i++)
			response[i] = mvsd_read(MVSD_RSP(i));
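		/*
		 * The 136-bit response is read back as eight 16-bit chunks,
		 * most significant first; reassemble them into the four
		 * 32-bit resp[] words (the low CRC7/end bits are apparently
		 * not stored, hence the odd shift amounts).
		 */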
		cmd->resp[0] =		((response[0] & 0x03ff) << 22) |
					((response[1] & 0xffff) << 6) |
					((response[2] & 0xfc00) >> 10);
		cmd->resp[1] =		((response[2] & 0x03ff) << 22) |
					((response[3] & 0xffff) << 6) |
					((response[4] & 0xfc00) >> 10);
		cmd->resp[2] =		((response[4] & 0x03ff) << 22) |
					((response[5] & 0xffff) << 6) |
					((response[6] & 0xfc00) >> 10);
		cmd->resp[3] =		((response[6] & 0x03ff) << 22) |
					((response[7] & 0x3fff) << 8);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		unsigned int response[3], i;
		for (i = 0; i < 3; i++)
			response[i] = mvsd_read(MVSD_RSP(i));
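		/*
		 * 48-bit responses: 32 significant bits go into resp[0] and
		 * a few top bits spill into resp[1]; the "- 8" in the shift
		 * amounts seemingly accounts for low-order bits the
		 * controller does not store.
		 */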
		cmd->resp[0] =		((response[2] & 0x003f) << (8 - 8)) |
					((response[1] & 0xffff) << (14 - 8)) |
					((response[0] & 0x03ff) << (30 - 8));
		cmd->resp[1] =		((response[0] & 0xfc00) >> 10);
		cmd->resp[2] = 0;
		cmd->resp[3] = 0;
	}

	if (err_status & MVSD_ERR_CMD_TIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT |
				 MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) {
		cmd->error = -EILSEQ;
	}
	err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC |
			MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX |
			MVSD_ERR_CMD_STARTBIT);

	return err_status;
}

static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
			    u32 err_status)
{
	void __iomem *iobase = host->base;

	if (host->pio_ptr) {
		host->pio_ptr = NULL;
		host->pio_size = 0;
	} else {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
			     (data->flags & MMC_DATA_READ) ?
				DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

	if (err_status & MVSD_ERR_DATA_TIMEOUT)
		data->error = -ETIMEDOUT;
	else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT))
		data->error = -EILSEQ;
	else if (err_status & MVSD_ERR_XFER_SIZE)
		data->error = -EBADE;
	err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC |
			MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE);

	dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n",
		mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT));
	data->bytes_xfered =
		(data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz;
	/* We can't be sure about the last block when errors are detected */
	if (data->bytes_xfered && data->error)
		data->bytes_xfered -= data->blksz;

	/* Handle Auto cmd 12 response */
	if (data->stop) {
		unsigned int response[3], i;
		for (i = 0; i < 3; i++)
			response[i] = mvsd_read(MVSD_AUTO_RSP(i));
		data->stop->resp[0] =	((response[2] & 0x003f) << (8 - 8)) |
					((response[1] & 0xffff) << (14 - 8)) |
					((response[0] & 0x03ff) << (30 - 8));
		data->stop->resp[1] =	((response[0] & 0xfc00) >> 10);
		data->stop->resp[2] = 0;
		data->stop->resp[3] = 0;

		if (err_status & MVSD_ERR_AUTOCMD12) {
			u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS);
			dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12);
			if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE)
				data->stop->error = -ENOEXEC;
			else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT)
				data->stop->error = -ETIMEDOUT;
			else if (err_cmd12)
				data->stop->error = -EILSEQ;
			err_status &= ~MVSD_ERR_AUTOCMD12;
		}
	}

	return err_status;
}

static irqreturn_t mvsd_irq(int irq, void *dev)
{
	struct mvsd_host *host = dev;
	void __iomem *iobase = host->base;
	u32 intr_status, intr_done_mask;
	int irq_handled = 0;

	intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
	dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n",
		intr_status, mvsd_read(MVSD_NOR_INTR_EN),
		mvsd_read(MVSD_HW_STATE));
	/*
	 * It looks like the SDIO IP can issue one late, spurious irq
	 * even though all irqs should be disabled. To work around this,
	 * bail out early if we didn't expect any irqs to occur.
	 */
	if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) {
		dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n",
			mvsd_read(MVSD_NOR_INTR_STATUS),
			mvsd_read(MVSD_NOR_INTR_EN),
			mvsd_read(MVSD_ERR_INTR_STATUS),
			mvsd_read(MVSD_ERR_INTR_EN));
		return IRQ_HANDLED;
	}

	spin_lock(&host->lock);

	/* PIO handling, if needed. Messy business... */
	if (host->pio_size &&
	    (intr_status & host->intr_en &
	     (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {
		u16 *p = host->pio_ptr;
		int s = host->pio_size;
		while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
			readsw(iobase + MVSD_FIFO, p, 16);
			p += 16;
			s -= 32;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		/*
		 * Normally we'd use < 32 here, but the RX_FIFO_8W bit
		 * doesn't appear to assert when there are exactly 32 bytes
		 * (8 words) left to fetch in a transfer.
		 */
		if (s <= 32) {
			while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) {
				put_unaligned(mvsd_read(MVSD_FIFO), p++);
				put_unaligned(mvsd_read(MVSD_FIFO), p++);
				s -= 4;
				intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
			}
			if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) {
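				/*
				 * Fewer than 4 bytes remain: the FIFO still
				 * delivers a full 32-bit word, so read both
				 * halfwords and copy only the last s bytes
				 * into the buffer.
				 */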
				u16 val[2] = {0, 0};
				val[0] = mvsd_read(MVSD_FIFO);
				val[1] = mvsd_read(MVSD_FIFO);
				memcpy(p, ((void *)&val) + 4 - s, s);
				s = 0;
				intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
			}
			if (s == 0) {
				host->intr_en &=
				     ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W);
				mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
			} else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) {
				host->intr_en &= ~MVSD_NOR_RX_FIFO_8W;
				host->intr_en |= MVSD_NOR_RX_READY;
				mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
			}
		}
		dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
			s, intr_status, mvsd_read(MVSD_HW_STATE));
		host->pio_ptr = p;
		host->pio_size = s;
		irq_handled = 1;
	} else if (host->pio_size &&
		   (intr_status & host->intr_en &
		    (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
		u16 *p = host->pio_ptr;
		int s = host->pio_size;
		/*
		 * The TX_FIFO_8W bit is unreliable. When set, bursting
		 * 16 halfwords all at once into the FIFO drops data. In
		 * fact, TX_AVAIL goes off after only one word is pushed,
		 * even if TX_FIFO_8W remains set.
		 */
		while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) {
			mvsd_write(MVSD_FIFO, get_unaligned(p++));
			mvsd_write(MVSD_FIFO, get_unaligned(p++));
			s -= 4;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		if (s < 4) {
			if (s && (intr_status & MVSD_NOR_TX_AVAIL)) {
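				/*
				 * Pad the final partial word: copy the s
				 * remaining bytes into a zeroed 4-byte buffer
				 * and push it to the FIFO as two halfwords.
				 */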
				u16 val[2] = {0, 0};
				memcpy(((void *)&val) + 4 - s, p, s);
				mvsd_write(MVSD_FIFO, val[0]);
				mvsd_write(MVSD_FIFO, val[1]);
				s = 0;
				intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
			}
			if (s == 0) {
				host->intr_en &=
				     ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
				mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
			}
		}
		dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
			s, intr_status, mvsd_read(MVSD_HW_STATE));
		host->pio_ptr = p;
		host->pio_size = s;
		irq_handled = 1;
	}

	mvsd_write(MVSD_NOR_INTR_STATUS, intr_status);

	intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY |
			 MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W;
	if (intr_status & host->intr_en & ~intr_done_mask) {
		struct mmc_request *mrq = host->mrq;
		struct mmc_command *cmd = mrq->cmd;
		u32 err_status = 0;

		del_timer(&host->timer);
		host->mrq = NULL;

		host->intr_en &= MVSD_NOR_CARD_INT;
		mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		mvsd_write(MVSD_ERR_INTR_EN, 0);

		spin_unlock(&host->lock);

		if (intr_status & MVSD_NOR_UNEXP_RSP) {
			cmd->error = -EPROTO;
		} else if (intr_status & MVSD_NOR_ERROR) {
			err_status = mvsd_read(MVSD_ERR_INTR_STATUS);
			dev_dbg(host->dev, "err 0x%04x\n", err_status);
		}

		err_status = mvsd_finish_cmd(host, cmd, err_status);
		if (mrq->data)
			err_status = mvsd_finish_data(host, mrq->data, err_status);
		if (err_status) {
			dev_err(host->dev, "unhandled error status %#04x\n",
				err_status);
			cmd->error = -ENOMSG;
		}

		mmc_request_done(host->mmc, mrq);
		irq_handled = 1;
	} else
		spin_unlock(&host->lock);

	if (intr_status & MVSD_NOR_CARD_INT) {
		mmc_signal_sdio_irq(host->mmc);
		irq_handled = 1;
	}

	if (irq_handled)
		return IRQ_HANDLED;

	dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n",
		intr_status, host->intr_en, host->pio_size);
	return IRQ_NONE;
}

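/*
 * Software watchdog, armed in mvsd_request(): if the controller never
 * raises a completion interrupt, reset it and fail the pending request.
 */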
static void mvsd_timeout_timer(unsigned long data)
{
	struct mvsd_host *host = (struct mvsd_host *)data;
	void __iomem *iobase = host->base;
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;
	if (mrq) {
		dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
		dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n",
			mvsd_read(MVSD_HW_STATE),
			mvsd_read(MVSD_NOR_INTR_STATUS),
			mvsd_read(MVSD_NOR_INTR_EN));

		host->mrq = NULL;

		mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);

		host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
		mvsd_write(MVSD_XFER_MODE, host->xfer_mode);

		host->intr_en &= MVSD_NOR_CARD_INT;
		mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		mvsd_write(MVSD_ERR_INTR_EN, 0);
		mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);

		mrq->cmd->error = -ETIMEDOUT;
		mvsd_finish_cmd(host, mrq->cmd, 0);
		if (mrq->data) {
			mrq->data->error = -ETIMEDOUT;
			mvsd_finish_data(host, mrq->data, 0);
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq)
		mmc_request_done(host->mmc, mrq);
}

static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable) {
		host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN;
		host->intr_en |= MVSD_NOR_CARD_INT;
	} else {
		host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN;
		host->intr_en &= ~MVSD_NOR_CARD_INT;
	}
	mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
	mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void mvsd_power_up(struct mvsd_host *host)
{
	void __iomem *iobase = host->base;
	dev_dbg(host->dev, "power up\n");
	mvsd_write(MVSD_NOR_INTR_EN, 0);
	mvsd_write(MVSD_ERR_INTR_EN, 0);
	mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
	mvsd_write(MVSD_XFER_MODE, 0);
	mvsd_write(MVSD_NOR_STATUS_EN, 0xffff);
	mvsd_write(MVSD_ERR_STATUS_EN, 0xffff);
	mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
}

static void mvsd_power_down(struct mvsd_host *host)
{
	void __iomem *iobase = host->base;
	dev_dbg(host->dev, "power down\n");
	mvsd_write(MVSD_NOR_INTR_EN, 0);
	mvsd_write(MVSD_ERR_INTR_EN, 0);
	mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
	mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
	mvsd_write(MVSD_NOR_STATUS_EN, 0);
	mvsd_write(MVSD_ERR_STATUS_EN, 0);
	mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
}

static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	u32 ctrl_reg = 0;

	if (ios->power_mode == MMC_POWER_UP)
		mvsd_power_up(host);

	if (ios->clock == 0) {
		mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
		mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX);
		host->clock = 0;
		dev_dbg(host->dev, "clock off\n");
	} else if (ios->clock != host->clock) {
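		/*
		 * The output clock is base_clock / (m + 1); DIV_ROUND_UP
		 * picks the smallest divisor that does not exceed the
		 * requested rate.
		 */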
		u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1;
		if (m > MVSD_BASE_DIV_MAX)
			m = MVSD_BASE_DIV_MAX;
		mvsd_write(MVSD_CLK_DIV, m);
		host->clock = ios->clock;
		host->ns_per_clk = 1000000000 / (host->base_clock / (m+1));
		dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n",
			ios->clock, host->base_clock / (m+1), m);
	}

	/* default transfer mode */
	ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN;
	ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST;

	/* default to maximum timeout */
	ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK;
	ctrl_reg |= MVSD_HOST_CTRL_TMOUT_EN;

	if (ios->bus_mode == MMC_BUSMODE_PUSHPULL)
		ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN;

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;

	/*
	 * The HI_SPEED_EN bit causes trouble with many (but not all)
	 * high speed SD, SDHC and SDIO cards, while leaving it clear
	 * makes all cards work.  So let's just ignore that bit for now
	 * and revisit this issue if problems caused by not enabling it
	 * are ever reported.
	 */
#if 0
	if (ios->timing == MMC_TIMING_MMC_HS ||
	    ios->timing == MMC_TIMING_SD_HS)
		ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
#endif

	host->ctrl = ctrl_reg;
	mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
	dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg,
		(ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ?
			"push-pull" : "open-drain",
		(ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ?
			"4bit-width" : "1bit-width",
		(ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ?
			"high-speed" : "");

	if (ios->power_mode == MMC_POWER_OFF)
		mvsd_power_down(host);
}

static const struct mmc_host_ops mvsd_ops = {
	.request		= mvsd_request,
	.get_ro			= mmc_gpio_get_ro,
	.set_ios		= mvsd_set_ios,
	.enable_sdio_irq	= mvsd_enable_sdio_irq,
};

static void
mv_conf_mbus_windows(struct mvsd_host *host,
		     const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = host->base;
	int i;

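	/* Disable all four windows first, then open one window per DRAM chip select. */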
	for (i = 0; i < 4; i++) {
		writel(0, iobase + MVSD_WINDOW_CTRL(i));
		writel(0, iobase + MVSD_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + MVSD_WINDOW_CTRL(i));
		writel(cs->base, iobase + MVSD_WINDOW_BASE(i));
	}
}

static int mvsd_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mmc_host *mmc = NULL;
	struct mvsd_host *host = NULL;
	const struct mbus_dram_target_info *dram;
	struct resource *r;
	int ret, irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;

	/*
	 * Some non-DT platforms do not pass a clock, and the clock
	 * frequency is passed through platform_data. On DT platforms,
	 * a clock must always be passed, even if there is no gatable
	 * clock associated with the SDIO interface (it can simply be a
	 * fixed-rate clock).
	 */
	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(host->clk))
		clk_prepare_enable(host->clk);

	mmc->ops = &mvsd_ops;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX);
	mmc->f_max = MVSD_CLOCKRATE_MAX;

	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 65535;

	mmc->max_segs = 1;
	mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	if (np) {
		if (IS_ERR(host->clk)) {
			dev_err(&pdev->dev, "DT platforms must have a clock associated\n");
			ret = -EINVAL;
			goto out;
		}

		host->base_clock = clk_get_rate(host->clk) / 2;
		ret = mmc_of_parse(mmc);
		if (ret < 0)
			goto out;
	} else {
		const struct mvsdio_platform_data *mvsd_data;

		mvsd_data = pdev->dev.platform_data;
		if (!mvsd_data) {
			ret = -ENXIO;
			goto out;
		}
		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ |
			    MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
		host->base_clock = mvsd_data->clock / 2;
		/* GPIO 0 regarded as invalid for backward compatibility */
		if (mvsd_data->gpio_card_detect &&
		    gpio_is_valid(mvsd_data->gpio_card_detect)) {
			ret = mmc_gpio_request_cd(mmc,
						  mvsd_data->gpio_card_detect,
						  0);
			if (ret)
				goto out;
		} else {
			mmc->caps |= MMC_CAP_NEEDS_POLL;
		}

		if (mvsd_data->gpio_write_protect &&
		    gpio_is_valid(mvsd_data->gpio_write_protect))
			mmc_gpio_request_ro(mmc, mvsd_data->gpio_write_protect);
	}

	if (maxfreq)
		mmc->f_max = maxfreq;

	spin_lock_init(&host->lock);

	host->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto out;
	}

	/* (Re-)program MBUS remapping windows if we are asked to. */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_conf_mbus_windows(host, dram);

	mvsd_power_down(host);

	ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
	if (ret) {
		dev_err(&pdev->dev, "cannot assign irq %d\n", irq);
		goto out;
	}

	setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
	platform_set_drvdata(pdev, mmc);
	ret = mmc_add_host(mmc);
	if (ret)
		goto out;

	if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
		dev_dbg(&pdev->dev, "using GPIO for card detection\n");
	else
		dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n");

	return 0;

out:
	if (mmc) {
		if (!IS_ERR(host->clk))
			clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return ret;
}

static int mvsd_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mvsd_host *host = mmc_priv(mmc);

	mmc_remove_host(mmc);
	del_timer_sync(&host->timer);
	mvsd_power_down(host);

	if (!IS_ERR(host->clk))
		clk_disable_unprepare(host->clk);
	mmc_free_host(mmc);

	return 0;
}

static const struct of_device_id mvsdio_dt_ids[] = {
	{ .compatible = "marvell,orion-sdio" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);

static struct platform_driver mvsd_driver = {
	.probe		= mvsd_probe,
	.remove		= mvsd_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.of_match_table = mvsdio_dt_ids,
	},
};

module_platform_driver(mvsd_driver);

/* maximum card clock frequency (default 50MHz) */
module_param(maxfreq, int, 0);

/* force PIO transfers all the time */
module_param(nodma, int, 0);

MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mvsdio");