/*
 * SPI bus driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/reset.h>

#define DRIVER_NAME "sirfsoc_spi"

#define SIRFSOC_SPI_CTRL		0x0000
#define SIRFSOC_SPI_CMD			0x0004
#define SIRFSOC_SPI_TX_RX_EN		0x0008
#define SIRFSOC_SPI_INT_EN		0x000C
#define SIRFSOC_SPI_INT_STATUS		0x0010
#define SIRFSOC_SPI_TX_DMA_IO_CTRL	0x0100
#define SIRFSOC_SPI_TX_DMA_IO_LEN	0x0104
#define SIRFSOC_SPI_TXFIFO_CTRL		0x0108
#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK	0x010C
#define SIRFSOC_SPI_TXFIFO_OP		0x0110
#define SIRFSOC_SPI_TXFIFO_STATUS	0x0114
#define SIRFSOC_SPI_TXFIFO_DATA		0x0118
#define SIRFSOC_SPI_RX_DMA_IO_CTRL	0x0120
#define SIRFSOC_SPI_RX_DMA_IO_LEN	0x0124
#define SIRFSOC_SPI_RXFIFO_CTRL		0x0128
#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK	0x012C
#define SIRFSOC_SPI_RXFIFO_OP		0x0130
#define SIRFSOC_SPI_RXFIFO_STATUS	0x0134
#define SIRFSOC_SPI_RXFIFO_DATA		0x0138
#define SIRFSOC_SPI_DUMMY_DELAY_CTL	0x0144

/* SPI CTRL register defines */
#define SIRFSOC_SPI_SLV_MODE		BIT(16)
#define SIRFSOC_SPI_CMD_MODE		BIT(17)
#define SIRFSOC_SPI_CS_IO_OUT		BIT(18)
#define SIRFSOC_SPI_CS_IO_MODE		BIT(19)
#define SIRFSOC_SPI_CLK_IDLE_STAT	BIT(20)
#define SIRFSOC_SPI_CS_IDLE_STAT	BIT(21)
#define SIRFSOC_SPI_TRAN_MSB		BIT(22)
#define SIRFSOC_SPI_DRV_POS_EDGE	BIT(23)
#define SIRFSOC_SPI_CS_HOLD_TIME	BIT(24)
#define SIRFSOC_SPI_CLK_SAMPLE_MODE	BIT(25)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8	(0 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12	(1 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16	(2 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32	(3 << 26)
#define SIRFSOC_SPI_CMD_BYTE_NUM(x)	(((x) & 3) << 28)
#define SIRFSOC_SPI_ENA_AUTO_CLR	BIT(30)
#define SIRFSOC_SPI_MUL_DAT_MODE	BIT(31)

/* Interrupt Enable */
#define SIRFSOC_SPI_RX_DONE_INT_EN	BIT(0)
#define SIRFSOC_SPI_TX_DONE_INT_EN	BIT(1)
#define SIRFSOC_SPI_RX_OFLOW_INT_EN	BIT(2)
#define SIRFSOC_SPI_TX_UFLOW_INT_EN	BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA_INT_EN	BIT(4)
#define SIRFSOC_SPI_TX_IO_DMA_INT_EN	BIT(5)
#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN	BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN	BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_INT_EN	BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN	BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN	BIT(10)

#define SIRFSOC_SPI_INT_MASK_ALL	0x1FFF

/* Interrupt status */
#define SIRFSOC_SPI_RX_DONE		BIT(0)
#define SIRFSOC_SPI_TX_DONE		BIT(1)
#define SIRFSOC_SPI_RX_OFLOW		BIT(2)
#define SIRFSOC_SPI_TX_UFLOW		BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA		BIT(4)
#define SIRFSOC_SPI_RX_FIFO_FULL	BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY	BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_REACH	BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_REACH	BIT(9)
#define SIRFSOC_SPI_FRM_END		BIT(10)

/* TX RX enable */
#define SIRFSOC_SPI_RX_EN		BIT(0)
#define SIRFSOC_SPI_TX_EN		BIT(1)
#define SIRFSOC_SPI_CMD_TX_EN		BIT(2)

#define SIRFSOC_SPI_IO_MODE_SEL		BIT(0)
#define SIRFSOC_SPI_RX_DMA_FLUSH	BIT(2)

/* FIFO OPs */
#define SIRFSOC_SPI_FIFO_RESET		BIT(0)
#define SIRFSOC_SPI_FIFO_START		BIT(1)

/* FIFO CTRL */
#define SIRFSOC_SPI_FIFO_WIDTH_BYTE	(0 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_WORD	(1 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_DWORD	(2 << 0)

/* FIFO Status */
#define	SIRFSOC_SPI_FIFO_LEVEL_MASK	0xFF
#define SIRFSOC_SPI_FIFO_FULL		BIT(8)
#define SIRFSOC_SPI_FIFO_EMPTY		BIT(9)

/* 256 bytes rx/tx FIFO */
#define SIRFSOC_SPI_FIFO_SIZE		256
#define SIRFSOC_SPI_DAT_FRM_LEN_MAX	(64 * 1024)

#define SIRFSOC_SPI_FIFO_SC(x)		((x) & 0x3F)
#define SIRFSOC_SPI_FIFO_LC(x)		(((x) & 0x3F) << 10)
#define SIRFSOC_SPI_FIFO_HC(x)		(((x) & 0x3F) << 20)
#define SIRFSOC_SPI_FIFO_THD(x)		(((x) & 0xFF) << 2)

/*
 * Only use DMA when the rx/tx buffers and the transfer length are all
 * 4-byte aligned, due to a limitation of the DMA controller.
 */

#define ALIGNED(x) (!((u32)(x) & 0x3))
#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
	ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))

#define SIRFSOC_MAX_CMD_BYTES	4
#define SIRFSOC_SPI_DEFAULT_FRQ 1000000

struct sirfsoc_spi {
	struct spi_bitbang bitbang;
	struct completion rx_done;
	struct completion tx_done;

	void __iomem *base;
	u32 ctrl_freq;  /* SPI controller clock speed */
	struct clk *clk;

	/* rx & tx bufs from the spi_transfer */
	const void *tx;
	void *rx;

	/* place received word into rx buffer */
	void (*rx_word) (struct sirfsoc_spi *);
	/* get word from tx buffer for sending */
	void (*tx_word) (struct sirfsoc_spi *);

	/* number of words left to be transmitted/received */
	unsigned int left_tx_word;
	unsigned int left_rx_word;

	/* rx & tx DMA channels */
	struct dma_chan *rx_chan;
	struct dma_chan *tx_chan;
	dma_addr_t src_start;
	dma_addr_t dst_start;
	void *dummypage;
	int word_width; /* in bytes */

	/*
	 * use command mode when the tx length is no more than 4 bytes and
	 * there is no rx buffer
	 */
	bool	tx_by_cmd;
	bool	hw_cs;
};

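/*
 * PIO helpers: move one word between the transfer buffer and the RX/TX
 * FIFO data registers, for 8/16/32-bit word widths. A NULL rx pointer
 * discards the received word; a NULL tx pointer sends zero.
 */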
static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data;
	u8 *rx = sspi->rx;

	data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);

	if (rx) {
		*rx++ = (u8) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u8 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
	sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data;
	u16 *rx = sspi->rx;

	data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);

	if (rx) {
		*rx++ = (u16) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u16 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
	sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data;
	u32 *rx = sspi->rx;

	data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);

	if (rx) {
		*rx++ = (u32) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u32 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
	sspi->left_tx_word--;
}

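/*
 * Interrupt handler: completes tx_done/rx_done for command-mode frame end,
 * FIFO overflow/underflow errors and normal PIO completion, then masks and
 * clears all SPI interrupts.
 */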
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
	struct sirfsoc_spi *sspi = dev_id;
	u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);

	if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
		complete(&sspi->tx_done);
		writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
				sspi->base + SIRFSOC_SPI_INT_STATUS);
		return IRQ_HANDLED;
	}

	/* Error Conditions */
	if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
			spi_stat & SIRFSOC_SPI_TX_UFLOW) {
		complete(&sspi->tx_done);
		complete(&sspi->rx_done);
		writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
				sspi->base + SIRFSOC_SPI_INT_STATUS);
		return IRQ_HANDLED;
	}
	if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
		complete(&sspi->tx_done);
	while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
		SIRFSOC_SPI_RX_IO_DMA))
		cpu_relax();
	complete(&sspi->rx_done);
	writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);

	return IRQ_HANDLED;
}

static void spi_sirfsoc_dma_fini_callback(void *data)
{
	struct completion *dma_complete = data;

	complete(dma_complete);
}

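/*
 * Command-mode transfer: pack up to SIRFSOC_MAX_CMD_BYTES of tx data into
 * the CMD register, enable the frame-end interrupt and wait for completion.
 */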
static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;
	u32 cmd;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	memcpy(&cmd, sspi->tx, t->len);
	if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
		cmd = cpu_to_be32(cmd) >>
			((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
	if (sspi->word_width == 2 && t->len == 4 &&
			(!(spi->mode & SPI_LSB_FIRST)))
		cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
	writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
	writel(SIRFSOC_SPI_FRM_END_INT_EN,
		sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_CMD_TX_EN,
		sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "cmd transfer timeout\n");
		return;
	}
	sspi->left_rx_word -= t->len;
}

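/*
 * DMA transfer: map the rx/tx buffers, queue slave DMA descriptors on both
 * channels, then wait for the rx and tx completions, terminating the DMA
 * channels on timeout.
 */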
static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;
	int timeout = t->len * 10;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
	if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(sspi->left_tx_word - 1,
				sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(sspi->left_tx_word - 1,
				sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	} else {
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	}
	sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
	rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
		sspi->dst_start, t->len, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	rx_desc->callback = spi_sirfsoc_dma_fini_callback;
	rx_desc->callback_param = &sspi->rx_done;

	sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
	tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
		sspi->src_start, t->len, DMA_MEM_TO_DEV,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	tx_desc->callback = spi_sirfsoc_dma_fini_callback;
	tx_desc->callback_param = &sspi->tx_done;

	dmaengine_submit(tx_desc);
	dmaengine_submit(rx_desc);
	dma_async_issue_pending(sspi->tx_chan);
	dma_async_issue_pending(sspi->rx_chan);
	writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->rx_chan);
	} else
		sspi->left_rx_word = 0;
	/*
	 * We only wait for the tx-done event when transferring by DMA. For
	 * PIO, rx data is produced by writing tx data, so once rx is done,
	 * tx has already finished.
	 */
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->tx_chan);
	}
	dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
	dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
	/* TX, RX FIFO stop */
	writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
		writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
}

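/*
 * PIO transfer: program up to one FIFO worth of words per iteration, fill
 * the TX FIFO, wait for the tx/rx completions from the interrupt handler
 * and drain the RX FIFO, looping until the whole transfer is done.
 */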
static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;

	sspi = spi_master_get_devdata(spi->master);
	do {
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + SIRFSOC_SPI_TXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + SIRFSOC_SPI_TXFIFO_OP);
		writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
				- 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
				- 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
		while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
			& SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
			sspi->tx_word(sspi);
		writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
			SIRFSOC_SPI_TX_UFLOW_INT_EN |
			SIRFSOC_SPI_RX_OFLOW_INT_EN |
			SIRFSOC_SPI_RX_IO_DMA_INT_EN,
			sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + SIRFSOC_SPI_TX_RX_EN);
		if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
			!wait_for_completion_timeout(&sspi->rx_done, timeout)) {
			dev_err(&spi->dev, "transfer timeout\n");
			break;
		}
		while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
			& SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
			sspi->rx_word(sspi);
		writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	} while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}

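/*
 * bitbang txrx_bufs callback: set up the transfer buffers and word counts,
 * then dispatch to command-mode, DMA or PIO transfer. Returns the number
 * of bytes actually transferred.
 */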
static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;

	sspi = spi_master_get_devdata(spi->master);

	sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
	sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
	reinit_completion(&sspi->rx_done);
	reinit_completion(&sspi->tx_done);
	/*
	 * If the data is being sent through the command register (no rx
	 * buffer), just fill the command register and wait for completion.
	 */
	if (sspi->tx_by_cmd)
		spi_sirfsoc_cmd_transfer(spi, t);
	else if (IS_DMA_VALID(t))
		spi_sirfsoc_dma_transfer(spi, t);
	else
		spi_sirfsoc_pio_transfer(spi, t);

	return t->len - sspi->left_rx_word * sspi->word_width;
}

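/*
 * Assert or deassert the chip select, either via the controller's CS output
 * bit or via a GPIO.
 */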
static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
{
	struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);

	if (sspi->hw_cs) {
		u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);

		switch (value) {
		case BITBANG_CS_ACTIVE:
			if (spi->mode & SPI_CS_HIGH)
				regval |= SIRFSOC_SPI_CS_IO_OUT;
			else
				regval &= ~SIRFSOC_SPI_CS_IO_OUT;
			break;
		case BITBANG_CS_INACTIVE:
			if (spi->mode & SPI_CS_HIGH)
				regval &= ~SIRFSOC_SPI_CS_IO_OUT;
			else
				regval |= SIRFSOC_SPI_CS_IO_OUT;
			break;
		}
		writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
	} else {
		switch (value) {
		case BITBANG_CS_ACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 1 : 0);
			break;
		case BITBANG_CS_INACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
			break;
		}
	}
}

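/*
 * Program the clock divider, data format, FIFO levels and SPI mode bits for
 * the upcoming transfer, and select command, DMA or IO (PIO) mode.
 */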
static int
spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	u8 bits_per_word = 0;
	int hz = 0;
	u32 regval;
	u32 txfifo_ctrl, rxfifo_ctrl;
	u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;

	sspi = spi_master_get_devdata(spi->master);

	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
	hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;

	regval = (sspi->ctrl_freq / (2 * hz)) - 1;
	if (regval > 0xFFFF || regval < 0) {
		dev_err(&spi->dev, "Speed %d not supported\n", hz);
		return -EINVAL;
	}

	switch (bits_per_word) {
	case 8:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
		sspi->rx_word = spi_sirfsoc_rx_word_u8;
		sspi->tx_word = spi_sirfsoc_tx_word_u8;
		break;
	case 12:
	case 16:
		regval |= (bits_per_word == 12) ?
			SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
			SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
		sspi->rx_word = spi_sirfsoc_rx_word_u16;
		sspi->tx_word = spi_sirfsoc_tx_word_u16;
		break;
	case 32:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
		sspi->rx_word = spi_sirfsoc_rx_word_u32;
		sspi->tx_word = spi_sirfsoc_tx_word_u32;
		break;
	default:
		BUG();
	}

	sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
	txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
					   (sspi->word_width >> 1);
	rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
					   (sspi->word_width >> 1);

	if (!(spi->mode & SPI_CS_HIGH))
		regval |= SIRFSOC_SPI_CS_IDLE_STAT;
	if (!(spi->mode & SPI_LSB_FIRST))
		regval |= SIRFSOC_SPI_TRAN_MSB;
	if (spi->mode & SPI_CPOL)
		regval |= SIRFSOC_SPI_CLK_IDLE_STAT;

	/*
	 * Data should be driven at least 1/2 cycle before the fetch edge
	 * to make sure that data gets stable at the fetch edge.
	 */
	if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
	    (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
		regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
	else
		regval |= SIRFSOC_SPI_DRV_POS_EDGE;

	writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
			SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
			SIRFSOC_SPI_FIFO_HC(2),
		sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
	writel(SIRFSOC_SPI_FIFO_SC(2) |
			SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
			SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
		sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
	writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
	writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);

	if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
		regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
				SIRFSOC_SPI_CMD_MODE);
		sspi->tx_by_cmd = true;
	} else {
		regval &= ~SIRFSOC_SPI_CMD_MODE;
		sspi->tx_by_cmd = false;
	}
	/*
	 * Always use I/O chip-select mode: in hardware chip-select mode the
	 * CS signal cannot be controlled by the driver.
	 */
	regval |= SIRFSOC_SPI_CS_IO_MODE;
	writel(regval, sspi->base + SIRFSOC_SPI_CTRL);

	if (IS_DMA_VALID(t)) {
		/* Enable DMA mode for RX, TX */
		writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
		writel(SIRFSOC_SPI_RX_DMA_FLUSH,
			sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
	} else {
		/* Enable IO mode for RX, TX */
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
	}

	return 0;
}

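/*
 * spi_master setup callback: pick hardware or GPIO chip select, then apply
 * the default transfer settings.
 */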
static int spi_sirfsoc_setup(struct spi_device *spi)
{
	struct sirfsoc_spi *sspi;

	sspi = spi_master_get_devdata(spi->master);

	if (spi->cs_gpio == -ENOENT)
		sspi->hw_cs = true;
	else
		sspi->hw_cs = false;

	return spi_sirfsoc_setup_transfer(spi, NULL);
}

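/*
 * Probe: reset and map the controller, request the IRQ and DMA channels,
 * enable the clock, register the bitbang master and claim any chip-select
 * GPIOs described in the device tree.
 */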
static int spi_sirfsoc_probe(struct platform_device *pdev)
{
	struct sirfsoc_spi *sspi;
	struct spi_master *master;
	struct resource *mem_res;
	int irq;
	int i, ret;

	ret = device_reset(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "SPI reset failed!\n");
		return ret;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
	if (!master) {
		dev_err(&pdev->dev, "Unable to allocate SPI master\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, master);
	sspi = spi_master_get_devdata(master);

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sspi->base)) {
		ret = PTR_ERR(sspi->base);
		goto free_master;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto free_master;
	}
	ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
				DRIVER_NAME, sspi);
	if (ret)
		goto free_master;

	sspi->bitbang.master = master;
	sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
	sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
	sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
	sspi->bitbang.master->setup = spi_sirfsoc_setup;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
					SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
	master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
	sspi->bitbang.master->dev.of_node = pdev->dev.of_node;

	/* request DMA channels */
	sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!sspi->rx_chan) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = -ENODEV;
		goto free_master;
	}
	sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
	if (!sspi->tx_chan) {
		dev_err(&pdev->dev, "can not allocate tx dma channel\n");
		ret = -ENODEV;
		goto free_rx_dma;
	}

	sspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sspi->clk)) {
		ret = PTR_ERR(sspi->clk);
		goto free_tx_dma;
	}
	clk_prepare_enable(sspi->clk);
	sspi->ctrl_freq = clk_get_rate(sspi->clk);

	init_completion(&sspi->rx_done);
	init_completion(&sspi->tx_done);

	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	/* We are not using dummy delay between command and data */
	writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);

	sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
	if (!sspi->dummypage) {
		ret = -ENOMEM;
		goto free_clk;
	}

	ret = spi_bitbang_start(&sspi->bitbang);
	if (ret)
		goto free_dummypage;
	for (i = 0; master->cs_gpios && i < master->num_chipselect; i++) {
		if (master->cs_gpios[i] == -ENOENT)
			continue;
		if (!gpio_is_valid(master->cs_gpios[i])) {
			dev_err(&pdev->dev, "no valid gpio\n");
			ret = -EINVAL;
			goto free_dummypage;
		}
		ret = devm_gpio_request(&pdev->dev,
				master->cs_gpios[i], DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "failed to request gpio\n");
			goto free_dummypage;
		}
	}
	dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);

	return 0;
free_dummypage:
	kfree(sspi->dummypage);
free_clk:
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
free_tx_dma:
	dma_release_channel(sspi->tx_chan);
free_rx_dma:
	dma_release_channel(sspi->rx_chan);
free_master:
	spi_master_put(master);

	return ret;
}

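/*
 * Remove: unregister the bitbang master and release the clock, DMA channels
 * and the dummy page.
 */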
static int spi_sirfsoc_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct sirfsoc_spi *sspi;

	master = platform_get_drvdata(pdev);
	sspi = spi_master_get_devdata(master);

	spi_bitbang_stop(&sspi->bitbang);
	kfree(sspi->dummypage);
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
	dma_release_channel(sspi->rx_chan);
	dma_release_channel(sspi->tx_chan);
	spi_master_put(master);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
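/*
 * System sleep support: quiesce the master and gate the clock on suspend;
 * re-enable the clock and reset the FIFOs on resume.
 */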
static int spi_sirfsoc_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	clk_disable(sspi->clk);

	return 0;
}

static int spi_sirfsoc_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);

	clk_enable(sspi->clk);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);

	return spi_master_resume(master);
}
#endif

static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
			 spi_sirfsoc_resume);

static const struct of_device_id spi_sirfsoc_of_match[] = {
	{ .compatible = "sirf,prima2-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);

static struct platform_driver spi_sirfsoc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.pm = &spi_sirfsoc_pm_ops,
		.of_match_table = spi_sirfsoc_of_match,
	},
	.probe = spi_sirfsoc_probe,
	.remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_LICENSE("GPL v2");