/*
 * Copyright (C) 2009 Texas Instruments.
 * Copyright (C) 2010 EF Johnson Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

#include <linux/platform_data/spi-davinci.h>

#define SPI_NO_RESOURCE		((resource_size_t)-1)

#define CS_DEFAULT	0xFF

#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8

/* SPIPC0 */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

#define SPIINT_MASKALL		0x0101035F
#define SPIINT_MASKINT		0x0000015F
#define SPI_INTLVL_1		0x000001FF
#define SPI_INTLVL_0		0x00000000

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)
#define SPIDAT1_WDEL		BIT(10)

/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK     BIT(0)
#define SPIGCR1_POWERDOWN_MASK	BIT(8)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* SPIDELAY */
#define SPIDELAY_C2TDELAY_SHIFT 24
#define SPIDELAY_C2TDELAY_MASK  (0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT 16
#define SPIDELAY_T2CDELAY_MASK  (0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT 8
#define SPIDELAY_T2EDELAY_MASK  (0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT 0
#define SPIDELAY_C2EDELAY_MASK  0xFF

/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
#define SPIFLG_ERROR_MASK		(SPIFLG_DLEN_ERR_MASK \
				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
				| SPIFLG_OVRRUN_MASK)

#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPI Controller registers */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50

/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;
	struct clk		*clk;

	u8			version;
	resource_size_t		pbase;
	void __iomem		*base;
	u32			irq;
	struct completion	done;

	const void		*tx;
	void			*rx;
	int			rcount;
	int			wcount;

	struct dma_chan		*dma_rx;
	struct dma_chan		*dma_tx;
	int			dma_rx_chnum;
	int			dma_tx_chnum;

	struct davinci_spi_platform_data pdata;

	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

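	/* per-chipselect word size in bytes (1 or 2), set in davinci_spi_setup_transfer() */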
	u8			*bytes_per_word;
};

static struct davinci_spi_config davinci_spi_default_cfg;

static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u8 *rx = dspi->rx;
		*rx++ = (u8)data;
		dspi->rx = rx;
	}
}

static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u16 *rx = dspi->rx;
		*rx++ = (u16)data;
		dspi->rx = rx;
	}
}

static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
{
	u32 data = 0;

	if (dspi->tx) {
		const u8 *tx = dspi->tx;

		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
{
	u32 data = 0;

	if (dspi->tx) {
		const u16 *tx = dspi->tx;

		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

static inline void set_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v |= bits;
	iowrite32(v, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v &= ~bits;
	iowrite32(v, addr);
}

/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	struct davinci_spi_config *spicfg = spi->controller_data;
	u8 chip_sel = spi->chip_select;
	u16 spidat1 = CS_DEFAULT;
	bool gpio_chipsel = false;
	int gpio;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;

	if (spi->cs_gpio >= 0) {
		/* SPI core has parsed DT and updated spi->cs_gpio */
		gpio_chipsel = true;
		gpio = spi->cs_gpio;
	}

	/* program a delay between transfers if wdelay is non-zero */
	if (spicfg->wdelay)
		spidat1 |= SPIDAT1_WDEL;

	/*
	 * Board specific chip select logic decides the polarity and cs
	 * line for the controller
	 */
	if (gpio_chipsel) {
		if (value == BITBANG_CS_ACTIVE)
			gpio_set_value(gpio, spi->mode & SPI_CS_HIGH);
		else
			gpio_set_value(gpio, !(spi->mode & SPI_CS_HIGH));
	} else {
		if (value == BITBANG_CS_ACTIVE) {
			spidat1 |= SPIDAT1_CSHOLD_MASK;
			spidat1 &= ~(0x1 << chip_sel);
		}
	}

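	/*
	 * Only the upper half of SPIDAT1 is written here, so CSHOLD and the
	 * chip select bits are updated without writing TX data and therefore
	 * without starting a new transfer.
	 */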
	iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
}

/**
 * davinci_spi_get_prescale - Calculates the correct prescale value
 * @dspi: the controller data
 * @max_speed_hz: the maximum rate the SPI clock can run at
 *
 * This function calculates the prescale value that generates a clock rate
 * less than or equal to the specified maximum.
 *
 * Returns: calculated prescale value - 1 for easy programming into SPI
 * registers, or a negative error number if a valid prescale value cannot
 * be generated.
 */
static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
							u32 max_speed_hz)
{
	int ret;

	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);

	if (ret < 3 || ret > 256)
		return -EINVAL;

	return ret - 1;
}
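
/*
 * Worked example (the module clock rate is board specific; 150 MHz is only
 * illustrative): with a 150 MHz clock and max_speed_hz of 10 MHz,
 * DIV_ROUND_UP() yields 15, so 14 is returned and programmed into the
 * prescale field, giving an SPI clock of 150 MHz / 15 = 10 MHz.
 */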

/**
 * davinci_spi_setup_transfer - This function will determine transfer method
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function determines the data transfer method (8 or 16 bit transfer).
 * It will also set the SPI Clock Control register according to
 * SPI slave device freq.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{

	struct davinci_spi *dspi;
	struct davinci_spi_config *spicfg;
	u8 bits_per_word = 0;
	u32 hz = 0, spifmt = 0;
	int prescale;

	dspi = spi_master_get_devdata(spi->master);
	spicfg = spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* if bits_per_word is not set then use the default */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/*
	 * Assign function pointer to appropriate transfer method:
	 * 8bit or 16bit transfer
	 */
	if (bits_per_word <= 8) {
		dspi->get_rx = davinci_spi_rx_buf_u8;
		dspi->get_tx = davinci_spi_tx_buf_u8;
		dspi->bytes_per_word[spi->chip_select] = 1;
	} else {
		dspi->get_rx = davinci_spi_rx_buf_u16;
		dspi->get_tx = davinci_spi_tx_buf_u16;
		dspi->bytes_per_word[spi->chip_select] = 2;
	}

	if (!hz)
		hz = spi->max_speed_hz;

	/* Set up SPIFMTn register, unique to this chipselect. */

	prescale = davinci_spi_get_prescale(dspi, hz);
	if (prescale < 0)
		return prescale;

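	/*
	 * The low 5 bits of SPIFMTn hold the character length and the
	 * prescaler occupies bits 15:8 (field layout inferred from the mask
	 * and shift values above).
	 */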
	spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);

	if (spi->mode & SPI_LSB_FIRST)
		spifmt |= SPIFMT_SHIFTDIR_MASK;

	if (spi->mode & SPI_CPOL)
		spifmt |= SPIFMT_POLARITY_MASK;

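	/* Note the inverted sense: PHASE is set when SPI_CPHA is not requested. */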
	if (!(spi->mode & SPI_CPHA))
		spifmt |= SPIFMT_PHASE_MASK;

	/*
	 * Assume wdelay is used only on SPI peripherals that have this field
	 * in the SPIFMTn register and when it's configured from board file or DT.
	 */
	if (spicfg->wdelay)
		spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
				& SPIFMT_WDELAY_MASK);

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *	(distinct from SPI_3WIRE, with just one data wire;
	 *	or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */

	if (dspi->version == SPI_VERSION_2) {

		u32 delay = 0;

		if (spicfg->odd_parity)
			spifmt |= SPIFMT_ODD_PARITY_MASK;

		if (spicfg->parity_enable)
			spifmt |= SPIFMT_PARITYENA_MASK;

		if (spicfg->timer_disable) {
			spifmt |= SPIFMT_DISTIMER_MASK;
		} else {
			delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
						& SPIDELAY_C2TDELAY_MASK;
			delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
						& SPIDELAY_T2CDELAY_MASK;
		}

		if (spi->mode & SPI_READY) {
			spifmt |= SPIFMT_WAITENA_MASK;
			delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
						& SPIDELAY_T2EDELAY_MASK;
			delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
						& SPIDELAY_C2EDELAY_MASK;
		}

		iowrite32(delay, dspi->base + SPIDELAY);
	}

	iowrite32(spifmt, dspi->base + SPIFMT0);

	return 0;
}

static int davinci_spi_of_setup(struct spi_device *spi)
{
	struct davinci_spi_config *spicfg = spi->controller_data;
	struct device_node *np = spi->dev.of_node;
	u32 prop;

	if (spicfg == NULL && np) {
		spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL);
		if (!spicfg)
			return -ENOMEM;
		*spicfg = davinci_spi_default_cfg;
		/* override with dt configured values */
		if (!of_property_read_u32(np, "ti,spi-wdelay", &prop))
			spicfg->wdelay = (u8)prop;
		spi->controller_data = spicfg;
	}

	return 0;
}

/**
 * davinci_spi_setup - This function will set the default transfer method
 * @spi: spi device on which data transfer to be done
 *
 * This function sets the default transfer method.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
	int retval = 0;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	struct spi_master *master = spi->master;
	struct device_node *np = spi->dev.of_node;
	bool internal_cs = true;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;

	if (!(spi->mode & SPI_NO_CS)) {
		if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
			retval = gpio_direction_output(
				      spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
			internal_cs = false;
		} else if (pdata->chip_sel &&
			   spi->chip_select < pdata->num_chipselect &&
			   pdata->chip_sel[spi->chip_select] != SPI_INTERN_CS) {
			spi->cs_gpio = pdata->chip_sel[spi->chip_select];
			retval = gpio_direction_output(
				      spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
			internal_cs = false;
		}

		if (retval) {
			dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
				spi->cs_gpio, retval);
			return retval;
		}

		if (internal_cs)
			set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
	}

	if (spi->mode & SPI_READY)
		set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);

	if (spi->mode & SPI_LOOP)
		set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
	else
		clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);

	return davinci_spi_of_setup(spi);
}

static void davinci_spi_cleanup(struct spi_device *spi)
{
	struct davinci_spi_config *spicfg = spi->controller_data;

	spi->controller_data = NULL;
	if (spi->dev.of_node)
		kfree(spicfg);
}

static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
{
	struct device *sdev = dspi->bitbang.master->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_dbg(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_dbg(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_dbg(sdev, "SPI Bit error\n");
		return -EIO;
	}

	if (dspi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_dbg(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_dbg(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_dbg(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_dbg(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}

/**
 * davinci_spi_process_events - check for and handle any SPI controller events
 * @dspi: the controller data
 *
 * This function will check the SPIFLG register and handle any events that are
 * detected there.
 */
static int davinci_spi_process_events(struct davinci_spi *dspi)
{
	u32 buf, status, errors = 0, spidat1;

	buf = ioread32(dspi->base + SPIBUF);

	if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
		dspi->get_rx(buf & 0xFFFF, dspi);
		dspi->rcount--;
	}

	status = ioread32(dspi->base + SPIFLG);

	if (unlikely(status & SPIFLG_ERROR_MASK)) {
		errors = status & SPIFLG_ERROR_MASK;
		goto out;
	}

	if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
		spidat1 = ioread32(dspi->base + SPIDAT1);
		dspi->wcount--;
		spidat1 &= ~0xFFFF;
		spidat1 |= 0xFFFF & dspi->get_tx(dspi);
		iowrite32(spidat1, dspi->base + SPIDAT1);
	}

out:
	return errors;
}

static void davinci_spi_dma_rx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->rcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

static void davinci_spi_dma_tx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->wcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

/**
 * davinci_spi_bufs - function which will handle transfer data
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function will put the data to be transferred into the data register
 * of the SPI controller and then wait until the transfer completes
 * (signalled by the IRQ handler or DMA callbacks, or detected by polling).
 */
static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	int data_type, ret = -ENOMEM;
	u32 tx_data, spidat1;
	u32 errors = 0;
	struct davinci_spi_config *spicfg;
	struct davinci_spi_platform_data *pdata;
	unsigned uninitialized_var(rx_buf_count);
	void *dummy_buf = NULL;
	struct scatterlist sg_rx, sg_tx;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	/* convert len to words based on bits_per_word */
	data_type = dspi->bytes_per_word[spi->chip_select];

	dspi->tx = t->tx_buf;
	dspi->rx = t->rx_buf;
	dspi->wcount = t->len / data_type;
	dspi->rcount = dspi->wcount;

	spidat1 = ioread32(dspi->base + SPIDAT1);

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	reinit_completion(&dspi->done);

	if (spicfg->io_type == SPI_IO_TYPE_INTR)
		set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	if (spicfg->io_type != SPI_IO_TYPE_DMA) {
		/* start the transfer */
		dspi->wcount--;
		tx_data = dspi->get_tx(dspi);
		spidat1 &= 0xFFFF0000;
		spidat1 |= tx_data & 0xFFFF;
		iowrite32(spidat1, dspi->base + SPIDAT1);
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = (unsigned long)dspi->pbase + SPIBUF,
			.src_addr_width = data_type,
			.src_maxburst = 1,
		};
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
			.dst_addr_width = data_type,
			.dst_maxburst = 1,
		};
		struct dma_async_tx_descriptor *rxdesc;
		struct dma_async_tx_descriptor *txdesc;
		void *buf;

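		/*
		 * A zeroed dummy buffer stands in for a missing tx_buf or
		 * rx_buf so that both DMA channels always have a mapping of
		 * t->len bytes to work on.
		 */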
		dummy_buf = kzalloc(t->len, GFP_KERNEL);
		if (!dummy_buf)
			goto err_alloc_dummy_buf;

		dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
		dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);

		sg_init_table(&sg_rx, 1);
		if (!t->rx_buf)
			buf = dummy_buf;
		else
			buf = t->rx_buf;
		t->rx_dma = dma_map_single(&spi->dev, buf,
				t->len, DMA_FROM_DEVICE);
		if (!t->rx_dma) {
			ret = -EFAULT;
			goto err_rx_map;
		}
		sg_dma_address(&sg_rx) = t->rx_dma;
		sg_dma_len(&sg_rx) = t->len;

		sg_init_table(&sg_tx, 1);
		if (!t->tx_buf)
			buf = dummy_buf;
		else
			buf = (void *)t->tx_buf;
		t->tx_dma = dma_map_single(&spi->dev, buf,
				t->len, DMA_TO_DEVICE);
		if (!t->tx_dma) {
			ret = -EFAULT;
			goto err_tx_map;
		}
		sg_dma_address(&sg_tx) = t->tx_dma;
		sg_dma_len(&sg_tx) = t->len;

		rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
				&sg_rx, 1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto err_desc;

		txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
				&sg_tx, 1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto err_desc;

		rxdesc->callback = davinci_spi_dma_rx_callback;
		rxdesc->callback_param = (void *)dspi;
		txdesc->callback = davinci_spi_dma_tx_callback;
		txdesc->callback_param = (void *)dspi;

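		/*
		 * Some controller revisions need the upper half of SPIDAT1
		 * (CSHOLD and format selection) written out before the DMA
		 * request is enabled; pdata->cshold_bug flags those parts.
		 */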
		if (pdata->cshold_bug)
			iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);

		dmaengine_submit(rxdesc);
		dmaengine_submit(txdesc);

		dma_async_issue_pending(dspi->dma_rx);
		dma_async_issue_pending(dspi->dma_tx);

		set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
	}

	/* Wait for the transfer to complete */
	if (spicfg->io_type != SPI_IO_TYPE_POLL) {
		wait_for_completion_interruptible(&(dspi->done));
	} else {
		while (dspi->rcount > 0 || dspi->wcount > 0) {
			errors = davinci_spi_process_events(dspi);
			if (errors)
				break;
			cpu_relax();
		}
	}

	clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
	if (spicfg->io_type == SPI_IO_TYPE_DMA) {
		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);

		dma_unmap_single(&spi->dev, t->rx_dma,
				t->len, DMA_FROM_DEVICE);
		dma_unmap_single(&spi->dev, t->tx_dma,
				t->len, DMA_TO_DEVICE);
		kfree(dummy_buf);
	}

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	if (errors) {
		ret = davinci_spi_check_error(dspi, errors);
		WARN(!ret, "%s: error reported but no error found!\n",
							dev_name(&spi->dev));
		return ret;
	}

	if (dspi->rcount != 0 || dspi->wcount != 0) {
		dev_err(&spi->dev, "SPI data transfer error\n");
		return -EIO;
	}

	return t->len;

err_desc:
	dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
err_tx_map:
	dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
err_rx_map:
	kfree(dummy_buf);
err_alloc_dummy_buf:
	return ret;
}

/**
 * dummy_thread_fn - dummy thread function
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * This is to satisfy the request_threaded_irq() API so that the irq
 * handler is called in interrupt context.
 */
static irqreturn_t dummy_thread_fn(s32 irq, void *data)
{
	return IRQ_HANDLED;
}

/**
 * davinci_spi_irq - Interrupt handler for SPI Master Controller
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * The ISR determines whether the interrupt arrived for a read or a write
 * and takes the appropriate action. It checks the remaining transfer
 * length and, if it is non-zero, dispatches the next word. Once the
 * transfer length reaches zero it signals completion so that
 * davinci_spi_bufs() can go ahead.
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *data)
{
	struct davinci_spi *dspi = data;
	int status;

	status = davinci_spi_process_events(dspi);
	if (unlikely(status != 0))
		clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	if ((!dspi->rcount && !dspi->wcount) || status)
		complete(&dspi->done);

	return IRQ_HANDLED;
}

static int davinci_spi_request_dma(struct davinci_spi *dspi)
{
	dma_cap_mask_t mask;
	struct device *sdev = dspi->bitbang.master->dev.parent;
	int r;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
					   &dspi->dma_rx_chnum);
	if (!dspi->dma_rx) {
		dev_err(sdev, "request RX DMA channel failed\n");
		r = -ENODEV;
		goto rx_dma_failed;
	}

	dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
					   &dspi->dma_tx_chnum);
	if (!dspi->dma_tx) {
		dev_err(sdev, "request TX DMA channel failed\n");
		r = -ENODEV;
		goto tx_dma_failed;
	}

	return 0;

tx_dma_failed:
	dma_release_channel(dspi->dma_rx);
rx_dma_failed:
	return r;
}

#if defined(CONFIG_OF)
static const struct of_device_id davinci_spi_of_match[] = {
	{
		.compatible = "ti,dm6441-spi",
	},
	{
		.compatible = "ti,da830-spi",
		.data = (void *)SPI_VERSION_2,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, davinci_spi_of_match);

/**
 * spi_davinci_get_pdata - Get platform data from DTS binding
 * @pdev: ptr to platform device
 * @dspi: ptr to driver data
 *
 * Parses and populates pdata in dspi from device tree bindings.
 *
 * NOTE: Not all platform data params are supported currently.
 */
static int spi_davinci_get_pdata(struct platform_device *pdev,
			struct davinci_spi *dspi)
{
	struct device_node *node = pdev->dev.of_node;
	struct davinci_spi_platform_data *pdata;
	unsigned int num_cs, intr_line = 0;
	const struct of_device_id *match;

	pdata = &dspi->pdata;

	pdata->version = SPI_VERSION_1;
	match = of_match_device(davinci_spi_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	/* match data has the SPI version number for SPI_VERSION_2 */
	if (match->data == (void *)SPI_VERSION_2)
		pdata->version = SPI_VERSION_2;

	/*
	 * The default num_cs is 1 and all chip selects are internal to the
	 * chip, indicated by chip_sel being NULL or cs_gpios being NULL or
	 * set to -ENOENT. num-cs includes internal as well as GPIO chip
	 * selects.
	 */
	num_cs = 1;
	of_property_read_u32(node, "num-cs", &num_cs);
	pdata->num_chipselect = num_cs;
	of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
	pdata->intr_line = intr_line;
	return 0;
}
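
/*
 * Illustrative device tree node only; the node name, label and unit address
 * are made up for the example, the properties are the ones parsed above:
 *
 *	spi1: spi@1f0e000 {
 *		compatible = "ti,da830-spi";
 *		num-cs = <4>;
 *		ti,davinci-spi-intr-line = <1>;
 *	};
 *
 * Per-slave nodes may additionally carry "ti,spi-wdelay", which
 * davinci_spi_of_setup() reads into spicfg->wdelay.
 */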
#else
static int spi_davinci_get_pdata(struct platform_device *pdev,
			struct davinci_spi *dspi)
{
	return -ENODEV;
}
#endif

/**
 * davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * According to Linux Device Model this function will be invoked by Linux
 * with a platform_device struct which contains the device specific info.
 * This function will map the SPI controller's memory, register the IRQ,
 * reset the SPI controller and set its registers to default values.
 * It will invoke spi_bitbang_start to create a work queue so that client
 * drivers can register a transfer method to the work queue.
 */
static int davinci_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	struct resource *r;
	resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
	resource_size_t	dma_tx_chan = SPI_NO_RESOURCE;
	int ret = 0;
	u32 spipc0;

	master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
	if (master == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	platform_set_drvdata(pdev, master);

	dspi = spi_master_get_devdata(master);

	if (dev_get_platdata(&pdev->dev)) {
		pdata = dev_get_platdata(&pdev->dev);
		dspi->pdata = *pdata;
	} else {
		/* update dspi pdata with that from the DT */
		ret = spi_davinci_get_pdata(pdev, dspi);
		if (ret < 0)
			goto free_master;
	}

	/* pdata in dspi is now updated; point pdata at it */
	pdata = &dspi->pdata;

	dspi->bytes_per_word = devm_kzalloc(&pdev->dev,
					    sizeof(*dspi->bytes_per_word) *
					    pdata->num_chipselect, GFP_KERNEL);
	if (dspi->bytes_per_word == NULL) {
		ret = -ENOMEM;
		goto free_master;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	dspi->pbase = r->start;

	dspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(dspi->base)) {
		ret = PTR_ERR(dspi->base);
		goto free_master;
	}

	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq <= 0) {
		ret = -EINVAL;
		goto free_master;
	}

	ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
				dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
	if (ret)
		goto free_master;

	dspi->bitbang.master = master;

	dspi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dspi->clk)) {
		ret = -ENODEV;
		goto free_master;
	}
	clk_prepare_enable(dspi->clk);

	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->num_chipselect = pdata->num_chipselect;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
	master->setup = davinci_spi_setup;
	master->cleanup = davinci_spi_cleanup;

	dspi->bitbang.chipselect = davinci_spi_chipselect;
	dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;

	dspi->version = pdata->version;

	dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
	if (dspi->version == SPI_VERSION_2)
		dspi->bitbang.flags |= SPI_READY;

	if (pdev->dev.of_node) {
		int i;

		for (i = 0; i < pdata->num_chipselect; i++) {
			int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
							"cs-gpios", i);

			if (cs_gpio == -EPROBE_DEFER) {
				ret = cs_gpio;
				goto free_clk;
			}

			if (gpio_is_valid(cs_gpio)) {
				ret = devm_gpio_request(&pdev->dev, cs_gpio,
							dev_name(&pdev->dev));
				if (ret)
					goto free_clk;
			}
		}
	}

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r)
		dma_rx_chan = r->start;
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (r)
		dma_tx_chan = r->start;

	dspi->bitbang.txrx_bufs = davinci_spi_bufs;
	if (dma_rx_chan != SPI_NO_RESOURCE &&
	    dma_tx_chan != SPI_NO_RESOURCE) {
		dspi->dma_rx_chnum = dma_rx_chan;
		dspi->dma_tx_chnum = dma_tx_chan;

		ret = davinci_spi_request_dma(dspi);
		if (ret)
			goto free_clk;

		dev_info(&pdev->dev, "DMA: supported\n");
		dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, event queue: %d\n",
				&dma_rx_chan, &dma_tx_chan,
				pdata->dma_event_q);
	}

	dspi->get_rx = davinci_spi_rx_buf_u8;
	dspi->get_tx = davinci_spi_tx_buf_u8;

	init_completion(&dspi->done);

	/* Reset In/OUT SPI module */
	iowrite32(0, dspi->base + SPIGCR0);
	udelay(100);
	iowrite32(1, dspi->base + SPIGCR0);

	/* Set up SPIPC0.  CS and ENA init is done in davinci_spi_setup */
	spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
	iowrite32(spipc0, dspi->base + SPIPC0);

	if (pdata->intr_line)
		iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
	else
		iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);

	iowrite32(CS_DEFAULT, dspi->base + SPIDEF);

	/* master mode default */
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	ret = spi_bitbang_start(&dspi->bitbang);
	if (ret)
		goto free_dma;

	dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);

	return ret;

free_dma:
	dma_release_channel(dspi->dma_rx);
	dma_release_channel(dspi->dma_tx);
free_clk:
	clk_disable_unprepare(dspi->clk);
free_master:
	spi_master_put(master);
err:
	return ret;
}

/**
 * davinci_spi_remove - remove function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * This function will do the reverse action of the davinci_spi_probe function.
 * It will free the IRQ and the SPI controller's memory region.
 * It will also call spi_bitbang_stop to destroy the work queue which was
 * created by spi_bitbang_start.
 */
static int davinci_spi_remove(struct platform_device *pdev)
{
	struct davinci_spi *dspi;
	struct spi_master *master;

	master = platform_get_drvdata(pdev);
	dspi = spi_master_get_devdata(master);

	spi_bitbang_stop(&dspi->bitbang);

	clk_disable_unprepare(dspi->clk);
	spi_master_put(master);

	return 0;
}

static struct platform_driver davinci_spi_driver = {
	.driver = {
		.name = "spi_davinci",
		.of_match_table = of_match_ptr(davinci_spi_of_match),
	},
	.probe = davinci_spi_probe,
	.remove = davinci_spi_remove,
};
module_platform_driver(davinci_spi_driver);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");