/*
 * SPI driver for Nvidia's Tegra20/Tegra30 SLINK Controller.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>

#define SLINK_COMMAND			0x000
#define SLINK_BIT_LENGTH(x)		(((x) & 0x1f) << 0)
#define SLINK_WORD_SIZE(x)		(((x) & 0x1f) << 5)
#define SLINK_BOTH_EN			(1 << 10)
#define SLINK_CS_SW			(1 << 11)
#define SLINK_CS_VALUE			(1 << 12)
#define SLINK_CS_POLARITY		(1 << 13)
#define SLINK_IDLE_SDA_DRIVE_LOW	(0 << 16)
#define SLINK_IDLE_SDA_DRIVE_HIGH	(1 << 16)
#define SLINK_IDLE_SDA_PULL_LOW		(2 << 16)
#define SLINK_IDLE_SDA_PULL_HIGH	(3 << 16)
#define SLINK_IDLE_SDA_MASK		(3 << 16)
#define SLINK_CS_POLARITY1		(1 << 20)
#define SLINK_CK_SDA			(1 << 21)
#define SLINK_CS_POLARITY2		(1 << 22)
#define SLINK_CS_POLARITY3		(1 << 23)
#define SLINK_IDLE_SCLK_DRIVE_LOW	(0 << 24)
#define SLINK_IDLE_SCLK_DRIVE_HIGH	(1 << 24)
#define SLINK_IDLE_SCLK_PULL_LOW	(2 << 24)
#define SLINK_IDLE_SCLK_PULL_HIGH	(3 << 24)
#define SLINK_IDLE_SCLK_MASK		(3 << 24)
#define SLINK_M_S			(1 << 28)
#define SLINK_WAIT			(1 << 29)
#define SLINK_GO			(1 << 30)
#define SLINK_ENB			(1 << 31)

#define SLINK_MODES			(SLINK_IDLE_SCLK_MASK | SLINK_CK_SDA)

#define SLINK_COMMAND2			0x004
#define SLINK_LSBFE			(1 << 0)
#define SLINK_SSOE			(1 << 1)
#define SLINK_SPIE			(1 << 4)
#define SLINK_BIDIROE			(1 << 6)
#define SLINK_MODFEN			(1 << 7)
#define SLINK_INT_SIZE(x)		(((x) & 0x1f) << 8)
#define SLINK_CS_ACTIVE_BETWEEN		(1 << 17)
#define SLINK_SS_EN_CS(x)		(((x) & 0x3) << 18)
#define SLINK_SS_SETUP(x)		(((x) & 0x3) << 20)
#define SLINK_FIFO_REFILLS_0		(0 << 22)
#define SLINK_FIFO_REFILLS_1		(1 << 22)
#define SLINK_FIFO_REFILLS_2		(2 << 22)
#define SLINK_FIFO_REFILLS_3		(3 << 22)
#define SLINK_FIFO_REFILLS_MASK		(3 << 22)
#define SLINK_WAIT_PACK_INT(x)		(((x) & 0x7) << 26)
#define SLINK_SPC0			(1 << 29)
#define SLINK_TXEN			(1 << 30)
#define SLINK_RXEN			(1 << 31)

#define SLINK_STATUS			0x008
#define SLINK_COUNT(val)		(((val) >> 0) & 0x1f)
#define SLINK_WORD(val)			(((val) >> 5) & 0x1f)
#define SLINK_BLK_CNT(val)		(((val) >> 0) & 0xffff)
#define SLINK_MODF			(1 << 16)
#define SLINK_RX_UNF			(1 << 18)
#define SLINK_TX_OVF			(1 << 19)
#define SLINK_TX_FULL			(1 << 20)
#define SLINK_TX_EMPTY			(1 << 21)
#define SLINK_RX_FULL			(1 << 22)
#define SLINK_RX_EMPTY			(1 << 23)
#define SLINK_TX_UNF			(1 << 24)
#define SLINK_RX_OVF			(1 << 25)
#define SLINK_TX_FLUSH			(1 << 26)
#define SLINK_RX_FLUSH			(1 << 27)
#define SLINK_SCLK			(1 << 28)
#define SLINK_ERR			(1 << 29)
#define SLINK_RDY			(1 << 30)
#define SLINK_BSY			(1 << 31)
#define SLINK_FIFO_ERROR		(SLINK_TX_OVF | SLINK_RX_UNF |	\
					SLINK_TX_UNF | SLINK_RX_OVF)

#define SLINK_FIFO_EMPTY		(SLINK_TX_EMPTY | SLINK_RX_EMPTY)

#define SLINK_MAS_DATA			0x010
#define SLINK_SLAVE_DATA		0x014

#define SLINK_DMA_CTL			0x018
#define SLINK_DMA_BLOCK_SIZE(x)		(((x) & 0xffff) << 0)
#define SLINK_TX_TRIG_1			(0 << 16)
#define SLINK_TX_TRIG_4			(1 << 16)
#define SLINK_TX_TRIG_8			(2 << 16)
#define SLINK_TX_TRIG_16		(3 << 16)
#define SLINK_TX_TRIG_MASK		(3 << 16)
#define SLINK_RX_TRIG_1			(0 << 18)
#define SLINK_RX_TRIG_4			(1 << 18)
#define SLINK_RX_TRIG_8			(2 << 18)
#define SLINK_RX_TRIG_16		(3 << 18)
#define SLINK_RX_TRIG_MASK		(3 << 18)
#define SLINK_PACKED			(1 << 20)
#define SLINK_PACK_SIZE_4		(0 << 21)
#define SLINK_PACK_SIZE_8		(1 << 21)
#define SLINK_PACK_SIZE_16		(2 << 21)
#define SLINK_PACK_SIZE_32		(3 << 21)
#define SLINK_PACK_SIZE_MASK		(3 << 21)
#define SLINK_IE_TXC			(1 << 26)
#define SLINK_IE_RXC			(1 << 27)
#define SLINK_DMA_EN			(1 << 31)

#define SLINK_STATUS2			0x01c
#define SLINK_TX_FIFO_EMPTY_COUNT(val)	(((val) & 0x3f) >> 0)
#define SLINK_RX_FIFO_FULL_COUNT(val)	(((val) & 0x3f0000) >> 16)
#define SLINK_SS_HOLD_TIME(val)		(((val) & 0xF) << 6)

#define SLINK_TX_FIFO			0x100
#define SLINK_RX_FIFO			0x180

#define DATA_DIR_TX			(1 << 0)
#define DATA_DIR_RX			(1 << 1)

#define SLINK_DMA_TIMEOUT		(msecs_to_jiffies(1000))

#define DEFAULT_SPI_DMA_BUF_LEN		(16*1024)
#define TX_FIFO_EMPTY_COUNT_MAX		SLINK_TX_FIFO_EMPTY_COUNT(0x20)
#define RX_FIFO_FULL_COUNT_ZERO		SLINK_RX_FIFO_FULL_COUNT(0)

#define SLINK_STATUS2_RESET \
	(TX_FIFO_EMPTY_COUNT_MAX | RX_FIFO_FULL_COUNT_ZERO << 16)

#define MAX_CHIP_SELECT			4
#define SLINK_FIFO_DEPTH		32

struct tegra_slink_chip_data {
	bool cs_hold_time;
};

struct tegra_slink_data {
	struct device				*dev;
	struct spi_master			*master;
	const struct tegra_slink_chip_data	*chip_data;
	spinlock_t				lock;

	struct clk				*clk;
	struct reset_control			*rst;
	void __iomem				*base;
	phys_addr_t				phys;
	unsigned				irq;
	u32					cur_speed;

	struct spi_device			*cur_spi;
	unsigned				cur_pos;
	unsigned				cur_len;
	unsigned				words_per_32bit;
	unsigned				bytes_per_word;
	unsigned				curr_dma_words;
	unsigned				cur_direction;

	unsigned				cur_rx_pos;
	unsigned				cur_tx_pos;

	unsigned				dma_buf_size;
	unsigned				max_buf_size;
	bool					is_curr_dma_xfer;

	struct completion			rx_dma_complete;
	struct completion			tx_dma_complete;

	u32					tx_status;
	u32					rx_status;
	u32					status_reg;
	bool					is_packed;
	u32					packed_size;

	u32					command_reg;
	u32					command2_reg;
	u32					dma_control_reg;
	u32					def_command_reg;
	u32					def_command2_reg;

	struct completion			xfer_completion;
	struct spi_transfer			*curr_xfer;
	struct dma_chan				*rx_dma_chan;
	u32					*rx_dma_buf;
	dma_addr_t				rx_dma_phys;
	struct dma_async_tx_descriptor		*rx_dma_desc;

	struct dma_chan				*tx_dma_chan;
	u32					*tx_dma_buf;
	dma_addr_t				tx_dma_phys;
	struct dma_async_tx_descriptor		*tx_dma_desc;
};

static int tegra_slink_runtime_suspend(struct device *dev);
static int tegra_slink_runtime_resume(struct device *dev);

static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
		unsigned long reg)
{
	return readl(tspi->base + reg);
}

static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
		u32 val, unsigned long reg)
{
	writel(val, tspi->base + reg);

	/* Read back register to make sure that register writes completed */
	if (reg != SLINK_TX_FIFO)
		readl(tspi->base + SLINK_MAS_DATA);
}

static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
{
	u32 val_write;

	tegra_slink_readl(tspi, SLINK_STATUS);

	/* Write 1 to clear status register */
	val_write = SLINK_RDY | SLINK_FIFO_ERROR;
	tegra_slink_writel(tspi, val_write, SLINK_STATUS);
}

static u32 tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
				  struct spi_transfer *t)
{
	switch (tspi->bytes_per_word) {
	case 0:
		return SLINK_PACK_SIZE_4;
	case 1:
		return SLINK_PACK_SIZE_8;
	case 2:
		return SLINK_PACK_SIZE_16;
	case 4:
		return SLINK_PACK_SIZE_32;
	default:
		return 0;
	}
}

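/*
 * Work out how much of the current transfer can be handled in one chunk.
 * 8-bit and 16-bit words are packed into 32-bit FIFO entries; other word
 * sizes occupy one FIFO entry each. The returned FIFO word count later
 * decides between the PIO and DMA paths.
 */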
static unsigned tegra_slink_calculate_curr_xfer_param(
	struct spi_device *spi, struct tegra_slink_data *tspi,
	struct spi_transfer *t)
{
	unsigned remain_len = t->len - tspi->cur_pos;
	unsigned max_word;
	unsigned bits_per_word;
	unsigned max_len;
	unsigned total_fifo_words;

	bits_per_word = t->bits_per_word;
	tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

	if (bits_per_word == 8 || bits_per_word == 16) {
		tspi->is_packed = 1;
		tspi->words_per_32bit = 32/bits_per_word;
	} else {
		tspi->is_packed = 0;
		tspi->words_per_32bit = 1;
	}
	tspi->packed_size = tegra_slink_get_packed_size(tspi, t);

	if (tspi->is_packed) {
		max_len = min(remain_len, tspi->max_buf_size);
		tspi->curr_dma_words = max_len/tspi->bytes_per_word;
		total_fifo_words = max_len/4;
	} else {
		max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
		max_word = min(max_word, tspi->max_buf_size/4);
		tspi->curr_dma_words = max_word;
		total_fifo_words = max_word;
	}
	return total_fifo_words;
}

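/*
 * PIO path: fill the TX FIFO directly from the client buffer, limited by
 * the free space reported in SLINK_STATUS2. Returns the number of SPI
 * words written.
 */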
static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
	struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	unsigned nbytes;
	unsigned tx_empty_count;
	u32 fifo_status;
	unsigned max_n_32bit;
	unsigned i, count;
	unsigned int written_words;
	unsigned fifo_words_left;
	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;

	fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
	tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tspi->is_packed) {
		fifo_words_left = tx_empty_count * tspi->words_per_32bit;
		written_words = min(fifo_words_left, tspi->curr_dma_words);
		nbytes = written_words * tspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(nbytes, 4);
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;
			for (i = 0; (i < 4) && nbytes; i++, nbytes--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
		}
	} else {
		max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
		written_words = max_n_32bit;
		nbytes = written_words * tspi->bytes_per_word;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;
			for (i = 0; nbytes && (i < tspi->bytes_per_word);
							i++, nbytes--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
		}
	}
	tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
	return written_words;
}

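/*
 * PIO path: drain the RX FIFO into the client buffer, based on the fill
 * level reported in SLINK_STATUS2. Returns the number of SPI words read.
 */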
static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
		struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	unsigned rx_full_count;
	u32 fifo_status;
	unsigned i, count;
	unsigned int read_words = 0;
	unsigned len;
	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;

	fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
	rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> i*8) & 0xFF;
		}
		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
		read_words += tspi->curr_dma_words;
	} else {
		for (count = 0; count < rx_full_count; count++) {
			u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
			for (i = 0; (i < tspi->bytes_per_word); i++)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
		tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
		read_words += rx_full_count;
	}
	return read_words;
}

static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
		struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	/* Make the DMA buffer accessible to the CPU before filling it */
	dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);

	if (tspi->is_packed) {
		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
		memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
	} else {
		unsigned int i;
		unsigned int count;
		u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;

		for (count = 0; count < tspi->curr_dma_words; count++) {
			u32 x = 0;
			for (i = 0; consume && (i < tspi->bytes_per_word);
							i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tspi->tx_dma_buf[count] = x;
		}
	}
	tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;

	/* Hand the DMA buffer back to the device */
	dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);
}

static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
		struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	unsigned len;

	/* Make the DMA buffer accessible to the CPU before reading it */
	dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
		tspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
	} else {
		unsigned int i;
		unsigned int count;
		unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;

		for (count = 0; count < tspi->curr_dma_words; count++) {
			u32 x = tspi->rx_dma_buf[count] & rx_mask;
			for (i = 0; (i < tspi->bytes_per_word); i++)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
	}
	tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;

	/* Hand the DMA buffer back to the device */
	dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
		tspi->dma_buf_size, DMA_FROM_DEVICE);
}

static void tegra_slink_dma_complete(void *args)
{
	struct completion *dma_complete = args;

	complete(dma_complete);
}

static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
{
	reinit_completion(&tspi->tx_dma_complete);
	tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
				tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tspi->tx_dma_desc) {
		dev_err(tspi->dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tspi->tx_dma_desc->callback = tegra_slink_dma_complete;
	tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;

	dmaengine_submit(tspi->tx_dma_desc);
	dma_async_issue_pending(tspi->tx_dma_chan);
	return 0;
}

static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
{
	reinit_completion(&tspi->rx_dma_complete);
	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
				tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tspi->rx_dma_desc) {
		dev_err(tspi->dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tspi->rx_dma_desc->callback = tegra_slink_dma_complete;
	tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;

	dmaengine_submit(tspi->rx_dma_desc);
	dma_async_issue_pending(tspi->rx_dma_chan);
	return 0;
}

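/*
 * DMA path: program the block size, packing and FIFO trigger levels,
 * stage any TX data in the bounce buffer, start the dmaengine transfers
 * and finally enable DMA mode in the controller.
 */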
static int tegra_slink_start_dma_based_transfer(
		struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	u32 val;
	unsigned int len;
	int ret = 0;
	u32 status;

	/* Make sure that the Rx and Tx FIFOs are empty */
	status = tegra_slink_readl(tspi, SLINK_STATUS);
	if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
		dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
			(unsigned)status);
		return -EIO;
	}

	val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
	val |= tspi->packed_size;
	if (tspi->is_packed)
		len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
					4) * 4;
	else
		len = tspi->curr_dma_words * 4;

	/* Set attention level based on length of transfer */
	if (len & 0xF)
		val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
	else if (((len) >> 4) & 0x1)
		val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
	else
		val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;

	if (tspi->cur_direction & DATA_DIR_TX)
		val |= SLINK_IE_TXC;

	if (tspi->cur_direction & DATA_DIR_RX)
		val |= SLINK_IE_RXC;

	tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
	tspi->dma_control_reg = val;

	if (tspi->cur_direction & DATA_DIR_TX) {
		tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t);
		wmb();
		ret = tegra_slink_start_tx_dma(tspi, len);
		if (ret < 0) {
			dev_err(tspi->dev,
				"Starting tx dma failed, err %d\n", ret);
			return ret;
		}

		/* Wait for the Tx FIFO to fill before starting SLINK */
		status = tegra_slink_readl(tspi, SLINK_STATUS);
		while (!(status & SLINK_TX_FULL))
			status = tegra_slink_readl(tspi, SLINK_STATUS);
	}

	if (tspi->cur_direction & DATA_DIR_RX) {
		/* Hand the DMA buffer to the device before starting Rx DMA */
		dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
				tspi->dma_buf_size, DMA_FROM_DEVICE);

		ret = tegra_slink_start_rx_dma(tspi, len);
		if (ret < 0) {
			dev_err(tspi->dev,
				"Starting rx dma failed, err %d\n", ret);
			if (tspi->cur_direction & DATA_DIR_TX)
				dmaengine_terminate_all(tspi->tx_dma_chan);
			return ret;
		}
	}
	tspi->is_curr_dma_xfer = true;
	if (tspi->is_packed) {
		val |= SLINK_PACKED;
		tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
		/* HW needs a small delay after setting packed mode */
		udelay(1);
	}
	tspi->dma_control_reg = val;

	val |= SLINK_DMA_EN;
	tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
	return ret;
}

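/*
 * PIO path: enable the completion interrupts for the active directions,
 * pre-fill the TX FIFO from the client buffer and start the transfer.
 * Note that SLINK_DMA_CTL (block size, SLINK_DMA_EN) is used to start
 * the transfer even when no dmaengine channel is involved.
 */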
static int tegra_slink_start_cpu_based_transfer(
		struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	u32 val;
	unsigned cur_words;

	val = tspi->packed_size;
	if (tspi->cur_direction & DATA_DIR_TX)
		val |= SLINK_IE_TXC;

	if (tspi->cur_direction & DATA_DIR_RX)
		val |= SLINK_IE_RXC;

	tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
	tspi->dma_control_reg = val;

	if (tspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t);
	else
		cur_words = tspi->curr_dma_words;
	val |= SLINK_DMA_BLOCK_SIZE(cur_words - 1);
	tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
	tspi->dma_control_reg = val;

	tspi->is_curr_dma_xfer = false;
	if (tspi->is_packed) {
		val |= SLINK_PACKED;
		tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
		udelay(1);
		wmb();
	}
	tspi->dma_control_reg = val;
	val |= SLINK_DMA_EN;
	tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
	return 0;
}

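/*
 * Request the "rx" or "tx" dmaengine channel, allocate a coherent bounce
 * buffer for it and point the slave configuration at the corresponding
 * SLINK FIFO register.
 */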
static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	u32 *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;

	dma_chan = dma_request_slave_channel_reason(tspi->dev,
						dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(tspi->dev,
				"Dma channel is not available: %d\n", ret);
		return ret;
	}

	dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
				&dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		dev_err(tspi->dev, "Not able to allocate the dma buffer\n");
		dma_release_channel(dma_chan);
		return -ENOMEM;
	}

	if (dma_to_memory) {
		dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.src_maxburst = 0;
	} else {
		dma_sconfig.dst_addr = tspi->phys + SLINK_TX_FIFO;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.dst_maxburst = 0;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret)
		goto scrub;
	if (dma_to_memory) {
		tspi->rx_dma_chan = dma_chan;
		tspi->rx_dma_buf = dma_buf;
		tspi->rx_dma_phys = dma_phys;
	} else {
		tspi->tx_dma_chan = dma_chan;
		tspi->tx_dma_buf = dma_buf;
		tspi->tx_dma_phys = dma_phys;
	}
	return 0;

scrub:
	dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
	dma_release_channel(dma_chan);
	return ret;
}

static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
	bool dma_to_memory)
{
	u32 *dma_buf;
	dma_addr_t dma_phys;
	struct dma_chan *dma_chan;

	if (dma_to_memory) {
		dma_buf = tspi->rx_dma_buf;
		dma_chan = tspi->rx_dma_chan;
		dma_phys = tspi->rx_dma_phys;
		tspi->rx_dma_chan = NULL;
		tspi->rx_dma_buf = NULL;
	} else {
		dma_buf = tspi->tx_dma_buf;
		dma_chan = tspi->tx_dma_chan;
		dma_phys = tspi->tx_dma_phys;
		tspi->tx_dma_buf = NULL;
		tspi->tx_dma_chan = NULL;
	}
	if (!dma_chan)
		return;

	dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
	dma_release_channel(dma_chan);
}

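/*
 * Configure clock rate, word length and transfer direction for a single
 * spi_transfer, then hand it to the DMA path or the PIO path depending
 * on whether it fits within the FIFO depth.
 */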
static int tegra_slink_start_transfer_one(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
	u32 speed;
	u8 bits_per_word;
	unsigned total_fifo_words;
	int ret;
	u32 command;
	u32 command2;

	bits_per_word = t->bits_per_word;
	speed = t->speed_hz;
	if (speed != tspi->cur_speed) {
		clk_set_rate(tspi->clk, speed * 4);
		tspi->cur_speed = speed;
	}

	tspi->cur_spi = spi;
	tspi->cur_pos = 0;
	tspi->cur_rx_pos = 0;
	tspi->cur_tx_pos = 0;
	tspi->curr_xfer = t;
	total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);

	command = tspi->command_reg;
	command &= ~SLINK_BIT_LENGTH(~0);
	command |= SLINK_BIT_LENGTH(bits_per_word - 1);

	command2 = tspi->command2_reg;
	command2 &= ~(SLINK_RXEN | SLINK_TXEN);

	tegra_slink_writel(tspi, command, SLINK_COMMAND);
	tspi->command_reg = command;

	tspi->cur_direction = 0;
	if (t->rx_buf) {
		command2 |= SLINK_RXEN;
		tspi->cur_direction |= DATA_DIR_RX;
	}
	if (t->tx_buf) {
		command2 |= SLINK_TXEN;
		tspi->cur_direction |= DATA_DIR_TX;
	}
	tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
	tspi->command2_reg = command2;

	if (total_fifo_words > SLINK_FIFO_DEPTH)
		ret = tegra_slink_start_dma_based_transfer(tspi, t);
	else
		ret = tegra_slink_start_cpu_based_transfer(tspi, t);
	return ret;
}

static int tegra_slink_setup(struct spi_device *spi)
{
	static const u32 cs_pol_bit[MAX_CHIP_SELECT] = {
			SLINK_CS_POLARITY,
			SLINK_CS_POLARITY1,
			SLINK_CS_POLARITY2,
			SLINK_CS_POLARITY3,
	};

	struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
	u32 val;
	unsigned long flags;
	int ret;

	dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
		spi->bits_per_word,
		spi->mode & SPI_CPOL ? "" : "~",
		spi->mode & SPI_CPHA ? "" : "~",
		spi->max_speed_hz);

	ret = pm_runtime_get_sync(tspi->dev);
	if (ret < 0) {
		dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
		return ret;
	}

	spin_lock_irqsave(&tspi->lock, flags);
	val = tspi->def_command_reg;
	if (spi->mode & SPI_CS_HIGH)
		val |= cs_pol_bit[spi->chip_select];
	else
		val &= ~cs_pol_bit[spi->chip_select];
	tspi->def_command_reg = val;
	tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
	spin_unlock_irqrestore(&tspi->lock, flags);

	pm_runtime_put(tspi->dev);
	return 0;
}

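/*
 * Per-message setup: start from the default command values, switch to
 * software-controlled chip select, pick the chip-select line and encode
 * CPOL/CPHA into the command register. The registers themselves are
 * written when the first transfer starts.
 */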
static int tegra_slink_prepare_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;

	tegra_slink_clear_status(tspi);

	tspi->command_reg = tspi->def_command_reg;
	tspi->command_reg |= SLINK_CS_SW | SLINK_CS_VALUE;

	tspi->command2_reg = tspi->def_command2_reg;
	tspi->command2_reg |= SLINK_SS_EN_CS(spi->chip_select);

	tspi->command_reg &= ~SLINK_MODES;
	if (spi->mode & SPI_CPHA)
		tspi->command_reg |= SLINK_CK_SDA;

	if (spi->mode & SPI_CPOL)
		tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_HIGH;
	else
		tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_LOW;

	return 0;
}

static int tegra_slink_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);
	int ret;

	reinit_completion(&tspi->xfer_completion);
	ret = tegra_slink_start_transfer_one(spi, xfer);
	if (ret < 0) {
		dev_err(tspi->dev,
			"spi can not start transfer, err %d\n", ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&tspi->xfer_completion,
					  SLINK_DMA_TIMEOUT);
	if (WARN_ON(ret == 0)) {
		dev_err(tspi->dev,
			"spi transfer timeout, err %d\n", ret);
		return -EIO;
	}

	if (tspi->tx_status)
		return tspi->tx_status;
	if (tspi->rx_status)
		return tspi->rx_status;

	return 0;
}

static int tegra_slink_unprepare_message(struct spi_master *master,
					 struct spi_message *msg)
{
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);

	tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
	tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);

	return 0;
}

static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
{
	struct spi_transfer *t = tspi->curr_xfer;
	unsigned long flags;

	spin_lock_irqsave(&tspi->lock, flags);
	if (tspi->tx_status || tspi->rx_status ||
				(tspi->status_reg & SLINK_BSY)) {
		dev_err(tspi->dev,
			"CpuXfer ERROR bit set 0x%x\n", tspi->status_reg);
		dev_err(tspi->dev,
			"CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
				tspi->command2_reg, tspi->dma_control_reg);
		reset_control_assert(tspi->rst);
		udelay(2);
		reset_control_deassert(tspi->rst);
		complete(&tspi->xfer_completion);
		goto exit;
	}

	if (tspi->cur_direction & DATA_DIR_RX)
		tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t);

	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->cur_pos = tspi->cur_tx_pos;
	else
		tspi->cur_pos = tspi->cur_rx_pos;

	if (tspi->cur_pos == t->len) {
		complete(&tspi->xfer_completion);
		goto exit;
	}

	tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
	tegra_slink_start_cpu_based_transfer(tspi, t);
exit:
	spin_unlock_irqrestore(&tspi->lock, flags);
	return IRQ_HANDLED;
}

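/*
 * DMA-path completion handling (threaded context): wait for the
 * dmaengine completions or abort on error, copy received data back to
 * the client buffer, then either complete the transfer or start the
 * next chunk.
 */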
static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
{
	struct spi_transfer *t = tspi->curr_xfer;
	long wait_status;
	int err = 0;
	unsigned total_fifo_words;
	unsigned long flags;

	/* Abort the DMA transfers on any error */
	if (tspi->cur_direction & DATA_DIR_TX) {
		if (tspi->tx_status) {
			dmaengine_terminate_all(tspi->tx_dma_chan);
			err += 1;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tspi->tx_dma_chan);
				dev_err(tspi->dev, "TxDma Xfer failed\n");
				err += 1;
			}
		}
	}

	if (tspi->cur_direction & DATA_DIR_RX) {
		if (tspi->rx_status) {
			dmaengine_terminate_all(tspi->rx_dma_chan);
			err += 2;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tspi->rx_dma_chan);
				dev_err(tspi->dev, "RxDma Xfer failed\n");
				err += 2;
			}
		}
	}

	spin_lock_irqsave(&tspi->lock, flags);
	if (err) {
		dev_err(tspi->dev,
			"DmaXfer: ERROR bit set 0x%x\n", tspi->status_reg);
		dev_err(tspi->dev,
			"DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
				tspi->command2_reg, tspi->dma_control_reg);
		reset_control_assert(tspi->rst);
		udelay(2);
		reset_control_deassert(tspi->rst);
		complete(&tspi->xfer_completion);
		spin_unlock_irqrestore(&tspi->lock, flags);
		return IRQ_HANDLED;
	}

	if (tspi->cur_direction & DATA_DIR_RX)
		tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t);

	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->cur_pos = tspi->cur_tx_pos;
	else
		tspi->cur_pos = tspi->cur_rx_pos;

	if (tspi->cur_pos == t->len) {
		complete(&tspi->xfer_completion);
		goto exit;
	}

	/* Continue transfer in current message */
	total_fifo_words = tegra_slink_calculate_curr_xfer_param(tspi->cur_spi,
							tspi, t);
	if (total_fifo_words > SLINK_FIFO_DEPTH)
		err = tegra_slink_start_dma_based_transfer(tspi, t);
	else
		err = tegra_slink_start_cpu_based_transfer(tspi, t);

exit:
	spin_unlock_irqrestore(&tspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t tegra_slink_isr_thread(int irq, void *context_data)
{
	struct tegra_slink_data *tspi = context_data;

	if (!tspi->is_curr_dma_xfer)
		return handle_cpu_based_xfer(tspi);
	return handle_dma_based_xfer(tspi);
}

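/*
 * Hard IRQ handler: snapshot the status register, latch any FIFO error
 * bits for the active directions, clear the interrupt condition and
 * defer the rest of the work to the threaded handler.
 */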
static irqreturn_t tegra_slink_isr(int irq, void *context_data)
{
	struct tegra_slink_data *tspi = context_data;

	tspi->status_reg = tegra_slink_readl(tspi, SLINK_STATUS);
	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->tx_status = tspi->status_reg &
					(SLINK_TX_OVF | SLINK_TX_UNF);

	if (tspi->cur_direction & DATA_DIR_RX)
		tspi->rx_status = tspi->status_reg &
					(SLINK_RX_OVF | SLINK_RX_UNF);
	tegra_slink_clear_status(tspi);

	return IRQ_WAKE_THREAD;
}

static const struct tegra_slink_chip_data tegra30_spi_cdata = {
	.cs_hold_time = true,
};

static const struct tegra_slink_chip_data tegra20_spi_cdata = {
	.cs_hold_time = false,
};

static const struct of_device_id tegra_slink_of_match[] = {
	{ .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
	{ .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
	{}
};
MODULE_DEVICE_TABLE(of, tegra_slink_of_match);

static int tegra_slink_probe(struct platform_device *pdev)
{
	struct spi_master	*master;
	struct tegra_slink_data	*tspi;
	struct resource		*r;
	int ret, spi_irq;
	const struct tegra_slink_chip_data *cdata = NULL;
	const struct of_device_id *match;

	match = of_match_device(tegra_slink_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;

	master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
	if (!master) {
		dev_err(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->setup = tegra_slink_setup;
	master->prepare_message = tegra_slink_prepare_message;
	master->transfer_one = tegra_slink_transfer_one;
	master->unprepare_message = tegra_slink_unprepare_message;
	master->auto_runtime_pm = true;
	master->num_chipselect = MAX_CHIP_SELECT;

	platform_set_drvdata(pdev, master);
	tspi = spi_master_get_devdata(master);
	tspi->master = master;
	tspi->dev = &pdev->dev;
	tspi->chip_data = cdata;
	spin_lock_init(&tspi->lock);

	if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency",
				 &master->max_speed_hz))
		master->max_speed_hz = 25000000; /* 25MHz */

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "No IO memory resource\n");
		ret = -ENODEV;
		goto exit_free_master;
	}
	tspi->phys = r->start;
	tspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(tspi->base)) {
		ret = PTR_ERR(tspi->base);
		goto exit_free_master;
	}

	spi_irq = platform_get_irq(pdev, 0);
	tspi->irq = spi_irq;
	ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
			tegra_slink_isr_thread, IRQF_ONESHOT,
			dev_name(&pdev->dev), tspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
					tspi->irq);
		goto exit_free_master;
	}

	tspi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tspi->clk)) {
		dev_err(&pdev->dev, "can not get clock\n");
		ret = PTR_ERR(tspi->clk);
		goto exit_free_irq;
	}

	tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
	if (IS_ERR(tspi->rst)) {
		dev_err(&pdev->dev, "can not get reset\n");
		ret = PTR_ERR(tspi->rst);
		goto exit_free_irq;
	}

	tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
	tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;

	ret = tegra_slink_init_dma_param(tspi, true);
	if (ret < 0)
		goto exit_free_irq;
	ret = tegra_slink_init_dma_param(tspi, false);
	if (ret < 0)
		goto exit_rx_dma_free;
	tspi->max_buf_size = tspi->dma_buf_size;
	init_completion(&tspi->tx_dma_complete);
	init_completion(&tspi->rx_dma_complete);

	init_completion(&tspi->xfer_completion);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_slink_runtime_resume(&pdev->dev);
		if (ret)
			goto exit_pm_disable;
	}

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
		goto exit_pm_disable;
	}
	tspi->def_command_reg  = SLINK_M_S;
	tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
	tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
	tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
	pm_runtime_put(&pdev->dev);

	master->dev.of_node = pdev->dev.of_node;
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret < 0) {
		dev_err(&pdev->dev, "can not register to master err %d\n", ret);
		goto exit_pm_disable;
	}
	return ret;

exit_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_slink_runtime_suspend(&pdev->dev);
	tegra_slink_deinit_dma_param(tspi, false);
exit_rx_dma_free:
	tegra_slink_deinit_dma_param(tspi, true);
exit_free_irq:
	free_irq(spi_irq, tspi);
exit_free_master:
	spi_master_put(master);
	return ret;
}

static int tegra_slink_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct tegra_slink_data	*tspi = spi_master_get_devdata(master);

	free_irq(tspi->irq, tspi);

	if (tspi->tx_dma_chan)
		tegra_slink_deinit_dma_param(tspi, false);

	if (tspi->rx_dma_chan)
		tegra_slink_deinit_dma_param(tspi, true);

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_slink_runtime_suspend(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_slink_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int tegra_slink_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm runtime failed, e = %d\n", ret);
		return ret;
	}
	tegra_slink_writel(tspi, tspi->command_reg, SLINK_COMMAND);
	tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
#endif

static int tegra_slink_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);

	/* Flush all writes still queued in the PPSB by reading back */
	tegra_slink_readl(tspi, SLINK_MAS_DATA);

	clk_disable_unprepare(tspi->clk);
	return 0;
}

static int tegra_slink_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(tspi->clk);
	if (ret < 0) {
		dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static const struct dev_pm_ops slink_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
		tegra_slink_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_slink_suspend, tegra_slink_resume)
};
static struct platform_driver tegra_slink_driver = {
	.driver = {
		.name		= "spi-tegra-slink",
		.pm		= &slink_pm_ops,
		.of_match_table	= tegra_slink_of_match,
	},
	.probe =	tegra_slink_probe,
	.remove =	tegra_slink_remove,
};
module_platform_driver(tegra_slink_driver);

MODULE_ALIAS("platform:spi-tegra-slink");
MODULE_DESCRIPTION("NVIDIA Tegra20/Tegra30 SLINK Controller Driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");