1 /*
2  * OMAP2 McSPI controller driver
3  *
4  * Copyright (C) 2005, 2006 Nokia Corporation
5  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
6  *		Juha Yrjölä <juha.yrjola@nokia.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/interrupt.h>
21 #include <linux/module.h>
22 #include <linux/device.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dmaengine.h>
26 #include <linux/omap-dma.h>
27 #include <linux/platform_device.h>
28 #include <linux/err.h>
29 #include <linux/clk.h>
30 #include <linux/io.h>
31 #include <linux/slab.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/of.h>
34 #include <linux/of_device.h>
35 #include <linux/gcd.h>
36 
37 #include <linux/spi/spi.h>
38 #include <linux/gpio.h>
39 
40 #include <linux/platform_data/spi-omap2-mcspi.h>
41 
42 #define OMAP2_MCSPI_MAX_FREQ		48000000
43 #define OMAP2_MCSPI_MAX_DIVIDER		4096
44 #define OMAP2_MCSPI_MAX_FIFODEPTH	64
45 #define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
46 #define SPI_AUTOSUSPEND_TIMEOUT		2000
47 
48 #define OMAP2_MCSPI_REVISION		0x00
49 #define OMAP2_MCSPI_SYSSTATUS		0x14
50 #define OMAP2_MCSPI_IRQSTATUS		0x18
51 #define OMAP2_MCSPI_IRQENABLE		0x1c
52 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
53 #define OMAP2_MCSPI_SYST		0x24
54 #define OMAP2_MCSPI_MODULCTRL		0x28
55 #define OMAP2_MCSPI_XFERLEVEL		0x7c
56 
57 /* per-channel banks, 0x14 bytes each, first is: */
58 #define OMAP2_MCSPI_CHCONF0		0x2c
59 #define OMAP2_MCSPI_CHSTAT0		0x30
60 #define OMAP2_MCSPI_CHCTRL0		0x34
61 #define OMAP2_MCSPI_TX0			0x38
62 #define OMAP2_MCSPI_RX0			0x3c
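/*
 * Illustrative note (not used by the code below): with 0x14-byte channel
 * banks, channel N's copy of a register sits at <reg>0 + N * 0x14, e.g.
 * CHCONF for chip select 2 lives at 0x2c + 2 * 0x14 = 0x54. This is the
 * same arithmetic omap2_mcspi_setup() uses to derive cs->base and cs->phys.
 */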
63 
64 /* per-register bitmasks: */
65 #define OMAP2_MCSPI_IRQSTATUS_EOW	BIT(17)
66 
67 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
68 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
69 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
70 
71 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
72 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
73 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
74 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
75 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
76 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
77 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
78 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
79 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
80 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
81 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
82 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
83 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
84 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
85 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
86 #define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
87 #define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
88 #define OMAP2_MCSPI_CHCONF_CLKG		BIT(29)
89 
90 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
91 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
92 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
93 #define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)
94 
95 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
96 #define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK	(0xff << 8)
97 
98 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
99 
100 /* We have 2 DMA channels per CS, one for RX and one for TX */
101 struct omap2_mcspi_dma {
102 	struct dma_chan *dma_tx;
103 	struct dma_chan *dma_rx;
104 
105 	int dma_tx_sync_dev;
106 	int dma_rx_sync_dev;
107 
108 	struct completion dma_tx_completion;
109 	struct completion dma_rx_completion;
110 
111 	char dma_rx_ch_name[14];
112 	char dma_tx_ch_name[14];
113 };
114 
115 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
116  * cache operations; a better heuristic would also consider word size and bit rate.
117  */
118 #define DMA_MIN_BYTES			160
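/*
 * In this driver the threshold is applied in omap2_mcspi_transfer_one() and
 * omap2_mcspi_work_one(): transfers shorter than DMA_MIN_BYTES always take
 * the PIO path, while longer ones use DMA (and the FIFO) only when both an
 * RX and a TX channel could be requested.
 */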
119 
120 
121 /*
122  * Used for context save and restore; structure members must be updated
123  * whenever the corresponding registers are modified.
124  */
125 struct omap2_mcspi_regs {
126 	u32 modulctrl;
127 	u32 wakeupenable;
128 	struct list_head cs;
129 };
130 
131 struct omap2_mcspi {
132 	struct spi_master	*master;
133 	/* Virtual base address of the controller */
134 	void __iomem		*base;
135 	unsigned long		phys;
136 	/* SPI1 has 4 channels, while SPI2 has 2 */
137 	struct omap2_mcspi_dma	*dma_channels;
138 	struct device		*dev;
139 	struct omap2_mcspi_regs ctx;
140 	int			fifo_depth;
141 	unsigned int		pin_dir:1;
142 };
143 
144 struct omap2_mcspi_cs {
145 	void __iomem		*base;
146 	unsigned long		phys;
147 	int			word_len;
148 	u16			mode;
149 	struct list_head	node;
150 	/* Context save and restore shadow register */
151 	u32			chconf0, chctrl0;
152 };
153 
154 static inline void mcspi_write_reg(struct spi_master *master,
155 		int idx, u32 val)
156 {
157 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
158 
159 	writel_relaxed(val, mcspi->base + idx);
160 }
161 
162 static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
163 {
164 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
165 
166 	return readl_relaxed(mcspi->base + idx);
167 }
168 
169 static inline void mcspi_write_cs_reg(const struct spi_device *spi,
170 		int idx, u32 val)
171 {
172 	struct omap2_mcspi_cs	*cs = spi->controller_state;
173 
174 	writel_relaxed(val, cs->base +  idx);
175 }
176 
177 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
178 {
179 	struct omap2_mcspi_cs	*cs = spi->controller_state;
180 
181 	return readl_relaxed(cs->base + idx);
182 }
183 
184 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
185 {
186 	struct omap2_mcspi_cs *cs = spi->controller_state;
187 
188 	return cs->chconf0;
189 }
190 
191 static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
192 {
193 	struct omap2_mcspi_cs *cs = spi->controller_state;
194 
195 	cs->chconf0 = val;
196 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
197 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
198 }
199 
200 static inline int mcspi_bytes_per_word(int word_len)
201 {
202 	if (word_len <= 8)
203 		return 1;
204 	else if (word_len <= 16)
205 		return 2;
206 	else /* word_len <= 32 */
207 		return 4;
208 }
209 
210 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
211 		int is_read, int enable)
212 {
213 	u32 l, rw;
214 
215 	l = mcspi_cached_chconf0(spi);
216 
217 	if (is_read) /* 1 is read, 0 write */
218 		rw = OMAP2_MCSPI_CHCONF_DMAR;
219 	else
220 		rw = OMAP2_MCSPI_CHCONF_DMAW;
221 
222 	if (enable)
223 		l |= rw;
224 	else
225 		l &= ~rw;
226 
227 	mcspi_write_chconf0(spi, l);
228 }
229 
230 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
231 {
232 	struct omap2_mcspi_cs *cs = spi->controller_state;
233 	u32 l;
234 
235 	l = cs->chctrl0;
236 	if (enable)
237 		l |= OMAP2_MCSPI_CHCTRL_EN;
238 	else
239 		l &= ~OMAP2_MCSPI_CHCTRL_EN;
240 	cs->chctrl0 = l;
241 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
242 	/* Flush posted writes */
243 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
244 }
245 
246 static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
247 {
248 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
249 	u32 l;
250 
251 	/* The controller handles the inverted chip selects
252 	 * using the OMAP2_MCSPI_CHCONF_EPOL bit so revert
253 	 * the inversion from the core spi_set_cs function.
254 	 */
255 	if (spi->mode & SPI_CS_HIGH)
256 		enable = !enable;
257 
258 	if (spi->controller_state) {
259 		int err = pm_runtime_get_sync(mcspi->dev);
260 		if (err < 0) {
261 			dev_err(mcspi->dev, "failed to get sync: %d\n", err);
262 			return;
263 		}
264 
265 		l = mcspi_cached_chconf0(spi);
266 
267 		if (enable)
268 			l &= ~OMAP2_MCSPI_CHCONF_FORCE;
269 		else
270 			l |= OMAP2_MCSPI_CHCONF_FORCE;
271 
272 		mcspi_write_chconf0(spi, l);
273 
274 		pm_runtime_mark_last_busy(mcspi->dev);
275 		pm_runtime_put_autosuspend(mcspi->dev);
276 	}
277 }
278 
279 static void omap2_mcspi_set_master_mode(struct spi_master *master)
280 {
281 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
282 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
283 	u32 l;
284 
285 	/*
286 	 * Setup when switching from (reset default) slave mode
287 	 * to single-channel master mode
288 	 */
289 	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
290 	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
291 	l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
292 	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
293 
294 	ctx->modulctrl = l;
295 }
296 
297 static void omap2_mcspi_set_fifo(const struct spi_device *spi,
298 				struct spi_transfer *t, int enable)
299 {
300 	struct spi_master *master = spi->master;
301 	struct omap2_mcspi_cs *cs = spi->controller_state;
302 	struct omap2_mcspi *mcspi;
303 	unsigned int wcnt;
304 	int max_fifo_depth, fifo_depth, bytes_per_word;
305 	u32 chconf, xferlevel;
306 
307 	mcspi = spi_master_get_devdata(master);
308 
309 	chconf = mcspi_cached_chconf0(spi);
310 	if (enable) {
311 		bytes_per_word = mcspi_bytes_per_word(cs->word_len);
312 		if (t->len % bytes_per_word != 0)
313 			goto disable_fifo;
314 
315 		if (t->rx_buf != NULL && t->tx_buf != NULL)
316 			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
317 		else
318 			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
319 
320 		fifo_depth = gcd(t->len, max_fifo_depth);
321 		if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
322 			goto disable_fifo;
323 
324 		wcnt = t->len / bytes_per_word;
325 		if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
326 			goto disable_fifo;
327 
328 		xferlevel = wcnt << 16;
329 		if (t->rx_buf != NULL) {
330 			chconf |= OMAP2_MCSPI_CHCONF_FFER;
331 			xferlevel |= (fifo_depth - 1) << 8;
332 		}
333 		if (t->tx_buf != NULL) {
334 			chconf |= OMAP2_MCSPI_CHCONF_FFET;
335 			xferlevel |= fifo_depth - 1;
336 		}
337 
338 		mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
339 		mcspi_write_chconf0(spi, chconf);
340 		mcspi->fifo_depth = fifo_depth;
341 
342 		return;
343 	}
344 
345 disable_fifo:
346 	if (t->rx_buf != NULL)
347 		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
348 
349 	if (t->tx_buf != NULL)
350 		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
351 
352 	mcspi_write_chconf0(spi, chconf);
353 	mcspi->fifo_depth = 0;
354 }
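/*
 * Worked example for omap2_mcspi_set_fifo() (illustrative numbers only): a
 * 96-byte full-duplex transfer with 8-bit words halves the 64-byte FIFO per
 * direction, picks fifo_depth = gcd(96, 32) = 32, and programs XFERLEVEL
 * with a word count of 96 and TX/RX fill levels of fifo_depth - 1 = 31.
 */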
355 
356 static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
357 {
358 	struct spi_master	*spi_cntrl = mcspi->master;
359 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
360 	struct omap2_mcspi_cs	*cs;
361 
362 	/* McSPI: context restore */
363 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
364 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
365 
366 	list_for_each_entry(cs, &ctx->cs, node)
367 		writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
368 }
369 
370 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
371 {
372 	unsigned long timeout;
373 
374 	timeout = jiffies + msecs_to_jiffies(1000);
375 	while (!(readl_relaxed(reg) & bit)) {
376 		if (time_after(jiffies, timeout)) {
377 			if (!(readl_relaxed(reg) & bit))
378 				return -ETIMEDOUT;
379 			else
380 				return 0;
381 		}
382 		cpu_relax();
383 	}
384 	return 0;
385 }
386 
387 static void omap2_mcspi_rx_callback(void *data)
388 {
389 	struct spi_device *spi = data;
390 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
391 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
392 
393 	/* We must disable the DMA RX request */
394 	omap2_mcspi_set_dma_req(spi, 1, 0);
395 
396 	complete(&mcspi_dma->dma_rx_completion);
397 }
398 
399 static void omap2_mcspi_tx_callback(void *data)
400 {
401 	struct spi_device *spi = data;
402 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
403 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
404 
405 	/* We must disable the DMA TX request */
406 	omap2_mcspi_set_dma_req(spi, 0, 0);
407 
408 	complete(&mcspi_dma->dma_tx_completion);
409 }
410 
411 static void omap2_mcspi_tx_dma(struct spi_device *spi,
412 				struct spi_transfer *xfer,
413 				struct dma_slave_config cfg)
414 {
415 	struct omap2_mcspi	*mcspi;
416 	struct omap2_mcspi_dma  *mcspi_dma;
417 	unsigned int		count;
418 
419 	mcspi = spi_master_get_devdata(spi->master);
420 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
421 	count = xfer->len;
422 
423 	if (mcspi_dma->dma_tx) {
424 		struct dma_async_tx_descriptor *tx;
425 		struct scatterlist sg;
426 
427 		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
428 
429 		sg_init_table(&sg, 1);
430 		sg_dma_address(&sg) = xfer->tx_dma;
431 		sg_dma_len(&sg) = xfer->len;
432 
433 		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
434 		DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
435 		if (tx) {
436 			tx->callback = omap2_mcspi_tx_callback;
437 			tx->callback_param = spi;
438 			dmaengine_submit(tx);
439 		} else {
440 			/* FIXME: fall back to PIO? */
441 		}
442 	}
443 	dma_async_issue_pending(mcspi_dma->dma_tx);
444 	omap2_mcspi_set_dma_req(spi, 0, 1);
445 
446 }
447 
448 static unsigned
449 omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
450 				struct dma_slave_config cfg,
451 				unsigned es)
452 {
453 	struct omap2_mcspi	*mcspi;
454 	struct omap2_mcspi_dma  *mcspi_dma;
455 	unsigned int		count, dma_count;
456 	u32			l;
457 	int			elements = 0;
458 	int			word_len, element_count;
459 	struct omap2_mcspi_cs	*cs = spi->controller_state;
460 	mcspi = spi_master_get_devdata(spi->master);
461 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
462 	count = xfer->len;
463 	dma_count = xfer->len;
464 
465 	if (mcspi->fifo_depth == 0)
466 		dma_count -= es;
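	/*
	 * Without a FIFO the final word (and the penultimate one in TURBO
	 * mode, handled below) is not covered by the DMA descriptor; it is
	 * drained from RX0 by PIO once the DMA completion fires.
	 */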
467 
468 	word_len = cs->word_len;
469 	l = mcspi_cached_chconf0(spi);
470 
471 	if (word_len <= 8)
472 		element_count = count;
473 	else if (word_len <= 16)
474 		element_count = count >> 1;
475 	else /* word_len <= 32 */
476 		element_count = count >> 2;
477 
478 	if (mcspi_dma->dma_rx) {
479 		struct dma_async_tx_descriptor *tx;
480 		struct scatterlist sg;
481 
482 		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
483 
484 		if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
485 			dma_count -= es;
486 
487 		sg_init_table(&sg, 1);
488 		sg_dma_address(&sg) = xfer->rx_dma;
489 		sg_dma_len(&sg) = dma_count;
490 
491 		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
492 				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
493 				DMA_CTRL_ACK);
494 		if (tx) {
495 			tx->callback = omap2_mcspi_rx_callback;
496 			tx->callback_param = spi;
497 			dmaengine_submit(tx);
498 		} else {
499 				/* FIXME: fall back to PIO? */
500 		}
501 	}
502 
503 	dma_async_issue_pending(mcspi_dma->dma_rx);
504 	omap2_mcspi_set_dma_req(spi, 1, 1);
505 
506 	wait_for_completion(&mcspi_dma->dma_rx_completion);
507 	dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
508 			 DMA_FROM_DEVICE);
509 
510 	if (mcspi->fifo_depth > 0)
511 		return count;
512 
513 	omap2_mcspi_set_enable(spi, 0);
514 
515 	elements = element_count - 1;
516 
517 	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
518 		elements--;
519 
520 		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
521 				   & OMAP2_MCSPI_CHSTAT_RXS)) {
522 			u32 w;
523 
524 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
525 			if (word_len <= 8)
526 				((u8 *)xfer->rx_buf)[elements++] = w;
527 			else if (word_len <= 16)
528 				((u16 *)xfer->rx_buf)[elements++] = w;
529 			else /* word_len <= 32 */
530 				((u32 *)xfer->rx_buf)[elements++] = w;
531 		} else {
532 			int bytes_per_word = mcspi_bytes_per_word(word_len);
533 			dev_err(&spi->dev, "DMA RX penultimate word empty\n");
534 			count -= (bytes_per_word << 1);
535 			omap2_mcspi_set_enable(spi, 1);
536 			return count;
537 		}
538 	}
539 	if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
540 				& OMAP2_MCSPI_CHSTAT_RXS)) {
541 		u32 w;
542 
543 		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
544 		if (word_len <= 8)
545 			((u8 *)xfer->rx_buf)[elements] = w;
546 		else if (word_len <= 16)
547 			((u16 *)xfer->rx_buf)[elements] = w;
548 		else /* word_len <= 32 */
549 			((u32 *)xfer->rx_buf)[elements] = w;
550 	} else {
551 		dev_err(&spi->dev, "DMA RX last word empty\n");
552 		count -= mcspi_bytes_per_word(word_len);
553 	}
554 	omap2_mcspi_set_enable(spi, 1);
555 	return count;
556 }
557 
558 static unsigned
559 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
560 {
561 	struct omap2_mcspi	*mcspi;
562 	struct omap2_mcspi_cs	*cs = spi->controller_state;
563 	struct omap2_mcspi_dma  *mcspi_dma;
564 	unsigned int		count;
565 	u32			l;
566 	u8			*rx;
567 	const u8		*tx;
568 	struct dma_slave_config	cfg;
569 	enum dma_slave_buswidth width;
570 	unsigned es;
571 	u32			burst;
572 	void __iomem		*chstat_reg;
573 	void __iomem            *irqstat_reg;
574 	int			wait_res;
575 
576 	mcspi = spi_master_get_devdata(spi->master);
577 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
578 	l = mcspi_cached_chconf0(spi);
579 
580 
581 	if (cs->word_len <= 8) {
582 		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
583 		es = 1;
584 	} else if (cs->word_len <= 16) {
585 		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
586 		es = 2;
587 	} else {
588 		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
589 		es = 4;
590 	}
591 
592 	count = xfer->len;
593 	burst = 1;
594 
595 	if (mcspi->fifo_depth > 0) {
596 		if (count > mcspi->fifo_depth)
597 			burst = mcspi->fifo_depth / es;
598 		else
599 			burst = count / es;
600 	}
601 
602 	memset(&cfg, 0, sizeof(cfg));
603 	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
604 	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
605 	cfg.src_addr_width = width;
606 	cfg.dst_addr_width = width;
607 	cfg.src_maxburst = burst;
608 	cfg.dst_maxburst = burst;
609 
610 	rx = xfer->rx_buf;
611 	tx = xfer->tx_buf;
612 
613 	if (tx != NULL)
614 		omap2_mcspi_tx_dma(spi, xfer, cfg);
615 
616 	if (rx != NULL)
617 		count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
618 
619 	if (tx != NULL) {
620 		wait_for_completion(&mcspi_dma->dma_tx_completion);
621 		dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
622 				 DMA_TO_DEVICE);
623 
624 		if (mcspi->fifo_depth > 0) {
625 			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
626 
627 			if (mcspi_wait_for_reg_bit(irqstat_reg,
628 						OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
629 				dev_err(&spi->dev, "EOW timed out\n");
630 
631 			mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
632 					OMAP2_MCSPI_IRQSTATUS_EOW);
633 		}
634 
635 		/* for TX_ONLY mode, be sure all words have shifted out */
636 		if (rx == NULL) {
637 			chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
638 			if (mcspi->fifo_depth > 0) {
639 				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
640 						OMAP2_MCSPI_CHSTAT_TXFFE);
641 				if (wait_res < 0)
642 					dev_err(&spi->dev, "TXFFE timed out\n");
643 			} else {
644 				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
645 						OMAP2_MCSPI_CHSTAT_TXS);
646 				if (wait_res < 0)
647 					dev_err(&spi->dev, "TXS timed out\n");
648 			}
649 			if (wait_res >= 0 &&
650 				(mcspi_wait_for_reg_bit(chstat_reg,
651 					OMAP2_MCSPI_CHSTAT_EOT) < 0))
652 				dev_err(&spi->dev, "EOT timed out\n");
653 		}
654 	}
655 	return count;
656 }
657 
658 static unsigned
659 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
660 {
661 	struct omap2_mcspi	*mcspi;
662 	struct omap2_mcspi_cs	*cs = spi->controller_state;
663 	unsigned int		count, c;
664 	u32			l;
665 	void __iomem		*base = cs->base;
666 	void __iomem		*tx_reg;
667 	void __iomem		*rx_reg;
668 	void __iomem		*chstat_reg;
669 	int			word_len;
670 
671 	mcspi = spi_master_get_devdata(spi->master);
672 	count = xfer->len;
673 	c = count;
674 	word_len = cs->word_len;
675 
676 	l = mcspi_cached_chconf0(spi);
677 
678 	/* We store the pre-calculated register addresses on stack to speed
679 	 * up the transfer loop. */
680 	tx_reg		= base + OMAP2_MCSPI_TX0;
681 	rx_reg		= base + OMAP2_MCSPI_RX0;
682 	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;
683 
684 	if (c < (word_len>>3))
685 		return 0;
686 
687 	if (word_len <= 8) {
688 		u8		*rx;
689 		const u8	*tx;
690 
691 		rx = xfer->rx_buf;
692 		tx = xfer->tx_buf;
693 
694 		do {
695 			c -= 1;
696 			if (tx != NULL) {
697 				if (mcspi_wait_for_reg_bit(chstat_reg,
698 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
699 					dev_err(&spi->dev, "TXS timed out\n");
700 					goto out;
701 				}
702 				dev_vdbg(&spi->dev, "write-%d %02x\n",
703 						word_len, *tx);
704 				writel_relaxed(*tx++, tx_reg);
705 			}
706 			if (rx != NULL) {
707 				if (mcspi_wait_for_reg_bit(chstat_reg,
708 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
709 					dev_err(&spi->dev, "RXS timed out\n");
710 					goto out;
711 				}
712 
713 				if (c == 1 && tx == NULL &&
714 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
715 					omap2_mcspi_set_enable(spi, 0);
716 					*rx++ = readl_relaxed(rx_reg);
717 					dev_vdbg(&spi->dev, "read-%d %02x\n",
718 						    word_len, *(rx - 1));
719 					if (mcspi_wait_for_reg_bit(chstat_reg,
720 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
721 						dev_err(&spi->dev,
722 							"RXS timed out\n");
723 						goto out;
724 					}
725 					c = 0;
726 				} else if (c == 0 && tx == NULL) {
727 					omap2_mcspi_set_enable(spi, 0);
728 				}
729 
730 				*rx++ = readl_relaxed(rx_reg);
731 				dev_vdbg(&spi->dev, "read-%d %02x\n",
732 						word_len, *(rx - 1));
733 			}
734 		} while (c);
735 	} else if (word_len <= 16) {
736 		u16		*rx;
737 		const u16	*tx;
738 
739 		rx = xfer->rx_buf;
740 		tx = xfer->tx_buf;
741 		do {
742 			c -= 2;
743 			if (tx != NULL) {
744 				if (mcspi_wait_for_reg_bit(chstat_reg,
745 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
746 					dev_err(&spi->dev, "TXS timed out\n");
747 					goto out;
748 				}
749 				dev_vdbg(&spi->dev, "write-%d %04x\n",
750 						word_len, *tx);
751 				writel_relaxed(*tx++, tx_reg);
752 			}
753 			if (rx != NULL) {
754 				if (mcspi_wait_for_reg_bit(chstat_reg,
755 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
756 					dev_err(&spi->dev, "RXS timed out\n");
757 					goto out;
758 				}
759 
760 				if (c == 2 && tx == NULL &&
761 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
762 					omap2_mcspi_set_enable(spi, 0);
763 					*rx++ = readl_relaxed(rx_reg);
764 					dev_vdbg(&spi->dev, "read-%d %04x\n",
765 						    word_len, *(rx - 1));
766 					if (mcspi_wait_for_reg_bit(chstat_reg,
767 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
768 						dev_err(&spi->dev,
769 							"RXS timed out\n");
770 						goto out;
771 					}
772 					c = 0;
773 				} else if (c == 0 && tx == NULL) {
774 					omap2_mcspi_set_enable(spi, 0);
775 				}
776 
777 				*rx++ = readl_relaxed(rx_reg);
778 				dev_vdbg(&spi->dev, "read-%d %04x\n",
779 						word_len, *(rx - 1));
780 			}
781 		} while (c >= 2);
782 	} else if (word_len <= 32) {
783 		u32		*rx;
784 		const u32	*tx;
785 
786 		rx = xfer->rx_buf;
787 		tx = xfer->tx_buf;
788 		do {
789 			c -= 4;
790 			if (tx != NULL) {
791 				if (mcspi_wait_for_reg_bit(chstat_reg,
792 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
793 					dev_err(&spi->dev, "TXS timed out\n");
794 					goto out;
795 				}
796 				dev_vdbg(&spi->dev, "write-%d %08x\n",
797 						word_len, *tx);
798 				writel_relaxed(*tx++, tx_reg);
799 			}
800 			if (rx != NULL) {
801 				if (mcspi_wait_for_reg_bit(chstat_reg,
802 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
803 					dev_err(&spi->dev, "RXS timed out\n");
804 					goto out;
805 				}
806 
807 				if (c == 4 && tx == NULL &&
808 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
809 					omap2_mcspi_set_enable(spi, 0);
810 					*rx++ = readl_relaxed(rx_reg);
811 					dev_vdbg(&spi->dev, "read-%d %08x\n",
812 						    word_len, *(rx - 1));
813 					if (mcspi_wait_for_reg_bit(chstat_reg,
814 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
815 						dev_err(&spi->dev,
816 							"RXS timed out\n");
817 						goto out;
818 					}
819 					c = 0;
820 				} else if (c == 0 && tx == NULL) {
821 					omap2_mcspi_set_enable(spi, 0);
822 				}
823 
824 				*rx++ = readl_relaxed(rx_reg);
825 				dev_vdbg(&spi->dev, "read-%d %08x\n",
826 						word_len, *(rx - 1));
827 			}
828 		} while (c >= 4);
829 	}
830 
831 	/* for TX_ONLY mode, be sure all words have shifted out */
832 	if (xfer->rx_buf == NULL) {
833 		if (mcspi_wait_for_reg_bit(chstat_reg,
834 				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
835 			dev_err(&spi->dev, "TXS timed out\n");
836 		} else if (mcspi_wait_for_reg_bit(chstat_reg,
837 				OMAP2_MCSPI_CHSTAT_EOT) < 0)
838 			dev_err(&spi->dev, "EOT timed out\n");
839 
840 		/* Disable the channel to purge RX data received during a TX_ONLY
841 		 * transfer, otherwise that stale RX data would corrupt the
842 		 * immediately following RX_ONLY transfer.
843 		 */
844 		omap2_mcspi_set_enable(spi, 0);
845 	}
846 out:
847 	omap2_mcspi_set_enable(spi, 1);
848 	return count - c;
849 }
850 
851 static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
852 {
853 	u32 div;
854 
855 	for (div = 0; div < 15; div++)
856 		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
857 			return div;
858 
859 	return 15;
860 }
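/*
 * Worked example (illustrative values): a requested speed of 3 MHz makes the
 * loop above return div = 4, so CHCONF.CLKD = 4 and the bus clock becomes
 * 48 MHz >> 4 = 3 MHz; requests of 48 MHz or more return div = 0.
 */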
861 
862 /* called only when no transfer is active on this device */
863 static int omap2_mcspi_setup_transfer(struct spi_device *spi,
864 		struct spi_transfer *t)
865 {
866 	struct omap2_mcspi_cs *cs = spi->controller_state;
867 	struct omap2_mcspi *mcspi;
868 	struct spi_master *spi_cntrl;
869 	u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
870 	u8 word_len = spi->bits_per_word;
871 	u32 speed_hz = spi->max_speed_hz;
872 
873 	mcspi = spi_master_get_devdata(spi->master);
874 	spi_cntrl = mcspi->master;
875 
876 	if (t != NULL && t->bits_per_word)
877 		word_len = t->bits_per_word;
878 
879 	cs->word_len = word_len;
880 
881 	if (t && t->speed_hz)
882 		speed_hz = t->speed_hz;
883 
884 	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
885 	if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
886 		clkd = omap2_mcspi_calc_divisor(speed_hz);
887 		speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
888 		clkg = 0;
889 	} else {
890 		div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
891 		speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
892 		clkd = (div - 1) & 0xf;
893 		extclk = (div - 1) >> 4;
894 		clkg = OMAP2_MCSPI_CHCONF_CLKG;
895 	}
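	/*
	 * Example (illustrative numbers): a request for 10 MHz takes the CLKG
	 * branch above: div = DIV_ROUND_UP(48 MHz, 10 MHz) = 5, so the actual
	 * rate is 48 MHz / 5 = 9.6 MHz with clkd = 4 and extclk = 0.
	 */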
896 
897 	l = mcspi_cached_chconf0(spi);
898 
899 	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
900 	 * REVISIT: this controller could support SPI_3WIRE mode.
901 	 */
902 	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
903 		l &= ~OMAP2_MCSPI_CHCONF_IS;
904 		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
905 		l |= OMAP2_MCSPI_CHCONF_DPE0;
906 	} else {
907 		l |= OMAP2_MCSPI_CHCONF_IS;
908 		l |= OMAP2_MCSPI_CHCONF_DPE1;
909 		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
910 	}
911 
912 	/* wordlength */
913 	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
914 	l |= (word_len - 1) << 7;
915 
916 	/* set chipselect polarity; manage with FORCE */
917 	if (!(spi->mode & SPI_CS_HIGH))
918 		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
919 	else
920 		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
921 
922 	/* set clock divisor */
923 	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
924 	l |= clkd << 2;
925 
926 	/* set clock granularity */
927 	l &= ~OMAP2_MCSPI_CHCONF_CLKG;
928 	l |= clkg;
929 	if (clkg) {
930 		cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
931 		cs->chctrl0 |= extclk << 8;
932 		mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
933 	}
934 
935 	/* set SPI mode 0..3 */
936 	if (spi->mode & SPI_CPOL)
937 		l |= OMAP2_MCSPI_CHCONF_POL;
938 	else
939 		l &= ~OMAP2_MCSPI_CHCONF_POL;
940 	if (spi->mode & SPI_CPHA)
941 		l |= OMAP2_MCSPI_CHCONF_PHA;
942 	else
943 		l &= ~OMAP2_MCSPI_CHCONF_PHA;
944 
945 	mcspi_write_chconf0(spi, l);
946 
947 	cs->mode = spi->mode;
948 
949 	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
950 			speed_hz,
951 			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
952 			(spi->mode & SPI_CPOL) ? "inverted" : "normal");
953 
954 	return 0;
955 }
956 
957 /*
958  * Note that we currently allow DMA only if we get a channel
959  * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
960  */
961 static int omap2_mcspi_request_dma(struct spi_device *spi)
962 {
963 	struct spi_master	*master = spi->master;
964 	struct omap2_mcspi	*mcspi;
965 	struct omap2_mcspi_dma	*mcspi_dma;
966 	dma_cap_mask_t mask;
967 	unsigned sig;
968 
969 	mcspi = spi_master_get_devdata(master);
970 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
971 
972 	init_completion(&mcspi_dma->dma_rx_completion);
973 	init_completion(&mcspi_dma->dma_tx_completion);
974 
975 	dma_cap_zero(mask);
976 	dma_cap_set(DMA_SLAVE, mask);
977 	sig = mcspi_dma->dma_rx_sync_dev;
978 
979 	mcspi_dma->dma_rx =
980 		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
981 						 &sig, &master->dev,
982 						 mcspi_dma->dma_rx_ch_name);
983 	if (!mcspi_dma->dma_rx)
984 		goto no_dma;
985 
986 	sig = mcspi_dma->dma_tx_sync_dev;
987 	mcspi_dma->dma_tx =
988 		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
989 						 &sig, &master->dev,
990 						 mcspi_dma->dma_tx_ch_name);
991 
992 	if (!mcspi_dma->dma_tx) {
993 		dma_release_channel(mcspi_dma->dma_rx);
994 		mcspi_dma->dma_rx = NULL;
995 		goto no_dma;
996 	}
997 
998 	return 0;
999 
1000 no_dma:
1001 	dev_warn(&spi->dev, "not using DMA for McSPI\n");
1002 	return -EAGAIN;
1003 }
1004 
1005 static int omap2_mcspi_setup(struct spi_device *spi)
1006 {
1007 	int			ret;
1008 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(spi->master);
1009 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1010 	struct omap2_mcspi_dma	*mcspi_dma;
1011 	struct omap2_mcspi_cs	*cs = spi->controller_state;
1012 
1013 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
1014 
1015 	if (!cs) {
1016 		cs = kzalloc(sizeof *cs, GFP_KERNEL);
1017 		if (!cs)
1018 			return -ENOMEM;
1019 		cs->base = mcspi->base + spi->chip_select * 0x14;
1020 		cs->phys = mcspi->phys + spi->chip_select * 0x14;
1021 		cs->mode = 0;
1022 		cs->chconf0 = 0;
1023 		cs->chctrl0 = 0;
1024 		spi->controller_state = cs;
1025 		/* Link this to context save list */
1026 		list_add_tail(&cs->node, &ctx->cs);
1027 
1028 		if (gpio_is_valid(spi->cs_gpio)) {
1029 			ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
1030 			if (ret) {
1031 				dev_err(&spi->dev, "failed to request gpio\n");
1032 				return ret;
1033 			}
1034 			gpio_direction_output(spi->cs_gpio,
1035 					 !(spi->mode & SPI_CS_HIGH));
1036 		}
1037 	}
1038 
1039 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
1040 		ret = omap2_mcspi_request_dma(spi);
1041 		if (ret < 0 && ret != -EAGAIN)
1042 			return ret;
1043 	}
1044 
1045 	ret = pm_runtime_get_sync(mcspi->dev);
1046 	if (ret < 0)
1047 		return ret;
1048 
1049 	ret = omap2_mcspi_setup_transfer(spi, NULL);
1050 	pm_runtime_mark_last_busy(mcspi->dev);
1051 	pm_runtime_put_autosuspend(mcspi->dev);
1052 
1053 	return ret;
1054 }
1055 
1056 static void omap2_mcspi_cleanup(struct spi_device *spi)
1057 {
1058 	struct omap2_mcspi	*mcspi;
1059 	struct omap2_mcspi_dma	*mcspi_dma;
1060 	struct omap2_mcspi_cs	*cs;
1061 
1062 	mcspi = spi_master_get_devdata(spi->master);
1063 
1064 	if (spi->controller_state) {
1065 		/* Unlink controller state from context save list */
1066 		cs = spi->controller_state;
1067 		list_del(&cs->node);
1068 
1069 		kfree(cs);
1070 	}
1071 
1072 	if (spi->chip_select < spi->master->num_chipselect) {
1073 		mcspi_dma = &mcspi->dma_channels[spi->chip_select];
1074 
1075 		if (mcspi_dma->dma_rx) {
1076 			dma_release_channel(mcspi_dma->dma_rx);
1077 			mcspi_dma->dma_rx = NULL;
1078 		}
1079 		if (mcspi_dma->dma_tx) {
1080 			dma_release_channel(mcspi_dma->dma_tx);
1081 			mcspi_dma->dma_tx = NULL;
1082 		}
1083 	}
1084 
1085 	if (gpio_is_valid(spi->cs_gpio))
1086 		gpio_free(spi->cs_gpio);
1087 }
1088 
1089 static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
1090 		struct spi_device *spi, struct spi_transfer *t)
1091 {
1092 
1093 	/* We only enable one channel at a time -- the one whose message is
1094 	 * at the head of the queue -- although this controller would gladly
1095 	 * arbitrate among multiple channels.  This corresponds to "single
1096 	 * channel" master mode.  As a side effect, we need to manage the
1097 	 * chipselect with the FORCE bit ... CS != channel enable.
1098 	 */
1099 
1100 	struct spi_master		*master;
1101 	struct omap2_mcspi_dma		*mcspi_dma;
1102 	struct omap2_mcspi_cs		*cs;
1103 	struct omap2_mcspi_device_config *cd;
1104 	int				par_override = 0;
1105 	int				status = 0;
1106 	u32				chconf;
1107 
1108 	master = spi->master;
1109 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
1110 	cs = spi->controller_state;
1111 	cd = spi->controller_data;
1112 
1113 	/*
1114 	 * The slave driver could have changed spi->mode in which case
1115 	 * it will be different from cs->mode (the current hardware setup).
1116 	 * If so, set par_override (even though it's not a parity issue) so
1117 	 * omap2_mcspi_setup_transfer will be called to configure the hardware
1118 	 * with the correct mode on the first iteration of the loop below.
1119 	 */
1120 	if (spi->mode != cs->mode)
1121 		par_override = 1;
1122 
1123 	omap2_mcspi_set_enable(spi, 0);
1124 
1125 	if (gpio_is_valid(spi->cs_gpio))
1126 		omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);
1127 
1128 	if (par_override ||
1129 	    (t->speed_hz != spi->max_speed_hz) ||
1130 	    (t->bits_per_word != spi->bits_per_word)) {
1131 		par_override = 1;
1132 		status = omap2_mcspi_setup_transfer(spi, t);
1133 		if (status < 0)
1134 			goto out;
1135 		if (t->speed_hz == spi->max_speed_hz &&
1136 		    t->bits_per_word == spi->bits_per_word)
1137 			par_override = 0;
1138 	}
1139 	if (cd && cd->cs_per_word) {
1140 		chconf = mcspi->ctx.modulctrl;
1141 		chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
1142 		mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1143 		mcspi->ctx.modulctrl =
1144 			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1145 	}
1146 
1147 	chconf = mcspi_cached_chconf0(spi);
1148 	chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
1149 	chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
1150 
1151 	if (t->tx_buf == NULL)
1152 		chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
1153 	else if (t->rx_buf == NULL)
1154 		chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
1155 
1156 	if (cd && cd->turbo_mode && t->tx_buf == NULL) {
1157 		/* Turbo mode is for more than one word */
1158 		if (t->len > ((cs->word_len + 7) >> 3))
1159 			chconf |= OMAP2_MCSPI_CHCONF_TURBO;
1160 	}
1161 
1162 	mcspi_write_chconf0(spi, chconf);
1163 
1164 	if (t->len) {
1165 		unsigned	count;
1166 
1167 		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1168 		    (t->len >= DMA_MIN_BYTES))
1169 			omap2_mcspi_set_fifo(spi, t, 1);
1170 
1171 		omap2_mcspi_set_enable(spi, 1);
1172 
1173 		/* RX_ONLY mode needs dummy data in TX reg */
1174 		if (t->tx_buf == NULL)
1175 			writel_relaxed(0, cs->base
1176 					+ OMAP2_MCSPI_TX0);
1177 
1178 		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1179 		    (t->len >= DMA_MIN_BYTES))
1180 			count = omap2_mcspi_txrx_dma(spi, t);
1181 		else
1182 			count = omap2_mcspi_txrx_pio(spi, t);
1183 
1184 		if (count != t->len) {
1185 			status = -EIO;
1186 			goto out;
1187 		}
1188 	}
1189 
1190 	omap2_mcspi_set_enable(spi, 0);
1191 
1192 	if (mcspi->fifo_depth > 0)
1193 		omap2_mcspi_set_fifo(spi, t, 0);
1194 
1195 out:
1196 	/* Restore defaults if they were overridden */
1197 	if (par_override) {
1198 		par_override = 0;
1199 		status = omap2_mcspi_setup_transfer(spi, NULL);
1200 	}
1201 
1202 	if (cd && cd->cs_per_word) {
1203 		chconf = mcspi->ctx.modulctrl;
1204 		chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
1205 		mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1206 		mcspi->ctx.modulctrl =
1207 			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1208 	}
1209 
1210 	omap2_mcspi_set_enable(spi, 0);
1211 
1212 	if (gpio_is_valid(spi->cs_gpio))
1213 		omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
1214 
1215 	if (mcspi->fifo_depth > 0 && t)
1216 		omap2_mcspi_set_fifo(spi, t, 0);
1217 
1218 	return status;
1219 }
1220 
1221 static int omap2_mcspi_prepare_message(struct spi_master *master,
1222 				       struct spi_message *msg)
1223 {
1224 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
1225 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1226 	struct omap2_mcspi_cs	*cs;
1227 
1228 	/* Only a single channel can have the FORCE bit enabled
1229 	 * in its chconf0 register.
1230 	 * Scan all channels and disable them except the current one.
1231 	 * A stale FORCE bit can remain from a previous transfer that had cs_change enabled
1232 	 */
1233 	list_for_each_entry(cs, &ctx->cs, node) {
1234 		if (msg->spi->controller_state == cs)
1235 			continue;
1236 
1237 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) {
1238 			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
1239 			writel_relaxed(cs->chconf0,
1240 					cs->base + OMAP2_MCSPI_CHCONF0);
1241 			readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0);
1242 		}
1243 	}
1244 
1245 	return 0;
1246 }
1247 
1248 static int omap2_mcspi_transfer_one(struct spi_master *master,
1249 		struct spi_device *spi, struct spi_transfer *t)
1250 {
1251 	struct omap2_mcspi	*mcspi;
1252 	struct omap2_mcspi_dma	*mcspi_dma;
1253 	const void	*tx_buf = t->tx_buf;
1254 	void		*rx_buf = t->rx_buf;
1255 	unsigned	len = t->len;
1256 
1257 	mcspi = spi_master_get_devdata(master);
1258 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
1259 
1260 	if ((len && !(rx_buf || tx_buf))) {
1261 		dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1262 				t->speed_hz,
1263 				len,
1264 				tx_buf ? "tx" : "",
1265 				rx_buf ? "rx" : "",
1266 				t->bits_per_word);
1267 		return -EINVAL;
1268 	}
1269 
1270 	if (len < DMA_MIN_BYTES)
1271 		goto skip_dma_map;
1272 
1273 	if (mcspi_dma->dma_tx && tx_buf != NULL) {
1274 		t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
1275 				len, DMA_TO_DEVICE);
1276 		if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1277 			dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1278 					'T', len);
1279 			return -EINVAL;
1280 		}
1281 	}
1282 	if (mcspi_dma->dma_rx && rx_buf != NULL) {
1283 		t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
1284 				DMA_FROM_DEVICE);
1285 		if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
1286 			dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1287 					'R', len);
1288 			if (tx_buf != NULL)
1289 				dma_unmap_single(mcspi->dev, t->tx_dma,
1290 						len, DMA_TO_DEVICE);
1291 			return -EINVAL;
1292 		}
1293 	}
1294 
1295 skip_dma_map:
1296 	return omap2_mcspi_work_one(mcspi, spi, t);
1297 }
1298 
1299 static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
1300 {
1301 	struct spi_master	*master = mcspi->master;
1302 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1303 	int			ret = 0;
1304 
1305 	ret = pm_runtime_get_sync(mcspi->dev);
1306 	if (ret < 0)
1307 		return ret;
1308 
1309 	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
1310 			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1311 	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1312 
1313 	omap2_mcspi_set_master_mode(master);
1314 	pm_runtime_mark_last_busy(mcspi->dev);
1315 	pm_runtime_put_autosuspend(mcspi->dev);
1316 	return 0;
1317 }
1318 
1319 static int omap_mcspi_runtime_resume(struct device *dev)
1320 {
1321 	struct omap2_mcspi	*mcspi;
1322 	struct spi_master	*master;
1323 
1324 	master = dev_get_drvdata(dev);
1325 	mcspi = spi_master_get_devdata(master);
1326 	omap2_mcspi_restore_ctx(mcspi);
1327 
1328 	return 0;
1329 }
1330 
1331 static struct omap2_mcspi_platform_config omap2_pdata = {
1332 	.regs_offset = 0,
1333 };
1334 
1335 static struct omap2_mcspi_platform_config omap4_pdata = {
1336 	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
1337 };
1338 
1339 static const struct of_device_id omap_mcspi_of_match[] = {
1340 	{
1341 		.compatible = "ti,omap2-mcspi",
1342 		.data = &omap2_pdata,
1343 	},
1344 	{
1345 		.compatible = "ti,omap4-mcspi",
1346 		.data = &omap4_pdata,
1347 	},
1348 	{ },
1349 };
1350 MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
1351 
1352 static int omap2_mcspi_probe(struct platform_device *pdev)
1353 {
1354 	struct spi_master	*master;
1355 	const struct omap2_mcspi_platform_config *pdata;
1356 	struct omap2_mcspi	*mcspi;
1357 	struct resource		*r;
1358 	int			status = 0, i;
1359 	u32			regs_offset = 0;
1360 	static int		bus_num = 1;
1361 	struct device_node	*node = pdev->dev.of_node;
1362 	const struct of_device_id *match;
1363 
1364 	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
1365 	if (master == NULL) {
1366 		dev_dbg(&pdev->dev, "master allocation failed\n");
1367 		return -ENOMEM;
1368 	}
1369 
1370 	/* the spi->mode bits understood by this driver: */
1371 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1372 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1373 	master->setup = omap2_mcspi_setup;
1374 	master->auto_runtime_pm = true;
1375 	master->prepare_message = omap2_mcspi_prepare_message;
1376 	master->transfer_one = omap2_mcspi_transfer_one;
1377 	master->set_cs = omap2_mcspi_set_cs;
1378 	master->cleanup = omap2_mcspi_cleanup;
1379 	master->dev.of_node = node;
1380 	master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
1381 	master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
1382 
1383 	platform_set_drvdata(pdev, master);
1384 
1385 	mcspi = spi_master_get_devdata(master);
1386 	mcspi->master = master;
1387 
1388 	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
1389 	if (match) {
1390 		u32 num_cs = 1; /* default number of chip selects */
1391 		pdata = match->data;
1392 
1393 		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
1394 		master->num_chipselect = num_cs;
1395 		master->bus_num = bus_num++;
1396 		if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
1397 			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
1398 	} else {
1399 		pdata = dev_get_platdata(&pdev->dev);
1400 		master->num_chipselect = pdata->num_cs;
1401 		if (pdev->id != -1)
1402 			master->bus_num = pdev->id;
1403 		mcspi->pin_dir = pdata->pin_dir;
1404 	}
1405 	regs_offset = pdata->regs_offset;
1406 
1407 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1408 	if (r == NULL) {
1409 		status = -ENODEV;
1410 		goto free_master;
1411 	}
1412 
1413 	r->start += regs_offset;
1414 	r->end += regs_offset;
1415 	mcspi->phys = r->start;
1416 
1417 	mcspi->base = devm_ioremap_resource(&pdev->dev, r);
1418 	if (IS_ERR(mcspi->base)) {
1419 		status = PTR_ERR(mcspi->base);
1420 		goto free_master;
1421 	}
1422 
1423 	mcspi->dev = &pdev->dev;
1424 
1425 	INIT_LIST_HEAD(&mcspi->ctx.cs);
1426 
1427 	mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
1428 					   sizeof(struct omap2_mcspi_dma),
1429 					   GFP_KERNEL);
1430 	if (mcspi->dma_channels == NULL) {
1431 		status = -ENOMEM;
1432 		goto free_master;
1433 	}
1434 
1435 	for (i = 0; i < master->num_chipselect; i++) {
1436 		char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
1437 		char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
1438 		struct resource *dma_res;
1439 
1440 		sprintf(dma_rx_ch_name, "rx%d", i);
1441 		if (!pdev->dev.of_node) {
1442 			dma_res =
1443 				platform_get_resource_byname(pdev,
1444 							     IORESOURCE_DMA,
1445 							     dma_rx_ch_name);
1446 			if (!dma_res) {
1447 				dev_dbg(&pdev->dev,
1448 					"cannot get DMA RX channel\n");
1449 				status = -ENODEV;
1450 				break;
1451 			}
1452 
1453 			mcspi->dma_channels[i].dma_rx_sync_dev =
1454 				dma_res->start;
1455 		}
1456 		sprintf(dma_tx_ch_name, "tx%d", i);
1457 		if (!pdev->dev.of_node) {
1458 			dma_res =
1459 				platform_get_resource_byname(pdev,
1460 							     IORESOURCE_DMA,
1461 							     dma_tx_ch_name);
1462 			if (!dma_res) {
1463 				dev_dbg(&pdev->dev,
1464 					"cannot get DMA TX channel\n");
1465 				status = -ENODEV;
1466 				break;
1467 			}
1468 
1469 			mcspi->dma_channels[i].dma_tx_sync_dev =
1470 				dma_res->start;
1471 		}
1472 	}
1473 
1474 	if (status < 0)
1475 		goto free_master;
1476 
1477 	pm_runtime_use_autosuspend(&pdev->dev);
1478 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
1479 	pm_runtime_enable(&pdev->dev);
1480 
1481 	status = omap2_mcspi_master_setup(mcspi);
1482 	if (status < 0)
1483 		goto disable_pm;
1484 
1485 	status = devm_spi_register_master(&pdev->dev, master);
1486 	if (status < 0)
1487 		goto disable_pm;
1488 
1489 	return status;
1490 
1491 disable_pm:
1492 	pm_runtime_disable(&pdev->dev);
1493 free_master:
1494 	spi_master_put(master);
1495 	return status;
1496 }
1497 
1498 static int omap2_mcspi_remove(struct platform_device *pdev)
1499 {
1500 	struct spi_master *master = platform_get_drvdata(pdev);
1501 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1502 
1503 	pm_runtime_put_sync(mcspi->dev);
1504 	pm_runtime_disable(&pdev->dev);
1505 
1506 	return 0;
1507 }
1508 
1509 /* work with hotplug and coldplug */
1510 MODULE_ALIAS("platform:omap2_mcspi");
1511 
1512 #ifdef	CONFIG_SUSPEND
1513 /*
1514  * When the McSPI wakes up from off-mode, CS is in the active state. If it was
1515  * in the inactive state when the driver was suspended, force it back to the
1516  * inactive state at wake up.
1517  */
1518 static int omap2_mcspi_resume(struct device *dev)
1519 {
1520 	struct spi_master	*master = dev_get_drvdata(dev);
1521 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
1522 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1523 	struct omap2_mcspi_cs	*cs;
1524 
1525 	pm_runtime_get_sync(mcspi->dev);
1526 	list_for_each_entry(cs, &ctx->cs, node) {
1527 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
1528 			/*
1529 			 * We need to toggle the CS state for the OMAP to take
1530 			 * this change into account.
1531 			 */
1532 			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
1533 			writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1534 			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
1535 			writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1536 		}
1537 	}
1538 	pm_runtime_mark_last_busy(mcspi->dev);
1539 	pm_runtime_put_autosuspend(mcspi->dev);
1540 	return 0;
1541 }
1542 #else
1543 #define	omap2_mcspi_resume	NULL
1544 #endif
1545 
1546 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1547 	.resume = omap2_mcspi_resume,
1548 	.runtime_resume	= omap_mcspi_runtime_resume,
1549 };
1550 
1551 static struct platform_driver omap2_mcspi_driver = {
1552 	.driver = {
1553 		.name =		"omap2_mcspi",
1554 		.pm =		&omap2_mcspi_pm_ops,
1555 		.of_match_table = omap_mcspi_of_match,
1556 	},
1557 	.probe =	omap2_mcspi_probe,
1558 	.remove =	omap2_mcspi_remove,
1559 };
1560 
1561 module_platform_driver(omap2_mcspi_driver);
1562 MODULE_LICENSE("GPL");
1563