/*
 * PXA2xx SPI private DMA support.
 *
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

#include <mach/dma.h>
#include "spi-pxa2xx.h"

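/*
 * DMA_INT_MASK covers the end-of-transfer, start and bus-error
 * interrupt status bits of DCSR.  Writing RESET_DMA_CHANNEL to DCSR
 * leaves RUN clear (stopping the channel), selects no-descriptor-fetch
 * mode and acknowledges any pending interrupt status.
 */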
#define DMA_INT_MASK		(DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL	(DCSR_NODESC | DMA_INT_MASK)

bool pxa2xx_spi_dma_is_possible(size_t len)
{
	/* Try to map the dma buffer and do a dma transfer if successful, but
	 * only if the length is non-zero and no longer than MAX_DMA_LEN.
	 *
	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
	 * of PIO instead.  Care is needed in the caller because the
	 * transfer may have been passed with buffers that are already
	 * dma mapped.  A zero-length transfer in PIO mode will not try
	 * to write/read to/from the buffers.
	 *
	 * REVISIT: large transfers are exactly where we most want to be
	 * using DMA.  If this happens much, split those transfers into
	 * multiple DMA segments rather than forcing PIO.
	 */
	return len > 0 && len <= MAX_DMA_LEN;
}

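/*
 * Map the current transfer's tx and rx buffers for streaming DMA.
 * NULL buffers are redirected to a scratch word so the channels always
 * have a valid address to work with.  Returns 1 when both buffers are
 * usable for DMA, 0 when the transfer must fall back to PIO.
 */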
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (!drv_data->cur_chip->enable_dma)
		return 0;

	if (msg->is_dma_mapped)
		return drv_data->rx_dma && drv_data->tx_dma;

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return 0;

	/* Modify setup if rx buffer is null */
	if (drv_data->rx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->rx = drv_data->null_dma_buf;
		drv_data->rx_map_len = 4;
	} else {
		drv_data->rx_map_len = drv_data->len;
	}

	/* Modify setup if tx buffer is null */
	if (drv_data->tx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->tx = drv_data->null_dma_buf;
		drv_data->tx_map_len = 4;
	} else {
		drv_data->tx_map_len = drv_data->len;
	}

	/* Stream map the tx buffer.  Always do DMA_TO_DEVICE first
	 * so we flush the cache *before* invalidating it, in case
	 * the tx and rx buffers overlap.
	 */
	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
					drv_data->tx_map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return 0;

	/* Stream map the rx buffer */
	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
					drv_data->rx_map_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, drv_data->rx_dma)) {
		dma_unmap_single(dev, drv_data->tx_dma,
					drv_data->tx_map_len, DMA_TO_DEVICE);
		return 0;
	}

	return 1;
}

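/*
 * Undo the dma_map_single() calls made by pxa2xx_spi_map_dma_buffers();
 * a no-op when the message owner did its own mapping.
 */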
static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
	struct device *dev;

	if (!drv_data->dma_mapped)
		return;

	if (!drv_data->cur_msg->is_dma_mapped) {
		dev = &drv_data->cur_msg->spi->dev;
		dma_unmap_single(dev, drv_data->rx_dma,
					drv_data->rx_map_len, DMA_FROM_DEVICE);
		dma_unmap_single(dev, drv_data->tx_dma,
					drv_data->tx_map_len, DMA_TO_DEVICE);
	}

	drv_data->dma_mapped = 0;
}

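/*
 * Busy-wait for the SSP to drain its fifos and deassert its busy bit.
 * Returns the remaining loop budget, i.e. 0 means the wait timed out.
 */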
static int wait_ssp_rx_stall(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
		cpu_relax();

	return limit;
}

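/*
 * Busy-wait for a DMA channel to reach its stopped state.  As above,
 * a return value of 0 means the wait timed out.
 */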
static int wait_dma_channel_stop(int channel)
{
	unsigned long limit = loops_per_jiffy << 1;

	while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
		cpu_relax();

	return limit;
}

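/*
 * Stop both DMA channels and quiesce the SSP after an error, then
 * report the failure and kick the tasklet so the current message is
 * finished in ERROR_STATE.
 */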
static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
				      const char *msg)
{
	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	pxa2xx_spi_write(drv_data, SSCR1,
			 pxa2xx_spi_read(drv_data, SSCR1)
			 & ~drv_data->dma_cr1);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	pxa2xx_spi_flush(drv_data);
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

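/*
 * Finish the current transfer: quiesce the SSP and DMA channels, pick
 * up any trailing bytes left in the rx fifo by PIO, account for what
 * was actually transferred and advance the message to its next
 * transfer.
 */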
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;

	/* Clear and disable interrupts on SSP and DMA channels */
	pxa2xx_spi_write(drv_data, SSCR1,
			 pxa2xx_spi_read(drv_data, SSCR1)
			 & ~drv_data->dma_cr1);
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;

	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_handler: dma rx channel stop failed\n");

	if (wait_ssp_rx_stall(drv_data) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_transfer: ssp rx stall failed\n");

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	/* Update the buffer pointer for the amount completed in dma */
	drv_data->rx += drv_data->len -
			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);

	/* Read trailing data from the fifo; it does not matter how many
	 * bytes are in the fifo, just read until the buffer is full or
	 * the fifo is empty, whichever occurs first.
	 */
	drv_data->read(drv_data);

	/* Update the count of what was actually transferred */
	msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

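/*
 * Per-channel DMA interrupt handler, registered via pxa_request_dma().
 * Bus errors abort the message; on PXA25x the tx channel's end
 * interrupt stands in for the missing SSP timeout interrupt.
 */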
void pxa2xx_spi_dma_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;
	u32 irq_status = DCSR(channel) & DMA_INT_MASK;

	if (irq_status & DCSR_BUSERR) {
		if (channel == drv_data->tx_channel)
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on tx channel");
		else
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on rx channel");
		return;
	}

	/* PXA25x SSP has no timeout interrupt, wait for trailing bytes */
	if ((channel == drv_data->tx_channel)
		&& (irq_status & DCSR_ENDINTR)
		&& (drv_data->ssp_type == PXA25x_SSP)) {

		/* Wait for rx to stall */
		if (wait_ssp_rx_stall(drv_data) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_handler: ssp rx stall failed\n");

		/* Finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);
	}
}

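/*
 * SSP interrupt service path while a DMA transfer is in flight.
 * Handles receive fifo overrun and the receiver-timeout interrupt
 * that signals the trailing bytes of the transfer have arrived.
 */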
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;

	irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
	if (irq_status & SSSR_ROR) {
		pxa2xx_spi_dma_error_stop(drv_data,
					  "dma_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Check for a false positive timeout */
	if ((irq_status & SSSR_TINT)
		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
		pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {

		/* Clear and disable the timeout interrupt, do the rest
		 * in dma_transfer_complete.
		 */
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);

		/* Finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

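/*
 * Program both DMA channels for the current transfer: the rx channel
 * drains SSDR into the rx buffer and the tx channel feeds the tx
 * buffer into SSDR, each flow-controlled by the SSP and sized to the
 * transfer's word width and negotiated burst size.
 */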
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
	u32 dma_width;

	switch (drv_data->n_bytes) {
	case 1:
		dma_width = DCMD_WIDTH1;
		break;
	case 2:
		dma_width = DCMD_WIDTH2;
		break;
	default:
		dma_width = DCMD_WIDTH4;
		break;
	}

	/* Setup rx DMA channel */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
	DTADR(drv_data->rx_channel) = drv_data->rx_dma;
	if (drv_data->rx == drv_data->null_dma_buf)
		/* No target address increment */
		DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
						| DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Setup tx DMA channel */
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->tx_channel) = drv_data->tx_dma;
	DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
	if (drv_data->tx == drv_data->null_dma_buf)
		/* No source address increment */
		DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
						| DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Enable the end-of-transfer irq on the tx DMA channel; the
	 * PXA25x SSP has no timeout interrupt to detect the end of
	 * the transfer.
	 */
	if (drv_data->ssp_type == PXA25x_SSP)
		DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;

	return 0;
}

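/* Let both channels run; the transfer completes from interrupt context */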
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	DCSR(drv_data->rx_channel) |= DCSR_RUN;
	DCSR(drv_data->tx_channel) |= DCSR_RUN;
}

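/*
 * One-time DMA setup: claim an rx and a tx channel and route the SSP's
 * DMA requests to them through the DRCMR request-to-channel map.
 */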
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct device *dev = &drv_data->pdev->dev;
	struct ssp_device *ssp = drv_data->ssp;

	/* Get two DMA channels (rx and tx) */
	drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
						DMA_PRIO_HIGH,
						pxa2xx_spi_dma_handler,
						drv_data);
	if (drv_data->rx_channel < 0) {
		dev_err(dev, "problem (%d) requesting rx channel\n",
			drv_data->rx_channel);
		return -ENODEV;
	}
	drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
						DMA_PRIO_MEDIUM,
						pxa2xx_spi_dma_handler,
						drv_data);
	if (drv_data->tx_channel < 0) {
		dev_err(dev, "problem (%d) requesting tx channel\n",
			drv_data->tx_channel);
		pxa_free_dma(drv_data->rx_channel);
		return -ENODEV;
	}

	DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
	DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;

	return 0;
}

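/* Release the DRCMR mappings and both DMA channels */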
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct ssp_device *ssp = drv_data->ssp;

	DRCMR(ssp->drcmr_rx) = 0;
	DRCMR(ssp->drcmr_tx) = 0;

	if (drv_data->tx_channel >= 0)
		pxa_free_dma(drv_data->tx_channel);
	if (drv_data->rx_channel >= 0)
		pxa_free_dma(drv_data->rx_channel);
}

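/*
 * Reestablish the SSP's DRCMR request-to-channel mappings on resume
 * (they are presumed lost across suspend).
 */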
void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
	if (drv_data->rx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_rx) =
			DRCMR_MAPVLD | drv_data->rx_channel;
	if (drv_data->tx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_tx) =
			DRCMR_MAPVLD | drv_data->tx_channel;
}

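/*
 * Translate the requested DMA burst size into a DCMD burst code and a
 * matching SSCR1 rx/tx fifo threshold pair.  Returns nonzero when the
 * burst had to be clamped below what was requested.
 */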
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
					   struct spi_device *spi,
					   u8 bits_per_word, u32 *burst_code,
					   u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info =
			(struct pxa2xx_spi_chip *)spi->controller_data;
	int bytes_per_word;
	int burst_bytes;
	int thresh_words;
	int req_burst_size;
	int retval = 0;

	/* Set the threshold (in registers) to equal the same amount of data
	 * as represented by the burst size (in bytes).  The computation
	 * below is (burst_size rounded up to the nearest 8 bytes, word or
	 * long word) divided by (bytes/register).  The tx threshold is the
	 * inverse of the rx threshold, so that there will always be enough
	 * data in the rx fifo to satisfy a burst, and there will always be
	 * enough space in the tx fifo to accept a burst (a tx burst will
	 * overwrite the fifo if there is not enough space); there must
	 * always remain enough empty space in the rx fifo for any data
	 * loaded into the tx fifo.
	 * Whenever burst_size (in bytes) equals bits/word, the fifo
	 * threshold will be 8, or half the fifo.
	 * The threshold can only be set to 2, 4 or 8, but not 16, because
	 * to burst 16 to the tx fifo, the fifo would have to be empty;
	 * however, the minimum fifo trigger level is 1, and the tx will
	 * request service when the fifo is at this level, with only 15
	 * spaces free.
	 */

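	/* Worked example: with 8 bits/word (one byte per fifo entry) and
	 * the default 8-byte burst, thresh_words below is 8, so rx
	 * triggers at 8 filled entries and tx at 16 - 8 = 8 free entries,
	 * half of the 16-entry fifo in each direction, as described above.
	 */
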
	/* Find bytes/word */
	if (bits_per_word <= 8)
		bytes_per_word = 1;
	else if (bits_per_word <= 16)
		bytes_per_word = 2;
	else
		bytes_per_word = 4;

	/* Use struct pxa2xx_spi_chip->dma_burst_size if available */
	if (chip_info)
		req_burst_size = chip_info->dma_burst_size;
	else {
		switch (chip->dma_burst_size) {
		default:
			/* If the default burst size is not set, do it now */
			chip->dma_burst_size = DCMD_BURST8;
			/* fall through */
		case DCMD_BURST8:
			req_burst_size = 8;
			break;
		case DCMD_BURST16:
			req_burst_size = 16;
			break;
		case DCMD_BURST32:
			req_burst_size = 32;
			break;
		}
	}
	if (req_burst_size <= 8) {
		*burst_code = DCMD_BURST8;
		burst_bytes = 8;
	} else if (req_burst_size <= 16) {
		if (bytes_per_word == 1) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
		}
	} else {
		if (bytes_per_word == 1) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else if (bytes_per_word == 2) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST32;
			burst_bytes = 32;
		}
	}

	thresh_words = burst_bytes / bytes_per_word;

	/* thresh_words will be between 2 and 8 */
	*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
			| (SSCR1_TxTresh(16 - thresh_words) & SSCR1_TFT);

	return retval;
}