/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
/*
 * QCOM BAM DMA engine driver
 *
 * QCOM BAM DMA blocks are distributed among a number of the on-chip
 * peripherals on the MSM 8x74.  The configuration of each channel depends on
 * how it is hardwired to that specific peripheral.  The peripheral device
 * tree entries specify the configuration of each channel.
 *
 * The DMA controller requires the use of external memory for storage of the
 * hardware descriptors for each channel.  The descriptor FIFO is accessed as a
 * circular buffer and operations are managed according to the offset within
 * the FIFO.  After pipe/channel reset, all of the pipe registers and internal
 * state are back to defaults.
 *
 * During DMA operations, we write descriptors to the FIFO, being careful to
 * handle wrapping, and then write the last FIFO offset to that channel's
 * P_EVNT_REG register to kick off the transaction.  The P_SW_OFSTS register
 * indicates the current FIFO offset that is being processed, so there is some
 * indication of where the hardware is currently working.
 */
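
/*
 * A sketch of how a controller node might look in the device tree; the
 * addresses, interrupt number, and clock phandle below are hypothetical,
 * but the properties match what this driver parses ("qcom,ee", the
 * "bam_clk" clock, and a single-cell dma specifier):
 *
 *	dma@f9984000 {
 *		compatible = "qcom,bam-v1.4.0";
 *		reg = <0xf9984000 0x15000>;
 *		interrupts = <0 94 0>;
 *		clocks = <&gcc 123>;
 *		clock-names = "bam_clk";
 *		#dma-cells = <1>;
 *		qcom,ee = <0>;
 *	};
 */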

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include "dmaengine.h"
#include "virt-dma.h"

struct bam_desc_hw {
	u32 addr;		/* Buffer physical address */
	u16 size;		/* Buffer size in bytes */
	u16 flags;
};

#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)

struct bam_async_desc {
	struct virt_dma_desc vd;

	u32 num_desc;		/* descriptors still awaiting processing */
	u32 xfer_len;		/* descriptors submitted to hardware this round */

	/* transaction flags, EOT|EOB|NWD */
	u16 flags;

	struct bam_desc_hw *curr_desc;

	enum dma_transfer_direction dir;
	size_t length;		/* total transfer length, used for residue */
	struct bam_desc_hw desc[];
};

enum bam_reg {
	BAM_CTRL,
	BAM_REVISION,
	BAM_NUM_PIPES,
	BAM_DESC_CNT_TRSHLD,
	BAM_IRQ_SRCS,
	BAM_IRQ_SRCS_MSK,
	BAM_IRQ_SRCS_UNMASKED,
	BAM_IRQ_STTS,
	BAM_IRQ_CLR,
	BAM_IRQ_EN,
	BAM_CNFG_BITS,
	BAM_IRQ_SRCS_EE,
	BAM_IRQ_SRCS_MSK_EE,
	BAM_P_CTRL,
	BAM_P_RST,
	BAM_P_HALT,
	BAM_P_IRQ_STTS,
	BAM_P_IRQ_CLR,
	BAM_P_IRQ_EN,
	BAM_P_EVNT_DEST_ADDR,
	BAM_P_EVNT_REG,
	BAM_P_SW_OFSTS,
	BAM_P_DATA_FIFO_ADDR,
	BAM_P_DESC_FIFO_ADDR,
	BAM_P_EVNT_GEN_TRSHLD,
	BAM_P_FIFO_SIZES,
};

struct reg_offset_data {
	u32 base_offset;
	unsigned int pipe_mult, evnt_mult, ee_mult;
};
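
/*
 * The per-version tables below give each register's base offset plus the
 * multipliers applied to the pipe number (pipe_mult, evnt_mult) and to
 * the execution environment (ee_mult); bam_addr() combines them into a
 * register address.
 */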

static const struct reg_offset_data bam_v1_3_reg_info[] = {
	[BAM_CTRL]		= { 0x0F80, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0F84, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x0FBC, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0F88, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x0F8C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0F90, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0FB0, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0F94, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0F98, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x0F9C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x0FFC, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x1800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x1804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x0000, 0x80, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x0004, 0x80, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x0008, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x0010, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x0014, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x0018, 0x80, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x102C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1018, 0x00, 0x40, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1000, 0x00, 0x40, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1024, 0x00, 0x40, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x101C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1028, 0x00, 0x40, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1020, 0x00, 0x40, 0x00 },
};

static const struct reg_offset_data bam_v1_4_reg_info[] = {
	[BAM_CTRL]		= { 0x0000, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0004, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x003C, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x000C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0030, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x0800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x0804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x1000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x1004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x1008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x1010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x1014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x1018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x182C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x181C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1820, 0x00, 0x1000, 0x00 },
};

static const struct reg_offset_data bam_v1_7_reg_info[] = {
	[BAM_CTRL]		= { 0x00000, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x01000, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x01008, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x00008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x03010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x03014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x03018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x00014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x00018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x0001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x0007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x03000, 0x00, 0x00, 0x1000 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x03004, 0x00, 0x00, 0x1000 },
	[BAM_P_CTRL]		= { 0x13000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x13004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x13008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x13010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x13014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x13018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x1382C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x13818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x13800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x13824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x1381C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x13828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x13820, 0x00, 0x1000, 0x00 },
};

/* BAM CTRL */
#define BAM_SW_RST			BIT(0)
#define BAM_EN				BIT(1)
#define BAM_EN_ACCUM			BIT(4)
#define BAM_TESTBUS_SEL_SHIFT		5
#define BAM_TESTBUS_SEL_MASK		0x3F
#define BAM_DESC_CACHE_SEL_SHIFT	13
#define BAM_DESC_CACHE_SEL_MASK		0x3
#define BAM_CACHED_DESC_STORE		BIT(15)
#define IBC_DISABLE			BIT(16)

/* BAM REVISION */
#define REVISION_SHIFT		0
#define REVISION_MASK		0xFF
#define NUM_EES_SHIFT		8
#define NUM_EES_MASK		0xF
#define CE_BUFFER_SIZE		BIT(13)
#define AXI_ACTIVE		BIT(14)
#define USE_VMIDMT		BIT(15)
#define SECURED			BIT(16)
#define BAM_HAS_NO_BYPASS	BIT(17)
#define HIGH_FREQUENCY_BAM	BIT(18)
#define INACTIV_TMRS_EXST	BIT(19)
#define NUM_INACTIV_TMRS	BIT(20)
#define DESC_CACHE_DEPTH_SHIFT	21
#define DESC_CACHE_DEPTH_1	(0 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_2	(1 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_3	(2 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_4	(3 << DESC_CACHE_DEPTH_SHIFT)
#define CMD_DESC_EN		BIT(23)
#define INACTIV_TMR_BASE_SHIFT	24
#define INACTIV_TMR_BASE_MASK	0xFF

/* BAM NUM PIPES */
#define BAM_NUM_PIPES_SHIFT		0
#define BAM_NUM_PIPES_MASK		0xFF
#define PERIPH_NON_PIPE_GRP_SHIFT	16
#define PERIPH_NON_PIPE_GRP_MASK	0xFF
#define BAM_NON_PIPE_GRP_SHIFT		24
#define BAM_NON_PIPE_GRP_MASK		0xFF

/* BAM CNFG BITS */
#define BAM_PIPE_CNFG		BIT(2)
#define BAM_FULL_PIPE		BIT(11)
#define BAM_NO_EXT_P_RST	BIT(12)
#define BAM_IBC_DISABLE		BIT(13)
#define BAM_SB_CLK_REQ		BIT(14)
#define BAM_PSM_CSW_REQ		BIT(15)
#define BAM_PSM_P_RES		BIT(16)
#define BAM_AU_P_RES		BIT(17)
#define BAM_SI_P_RES		BIT(18)
#define BAM_WB_P_RES		BIT(19)
#define BAM_WB_BLK_CSW		BIT(20)
#define BAM_WB_CSW_ACK_IDL	BIT(21)
#define BAM_WB_RETR_SVPNT	BIT(22)
#define BAM_WB_DSC_AVL_P_RST	BIT(23)
#define BAM_REG_P_EN		BIT(24)
#define BAM_PSM_P_HD_DATA	BIT(25)
#define BAM_AU_ACCUMED		BIT(26)
#define BAM_CMD_ENABLE		BIT(27)

#define BAM_CNFG_BITS_DEFAULT	(BAM_PIPE_CNFG |	\
				 BAM_NO_EXT_P_RST |	\
				 BAM_IBC_DISABLE |	\
				 BAM_SB_CLK_REQ |	\
				 BAM_PSM_CSW_REQ |	\
				 BAM_PSM_P_RES |	\
				 BAM_AU_P_RES |		\
				 BAM_SI_P_RES |		\
				 BAM_WB_P_RES |		\
				 BAM_WB_BLK_CSW |	\
				 BAM_WB_CSW_ACK_IDL |	\
				 BAM_WB_RETR_SVPNT |	\
				 BAM_WB_DSC_AVL_P_RST |	\
				 BAM_REG_P_EN |		\
				 BAM_PSM_P_HD_DATA |	\
				 BAM_AU_ACCUMED |	\
				 BAM_CMD_ENABLE)

/* PIPE CTRL */
#define P_EN			BIT(1)
#define P_DIRECTION		BIT(3)
#define P_SYS_STRM		BIT(4)
#define P_SYS_MODE		BIT(5)
#define P_AUTO_EOB		BIT(6)
#define P_AUTO_EOB_SEL_SHIFT	7
#define P_AUTO_EOB_SEL_512	(0 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_256	(1 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_128	(2 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_64	(3 << P_AUTO_EOB_SEL_SHIFT)
#define P_PREFETCH_LIMIT_SHIFT	9
#define P_PREFETCH_LIMIT_32	(0 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_16	(1 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_4	(2 << P_PREFETCH_LIMIT_SHIFT)
#define P_WRITE_NWD		BIT(11)
#define P_LOCK_GROUP_SHIFT	16
#define P_LOCK_GROUP_MASK	0x1F

/* BAM_DESC_CNT_TRSHLD */
#define CNT_TRSHLD		0xffff
#define DEFAULT_CNT_THRSHLD	0x4

/* BAM_IRQ_SRCS */
#define BAM_IRQ			BIT(31)
#define P_IRQ			0x7fffffff

/* BAM_IRQ_SRCS_MSK */
#define BAM_IRQ_MSK		BAM_IRQ
#define P_IRQ_MSK		P_IRQ

/* BAM_IRQ_STTS */
#define BAM_TIMER_IRQ		BIT(4)
#define BAM_EMPTY_IRQ		BIT(3)
#define BAM_ERROR_IRQ		BIT(2)
#define BAM_HRESP_ERR_IRQ	BIT(1)

/* BAM_IRQ_CLR */
#define BAM_TIMER_CLR		BIT(4)
#define BAM_EMPTY_CLR		BIT(3)
#define BAM_ERROR_CLR		BIT(2)
#define BAM_HRESP_ERR_CLR	BIT(1)

/* BAM_IRQ_EN */
#define BAM_TIMER_EN		BIT(4)
#define BAM_EMPTY_EN		BIT(3)
#define BAM_ERROR_EN		BIT(2)
#define BAM_HRESP_ERR_EN	BIT(1)

/* BAM_P_IRQ_EN */
#define P_PRCSD_DESC_EN		BIT(0)
#define P_TIMER_EN		BIT(1)
#define P_WAKE_EN		BIT(2)
#define P_OUT_OF_DESC_EN	BIT(3)
#define P_ERR_EN		BIT(4)
#define P_TRNSFR_END_EN		BIT(5)
#define P_DEFAULT_IRQS_EN	(P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)

/* BAM_P_SW_OFSTS */
#define P_SW_OFSTS_MASK		0xffff

#define BAM_DESC_FIFO_SIZE	SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_MAX_DATA_SIZE	(SZ_32K - 8)
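
/*
 * Each struct bam_desc_hw is 8 bytes, so the 32 KiB FIFO holds 4096
 * entries; MAX_DESCRIPTORS is one less (4095) because one descriptor's
 * worth of space is reserved for 8-byte-aligning the FIFO base address
 * (see bam_chan_init_hw()).
 */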

struct bam_chan {
	struct virt_dma_chan vc;

	struct bam_device *bdev;

	/* configuration from device tree */
	u32 id;

	struct bam_async_desc *curr_txd;	/* current running dma */

	/* runtime configuration */
	struct dma_slave_config slave;

	/* fifo storage */
	struct bam_desc_hw *fifo_virt;
	dma_addr_t fifo_phys;

	/* fifo markers */
	unsigned short head;		/* start of active descriptor entries */
	unsigned short tail;		/* end of active descriptor entries */

	unsigned int initialized;	/* is the channel hw initialized? */
	unsigned int paused;		/* is the channel paused? */
	unsigned int reconfigure;	/* new slave config? */

	struct list_head node;
};

static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
{
	return container_of(common, struct bam_chan, vc.chan);
}

struct bam_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct bam_chan *channels;
	u32 num_channels;

	/* execution environment ID, from DT */
	u32 ee;

	const struct reg_offset_data *layout;

	struct clk *bamclk;
	int irq;

	/* dma start transaction tasklet */
	struct tasklet_struct task;
};

/**
 * bam_addr - returns BAM register address
 * @bdev: bam device
 * @pipe: pipe instance (ignored when register doesn't have multiple instances)
 * @reg:  register enum
 */
static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
		enum bam_reg reg)
{
	const struct reg_offset_data r = bdev->layout[reg];

	return bdev->regs + r.base_offset +
		r.pipe_mult * pipe +
		r.evnt_mult * pipe +
		r.ee_mult * bdev->ee;
}
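
/*
 * For example, with the v1.4 layout above, the event register of pipe 3
 * resolves to regs + 0x1818 + 0x1000 * 3: BAM_P_EVNT_REG scales by
 * evnt_mult (0x1000) and has no pipe_mult or ee_mult component.
 */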

/**
 * bam_reset_channel - Reset individual BAM DMA channel
 * @bchan: bam channel
 *
 * This function resets a specific BAM channel
 */
static void bam_reset_channel(struct bam_chan *bchan)
{
	struct bam_device *bdev = bchan->bdev;

	lockdep_assert_held(&bchan->vc.lock);

	/* reset channel */
	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));

	/* don't allow cpu to reorder BAM register accesses done after this */
	wmb();

	/* make sure hw is initialized when channel is used the first time */
	bchan->initialized = 0;
}

/**
 * bam_chan_init_hw - Initialize channel hardware
 * @bchan: bam channel
 * @dir: DMA transfer direction
 *
 * This function resets and initializes the BAM channel
 */
static void bam_chan_init_hw(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 val;

	/* Reset the channel to clear internal state of the FIFO */
	bam_reset_channel(bchan);

	/*
	 * write out 8 byte aligned address.  We have enough space for this
	 * because we allocated 1 more descriptor (8 bytes) than we can use
	 */
	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
	writel_relaxed(BAM_DESC_FIFO_SIZE,
			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));

	/*
	 * enable the per pipe interrupts: processed descriptor, error, and
	 * transfer end irqs
	 */
	writel_relaxed(P_DEFAULT_IRQS_EN,
			bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

	/* unmask the specific pipe and EE combo */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val |= BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	/* don't allow cpu to reorder the channel enable done below */
	wmb();

	/* set fixed direction and mode, then enable channel */
	val = P_EN | P_SYS_MODE;
	if (dir == DMA_DEV_TO_MEM)
		val |= P_DIRECTION;

	writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));

	bchan->initialized = 1;

	/* init FIFO pointers */
	bchan->head = 0;
	bchan->tail = 0;
}

/**
 * bam_alloc_chan - Allocate channel resources for DMA channel.
 * @chan: specified channel
 *
 * This function allocates the FIFO descriptor memory
 */
static int bam_alloc_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	if (bchan->fifo_virt)
		return 0;

	/* allocate FIFO descriptor space, but only if necessary */
	bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
				&bchan->fifo_phys, GFP_KERNEL);

	if (!bchan->fifo_virt) {
		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * bam_free_chan - Frees dma resources associated with specific channel
 * @chan: specified channel
 *
 * Free the allocated fifo descriptor memory and channel resources
 */
static void bam_free_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	u32 val;
	unsigned long flags;

	vchan_free_chan_resources(to_virt_chan(chan));

	if (bchan->curr_txd) {
		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
		return;
	}

	spin_lock_irqsave(&bchan->vc.lock, flags);
	bam_reset_channel(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
				bchan->fifo_phys);
	bchan->fifo_virt = NULL;

	/* mask irq for pipe/channel */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val &= ~BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	/* disable irq */
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
}

/**
 * bam_slave_config - set slave configuration for channel
 * @chan: dma channel
 * @cfg: slave configuration
 *
 * Sets slave configuration for channel
 */
static int bam_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *cfg)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flag;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	memcpy(&bchan->slave, cfg, sizeof(*cfg));
	bchan->reconfigure = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	return 0;
}

/**
 * bam_prep_slave_sg - Prep slave sg transaction
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct scatterlist *sg;
	u32 i;
	struct bam_desc_hw *desc;
	unsigned int num_alloc = 0;

	if (!is_slave_direction(direction)) {
		dev_err(bdev->dev, "invalid dma direction\n");
		return NULL;
	}

	/* calculate number of required entries */
	for_each_sg(sgl, sg, sg_len, i)
		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);

	/* allocate enough room to accommodate the number of entries */
	async_desc = kzalloc(sizeof(*async_desc) +
			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);

	if (!async_desc)
		goto err_out;

	if (flags & DMA_PREP_FENCE)
		async_desc->flags |= DESC_FLAG_NWD;

	if (flags & DMA_PREP_INTERRUPT)
		async_desc->flags |= DESC_FLAG_EOT;
	else
		async_desc->flags |= DESC_FLAG_INT;

	async_desc->num_desc = num_alloc;
	async_desc->curr_desc = async_desc->desc;
	async_desc->dir = direction;

	/* fill in temporary descriptors */
	desc = async_desc->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		unsigned int remainder = sg_dma_len(sg);
		unsigned int curr_offset = 0;

		do {
			desc->addr = sg_dma_address(sg) + curr_offset;

			if (remainder > BAM_MAX_DATA_SIZE) {
				desc->size = BAM_MAX_DATA_SIZE;
				remainder -= BAM_MAX_DATA_SIZE;
				curr_offset += BAM_MAX_DATA_SIZE;
			} else {
				desc->size = remainder;
				remainder = 0;
			}

			async_desc->length += desc->size;
			desc++;
		} while (remainder > 0);
	}

	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);

err_out:
	kfree(async_desc);
	return NULL;
}
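
/*
 * A sketch (hypothetical names throughout) of how a peripheral driver
 * would submit work through this prep callback via the generic
 * dmaengine API:
 *
 *	chan = dma_request_slave_channel(dev, "rx");
 *	sg_init_one(&sg, buf, len);
 *	dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE);
 *	txd = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	txd->callback = rx_done;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */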

/**
 * bam_dma_terminate_all - terminate all transactions on a channel
 * @chan: dma channel
 *
 * Dequeues and frees all transactions.
 * No callbacks are done.
 */
static int bam_dma_terminate_all(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flag;
	LIST_HEAD(head);

	/* remove all transactions, including active transaction */
	spin_lock_irqsave(&bchan->vc.lock, flag);
	if (bchan->curr_txd) {
		list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
		bchan->curr_txd = NULL;
	}

	vchan_get_all_descriptors(&bchan->vc, &head);
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	vchan_dma_desc_free_list(&bchan->vc, &head);

	return 0;
}

/**
 * bam_pause - Pause DMA channel
 * @chan: dma channel
 */
static int bam_pause(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	unsigned long flag;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	return 0;
}

/**
 * bam_resume - Resume DMA channel operations
 * @chan: dma channel
 */
static int bam_resume(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	unsigned long flag;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 0;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	return 0;
}

/**
 * process_channel_irqs - processes the channel interrupts
 * @bdev: bam controller
 *
 * This function processes the channel interrupts
 */
static u32 process_channel_irqs(struct bam_device *bdev)
{
	u32 i, srcs, pipe_stts;
	unsigned long flags;
	struct bam_async_desc *async_desc;

	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));

	/* return early if no pipe/channel interrupts are present */
	if (!(srcs & P_IRQ))
		return srcs;

	for (i = 0; i < bdev->num_channels; i++) {
		struct bam_chan *bchan = &bdev->channels[i];

		if (!(srcs & BIT(i)))
			continue;

		/* clear pipe irq */
		pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));

		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));

		spin_lock_irqsave(&bchan->vc.lock, flags);
		async_desc = bchan->curr_txd;

		if (async_desc) {
			async_desc->num_desc -= async_desc->xfer_len;
			async_desc->curr_desc += async_desc->xfer_len;
			bchan->curr_txd = NULL;

			/* manage FIFO */
			bchan->head += async_desc->xfer_len;
			bchan->head %= MAX_DESCRIPTORS;

			/*
			 * if complete, process cookie.  Otherwise
			 * push back to front of desc_issued so that
			 * it gets restarted by the tasklet
			 */
			if (!async_desc->num_desc)
				vchan_cookie_complete(&async_desc->vd);
			else
				list_add(&async_desc->vd.node,
					&bchan->vc.desc_issued);
		}

		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}

	return srcs;
}

/**
 * bam_dma_irq - irq handler for bam controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the bam controller
 */
static irqreturn_t bam_dma_irq(int irq, void *data)
{
	struct bam_device *bdev = data;
	u32 clr_mask = 0, srcs = 0;

	srcs |= process_channel_irqs(bdev);

	/* kick off tasklet to start next dma transfer */
	if (srcs & P_IRQ)
		tasklet_schedule(&bdev->task);

	if (srcs & BAM_IRQ)
		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));

	/* don't allow reorder of the various accesses to the BAM registers */
	mb();

	writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));

	return IRQ_HANDLED;
}

/**
 * bam_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct virt_dma_desc *vd;
	int ret;
	size_t residue = 0;
	unsigned int i;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return bchan->paused ? DMA_PAUSED : ret;

	spin_lock_irqsave(&bchan->vc.lock, flags);
	vd = vchan_find_desc(&bchan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct bam_async_desc, vd)->length;
	else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
		for (i = 0; i < bchan->curr_txd->num_desc; i++)
			residue += bchan->curr_txd->curr_desc[i].size;

	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && bchan->paused)
		ret = DMA_PAUSED;

	return ret;
}

/**
 * bam_apply_new_config - apply any queued slave configuration
 * @bchan: bam dma channel
 * @dir: DMA direction
 */
static void bam_apply_new_config(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 maxburst;

	if (dir == DMA_DEV_TO_MEM)
		maxburst = bchan->slave.src_maxburst;
	else
		maxburst = bchan->slave.dst_maxburst;

	writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

	bchan->reconfigure = 0;
}

/**
 * bam_start_dma - start next transaction
 * @bchan: bam dma channel
 */
static void bam_start_dma(struct bam_chan *bchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct bam_desc_hw *desc;
	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
					sizeof(struct bam_desc_hw));

	lockdep_assert_held(&bchan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	async_desc = container_of(vd, struct bam_async_desc, vd);
	bchan->curr_txd = async_desc;

	/* on first use, initialize the channel hardware */
	if (!bchan->initialized)
		bam_chan_init_hw(bchan, async_desc->dir);

	/* apply new slave config changes, if necessary */
	if (bchan->reconfigure)
		bam_apply_new_config(bchan, async_desc->dir);

	desc = bchan->curr_txd->curr_desc;

	if (async_desc->num_desc > MAX_DESCRIPTORS)
		async_desc->xfer_len = MAX_DESCRIPTORS;
	else
		async_desc->xfer_len = async_desc->num_desc;

	/* set any special flags on the last descriptor */
	if (async_desc->num_desc == async_desc->xfer_len)
		desc[async_desc->xfer_len - 1].flags = async_desc->flags;
	else
		desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;

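	/*
	 * Copy the descriptors into the FIFO, wrapping when needed.  For
	 * example, with MAX_DESCRIPTORS == 4095, tail == 4090 and
	 * xfer_len == 10, the first 5 entries land at the end of the
	 * FIFO, the remaining 5 wrap to the start, and tail ends up at 5.
	 */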
	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
		u32 partial = MAX_DESCRIPTORS - bchan->tail;

		memcpy(&fifo[bchan->tail], desc,
				partial * sizeof(struct bam_desc_hw));
		memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
				sizeof(struct bam_desc_hw));
	} else {
		memcpy(&fifo[bchan->tail], desc,
			async_desc->xfer_len * sizeof(struct bam_desc_hw));
	}

	bchan->tail += async_desc->xfer_len;
	bchan->tail %= MAX_DESCRIPTORS;

	/* ensure descriptor writes and dma start not reordered */
	wmb();
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
}

/**
 * dma_tasklet - DMA IRQ tasklet
 * @data: tasklet argument (bam controller structure)
 *
 * Sets up next DMA operation and then processes all completed transactions
 */
static void dma_tasklet(unsigned long data)
{
	struct bam_device *bdev = (struct bam_device *)data;
	struct bam_chan *bchan;
	unsigned long flags;
	unsigned int i;

	/* go through the channels and kick off transactions */
	for (i = 0; i < bdev->num_channels; i++) {
		bchan = &bdev->channels[i];
		spin_lock_irqsave(&bchan->vc.lock, flags);

		if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
			bam_start_dma(bchan);
		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}
}

/**
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Starts any pending transaction if the channel is idle
 */
static void bam_issue_pending(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);

	/* if work pending and idle, start a transaction */
	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
		bam_start_dma(bchan);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);
}

/**
 * bam_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 */
static void bam_dma_free_desc(struct virt_dma_desc *vd)
{
	struct bam_async_desc *async_desc = container_of(vd,
			struct bam_async_desc, vd);

	kfree(async_desc);
}

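/**
 * bam_dma_xlate - translate a device tree dma specifier into a channel
 * @dma_spec: dma specifier, one cell holding the pipe/channel index
 * @of: of_dma controller data
 *
 * A (hypothetical) client device node would select pipe 9 of this
 * controller with:
 *
 *	dmas = <&dma_bam 9>;
 *	dma-names = "rx";
 */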
static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *of)
{
	struct bam_device *bdev = container_of(of->of_dma_data,
					struct bam_device, common);
	unsigned int request;

	if (dma_spec->args_count != 1)
		return NULL;

	request = dma_spec->args[0];
	if (request >= bdev->num_channels)
		return NULL;

	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
}

/**
 * bam_init - initialization helper for global bam registers
 * @bdev: bam device
 */
static int bam_init(struct bam_device *bdev)
{
	u32 val;

	/* read revision and configuration information */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
	val &= NUM_EES_MASK;

	/* check that configured EE is within range */
	if (bdev->ee >= val)
		return -EINVAL;

	val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
	bdev->num_channels = val & BAM_NUM_PIPES_MASK;

	/* s/w reset bam; after reset all pipes are disabled and idle */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
	val |= BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
	val &= ~BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	/* make sure previous stores are visible before enabling BAM */
	wmb();

	/* enable bam */
	val |= BAM_EN;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	/* set descriptor threshold, start with 4 bytes */
	writel_relaxed(DEFAULT_CNT_THRSHLD,
			bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

	/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));

	/* enable irqs for errors */
	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
			bam_addr(bdev, 0, BAM_IRQ_EN));

	/* unmask global bam interrupt */
	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	return 0;
}

static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
	u32 index)
{
	bchan->id = index;
	bchan->bdev = bdev;

	vchan_init(&bchan->vc, &bdev->common);
	bchan->vc.desc_free = bam_dma_free_desc;
}

static const struct of_device_id bam_of_match[] = {
	{ .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
	{ .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
	{ .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
	{}
};

MODULE_DEVICE_TABLE(of, bam_of_match);

static int bam_dma_probe(struct platform_device *pdev)
{
	struct bam_device *bdev;
	const struct of_device_id *match;
	struct resource *iores;
	int ret, i;

	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->dev = &pdev->dev;

	match = of_match_node(bam_of_match, pdev->dev.of_node);
	if (!match) {
		dev_err(&pdev->dev, "Unsupported BAM module\n");
		return -ENODEV;
	}

	bdev->layout = match->data;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(bdev->regs))
		return PTR_ERR(bdev->regs);

	bdev->irq = platform_get_irq(pdev, 0);
	if (bdev->irq < 0)
		return bdev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
	if (ret) {
		dev_err(bdev->dev, "Execution environment unspecified\n");
		return ret;
	}

	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
	if (IS_ERR(bdev->bamclk))
		return PTR_ERR(bdev->bamclk);

	ret = clk_prepare_enable(bdev->bamclk);
	if (ret) {
		dev_err(bdev->dev, "failed to prepare/enable clock\n");
		return ret;
	}

	ret = bam_init(bdev);
	if (ret)
		goto err_disable_clk;

	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);

	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
				sizeof(*bdev->channels), GFP_KERNEL);
	if (!bdev->channels) {
		ret = -ENOMEM;
		goto err_tasklet_kill;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&bdev->common.channels);

	for (i = 0; i < bdev->num_channels; i++)
		bam_channel_init(bdev, &bdev->channels[i], i);

	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
	if (ret)
		goto err_bam_channel_exit;

	/* set max dma segment size */
	bdev->common.dev = bdev->dev;
	bdev->common.dev->dma_parms = &bdev->dma_parms;
	ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
	if (ret) {
		dev_err(bdev->dev, "cannot set maximum segment size\n");
		goto err_bam_channel_exit;
	}

	platform_set_drvdata(pdev, bdev);

	/* set capabilities */
	dma_cap_zero(bdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);

	/* initialize dmaengine apis */
	bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
	bdev->common.device_free_chan_resources = bam_free_chan;
	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
	bdev->common.device_config = bam_slave_config;
	bdev->common.device_pause = bam_pause;
	bdev->common.device_resume = bam_resume;
	bdev->common.device_terminate_all = bam_dma_terminate_all;
	bdev->common.device_issue_pending = bam_issue_pending;
	bdev->common.device_tx_status = bam_tx_status;
	bdev->common.dev = bdev->dev;

	ret = dma_async_device_register(&bdev->common);
	if (ret) {
		dev_err(bdev->dev, "failed to register dma async device\n");
		goto err_bam_channel_exit;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
					&bdev->common);
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&bdev->common);
err_bam_channel_exit:
	for (i = 0; i < bdev->num_channels; i++)
		tasklet_kill(&bdev->channels[i].vc.task);
err_tasklet_kill:
	tasklet_kill(&bdev->task);
err_disable_clk:
	clk_disable_unprepare(bdev->bamclk);

	return ret;
}

static int bam_dma_remove(struct platform_device *pdev)
{
	struct bam_device *bdev = platform_get_drvdata(pdev);
	u32 i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&bdev->common);

	/* mask all interrupts for this execution environment */
	writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	devm_free_irq(bdev->dev, bdev->irq, bdev);

	for (i = 0; i < bdev->num_channels; i++) {
		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
		tasklet_kill(&bdev->channels[i].vc.task);

		dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
			bdev->channels[i].fifo_virt,
			bdev->channels[i].fifo_phys);
	}

	tasklet_kill(&bdev->task);

	clk_disable_unprepare(bdev->bamclk);

	return 0;
}

static struct platform_driver bam_dma_driver = {
	.probe = bam_dma_probe,
	.remove = bam_dma_remove,
	.driver = {
		.name = "bam-dma-engine",
		.of_match_table = bam_of_match,
	},
};

module_platform_driver(bam_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
MODULE_LICENSE("GPL v2");