/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define MV_XOR_POOL_SIZE		PAGE_SIZE
#define MV_XOR_SLOT_SIZE		64
#define MV_XOR_THRESHOLD		1
#define MV_XOR_MAX_CHANNELS		2

#define MV_XOR_MIN_BYTE_COUNT		SZ_128
#define MV_XOR_MAX_BYTE_COUNT		(SZ_16M - 1)

/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR		0
#define XOR_OPERATION_MODE_MEMCPY	2
#define XOR_DESCRIPTOR_SWAP		BIT(14)
#define XOR_DESC_SUCCESS		0x40000000

#define XOR_DESC_DMA_OWNED		BIT(31)
#define XOR_DESC_EOD_INT_EN		BIT(31)

#define XOR_CURR_DESC(chan)	(chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan)	(chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan)	(chan->mmr_high_base + 0x20 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan)	(chan->mmr_high_base + 0xB0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan)	(chan->mmr_high_base + 0xC0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan)	(chan->mmr_high_base + 0xE0)
#define XOR_INIT_VALUE_HIGH(chan)	(chan->mmr_high_base + 0xE4)

#define XOR_CONFIG(chan)	(chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan)	(chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan)	(chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan)	(chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan)	(chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan)	(chan->mmr_base + 0x60)

#define XOR_INT_END_OF_DESC	BIT(0)
#define XOR_INT_END_OF_CHAIN	BIT(1)
#define XOR_INT_STOPPED		BIT(2)
#define XOR_INT_PAUSED		BIT(3)
#define XOR_INT_ERR_DECODE	BIT(4)
#define XOR_INT_ERR_RDPROT	BIT(5)
#define XOR_INT_ERR_WRPROT	BIT(6)
#define XOR_INT_ERR_OWN		BIT(7)
#define XOR_INT_ERR_PAR		BIT(8)
#define XOR_INT_ERR_MBUS	BIT(9)

#define XOR_INTR_ERRORS		(XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \
				 XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN    | \
				 XOR_INT_ERR_PAR    | XOR_INT_ERR_MBUS)

#define XOR_INTR_MASK_VALUE	(XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \
				 XOR_INT_STOPPED     | XOR_INTR_ERRORS)

#define WINDOW_BASE(w)		(0x50 + ((w) << 2))
#define WINDOW_SIZE(w)		(0x70 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)	(0x90 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan)	(0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)	(0xA0 + ((chan) << 2))

struct mv_xor_device {
	void __iomem	     *xor_base;
	void __iomem	     *xor_high_base;
	struct clk	     *clk;
	struct mv_xor_chan   *channels[MV_XOR_MAX_CHANNELS];
};

/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @lock: serializes enqueue/dequeue operations to the descriptors pool
 * @mmr_base: memory mapped register base
 * @mmr_high_base: memory mapped high register base
 * @idx: the index of the xor channel
 * @irq: interrupt line used by the channel
 * @current_type: transaction type the channel is currently configured for
 * @chain: device chain view of the descriptors
 * @completed_slots: slots completed by HW but still need to be acked
 * @dma_desc_pool: DMA (bus) address of the descriptor slot pool
 * @dma_desc_pool_virt: virtual address of the descriptor slot pool
 * @pool_size: size of the descriptor slot pool in bytes
 * @dmadev: dmaengine device object for this channel
 * @dmachan: common dmaengine channel object members
 * @last_used: place holder for allocation to continue from where it left off
 * @all_slots: complete domain of slots usable by the channel
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 * @dummy_src: dummy source buffer used by interrupt-only descriptors
 * @dummy_dst: dummy destination buffer used by interrupt-only descriptors
 * @dummy_src_addr: DMA address of @dummy_src
 * @dummy_dst_addr: DMA address of @dummy_dst
 */
struct mv_xor_chan {
	int			pending;
	spinlock_t		lock; /* protects the descriptor slot pool */
	void __iomem		*mmr_base;
	void __iomem		*mmr_high_base;
	unsigned int		idx;
	int			irq;
	enum dma_transaction_type	current_type;
	struct list_head	chain;
	struct list_head	completed_slots;
	dma_addr_t		dma_desc_pool;
	void			*dma_desc_pool_virt;
	size_t			pool_size;
	struct dma_device	dmadev;
	struct dma_chan		dmachan;
	struct mv_xor_desc_slot	*last_used;
	struct list_head	all_slots;
	int			slots_allocated;
	struct tasklet_struct	irq_tasklet;
	char			dummy_src[MV_XOR_MIN_BYTE_COUNT];
	char			dummy_dst[MV_XOR_MIN_BYTE_COUNT];
	dma_addr_t		dummy_src_addr, dummy_dst_addr;
};

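/*
 * Minimal illustrative sketch (helper names are hypothetical): the
 * dmaengine core deals in struct dma_chan, so the driver-private
 * channel is recovered with container_of() on the embedded @dmachan,
 * and the engine is kicked by writing the per-channel activation
 * register defined above.
 */
static inline struct mv_xor_chan *to_mv_xor_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mv_xor_chan, dmachan);
}

static inline void mv_xor_chan_activate(struct mv_xor_chan *mv_chan)
{
	/* Bit 0 of the activation register starts the descriptor chain. */
	writel(BIT(0), XOR_ACTIVATION(mv_chan));
}
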
/**
 * struct mv_xor_desc_slot - software descriptor
 * @slot_node: node on the mv_xor_chan.all_slots list
 * @chain_node: node on the mv_xor_chan.chain list
 * @completed_node: node on the mv_xor_chan.completed_slots list
 * @type: transaction type carried by this slot
 * @hw_desc: virtual address of the hardware descriptor chain
 * @slot_used: slot in use or not
 * @idx: pool index
 * @async_tx: support for the async_tx api
 */
struct mv_xor_desc_slot {
	struct list_head	slot_node;
	struct list_head	chain_node;
	struct list_head	completed_node;
	enum dma_transaction_type	type;
	void			*hw_desc;
	u16			slot_used;
	u16			idx;
	struct dma_async_tx_descriptor	async_tx;
};

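/*
 * Minimal illustrative sketch (helper name is hypothetical): submitted
 * transactions are passed around as struct dma_async_tx_descriptor
 * pointers, and the enclosing software descriptor is recovered through
 * the embedded @async_tx member.
 */
static inline struct mv_xor_desc_slot *
to_mv_xor_slot(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct mv_xor_desc_slot, async_tx);
}
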
/*
 * This structure describes the hardware XOR descriptor, which is 64
 * bytes in size.  The mv_phy_src_idx() macro must be used when
 * indexing the values of the phy_src_addr[] array.  This is because
 * the 'descriptor swap' feature, used on big-endian systems, swaps
 * descriptor data within blocks of 8 bytes, so two consecutive
 * entries of phy_src_addr[] are actually swapped on big-endian; hence
 * the different mv_phy_src_idx() implementations below (see the
 * sketch after these definitions).
 */
#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
	u32 status;		/* descriptor execution status */
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_dest_addr;	/* destination block address */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved0;
	u32 reserved1;
};
#define mv_phy_src_idx(src_idx) (src_idx)
#else
struct mv_xor_desc {
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 status;		/* descriptor execution status */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_dest_addr;	/* destination block address */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved1;
	u32 reserved0;
};
#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
#endif

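/*
 * Minimal illustrative sketch (helper name is hypothetical): source
 * addresses are always written through mv_phy_src_idx(), so the same
 * code produces a correct descriptor on both little- and big-endian
 * kernels despite the 8-byte descriptor swap described above.
 */
static inline void mv_xor_desc_set_src_addr(struct mv_xor_desc *hw_desc,
					    int index, u32 addr)
{
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
}
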
#define to_mv_sw_desc(addr_hw_desc)		\
	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx)	\
	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))

#endif