/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/io.h>

#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 128
#endif

#ifndef VXGE_ALIGN
#define VXGE_ALIGN(adrs, size) \
	(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
#endif
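
/*
 * Worked example (illustrative): VXGE_ALIGN() yields the number of
 * padding bytes needed to reach the next @size boundary, not the
 * aligned address itself. With a 128-byte cache line:
 *
 *	VXGE_ALIGN(0x1000, 128) == 0	(already aligned)
 *	VXGE_ALIGN(0x1001, 128) == 127	(pads up to 0x1080)
 *	VXGE_ALIGN(0x107f, 128) == 1	(pads up to 0x1080)
 */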

#define VXGE_HW_MIN_MTU				68
#define VXGE_HW_MAX_MTU				9600
#define VXGE_HW_DEFAULT_MTU			1500

#define VXGE_HW_MAX_ROM_IMAGES			8

struct eprom_image {
	u8 is_valid:1;
	u8 index;
	u8 type;
	u16 version;
};

#ifdef VXGE_DEBUG_ASSERT
/**
 * vxge_assert
 * @test: C-condition to check
 *
 * This macro implements a traditional assert. By default assertions
 * are enabled; they can be disabled by undefining the
 * VXGE_DEBUG_ASSERT macro at compile time.
 */
#define vxge_assert(test) BUG_ON(!(test))
#else
#define vxge_assert(test)
#endif /* end of VXGE_DEBUG_ASSERT */
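
/*
 * Usage sketch (illustrative): guard invariants that must hold in
 * debug builds; with VXGE_DEBUG_ASSERT undefined the check compiles
 * away entirely, e.g.:
 *
 *	vxge_assert(channel != NULL);
 *	vxge_assert(txdl_priv->frags <= VXGE_HW_MAX_FIFO_FRAGS);
 */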

/**
 * enum vxge_debug_level
 * @VXGE_NONE: debug disabled
 * @VXGE_ERR: all errors are logged
 * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
 *                 are logged. Very noisy.
 *
 * This enumeration is used to switch between debug levels at runtime,
 * provided the DEBUG macro is defined at compile time. If the DEBUG
 * macro is not defined, the debug code is compiled out.
 */
enum vxge_debug_level {
	VXGE_NONE   = 0,
	VXGE_TRACE  = 1,
	VXGE_ERR    = 2
};

#define NULL_VPID					0xFFFFFFFF
#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
#define VXGE_DEBUG_MODULE_MASK  0xffffffff
#define VXGE_DEBUG_TRACE_MASK   0xffffffff
#define VXGE_DEBUG_ERR_MASK     0xffffffff
#define VXGE_DEBUG_MASK         0x000001ff
#else
#define VXGE_DEBUG_MODULE_MASK  0x20000000
#define VXGE_DEBUG_TRACE_MASK   0x20000000
#define VXGE_DEBUG_ERR_MASK     0x20000000
#define VXGE_DEBUG_MASK         0x00000001
#endif

/*
 * @VXGE_COMPONENT_LL: do debug for vxge link layer module
 * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
 *
 * This enumeration is used to distinguish modules or libraries during
 * compilation and at runtime. The Makefile must declare the
 * VXGE_DEBUG_MODULE_MASK macro and set it to the proper value.
 */
#define	VXGE_COMPONENT_LL				0x20000000
#define	VXGE_COMPONENT_ALL				0xffffffff

#define VXGE_HW_BASE_INF	100
#define VXGE_HW_BASE_ERR	200
#define VXGE_HW_BASE_BADCFG	300

enum vxge_hw_status {
	VXGE_HW_OK				  = 0,
	VXGE_HW_FAIL				  = 1,
	VXGE_HW_PENDING				  = 2,
	VXGE_HW_COMPLETIONS_REMAIN		  = 3,

	VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
	VXGE_HW_INF_OUT_OF_DESCRIPTORS		  = VXGE_HW_BASE_INF + 2,

	VXGE_HW_ERR_INVALID_HANDLE		  = VXGE_HW_BASE_ERR + 1,
	VXGE_HW_ERR_OUT_OF_MEMORY		  = VXGE_HW_BASE_ERR + 2,
	VXGE_HW_ERR_VPATH_NOT_AVAILABLE		  = VXGE_HW_BASE_ERR + 3,
	VXGE_HW_ERR_VPATH_NOT_OPEN		  = VXGE_HW_BASE_ERR + 4,
	VXGE_HW_ERR_WRONG_IRQ			  = VXGE_HW_BASE_ERR + 5,
	VXGE_HW_ERR_SWAPPER_CTRL		  = VXGE_HW_BASE_ERR + 6,
	VXGE_HW_ERR_INVALID_MTU_SIZE		  = VXGE_HW_BASE_ERR + 7,
	VXGE_HW_ERR_INVALID_INDEX		  = VXGE_HW_BASE_ERR + 8,
	VXGE_HW_ERR_INVALID_TYPE		  = VXGE_HW_BASE_ERR + 9,
	VXGE_HW_ERR_INVALID_OFFSET		  = VXGE_HW_BASE_ERR + 10,
	VXGE_HW_ERR_INVALID_DEVICE		  = VXGE_HW_BASE_ERR + 11,
	VXGE_HW_ERR_VERSION_CONFLICT		  = VXGE_HW_BASE_ERR + 12,
	VXGE_HW_ERR_INVALID_PCI_INFO		  = VXGE_HW_BASE_ERR + 13,
	VXGE_HW_ERR_INVALID_TCODE		  = VXGE_HW_BASE_ERR + 14,
	VXGE_HW_ERR_INVALID_BLOCK_SIZE		  = VXGE_HW_BASE_ERR + 15,
	VXGE_HW_ERR_INVALID_STATE		  = VXGE_HW_BASE_ERR + 16,
	VXGE_HW_ERR_PRIVILAGED_OPEARATION	  = VXGE_HW_BASE_ERR + 17,
	VXGE_HW_ERR_INVALID_PORT		  = VXGE_HW_BASE_ERR + 18,
	VXGE_HW_ERR_FIFO			  = VXGE_HW_BASE_ERR + 19,
	VXGE_HW_ERR_VPATH			  = VXGE_HW_BASE_ERR + 20,
	VXGE_HW_ERR_CRITICAL			  = VXGE_HW_BASE_ERR + 21,
	VXGE_HW_ERR_SLOT_FREEZE			  = VXGE_HW_BASE_ERR + 22,

	VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS	  = VXGE_HW_BASE_BADCFG + 1,
	VXGE_HW_BADCFG_FIFO_BLOCKS		  = VXGE_HW_BASE_BADCFG + 2,
	VXGE_HW_BADCFG_VPATH_MTU		  = VXGE_HW_BASE_BADCFG + 3,
	VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG	  = VXGE_HW_BASE_BADCFG + 4,
	VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH	  = VXGE_HW_BASE_BADCFG + 5,
	VXGE_HW_BADCFG_INTR_MODE		  = VXGE_HW_BASE_BADCFG + 6,
	VXGE_HW_BADCFG_RTS_MAC_EN		  = VXGE_HW_BASE_BADCFG + 7,

	VXGE_HW_EOF_TRACE_BUF			  = -1
};
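
/*
 * Illustrative sketch (not part of the driver): because the status
 * codes are grouped by base value, a code can be classified by range
 * alone, e.g.:
 *
 *	static inline int example_status_is_err(enum vxge_hw_status s)
 *	{
 *		return s >= VXGE_HW_BASE_ERR && s < VXGE_HW_BASE_BADCFG;
 *	}
 */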

/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 *
 */
enum vxge_hw_device_link_state {
	VXGE_HW_LINK_NONE,
	VXGE_HW_LINK_DOWN,
	VXGE_HW_LINK_UP
};

/**
 * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
 * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
 * @VXGE_HW_FW_UPGRADE_DONE: upload completed
 * @VXGE_HW_FW_UPGRADE_ERR: upload error
 * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
 *
 */
enum vxge_hw_fw_upgrade_code {
	VXGE_HW_FW_UPGRADE_OK		= 0,
	VXGE_HW_FW_UPGRADE_DONE		= 1,
	VXGE_HW_FW_UPGRADE_ERR		= 2,
	VXGE_FW_UPGRADE_BYTES2SKIP	= 3
};

/**
 * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
 * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
 * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
 * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image
 *             (check failed)
 */
enum vxge_hw_fw_upgrade_err_code {
	VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1		= 1,
	VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW		= 2,
	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3		= 3,
	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4		= 4,
	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5		= 5,
	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6		= 6,
	VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7		= 7,
	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8		= 8,
	VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN	= 9,
	VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH		= 10
};

/**
 * struct vxge_hw_device_date - Date Format
 * @day: Day
 * @month: Month
 * @year: Year
 * @date: Date in string format
 *
 * Structure for returning date
 */

#define VXGE_HW_FW_STRLEN	32
struct vxge_hw_device_date {
	u32     day;
	u32     month;
	u32     year;
	char    date[VXGE_HW_FW_STRLEN];
};

struct vxge_hw_device_version {
	u32     major;
	u32     minor;
	u32     build;
	char    version[VXGE_HW_FW_STRLEN];
};

/**
 * struct vxge_hw_fifo_config - Configuration of fifo.
 * @enable: Is this fifo to be commissioned
 * @fifo_blocks: Number of TxDL (that is, lists of Tx descriptors)
 * 		blocks per queue.
 * @max_frags: Max number of Tx buffers per TxDL (that is, per single
 *             transmit operation).
 *             No more than 256 transmit buffers can be specified.
 * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
 *             bytes. Setting @memblock_size to page size ensures
 *             by-page allocation of descriptors. 128K bytes is the
 *             maximum supported block size.
 * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
 *             (e.g., to align on a cache line).
 * @intr: Boolean. Use 1 to generate an interrupt for each completed TxDL.
 *             Use 0 otherwise.
 * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
 *             which generally improves latency of the host bridge operation
 *             (see PCI specification). For valid values please refer
 *             to struct vxge_hw_fifo_config{} in the driver sources.
 * Configuration of all Titan fifos.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_fifo_config{} structure.
 */
struct vxge_hw_fifo_config {
	u32				enable;
#define VXGE_HW_FIFO_ENABLE				1
#define VXGE_HW_FIFO_DISABLE				0

	u32				fifo_blocks;
#define VXGE_HW_MIN_FIFO_BLOCKS				2
#define VXGE_HW_MAX_FIFO_BLOCKS				128

	u32				max_frags;
#define VXGE_HW_MIN_FIFO_FRAGS				1
#define VXGE_HW_MAX_FIFO_FRAGS				256

	u32				memblock_size;
#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE			VXGE_HW_BLOCK_SIZE
#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE			131072
#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE			8096

	u32				alignment_size;
#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE		0
#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE		65536
#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE		VXGE_CACHE_LINE_SIZE

	u32				intr;
#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE			1
#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE			0
#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT			0

	u32				no_snoop_bits;
#define VXGE_HW_FIFO_NO_SNOOP_DISABLED			0
#define VXGE_HW_FIFO_NO_SNOOP_TXD			1
#define VXGE_HW_FIFO_NO_SNOOP_FRM			2
#define VXGE_HW_FIFO_NO_SNOOP_ALL			3
#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT			0

};
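
/*
 * Configuration sketch (illustrative; the values are hypothetical but
 * within the documented bounds above):
 *
 *	struct vxge_hw_fifo_config cfg = {
 *		.enable         = VXGE_HW_FIFO_ENABLE,
 *		.fifo_blocks    = VXGE_HW_MIN_FIFO_BLOCKS,
 *		.max_frags      = VXGE_HW_MAX_FIFO_FRAGS,
 *		.memblock_size  = VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
 *		.alignment_size = VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
 *		.intr           = VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
 *		.no_snoop_bits  = VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
 *	};
 */
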
/**
 * struct vxge_hw_ring_config - Ring configuration.
 * @enable: Is this ring to be commissioned
 * @ring_blocks: Number of RxD blocks in the ring
 * @buffer_mode: Receive buffer mode (1, 3, or 5); for details please refer
 *             to Titan User Guide.
 * @scatter_mode: Titan supports two receive scatter modes: A and B.
 *             For details please refer to Titan User Guide.
 * @rx_timer_val: The number of 32ns periods that would be counted between two
 *             timer interrupts.
 * @greedy_return: If set, it forces the device to return absolutely all RxDs
 *             that are consumed and still on board when a timer interrupt
 *             triggers. If clear, and if the device has already returned
 *             RxDs between the previous and the current timer interrupt,
 *             then the device is not forced to return the rest of the
 *             consumed RxDs that it has on board, which account for a byte
 *             count less than the one programmed into the
 *             PRC_CFG6.RXD_CRXDT field.
 * @rx_timer_ci: TBD
 * @backoff_interval_us: Time (in microseconds) after which Titan
 *             tries to download RxDs posted by the host.
 *             Note that the "backoff" does not happen if the host posts
 *             receive descriptors in a timely fashion.
 * Ring configuration.
 */
struct vxge_hw_ring_config {
	u32				enable;
#define VXGE_HW_RING_ENABLE					1
#define VXGE_HW_RING_DISABLE					0
#define VXGE_HW_RING_DEFAULT					1

	u32				ring_blocks;
#define VXGE_HW_MIN_RING_BLOCKS					1
#define VXGE_HW_MAX_RING_BLOCKS					128
#define VXGE_HW_DEF_RING_BLOCKS					2

	u32				buffer_mode;
#define VXGE_HW_RING_RXD_BUFFER_MODE_1				1
#define VXGE_HW_RING_RXD_BUFFER_MODE_3				3
#define VXGE_HW_RING_RXD_BUFFER_MODE_5				5
#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT			1

	u32				scatter_mode;
#define VXGE_HW_RING_SCATTER_MODE_A				0
#define VXGE_HW_RING_SCATTER_MODE_B				1
#define VXGE_HW_RING_SCATTER_MODE_C				2
#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT		0xffffffff

	u64				rxds_limit;
#define VXGE_HW_DEF_RING_RXDS_LIMIT				44
};

/**
 * struct vxge_hw_vp_config - Configuration of virtual path
 * @vp_id: Virtual Path Id
 * @min_bandwidth: Minimum guaranteed bandwidth
 * @ring: See struct vxge_hw_ring_config{}.
 * @fifo: See struct vxge_hw_fifo_config{}.
 * @tti: Configuration of the interrupt associated with Transmit.
 *             See struct vxge_hw_tim_intr_config{}.
 * @rti: Configuration of the interrupt associated with Receive.
 *              See struct vxge_hw_tim_intr_config{}.
 * @mtu: MTU size used on this port.
 * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
 *             remove the VLAN tag from all received tagged frames that are not
 *             replicated at the internal L2 switch.
 *             0 - Do not strip the VLAN tag.
 *             1 - Strip the VLAN tag. Regardless of this setting, VLAN tags
 *                 are always placed into the RxDMA descriptor.
 *
 * This structure is used by the driver to pass the configuration parameters
 * used to configure a Virtual Path.
 */
struct vxge_hw_vp_config {
	u32				vp_id;

#define	VXGE_HW_VPATH_PRIORITY_MIN			0
#define	VXGE_HW_VPATH_PRIORITY_MAX			16
#define	VXGE_HW_VPATH_PRIORITY_DEFAULT			0

	u32				min_bandwidth;
#define	VXGE_HW_VPATH_BANDWIDTH_MIN			0
#define	VXGE_HW_VPATH_BANDWIDTH_MAX			100
#define	VXGE_HW_VPATH_BANDWIDTH_DEFAULT			0

	struct vxge_hw_ring_config		ring;
	struct vxge_hw_fifo_config		fifo;
	struct vxge_hw_tim_intr_config	tti;
	struct vxge_hw_tim_intr_config	rti;

	u32				mtu;
#define VXGE_HW_VPATH_MIN_INITIAL_MTU			VXGE_HW_MIN_MTU
#define VXGE_HW_VPATH_MAX_INITIAL_MTU			VXGE_HW_MAX_MTU
#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU	0xffffffff

	u32				rpa_strip_vlan_tag;
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE			1
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE		0
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT	0xffffffff

};
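
/*
 * Configuration sketch (illustrative; the values are hypothetical): a
 * virtual path that takes flash defaults for MTU and VLAN stripping
 * while commissioning both its ring and its fifo:
 *
 *	struct vxge_hw_vp_config vp_cfg = {
 *		.vp_id              = 0,
 *		.min_bandwidth      = VXGE_HW_VPATH_BANDWIDTH_DEFAULT,
 *		.mtu                = VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU,
 *		.rpa_strip_vlan_tag =
 *			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT,
 *		.ring = { .enable = VXGE_HW_RING_ENABLE },
 *		.fifo = { .enable = VXGE_HW_FIFO_ENABLE },
 *	};
 */
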
/**
 * struct vxge_hw_device_config - Device configuration.
 * @dma_blockpool_initial: Initial size of DMA Pool
 * @dma_blockpool_max: Maximum blocks in DMA pool
 * @intr_mode: Line, or MSI-X interrupt.
 *
 * @rth_en: Enable Receive Traffic Hashing (RTH) using the IT (Indirection
 *             Table).
 * @rth_it_type: RTH IT table programming type
 * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
 * @vp_config: Configuration for virtual paths
 * @device_poll_millis: Specify the interval (in milliseconds)
 * 			to wait for register reads
 *
 * Titan configuration.
 * Contains per-device configuration parameters, including:
 * - stats sampling interval, etc.
 *
 * In addition, struct vxge_hw_device_config{} includes "subordinate"
 * configurations, including:
 * - fifos and rings;
 * - MAC (done at firmware level).
 *
 * See Titan User Guide for more details.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_device_config{} structure. Please refer to the
 * corresponding include file.
 * See also: struct vxge_hw_tim_intr_config{}.
 */
struct vxge_hw_device_config {
	u32					device_poll_millis;
#define VXGE_HW_MIN_DEVICE_POLL_MILLIS		1
#define VXGE_HW_MAX_DEVICE_POLL_MILLIS		100000
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS		1000

	u32					dma_blockpool_initial;
	u32					dma_blockpool_max;
#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE		0
#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE	0
#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE	4
#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE		4096

#define	VXGE_HW_MAX_PAYLOAD_SIZE_512		2

	u32					intr_mode:2,
#define VXGE_HW_INTR_MODE_IRQLINE		0
#define VXGE_HW_INTR_MODE_MSIX			1
#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT		2

#define VXGE_HW_INTR_MODE_DEF			0

						rth_en:1,
#define VXGE_HW_RTH_DISABLE			0
#define VXGE_HW_RTH_ENABLE			1
#define VXGE_HW_RTH_DEFAULT			0

						rth_it_type:1,
#define VXGE_HW_RTH_IT_TYPE_SOLO_IT		0
#define VXGE_HW_RTH_IT_TYPE_MULTI_IT		1
#define VXGE_HW_RTH_IT_TYPE_DEFAULT		0

						rts_mac_en:1,
#define VXGE_HW_RTS_MAC_DISABLE			0
#define VXGE_HW_RTS_MAC_ENABLE			1
#define VXGE_HW_RTS_MAC_DEFAULT			0

						hwts_en:1;
#define	VXGE_HW_HWTS_DISABLE			0
#define	VXGE_HW_HWTS_ENABLE			1
#define	VXGE_HW_HWTS_DEFAULT			1

	struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
};
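
/*
 * Configuration sketch (illustrative): the interrupt and RTH knobs are
 * bit-fields, so they are assigned like ordinary members, e.g.:
 *
 *	struct vxge_hw_device_config cfg;
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
 *	cfg.intr_mode          = VXGE_HW_INTR_MODE_MSIX;
 *	cfg.rth_en             = VXGE_HW_RTH_ENABLE;
 *	cfg.rth_it_type        = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
 *	cfg.rts_mac_en         = VXGE_HW_RTS_MAC_DEFAULT;
 *	cfg.hwts_en            = VXGE_HW_HWTS_DEFAULT;
 */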

/**
 * function vxge_uld_link_up_f - Link-Up callback provided by driver.
 * @devh: HW device handle.
 * Link-up notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_link_down_f - Link-Down callback provided by
 * driver.
 * @devh: HW device handle.
 *
 * Link-Down notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_crit_err_f - Critical Error notification callback.
 * @devh: HW device handle (typically obtained at HW device
 *        initialization time).
 * @type: Enumerated hw error, e.g.: double ECC.
 * @serr_data: Titan status.
 * @ext_data: Extended data. The contents depend on the @type.
 *
 * Critical-error notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
 * vxge_hw_driver_initialize().
 */

/**
 * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
 * @link_up: See vxge_uld_link_up_f{}.
 * @link_down: See vxge_uld_link_down_f{}.
 * @crit_err: See vxge_uld_crit_err_f{}.
 *
 * Driver slow-path (per-driver) callbacks.
 * Implemented by the driver and provided to HW via
 * vxge_hw_driver_initialize().
 * Note that these callbacks are not mandatory: HW will not invoke
 * a callback if NULL is specified.
 *
 * See also: vxge_hw_driver_initialize().
 */
struct vxge_hw_uld_cbs {
	void (*link_up)(struct __vxge_hw_device *devh);
	void (*link_down)(struct __vxge_hw_device *devh);
	void (*crit_err)(struct __vxge_hw_device *devh,
			enum vxge_hw_event type, u64 ext_data);
};
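
/*
 * Sketch (illustrative; the example_* names and our_ndev are
 * hypothetical): a driver supplies its slow-path handlers through a
 * constant callback table, leaving unused entries NULL:
 *
 *	static void example_link_up(struct __vxge_hw_device *devh)
 *	{
 *		netif_carrier_on(our_ndev);
 *	}
 *
 *	static const struct vxge_hw_uld_cbs example_uld_cbs = {
 *		.link_up   = example_link_up,
 *		.link_down = NULL,
 *		.crit_err  = NULL,
 *	};
 */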

/*
 * struct __vxge_hw_blockpool_entry - Block private data structure
 * @item: List header used to link.
 * @length: Length of the block
 * @memblock: Virtual address of the block
 * @dma_addr: DMA Address of the block.
 * @dma_handle: DMA handle of the block.
 * @acc_handle: DMA acc handle
 *
 * Each block is allocated with a header so that the blocks can be linked
 * into a list.
 *
 */
struct __vxge_hw_blockpool_entry {
	struct list_head	item;
	u32			length;
	void			*memblock;
	dma_addr_t		dma_addr;
	struct pci_dev 		*dma_handle;
	struct pci_dev 		*acc_handle;
};

/*
 * struct __vxge_hw_blockpool - Block Pool
 * @hldev: HW device
 * @block_size: size of each block.
 * @pool_size: Number of blocks in the pool
 * @pool_max: Maximum number of blocks above which to free additional blocks
 * @req_out: Number of block requests outstanding with the OS
 * @free_block_list: List of free blocks
 * @free_entry_list: List of free blockpool entries
 *
 * The block pool contains the preallocated DMA blocks.
 *
 */
struct __vxge_hw_blockpool {
	struct __vxge_hw_device *hldev;
	u32				block_size;
	u32				pool_size;
	u32				pool_max;
	u32				req_out;
	struct list_head		free_block_list;
	struct list_head		free_entry_list;
};

/*
 * enum __vxge_hw_channel_type - Enumerated channel types.
 * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
 * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
 * @VXGE_HW_CHANNEL_TYPE_RING: ring.
 * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
 * (and recognized) channel types. Currently: 2.
 *
 * Enumerated channel types. Currently there are only two link-layer
 * channels - Titan fifo and Titan ring. In the future the list will grow.
 */
enum __vxge_hw_channel_type {
	VXGE_HW_CHANNEL_TYPE_UNKNOWN			= 0,
	VXGE_HW_CHANNEL_TYPE_FIFO			= 1,
	VXGE_HW_CHANNEL_TYPE_RING			= 2,
	VXGE_HW_CHANNEL_TYPE_MAX			= 3
};

/*
 * struct __vxge_hw_channel
 * @item: List item; used to maintain a list of open channels.
 * @type: Channel type. See enum __vxge_hw_channel_type{}.
 * @devh: Device handle. HW device object that contains _this_ channel.
 * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
 * @length: Channel length. Currently allocated number of descriptors.
 *          The channel length "grows" when more descriptors get allocated.
 *          See _hw_mempool_grow.
 * @reserve_arr: Reserve array. Contains descriptors that can be reserved
 *               by the driver for a subsequent send or receive operation.
 *               See vxge_hw_fifo_txdl_reserve(),
 *               vxge_hw_ring_rxd_reserve().
 * @reserve_ptr: Current pointer in the reserve array
 * @reserve_top: Reserve top; the maximum number of descriptors available
 *          in the reserve array.
 * @work_arr: Work array. Contains descriptors posted to the channel.
 *            Note that at any point in time @work_arr contains 3 types of
 *            descriptors:
 *            1) posted but not yet consumed by Titan device;
 *            2) consumed but not yet completed;
 *            3) completed but not yet freed
 *            (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
 * @post_index: Post index. At any point in time points to the
 *              position in the channel that will contain the next
 *              to-be-posted descriptor.
 * @compl_index: Completion index. At any point in time points to the
 *               position in the channel that will contain the next
 *               to-be-completed descriptor.
 * @free_arr: Free array. Contains completed descriptors that were freed
 *            (i.e., handed back over to HW) by the driver.
 *            See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
 * @free_ptr: Current pointer in the free array
 * @per_dtr_space: Per-descriptor space (in bytes) that the channel user can
 *                 utilize to store per-operation control information.
 * @stats: Pointer to common statistics
 * @userdata: Per-channel opaque (void*) user-defined context, which may be
 *            a driver object, ULP connection, etc.
 *            Once the channel is open, @userdata is passed back to the user
 *            via vxge_hw_channel_callback_f.
 *
 * HW channel object.
 *
 * See also: enum __vxge_hw_channel_type{}, enum vxge_hw_channel_flag
 */
struct __vxge_hw_channel {
	struct list_head		item;
	enum __vxge_hw_channel_type	type;
	struct __vxge_hw_device 	*devh;
	struct __vxge_hw_vpath_handle 	*vph;
	u32			length;
	u32			vp_id;
	void		**reserve_arr;
	u32			reserve_ptr;
	u32			reserve_top;
	void		**work_arr;
	u32			post_index ____cacheline_aligned;
	u32			compl_index ____cacheline_aligned;
	void		**free_arr;
	u32			free_ptr;
	void		**orig_arr;
	u32			per_dtr_space;
	void		*userdata;
	struct vxge_hw_common_reg	__iomem *common_reg;
	u32			first_vp_id;
	struct vxge_hw_vpath_stats_sw_common_info *stats;

} ____cacheline_aligned;

/*
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * @vp_id: Virtual path id
 * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
 * @hldev: HAL device
 * @vp_config: Virtual Path Config
 * @vp_reg: VPATH Register map address in BAR0
 * @vpmgmt_reg: VPATH_MGMT register map address
 * @max_mtu: Max MTU that can be supported
 * @vsport_number: vsport attached to this vpath
 * @max_kdfc_db: Maximum kernel mode doorbells
 * @max_nofl_db: Maximum non offload doorbells
 * @tx_intr_num: Interrupt Number associated with the TX
 * @ringh: Ring Queue
 * @fifoh: FIFO Queue
 * @vpath_handles: Virtual Path handles list
 * @stats_block: Memory for DMAing stats
 * @stats: Vpath statistics
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
	u32				vp_id;

	u32				vp_open;
#define VXGE_HW_VP_NOT_OPEN	0
#define	VXGE_HW_VP_OPEN		1

	struct __vxge_hw_device		*hldev;
	struct vxge_hw_vp_config	*vp_config;
	struct vxge_hw_vpath_reg	__iomem *vp_reg;
	struct vxge_hw_vpmgmt_reg	__iomem *vpmgmt_reg;
	struct __vxge_hw_non_offload_db_wrapper	__iomem *nofl_db;

	u32				max_mtu;
	u32				vsport_number;
	u32				max_kdfc_db;
	u32				max_nofl_db;
	u64				tim_tti_cfg1_saved;
	u64				tim_tti_cfg3_saved;
	u64				tim_rti_cfg1_saved;
	u64				tim_rti_cfg3_saved;

	struct __vxge_hw_ring *____cacheline_aligned ringh;
	struct __vxge_hw_fifo *____cacheline_aligned fifoh;
	struct list_head		vpath_handles;
	struct __vxge_hw_blockpool_entry		*stats_block;
	struct vxge_hw_vpath_stats_hw_info	*hw_stats;
	struct vxge_hw_vpath_stats_hw_info	*hw_stats_sav;
	struct vxge_hw_vpath_stats_sw_info	*sw_stats;
	spinlock_t lock;
};

/*
 * struct __vxge_hw_vpath_handle - List item to store callback information
 * @item: List head to keep the item in linked list
 * @vpath: Virtual path to which this item belongs
 *
 * This structure is used to store the callback information.
 */
struct __vxge_hw_vpath_handle {
	struct list_head	item;
	struct __vxge_hw_virtualpath	*vpath;
};

/*
 * struct __vxge_hw_device
 *
 * HW device object.
 */
/**
 * struct __vxge_hw_device - HAL device object
 * @magic: Magic Number
 * @bar0: BAR0 virtual address.
 * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
 * @link_state: Link state
 *
 * HW device object. Represents Titan adapter
 */
struct __vxge_hw_device {
	u32				magic;
#define VXGE_HW_DEVICE_MAGIC		0x12345678
#define VXGE_HW_DEVICE_DEAD		0xDEADDEAD
	void __iomem			*bar0;
	struct pci_dev			*pdev;
	struct net_device		*ndev;
	struct vxge_hw_device_config	config;
	enum vxge_hw_device_link_state	link_state;

	const struct vxge_hw_uld_cbs	*uld_callbacks;

	u32				host_type;
	u32				func_id;
	u32				access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH      0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM     0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM     0x4
	struct vxge_hw_legacy_reg	__iomem *legacy_reg;
	struct vxge_hw_toc_reg		__iomem *toc_reg;
	struct vxge_hw_common_reg	__iomem *common_reg;
	struct vxge_hw_mrpcim_reg	__iomem *mrpcim_reg;
	struct vxge_hw_srpcim_reg	__iomem *srpcim_reg \
					[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
	struct vxge_hw_vpmgmt_reg	__iomem *vpmgmt_reg \
					[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
	struct vxge_hw_vpath_reg	__iomem *vpath_reg \
					[VXGE_HW_TITAN_VPATH_REG_SPACES];
	u8				__iomem *kdfc;
	u8				__iomem *usdc;
	struct __vxge_hw_virtualpath	virtual_paths \
					[VXGE_HW_MAX_VIRTUAL_PATHS];
	u64				vpath_assignments;
	u64				vpaths_deployed;
	u32				first_vp_id;
	u64				tim_int_mask0[4];
	u32				tim_int_mask1[4];

	struct __vxge_hw_blockpool	block_pool;
	struct vxge_hw_device_stats	stats;
	u32				debug_module_mask;
	u32				debug_level;
	u32				level_err;
	u32				level_trace;
	u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
};

#define VXGE_HW_INFO_LEN	64
/**
 * struct vxge_hw_device_hw_info - Device information
 * @host_type: Host Type
 * @func_id: Function Id
 * @vpath_mask: vpath bit mask
 * @fw_version: Firmware version
 * @fw_date: Firmware date
 * @flash_version: Flash version
 * @flash_date: Flash date
 * @mac_addrs: Mac addresses for each vpath
 * @mac_addr_masks: Mac address masks for each vpath
 *
 * Returns the vpath mask that has a bit set for each vpath allocated
 * to the driver, and the first MAC address for each vpath
 */
struct vxge_hw_device_hw_info {
	u32		host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION			0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION			1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0				2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION			3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG			4
#define VXGE_HW_SR_VH_FUNCTION0					5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION				6
#define VXGE_HW_VH_NORMAL_FUNCTION				7
	u64		function_mode;
#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION			0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION			1
#define VXGE_HW_FUNCTION_MODE_SRIOV				2
#define VXGE_HW_FUNCTION_MODE_MRIOV				3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8				4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17			5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8				6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4				7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2			8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4			9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4				10

	u32		func_id;
	u64		vpath_mask;
	struct vxge_hw_device_version fw_version;
	struct vxge_hw_device_date    fw_date;
	struct vxge_hw_device_version flash_version;
	struct vxge_hw_device_date    flash_date;
	u8		serial_number[VXGE_HW_INFO_LEN];
	u8		part_number[VXGE_HW_INFO_LEN];
	u8		product_desc[VXGE_HW_INFO_LEN];
	u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
	u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};

/**
 * struct vxge_hw_device_attr - Device memory spaces.
 * @bar0: BAR0 virtual address.
 * @pdev: PCI device object.
 *
 * Device memory spaces. Includes configuration, BAR0 etc. per device
 * mapped memories. Also, includes a pointer to OS-specific PCI device object.
 */
struct vxge_hw_device_attr {
	void __iomem		*bar0;
	struct pci_dev 		*pdev;
	const struct vxge_hw_uld_cbs *uld_callbacks;
};

#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls)	(hldev->link_state = ls)

#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) {	\
	if (i < 16) {				\
		m0[0] |= vxge_vBIT(0x8, (i*4), 4);	\
		m0[1] |= vxge_vBIT(0x4, (i*4), 4);	\
	}					\
	else {					\
		m1[0] = 0x80000000;		\
		m1[1] = 0x40000000;		\
	}					\
}

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) {	\
	if (i < 16) {					\
		m0[0] &= ~vxge_vBIT(0x8, (i*4), 4);	\
		m0[1] &= ~vxge_vBIT(0x4, (i*4), 4);	\
	}						\
	else {						\
		m1[0] = 0;				\
		m1[1] = 0;				\
	}						\
}
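
/*
 * Behaviour sketch (illustrative): for vpath ids 0..15 the SET macro
 * ORs a 4-bit pattern into the u64 mask words via vxge_vBIT(); for ids
 * 16 and above it writes fixed bits into the u32 mask words:
 *
 *	u64 m0[4] = {0};
 *	u32 m1[4] = {0};
 *
 *	VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, 3);	(m0[0], m0[1] set)
 *	VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, 17);	(m1[0], m1[1] set)
 *	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, 3);	(m0 bits cleared)
 */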

#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) {		\
	status = vxge_hw_mrpcim_stats_access(hldev, \
				VXGE_HW_STATS_OP_READ, \
				loc, \
				offset, \
				&val64);			\
	if (status != VXGE_HW_OK)				\
		return status;					\
}

/*
 * struct __vxge_hw_ring - Ring channel.
 * @channel: Channel "base" of this ring, the common part of all HW
 *           channels.
 * @mempool: Memory pool, the pool from which descriptors get allocated.
 *           (See vxge_hw_mm.h).
 * @config: Ring configuration, part of device configuration
 *          (see struct vxge_hw_device_config{}).
 * @ring_length: Length of the ring
 * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
 *          as per Titan User Guide.
 * @rxd_size: RxD size for 1-, 3- or 5-buffer modes. As per Titan spec,
 *            a 1-buffer mode descriptor is 32 bytes long, etc.
 * @rxd_priv_size: Per-RxD size reserved (by HW) for the driver to keep
 *                 per-descriptor data (e.g., a DMA handle for Solaris)
 * @per_rxd_space: Per-RxD space requested by the driver
 * @rxds_per_block: Number of descriptors per hardware-defined RxD
 *                  block. Depends on the (1-, 3-, 5-) buffer mode.
 * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
 *                      usage. Not to be confused with @rxd_priv_size.
 * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
 * @callback: Channel completion callback. HW invokes the callback when there
 *            are new completions on that channel. In many implementations
 *            the @callback executes in the hw interrupt context.
 * @rxd_init: Channel's descriptor-initialize callback.
 *            See vxge_hw_ring_rxd_init_f{}.
 *            If not NULL, HW invokes the callback when opening
 *            the ring.
 * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
 *          HW invokes the callback when closing the corresponding channel.
 *          See also vxge_hw_channel_rxd_term_f{}.
 * @stats: Statistics for ring
 * Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 *       CPU cache performance.
 */
struct __vxge_hw_ring {
	struct __vxge_hw_channel		channel;
	struct vxge_hw_mempool			*mempool;
	struct vxge_hw_vpath_reg		__iomem	*vp_reg;
	struct vxge_hw_common_reg		__iomem	*common_reg;
	u32					ring_length;
	u32					buffer_mode;
	u32					rxd_size;
	u32					rxd_priv_size;
	u32					per_rxd_space;
	u32					rxds_per_block;
	u32					rxdblock_priv_size;
	u32					cmpl_cnt;
	u32					vp_id;
	u32					doorbell_cnt;
	u32					total_db_cnt;
	u64					rxds_limit;
	u32					rtimer;
	u64					tim_rti_cfg1_saved;
	u64					tim_rti_cfg3_saved;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_ring_info *stats	____cacheline_aligned;
	struct vxge_hw_ring_config		*config;
} ____cacheline_aligned;

/**
 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
 * device.
 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
 * filling-in and posting later.
 *
 * Titan/HW descriptor states.
 *
 */
enum vxge_hw_txdl_state {
	VXGE_HW_TXDL_STATE_NONE	= 0,
	VXGE_HW_TXDL_STATE_AVAIL	= 1,
	VXGE_HW_TXDL_STATE_POSTED	= 2,
	VXGE_HW_TXDL_STATE_FREED	= 3
};
/*
 * struct __vxge_hw_fifo - Fifo.
 * @channel: Channel "base" of this fifo, the common part of all HW
 *             channels.
 * @mempool: Memory pool, from which descriptors get allocated.
 * @config: Fifo configuration, part of device configuration
 *             (see struct vxge_hw_device_config{}).
 * @interrupt_type: Interrupt type to be used
 * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
 * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
 *             For more details on TxDLs please refer to Titan UG.
 * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
 *             per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
 * @priv_size: Per-Tx descriptor space reserved for driver
 *             usage.
 * @per_txdl_space: Per-TxDL private space for the driver
 * @callback: Fifo completion callback. HW invokes the callback when there
 *             are new completions on that fifo. In many implementations
 *             the @callback executes in the hw interrupt context.
 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
 *             HW invokes the callback when closing the corresponding fifo.
 *             See also vxge_hw_fifo_txdl_term_f{}.
 * @stats: Statistics of this fifo
 *
 * Fifo channel.
 * Note: The structure is cache line aligned.
 */
struct __vxge_hw_fifo {
	struct __vxge_hw_channel		channel;
	struct vxge_hw_mempool			*mempool;
	struct vxge_hw_fifo_config		*config;
	struct vxge_hw_vpath_reg		__iomem *vp_reg;
	struct __vxge_hw_non_offload_db_wrapper	__iomem *nofl_db;
	u64					interrupt_type;
	u32					no_snoop_bits;
	u32					txdl_per_memblock;
	u32					txdl_size;
	u32					priv_size;
	u32					per_txdl_space;
	u32					vp_id;
	u32					tx_intr_num;
	u32					rtimer;
	u64					tim_tti_cfg1_saved;
	u64					tim_tti_cfg3_saved;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb,
			int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
} ____cacheline_aligned;

/*
 * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
 * @dma_addr: DMA (mapped) address of _this_ descriptor.
 * @dma_handle: DMA handle used to map the descriptor onto device.
 * @dma_offset: Descriptor's offset in the memory block. HW allocates
 *	 descriptors in memory blocks (see struct vxge_hw_fifo_config{}).
 *             Each memblock is a contiguous block of DMA-able memory.
 * @frags: Total number of fragments (that is, contiguous data buffers)
 * carried by this TxDL.
 * @align_vaddr_start: Aligned virtual address start
 * @align_vaddr: Virtual address of the per-TxDL area in memory used for
 *             alignment. Used to place one or more mis-aligned fragments
 * @align_dma_addr: DMA address translated from the @align_vaddr.
 * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
 * @align_dma_acch: DMA access handle that corresponds to @align_dma_addr.
 * @align_dma_offset: The current offset into the @align_vaddr area.
 * Grows while filling the descriptor, gets reset.
 * @align_used_frags: Number of fragments used.
 * @alloc_frags: Total number of fragments allocated.
 * @unused: TODO
 * @next_txdl_priv: (TODO).
 * @first_txdp: (TODO).
 * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
 *             TxDL list.
 * @txdlh: Corresponding txdlh to this TxDL.
 * @memblock: Pointer to the TxDL memory block or memory page.
 * @dma_object: DMA address and handle of the memory block that contains
 *             the descriptor. This member is used only in the "checked"
 *             version of the HW (to enforce certain assertions);
 *             otherwise it gets compiled out.
 * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
 *
 * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
 * information associated with the descriptor. Note that the driver can ask
 * HW to allocate additional per-descriptor space for its own
 * (driver-specific) purposes.
 *
 * See also: struct vxge_hw_ring_rxd_priv{}.
 */
struct __vxge_hw_fifo_txdl_priv {
	dma_addr_t		dma_addr;
	struct pci_dev	*dma_handle;
	ptrdiff_t		dma_offset;
	u32				frags;
	u8				*align_vaddr_start;
	u8				*align_vaddr;
	dma_addr_t		align_dma_addr;
	struct pci_dev 	*align_dma_handle;
	struct pci_dev	*align_dma_acch;
	ptrdiff_t		align_dma_offset;
	u32				align_used_frags;
	u32				alloc_frags;
	u32				unused;
	struct __vxge_hw_fifo_txdl_priv	*next_txdl_priv;
	struct vxge_hw_fifo_txd		*first_txdp;
	void			*memblock;
};

/*
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0: Bits 0 to 7 - Doorbell type.
 *             Bits 8 to 31 - Reserved.
 *             Bits 32 to 39 - The highest TxD in this TxDL.
 *             Bits 40 to 47 - Reserved.
 *             Bits 48 to 55 - Reserved.
 *             Bits 56 to 63 - No snoop flags.
 * @txdl_ptr:  The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
 * part of a doorbell write. Consumed by the adapter but is not written by the
 * adapter.
 */
struct __vxge_hw_non_offload_db_wrapper {
	u64		control_0;
#define	VXGE_HW_NODBW_GET_TYPE(ctrl0)			vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
#define	VXGE_HW_NODBW_TYPE_NODBW				0

#define	VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0)	vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)

#define	VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0)		vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
#define	VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE		0x2
#define	VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ		0x1

	u64		txdl_ptr;
};
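
/*
 * Composition sketch (illustrative; last_txd and txdl_dma are
 * hypothetical values supplied by the caller):
 *
 *	struct __vxge_hw_non_offload_db_wrapper db;
 *
 *	db.control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
 *		VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd) |
 *		VXGE_HW_NODBW_LIST_NO_SNOOP(
 *			VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ);
 *	db.txdl_ptr = txdl_dma;
 */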

/*
 * TX Descriptor
 */

/**
 * struct vxge_hw_fifo_txd - Transmit Descriptor
 * @control_0: Bits 0 to 6 - Reserved.
 *             Bit 7 - List Ownership. This field should be initialized
 *             to '1' by the driver before the transmit list pointer is
 *             written to the adapter. This field will be set to '0' by the
 *             adapter once it has completed transmitting the frame or frames in
 *             the list. Note - This field is only valid in TxD0. Additionally,
 *             for multi-list sequences, the driver should not release any
 *             buffers until the ownership of the last list in the multi-list
 *             sequence has been returned to the host.
 *             Bits 8 to 11 - Reserved
 *             Bits 12 to 15 - Transfer_Code. This field is only valid in
 *             TxD0. It is used to describe the status of the transmit data
 *             buffer transfer. This field is always overwritten by the
 *             adapter, so this field may be initialized to any value.
 *             Bits 16 to 17 - Host steering. This field allows the host to
 *             override the selection of the physical transmit port.
 *             Attention:
 *             Normal sounds as if learned from the switch rather than from
 *             the aggregation algorithms.
 *             00: Normal. Use Destination/MAC Address
 *             lookup to determine the transmit port.
 *             01: Send on physical Port1.
 *             10: Send on physical Port0.
 *             11: Send on both ports.
 *             Bits 18 to 21 - Reserved
 *             Bits 22 to 23 - Gather_Code. This field is set by the host and
 *             is used to describe how individual buffers comprise a frame.
 *             10: First descriptor of a frame.
 *             00: Middle of a multi-descriptor frame.
 *             01: Last descriptor of a frame.
 *             11: First and last descriptor of a frame (the entire frame
 *             resides in a single buffer).
 *             For multi-descriptor frames, the only valid gather code sequence
 *             is {10, [00], 01}. In other words, the descriptors must be placed
 *             in the list in the correct order.
 *             Bits 24 to 27 - Reserved
 *             Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
 *             definition. Only valid in TxD0. This field allows the host to
 *             indicate the Ethernet encapsulation of an outbound LSO packet.
 *             00 - classic mode (best guess)
 *             01 - LLC
 *             10 - SNAP
 *             11 - DIX
 *             If "classic mode" is selected, the adapter will attempt to
 *             decode the frame's Ethernet encapsulation by examining the L/T
 *             field as follows:
 *             <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
 *             if packet is IPv4 or IPv6.
 *             0x8870 Jumbo-SNAP encoding.
 *             0x0800 IPv4 DIX encoding
 *             0x86DD IPv6 DIX encoding
 *             others illegal encapsulation
 *             Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
 *             Set to 1 to perform segmentation offload for TCP/UDP.
 *             This field is valid only in TxD0.
 *             Bits 31 to 33 - Reserved.
 *             Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
 *             This field is meaningful only when LSO_Control is non-zero.
 *             When LSO_Control is set to TCP_LSO, the single (possibly large)
 *             TCP segment described by this TxDL will be sent as a series of
 *             TCP segments each of which contains no more than LSO_MSS
 *             payload bytes.
 *             When LSO_Control is set to UDP_LSO, the single (possibly large)
 *             UDP datagram described by this TxDL will be sent as a series of
 *             UDP datagrams each of which contains no more than LSO_MSS
 *             payload bytes.
 *             All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
 *             or TCP payload, with the exception of the last, which will have
 *             <= LSO_MSS bytes of payload.
 *             Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
 *             buffer to be read by the adapter. This field is written by the
 *             host. A value of 0 is illegal.
 *             Upon completion of a UDP or TCP LSO operation, the adapter
 *             overwrites bits 32 to 63 with the number of UDP or TCP payload
 *             bytes that were transmitted. 0x0000 will be returned for any
 *             non-LSO operation.
 * @control_1: Bits 0 to 4 - Reserved.
 *             Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
 *             offload. This field is only valid in the first TxD of a frame.
 *             Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
 *             This field is only valid in the first TxD of a frame (the TxD's
 *             gather code must be 10 or 11). The driver should only set this
 *             bit if it can guarantee that TCP is present.
 *             Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
 *             This field is only valid in the first TxD of a frame (the TxD's
 *             gather code must be 10 or 11). The driver should only set this
 *             bit if it can guarantee that UDP is present.
 *             Bits 8 to 14 - Reserved.
 *             Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
 *             instruct the adapter to insert the VLAN tag specified by the
 *             Tx_VLAN_Tag field. This field is only valid in the first TxD of
 *             a frame.
 *             Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
 *             to be inserted into the frame by the adapter (the first two bytes
 *             of a VLAN tag are always 0x8100). This field is only valid if the
 *             Tx_VLAN_Enable field is set to '1'.
 *             Bits 32 to 33 - Reserved.
 *             Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
 *             number the frame is associated with. This field is written by
 *             the host. It is only valid in the first TxD of a frame.
 *             Bits 40 to 42 - Reserved.
 *             Bit 43 - Set to 1 to exclude the frame from bandwidth metering
 *             functions. This field is valid only in the first TxD
 *             of a frame.
 *             Bits 44 to 45 - Reserved.
 *             Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
 *             generate an interrupt as soon as all of the frames in the list
 *             have been transmitted. In order to have per-frame interrupts,
 *             the driver should place a maximum of one frame per list. This
 *             field is only valid in the first TxD of a frame.
 *             Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
 *             to count the frame toward the utilization interrupt specified in
 *             the Tx_Int_Number field. This field is only valid in the first
 *             TxD of a frame.
 *             Bits 48 to 63 - Reserved.
 * @buffer_pointer: Buffer start address.
 * @host_control: Host_Control. Opaque 64-bit data stored by the driver
 *            inside the Titan descriptor prior to posting the latter on
 *            the fifo via vxge_hw_fifo_txdl_post(). The %host_control is
 *            returned as is to the driver with each completed descriptor.
 *
 * Transmit descriptor (TxD). A fifo descriptor contains a configured number
 * (list) of TxDs. For more details please refer to Titan User Guide,
 * Section 5.4.2 "Transmit Descriptor (TxD) Format".
 */
struct vxge_hw_fifo_txd {
	u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER		vxge_mBIT(7)

#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val) 			vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED		VXGE_HW_FIFO_T_CODE_UNUSED


#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) 		vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST	VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST	VXGE_HW_FIFO_GATHER_CODE_LAST


#define VXGE_HW_FIFO_TXD_LSO_EN				vxge_mBIT(30)

#define VXGE_HW_FIFO_TXD_LSO_MSS(val) 			vxge_vBIT(val, 34, 14)

#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) 		vxge_vBIT(val, 48, 16)

	u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN			vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN			vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN			vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE			vxge_mBIT(15)

#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) 			vxge_vBIT(val, 16, 16)

#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) 		vxge_vBIT(val, 34, 6)

#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST		vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ			vxge_mBIT(47)

	u64 buffer_pointer;

	u64 host_control;
};
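
/*
 * Fill sketch (illustrative; dma and len come from the caller, and the
 * two gather codes are assumed to OR to the documented "first and
 * last" value 11b for a single-buffer frame):
 *
 *	txdp->buffer_pointer = dma;
 *	txdp->control_0 =
 *		VXGE_HW_FIFO_TXD_GATHER_CODE(
 *			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
 *			VXGE_HW_FIFO_TXD_GATHER_CODE_LAST) |
 *		VXGE_HW_FIFO_TXD_BUFFER_SIZE(len) |
 *		VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
 *	txdp->control_1 = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
 */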

/**
 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
 * @host_control: This field is exclusively for host use and is "readonly"
 *             from the adapter's perspective.
 * @control_0: Bits 0 to 6 - RTH_Bucket.
 *	      Bit 7 - Own Descriptor ownership bit. This bit is set to 1
 *            by the host, and is set to 0 by the adapter.
 *	      0 - Host owns RxD and buffer.
 *	      1 - The adapter owns RxD and buffer.
 *	      Bit 8 - Fast_Path_Eligible When set, indicates that the
 *            received frame meets all of the criteria for fast path processing.
 *            The required criteria are as follows:
 *            !SYN &
 *            (Transfer_Code == "Transfer OK") &
 *            (!Is_IP_Fragment) &
 *            ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
 *            (Is_IPv6)) &
 *            ((Is_TCP & computed_L4_checksum == 0xFFFF) |
 *            (Is_UDP & (computed_L4_checksum == 0xFFFF |
 *            computed_L4_checksum == 0x0000)))
 *            (same meaning for all RxD buffer modes)
 *	      Bit 9 - L3 Checksum Correct
 *	      Bit 10 - L4 Checksum Correct
 *	      Bit 11 - Reserved
 *	      Bits 12 to 15 - Transfer_Code. This field is written by the
 *            adapter. It is used to report the status of the frame transfer
 *            to the host.
 *	      0x0 - Transfer OK
 *	      0x4 - RDA Failure During Transfer
 *	      0x5 - Unparseable Packet, such as unknown IPv6 header.
 *	      0x6 - Frame integrity error (FCS or ECC).
 *	      0x7 - Buffer Size Error. The provided buffer(s) were not
 *                  appropriately sized and data loss occurred.
 *	      0x8 - Internal ECC Error. RxD corrupted.
 *	      0x9 - IPv4 Checksum error
 *	      0xA - TCP/UDP Checksum error
 *	      0xF - Unknown Error or Multiple Error. Indicates an
 *               unknown problem or that more than one transfer code applies.
 *	      Bit 16 - SYN The adapter sets this field to indicate that
 *                the incoming frame contained a TCP segment with its SYN bit
 *	          set and its ACK bit NOT set. (same meaning for all RxD buffer
 *                modes)
 *	      Bit 17 - Is ICMP
 *	      Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
 *                Socket Pair Direct Match Table and the frame was steered based
 *                on SPDM.
 *	      Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
 *            Indirection Table and the frame was steered based on hash
 *            indirection.
 *	      Bits 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
 *	          type) that was used to calculate the hash.
 *	      Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
 *	          tagged.
 *	      Bits 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
 *                of the received frame.
 *	      0x0 - Ethernet DIX
 *	      0x1 - LLC
 *	      0x2 - SNAP (includes Jumbo-SNAP)
 *	      0x3 - IPX
 *	      Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
 *	      Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
 *	      Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
 *            IP packet.
 *	      Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
 *	      Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
 *	      Bits 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
 *            arrived with the frame. If the resulting computed IPv4 header
 *            checksum for the frame did not produce the expected 0xFFFF value,
 *            then the transfer code would be set to 0x9.
 *	      Bits 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
 *            arrived with the frame. If the resulting computed TCP/UDP checksum
 *            for the frame did not produce the expected 0xFFFF value, then the
 *            transfer code would be set to 0xA.
 * @control_1: Bits 0 to 1 - Reserved
 *            Bits 2 to 15 - Buffer0_Size. This field is set by the host and
 *            eventually overwritten by the adapter. The host writes the
 *            available buffer size in bytes when it passes the descriptor to
 *            the adapter. When a frame is delivered to the host, the adapter
 *            populates this field with the number of bytes written into the
 *            buffer. The largest supported buffer is 16,383 bytes.
 *	      Bits 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid
 *	      if RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
 *	      Bits 48 to 63 - VLAN_Tag[0:15] The contents of the variable
 *            portion of the VLAN tag, if one was detected by the adapter. This
 *            field is populated even if VLAN-tag stripping is enabled.
 * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
 *
 * One buffer mode RxD for ring structure
 */
struct vxge_hw_ring_rxd_1 {
	u64 host_control;
	u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0)		vxge_bVALn(ctrl0, 0, 7)

#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER		vxge_mBIT(7)

#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0)	vxge_bVALn(ctrl0, 8, 1)

#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 9, 1)

#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 10, 1)

#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val)			vxge_vBIT(val, 12, 4)

#define VXGE_HW_RING_RXD_T_CODE_UNUSED		VXGE_HW_RING_T_CODE_UNUSED

#define VXGE_HW_RING_RXD_SYN_GET(ctrl0)		vxge_bVALn(ctrl0, 16, 1)

#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0)		vxge_bVALn(ctrl0, 17, 1)

#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0)	vxge_bVALn(ctrl0, 18, 1)

#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0)		vxge_bVALn(ctrl0, 19, 1)

#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0)	vxge_bVALn(ctrl0, 20, 4)

#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0)		vxge_bVALn(ctrl0, 24, 1)

#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0)		vxge_bVALn(ctrl0, 25, 2)

#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0)		vxge_bVALn(ctrl0, 27, 5)

#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 32, 16)

#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 48, 16)

	u64 control_1;

#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)	vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val)	vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK		vxge_vBIT(0x3FFF, 2, 14)

#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1)	vxge_bVALn(ctrl1, 16, 32)

#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1)	vxge_bVALn(ctrl1, 48, 16)

	u64 buffer0_ptr;
};
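
/*
 * A minimal decode sketch (illustrative only, not part of the API):
 * given a pointer rxdp to a descriptor completed by the adapter, the
 * accessor macros above pull individual fields out of the control words.
 *
 *	u8 t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
 *	u32 len;
 *
 *	if (t_code == 0x0) {
 *		len = (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(
 *				rxdp->control_1);
 *	}
 *
 * Transfer code 0x0 is "Transfer OK", per the bit layout documented above.
 */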

enum vxge_hw_rth_algoritms {
	RTH_ALG_JENKINS = 0,
	RTH_ALG_MS_RSS	= 1,
	RTH_ALG_CRC32C	= 2
};

/**
 * struct vxge_hw_rth_hash_types - RTH hash types.
 * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
 * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
 * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
 * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
 * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
 * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
 *
 * Used to pass RTH hash types to vxge_hw_vpath_rts_rth_set().
 *
 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
 */
struct vxge_hw_rth_hash_types {
	u8 hash_type_tcpipv4_en:1,
	   hash_type_ipv4_en:1,
	   hash_type_tcpipv6_en:1,
	   hash_type_ipv6_en:1,
	   hash_type_tcpipv6ex_en:1,
	   hash_type_ipv6ex_en:1;
};
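
/*
 * A minimal configuration sketch (assumes vp is an open virtual-path
 * handle and that a bucket size of 8 suits the deployment; both are
 * illustrative choices, not requirements):
 *
 *	struct vxge_hw_rth_hash_types hash_types;
 *	enum vxge_hw_status status;
 *
 *	memset(&hash_types, 0, sizeof(hash_types));
 *	hash_types.hash_type_tcpipv4_en = 1;
 *	hash_types.hash_type_ipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS,
 *					   &hash_types, 8);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */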

void vxge_hw_device_debug_set(
	struct __vxge_hw_device *devh,
	enum vxge_debug_level level,
	u32 mask);

u32
vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);

u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);

/**
 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
 * @buf_mode: Buffer mode (1 buffer mode only)
 *
 * This function returns the size of RxD for the given buffer mode.
 */
static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
{
	return sizeof(struct vxge_hw_ring_rxd_1);
}

/**
 * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
 * @buf_mode: Buffer mode (1 buffer mode only)
 *
 * This function returns the number of RxDs per RxD block for the given
 * buffer mode.
 */
static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
{
	return (u32)((VXGE_HW_BLOCK_SIZE - 16) /
		sizeof(struct vxge_hw_ring_rxd_1));
}
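
/*
 * For example, assuming the typical 4096-byte block size
 * (VXGE_HW_BLOCK_SIZE == 4096) and the 32-byte one-buffer RxD defined
 * above, this evaluates to (4096 - 16) / 32 = 127 descriptors per block;
 * the last 16 bytes of each block are reserved.
 */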

/**
 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
 * @rxdh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer this descriptor
 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
 * the receive buffer should already be mapped to the device.
 * @size: Size (in bytes) of the receive buffer at @dma_pointer.
 *
 * Prepare a 1-buffer-mode Rx descriptor for posting
 * (via vxge_hw_ring_rxd_post()).
 *
 * This inline helper function returns nothing and always succeeds.
 */
static inline
void vxge_hw_ring_rxd_1b_set(
	void *rxdh,
	dma_addr_t dma_pointer,
	u32 size)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	rxdp->buffer0_ptr = dma_pointer;
	rxdp->control_1	&= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
	rxdp->control_1	|= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}
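
/*
 * A minimal refill sketch (assumes pdev is the adapter's PCI device,
 * ring an open ring handle, and skb a freshly allocated socket buffer
 * of size bytes; error handling omitted):
 *
 *	dma_addr_t dma = pci_map_single(pdev, skb->data, size,
 *					PCI_DMA_FROMDEVICE);
 *
 *	vxge_hw_ring_rxd_1b_set(rxdh, dma, size);
 *	vxge_hw_ring_rxd_post(ring, rxdh);
 */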

/**
 * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
 * descriptor.
 * @ring_handle: Ring handle.
 * @rxdh: Descriptor handle.
 * @pkt_length: Returned length (in bytes) of the data the adapter wrote
 * into the receive buffer.
 *
 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
 * This inline helper function uses the completed descriptor to populate
 * the "out" parameters. The function always succeeds.
 */
static inline
void vxge_hw_ring_rxd_1b_get(
	struct __vxge_hw_ring *ring_handle,
	void *rxdh,
	u32 *pkt_length)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	*pkt_length =
		(u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
}

/**
 * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
 * a completed receive descriptor for 1b mode.
 * @ring_handle: Ring handle.
 * @rxdh: Descriptor handle.
 * @rxd_info: Descriptor information
 *
 * Retrieve extended information associated with a completed receive
 * descriptor.
 */
static inline
void vxge_hw_ring_rxd_1b_info_get(
	struct __vxge_hw_ring *ring_handle,
	void *rxdh,
	struct vxge_hw_ring_rxd_info *rxd_info)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	rxd_info->syn_flag =
		(u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
	rxd_info->is_icmp =
		(u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
	rxd_info->fast_path_eligible =
		(u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
	rxd_info->l3_cksum_valid =
		(u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
	rxd_info->l3_cksum =
		(u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
	rxd_info->l4_cksum_valid =
		(u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
	rxd_info->l4_cksum =
		(u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
	rxd_info->frame =
		(u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
	rxd_info->proto =
		(u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
	rxd_info->is_vlan =
		(u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
	rxd_info->vlan =
		(u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
	rxd_info->rth_bucket =
		(u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
	rxd_info->rth_it_hit =
		(u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
	rxd_info->rth_spdm_hit =
		(u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
	rxd_info->rth_hash_type =
		(u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
	rxd_info->rth_value =
		(u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
}

/**
 * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
 *                      of a 1-buffer-mode ring.
 * @rxdh: Descriptor handle.
 *
 * Returns: private driver info associated with the descriptor.
 * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
 */
static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	return (void *)(size_t)rxdp->host_control;
}

/**
 * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
 * @txdlh: Descriptor handle.
 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
 *              and/or TCP and/or UDP.
 *
 * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
 * descriptor.
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
 * and vxge_hw_fifo_txdl_buffer_set().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 */
static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_1 |= cksum_bits;
}

/**
 * vxge_hw_fifo_txdl_mss_set - Set MSS.
 * @txdlh: Descriptor handle.
 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
 *       driver, which in turn inserts the MSS into the @txdlh.
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
 * and vxge_hw_fifo_txdl_cksum_set_bits().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 */
static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
}

/**
 * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
 * @txdlh: Descriptor handle.
 * @vlan_tag: 16bit VLAN tag.
 *
 * Insert VLAN tag into specified transmit descriptor.
 * The actual insertion of the tag into outgoing frame is done by the hardware.
 */
static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
}
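
/*
 * Taken together, the helpers above form the per-descriptor preparation
 * step. A transmit-path sketch (assumes fifo is an open fifo handle,
 * txdlh a reserved descriptor, and that the checksum-enable flags follow
 * the VXGE_HW_FIFO_TXD_TX_CKO_* naming used by this driver; names not
 * declared in this header are assumptions):
 *
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
 *	vxge_hw_fifo_txdl_mss_set(txdlh, mss);
 *	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
 *			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
 *			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
 *	vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */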

/**
 * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
 * @txdlh: Descriptor handle.
 *
 * Retrieve per-descriptor private data.
 * Note that the driver requests per-descriptor space via
 * struct vxge_hw_fifo_attr passed to
 * vxge_hw_vpath_open().
 *
 * Returns: private driver data associated with the descriptor.
 */
static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	return (void *)(size_t)txdp->host_control;
}

/**
 * struct vxge_hw_ring_attr - Ring open "template".
 * @callback: Ring completion callback. HW invokes the callback when there
 *            are new completions on that ring. In many implementations
 *            the @callback executes in the hw interrupt context.
 * @rxd_init: Ring's descriptor-initialize callback.
 *            See vxge_hw_ring_rxd_init_f{}.
 *            If not NULL, HW invokes the callback when opening
 *            the ring.
 * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
 *          HW invokes the callback when closing the corresponding ring.
 *          See also vxge_hw_ring_rxd_term_f{}.
 * @userdata: User-defined "context" of _that_ ring. Passed back to the
 *            user as one of the @callback, @rxd_init, and @rxd_term arguments.
 * @per_rxd_space: If specified (i.e., greater than zero): extra space
 *              reserved by HW per receive descriptor. Can be used to
 *              store, and retrieve on completion, information specific
 *              to the driver.
 *
 * Ring open "template". User fills the structure with ring
 * attributes and passes it to vxge_hw_vpath_open(), as sketched below.
 */
struct vxge_hw_ring_attr {
	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	void		*userdata;
	u32		per_rxd_space;
};
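
/*
 * A minimal fill-in sketch; the my_* symbols are hypothetical driver-side
 * functions and types matching the callback signatures above:
 *
 *	struct vxge_hw_ring_attr ring_attr;
 *
 *	memset(&ring_attr, 0, sizeof(ring_attr));
 *	ring_attr.callback = my_rx_callback;
 *	ring_attr.rxd_init = my_rxd_init;
 *	ring_attr.rxd_term = my_rxd_term;
 *	ring_attr.userdata = my_ring_context;
 *	ring_attr.per_rxd_space = sizeof(struct my_rx_priv);
 *
 * The filled structure is then embedded in struct vxge_hw_vpath_attr and
 * handed to vxge_hw_vpath_open() (see below).
 */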

/**
 * function vxge_hw_fifo_callback_f - FIFO callback.
 * @vpath_handle: Virtual path whose fifo contains one or more completed
 *             descriptors.
 * @txdlh: First completed descriptor.
 * @txdl_priv: Pointer to the per-txdl space allocated.
 * @t_code: Transfer code, as per Titan User Guide.
 *          Returned by HW.
 * @host_control: Opaque 64bit data stored by driver inside the Titan
 *            descriptor prior to posting the latter on the fifo
 *            via vxge_hw_fifo_txdl_post(). The @host_control is returned
 *            as is to the driver with each completed descriptor.
 * @userdata: Opaque per-fifo data specified at fifo open
 *            time, via vxge_hw_vpath_open().
 *
 * Fifo completion callback (type declaration). A single per-fifo
 * callback is specified at fifo open time, via
 * vxge_hw_vpath_open(). Typically gets called as part of the processing
 * of the Interrupt Service Routine.
 *
 * Fifo callback gets called by HW if, and only if, there is at least
 * one new completion on a given fifo. Upon processing the first @txdlh the
 * driver is expected to continue consuming completions using:
 *    - vxge_hw_fifo_txdl_next_completed()
 *
 * Note that failure to process new completions in a timely fashion
 * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
 *
 * Non-zero @t_code means failure to process transmit descriptor.
 *
 * In the "transmit" case the failure could happen, for instance, when the
 * link is down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
 */
/**
 * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
 * @txdlh: First completed descriptor.
 * @txdl_priv: Pointer to the per-txdl space allocated.
 * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
 * @userdata: Per-fifo user data (a.k.a. context) specified at
 * fifo open time, via vxge_hw_vpath_open().
 *
 * Terminate descriptor callback. Unless NULL is specified in the
 * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
 * HW invokes the callback as part of closing the fifo, prior to
 * de-allocating the ring and associated data structures
 * (including descriptors).
 * The driver should use the callback to (for instance) unmap
 * and free DMA data buffers associated with the posted (state =
 * VXGE_HW_TXDL_STATE_POSTED) descriptors,
 * as well as to perform other relevant cleanup.
 *
 * See also: struct vxge_hw_fifo_attr{}
 */
/**
 * struct vxge_hw_fifo_attr - Fifo open "template".
 * @callback: Fifo completion callback. HW invokes the callback when there
 *            are new completions on that fifo. In many implementations
 *            the @callback executes in the hw interrupt context.
 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
 *          HW invokes the callback when closing the corresponding fifo.
 *          See also vxge_hw_fifo_txdl_term_f{}.
 * @userdata: User-defined "context" of _that_ fifo. Passed back to the
 *            user as one of the @callback, and @txdl_term arguments.
 * @per_txdl_space: If specified (i.e., greater than zero): extra space
 *              reserved by HW per each transmit descriptor. Can be used to
 *              store, and retrieve on completion, information specific
 *              to the driver.
 *
 * Fifo open "template". User fills the structure with fifo
 * attributes and passes it to vxge_hw_vpath_open().
 */
struct vxge_hw_fifo_attr {

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb, int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	void		*userdata;
	u32		per_txdl_space;
};

/**
 * struct vxge_hw_vpath_attr - Attributes of virtual path
 * @vp_id: Identifier of Virtual Path
 * @ring_attr: Attributes of ring for non-offload receive
 * @fifo_attr: Attributes of fifo for non-offload transmit
 *
 * Attributes of virtual path.  This structure is passed as parameter
 * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
 */
struct vxge_hw_vpath_attr {
	u32				vp_id;
	struct vxge_hw_ring_attr	ring_attr;
	struct vxge_hw_fifo_attr	fifo_attr;
};
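
/*
 * Putting the templates together, a vpath-open sketch (assumes devh is
 * an initialized device handle; the my_* symbols are hypothetical):
 *
 *	struct vxge_hw_vpath_attr attr;
 *	struct __vxge_hw_vpath_handle *vp;
 *	enum vxge_hw_status status;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.vp_id = 0;
 *	attr.ring_attr.callback = my_rx_callback;
 *	attr.fifo_attr.callback = my_tx_callback;
 *	status = vxge_hw_vpath_open(devh, &attr, &vp);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */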

enum vxge_hw_status vxge_hw_device_hw_info_get(
	void __iomem *bar0,
	struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status vxge_hw_device_config_default_get(
	struct vxge_hw_device_config *device_config);

/**
 * vxge_hw_device_link_state_get - Get link state.
 * @devh: HW device handle.
 *
 * Get link state.
 * Returns: link state.
 */
static inline
enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
	struct __vxge_hw_device *devh)
{
	return devh->link_state;
}

void vxge_hw_device_terminate(struct __vxge_hw_device *devh);

const u8 *
vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);

const u8 *
vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);

enum vxge_hw_status vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config);

enum vxge_hw_status vxge_hw_device_getpause_data(
	struct __vxge_hw_device *devh,
	u32 port,
	u32 *tx,
	u32 *rx);

enum vxge_hw_status vxge_hw_device_setpause_data(
	struct __vxge_hw_device *devh,
	u32 port,
	u32 tx,
	u32 rx);

static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
			unsigned long size,
			struct pci_dev **p_dmah,
			struct pci_dev **p_dma_acch)
{
	gfp_t flags;
	void *vaddr;
	unsigned long misaligned = 0;
	int realloc_flag = 0;

	*p_dma_acch = *p_dmah = NULL;

	if (in_interrupt())
		flags = GFP_ATOMIC | GFP_DMA;
	else
		flags = GFP_KERNEL | GFP_DMA;
realloc:
	vaddr = kmalloc(size, flags);
	if (vaddr == NULL)
		return vaddr;
	misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
				VXGE_CACHE_LINE_SIZE);
	if (realloc_flag)
		goto out;

	if (misaligned) {
		/* misaligned, free current one and try allocating
		 * size + VXGE_CACHE_LINE_SIZE memory
		 */
		kfree(vaddr);
		size += VXGE_CACHE_LINE_SIZE;
		realloc_flag = 1;
		goto realloc;
	}
out:
	/* Stash the padding that was applied so vxge_os_dma_free() can
	 * recover the original kmalloc() pointer, then hand back the
	 * cache-line-aligned address.
	 */
	*(unsigned long *)p_dma_acch = misaligned;
	vaddr = (void *)((u8 *)vaddr + misaligned);
	return vaddr;
}

static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
			struct pci_dev **p_dma_acch)
{
	unsigned long misaligned = *(unsigned long *)p_dma_acch;
	u8 *tmp = (u8 *)vaddr;

	/* Undo the alignment offset recorded at allocation time */
	tmp -= misaligned;
	kfree((void *)tmp);
}

/*
 * __vxge_hw_mempool_item_priv - Return a pointer to the per-item private
 * space.
 */
static inline void *
__vxge_hw_mempool_item_priv(
	struct vxge_hw_mempool *mempool,
	u32 memblock_idx,
	void *item,
	u32 *memblock_item_idx)
{
	ptrdiff_t offset;
	void *memblock = mempool->memblocks_arr[memblock_idx];

	offset = (u8 *)item - (u8 *)memblock;
	vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);

	(*memblock_item_idx) = (u32)offset / mempool->item_size;
	vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);

	return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
			    (*memblock_item_idx) * mempool->items_priv_size;
}

/*
 * __vxge_hw_fifo_txdl_priv - Return a pointer to the per-TxDL private data.
 * @fifo: Fifo
 * @txdp: Pointer to a TxD
 */
static inline struct __vxge_hw_fifo_txdl_priv *
__vxge_hw_fifo_txdl_priv(
	struct __vxge_hw_fifo *fifo,
	struct vxge_hw_fifo_txd *txdp)
{
	return (struct __vxge_hw_fifo_txdl_priv *)
			(((char *)((ulong)txdp->host_control)) +
				fifo->per_txdl_space);
}

enum vxge_hw_status vxge_hw_vpath_open(
	struct __vxge_hw_device *devh,
	struct vxge_hw_vpath_attr *attr,
	struct __vxge_hw_vpath_handle **vpath_handle);

enum vxge_hw_status vxge_hw_vpath_close(
	struct __vxge_hw_vpath_handle *vpath_handle);

enum vxge_hw_status
vxge_hw_vpath_reset(
	struct __vxge_hw_vpath_handle *vpath_handle);

enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(
	struct __vxge_hw_vpath_handle *vpath_handle);

void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);

enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);

enum vxge_hw_status vxge_hw_vpath_mtu_set(
	struct __vxge_hw_vpath_handle *vpath_handle,
	u32 new_mtu);

void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);

#ifndef readq
static inline u64 readq(void __iomem *addr)
{
	u64 ret = 0;

	/* Fallback for platforms without a native 64-bit MMIO read:
	 * read the upper and lower 32-bit halves separately. The
	 * combined access is not atomic.
	 */
	ret = readl(addr + 4);
	ret <<= 32;
	ret |= readl(addr);

	return ret;
}
#endif

#ifndef writeq
static inline void writeq(u64 val, void __iomem *addr)
{
	/* Fallback 64-bit MMIO write: lower half first, then upper */
	writel((u32) (val), addr);
	writel((u32) (val >> 32), (addr + 4));
}
#endif

static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
	writel(val, addr + 4);
}

static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
	writel(val, addr);
}

enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);

enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);

/**
 * vxge_debug_ll
 * @level: level of debug verbosity.
 * @mask: mask for the debug
 * @fmt: printf like format string
 *
 * Provides logging facilities. Can be customized on a per-module
 * basis and/or with debug levels. Input parameters, except
 * level and mask, are the same as posix printf. This function
 * may be compiled out if the DEBUG macro was never defined.
 * See also: enum vxge_debug_level{}.
 */
#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
#define vxge_debug_ll(level, mask, fmt, ...) do {			       \
	if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) ||  \
	    (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
		if ((mask & VXGE_DEBUG_MASK) == mask)			       \
			printk(fmt "\n", __VA_ARGS__);			       \
} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)
#endif
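
/*
 * Usage sketch; both statements compile out when the component is not
 * selected in VXGE_DEBUG_MODULE_MASK:
 *
 *	vxge_debug_ll(VXGE_TRACE, VXGE_COMPONENT_LL,
 *			"%s: entering", __func__);
 *	vxge_debug_ll(VXGE_ERR, VXGE_COMPONENT_LL,
 *			"%s: reset failed with status %d", __func__, status);
 */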

enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
			struct __vxge_hw_vpath_handle **vpath_handles,
			u32 vpath_count,
			u8 *mtable,
			u8 *itable,
			u32 itable_size);

enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
	struct __vxge_hw_vpath_handle *vpath_handle,
	enum vxge_hw_rth_algoritms algorithm,
	struct vxge_hw_rth_hash_types *hash_type,
	u16 bucket_size);

enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);

#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
#define VXGE_HW_MAX_POLLING_COUNT 100

void
vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);

enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
			     u32 *minor, u32 *build);

enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);

enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
		     int size);

enum vxge_hw_status
vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
				struct eprom_image *eprom_image_data);

int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
#endif /* VXGE_CONFIG_H */