/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>

#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA 	= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 * (A capability-test sketch follows this enum.)
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<24),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
	IB_DEVICE_SIGNATURE_HANDOVER	= (1<<30),
	IB_DEVICE_ON_DEMAND_PAGING	= (1<<31),
};
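
/*
 * Illustrative sketch (not part of the API): testing the checksum
 * offload capability described above.  The device_cap_flags value
 * would come from a prior ib_query_device() call; the helper name is
 * hypothetical.
 */
static inline bool ib_example_ud_csum_supported(int device_cap_flags)
{
	return !!(device_cap_flags & IB_DEVICE_UD_IP_CSUM);
}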

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t  rc_odp_caps;
		uint32_t  uc_odp_caps;
		uint32_t  ud_odp_caps;
	} per_transport_caps;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: 	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default: 	  return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
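
/*
 * Illustrative sketch of wiring up an asynchronous event handler with
 * the macro above.  The no-op handler body and both helper names are
 * hypothetical; registration itself goes through
 * ib_register_event_handler(), declared further down in this header.
 */
static inline void ib_example_async_event(struct ib_event_handler *handler,
					  struct ib_event *event)
{
	/* A real consumer would switch on event->event here. */
}

static inline void ib_example_setup_handler(struct ib_event_handler *eh,
					    struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(eh, device, ib_example_async_event);
}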

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
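
/*
 * Sketch of the relationship between the two conversion helpers above
 * for the multiple-based rates: the Mbps value is the multiple times
 * the 2.5 Gbit/sec base rate.  The helper name is hypothetical and
 * for illustration only.
 */
static inline int ib_example_mult_to_mbps(int mult)
{
	return mult * 2500;	/* base rate of 2.5 Gbit/sec, in Mbit/sec */
}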

enum ib_mr_create_flags {
	IB_MR_SIGNATURE_EN = 1,
};

/**
 * ib_mr_init_attr - Memory region init attributes passed to routine
 *     ib_create_mr.
 * @max_reg_descriptors: max number of registration descriptors that
 *     may be used with registration work requests.
 * @flags: MR creation flags bit mask.
 */
struct ib_mr_init_attr {
	int	    max_reg_descriptors;
	u32	    flags;
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};
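
/*
 * Illustrative sketch: populating signature attributes for a transfer
 * that is unprotected in memory and T10-DIF protected on the wire
 * with a CRC block guard.  The helper name and the field values
 * (e.g. the 512-byte protection interval) are examples, not
 * recommendations.
 */
static inline void ib_example_sig_attrs(struct ib_sig_attrs *attrs)
{
	*attrs = (struct ib_sig_attrs) {
		.check_mask		  = 0xff,
		.mem.sig_type		  = IB_SIG_TYPE_NONE,
		.wire.sig_type		  = IB_SIG_TYPE_T10_DIF,
		.wire.sig.dif.bg_type	  = IB_T10DIF_CRC,
		.wire.sig.dif.pi_interval = 512,
		.wire.sig.dif.ref_remap	  = true,
	};
}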

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};
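
/*
 * Sketch (hypothetical helper): interpreting an ib_mr_status filled
 * in by a check_mr_status call when IB_MR_CHECK_SIG_STATUS was
 * requested.
 */
static inline bool ib_example_mr_sig_failed(const struct ib_mr_status *status)
{
	return !!(status->fail_status & IB_MR_CHECK_SIG_STATUS);
}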

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV); a helper sketch follows
 * this enum.
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
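
/*
 * Sketch of the receive test described in the comment above; the
 * helper name is hypothetical.
 */
static inline bool ib_example_wc_is_recv(enum ib_wc_opcode opcode)
{
	return !!(opcode & IB_WC_RECV);
}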

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer.
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	IB_QP_CREATE_USE_GFP_NOIO		= 1 << 7,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};


/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	u8			port_num; /* special QP types only */
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_SMAC			= (1<<21),
	IB_QP_ALT_SMAC			= (1<<22),
	IB_QP_VID			= (1<<23),
	IB_QP_ALT_VID			= (1<<24),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u8			smac[ETH_ALEN];
	u8			alt_smac[ETH_ALEN];
	u16			vlan_id;
	u16			alt_vlan_id;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device       *device;
	u64		       *page_list;
	unsigned int		max_page_list_len;
};

/**
 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
 * @mr: A memory region to bind the memory window to.
 * @addr: The address where the memory window should begin.
 * @length: The length of the memory window, in bytes.
 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
 *
 * This struct contains the shared parameters for type 1 and type 2
 * memory window bind operations.
 */
struct ib_mw_bind_info {
	struct ib_mr   *mr;
	u64		addr;
	u64		length;
	int		mw_access_flags;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void   *header;
			int     hlen;
			int     mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list   *page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
		struct {
			struct ib_mw            *mw;
			/* The new rkey for the memory window. */
			u32                      rkey;
			struct ib_mw_bind_info   bind_info;
		} bind_mw;
		struct {
			struct ib_sig_attrs    *sig_attrs;
			struct ib_mr	       *sig_mr;
			int			access_flags;
			struct ib_sge	       *prot;
		} sig_handover;
	} wr;
	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
};

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5),
	IB_ACCESS_ON_DEMAND     = (1<<6),
};

struct ib_phys_buf {
	u64      addr;
	u64      size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id:      Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info:  More parameters of the bind operation.
 */
struct ib_mw_bind {
	u64                    wr_id;
	int                    send_flags;
	struct ib_mw_bind_info bind_info;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_umem;

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	struct list_head	rule_list;
	int			closing;

	struct pid             *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root      umem_tree;
	/*
	 * Protects the umem_tree, as well as odp_mrs_count and
	 * mmu notifiers registration.
	 */
	struct rw_semaphore	umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier	mn;
	atomic_t		notifier_count;
	/* A list of umems that don't have private mmu notifier counters yet. */
	struct list_head	no_private_counters;
	int                     odp_mrs_count;
#endif
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t          	usecnt; /* count all resources */
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int               	cqe;
	atomic_t          	usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;
	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH	= 0x20,
	IB_FLOW_SPEC_IB		= 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4	= 0x30,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP	= 0x40,
	IB_FLOW_SPEC_UDP	= 0x41
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
};

struct ib_flow_spec_eth {
	enum ib_flow_spec_type	  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8   sl;
};

struct ib_flow_spec_ib {
	enum ib_flow_spec_type	 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
};

struct ib_flow_spec_ipv4 {
	enum ib_flow_spec_type	   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
};

struct ib_flow_spec_tcp_udp {
	enum ib_flow_spec_type	      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

union ib_flow_spec {
	struct {
		enum ib_flow_spec_type	type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4        ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u8	     port;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 * (see the layout-size sketch following this struct)
	 */
};
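
/*
 * Illustrative sketch: the flow specs trail the ib_flow_attr in
 * memory, so the total allocation size for a rule carrying, say, only
 * Ethernet specs is the sum below.  The helper name is hypothetical.
 */
static inline size_t ib_example_eth_flow_attr_size(u8 num_eth_specs)
{
	return sizeof(struct ib_flow_attr) +
	       num_eth_specs * sizeof(struct ib_flow_spec_eth);
}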

struct ib_flow {
	struct ib_qp		*qp;
	struct ib_uobject	*uobject;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	struct list_head              client_data_list;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int			   (*rereg_user_mr)(struct ib_mr *mr,
						    int flags,
						    u64 start, u64 length,
						    u64 virt_addr,
						    int mr_access_flags,
						    struct ib_pd *pd,
						    struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*destroy_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*create_mr)(struct ib_pd *pd,
						struct ib_mr_init_attr *mr_init_attr);
	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
					       int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr
						  *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct device                dev;
	struct kobject               *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					       u8 port_num);
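
/*
 * Illustrative sketch (not a mainline helper): how a low-level
 * driver's modify_qp method might combine ib_modify_qp_is_ok() and
 * rdma_port_get_link_layer() to validate a transition.  The cur_state
 * argument would normally come from the driver's own QP bookkeeping,
 * and attr->port_num is only meaningful when IB_QP_PORT is set; the
 * port_num parameter stands in for that detail here.
 */
static inline int ib_example_validate_modify(struct ib_qp *qp,
					     struct ib_qp_attr *attr,
					     int attr_mask,
					     enum ib_qp_state cur_state,
					     u8 port_num)
{
	enum ib_qp_state new_state = (attr_mask & IB_QP_STATE) ?
		attr->qp_state : cur_state;
	enum rdma_link_layer ll =
		rdma_port_get_link_layer(qp->device, port_num);

	return ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type,
				  attr_mask, ll) ? 0 : -EINVAL;
}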

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);
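
/*
 * Sketch of arming the SRQ limit event described above; the helper
 * name is hypothetical.
 */
static inline int ib_example_arm_srq_limit(struct ib_srq *srq, u32 limit)
{
	struct ib_srq_attr attr = { .srq_limit = limit };

	return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}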
1875
1876/**
1877 * ib_query_srq - Returns the attribute list and current values for the
1878 *   specified SRQ.
1879 * @srq: The SRQ to query.
1880 * @srq_attr: The attributes of the specified SRQ.
1881 */
1882int ib_query_srq(struct ib_srq *srq,
1883		 struct ib_srq_attr *srq_attr);
1884
1885/**
1886 * ib_destroy_srq - Destroys the specified SRQ.
1887 * @srq: The SRQ to destroy.
1888 */
1889int ib_destroy_srq(struct ib_srq *srq);
1890
1891/**
1892 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1893 * @srq: The SRQ to post the work request on.
1894 * @recv_wr: A list of work requests to post on the receive queue.
1895 * @bad_recv_wr: On an immediate failure, this parameter will reference
1896 *   the work request that failed to be posted on the QP.
1897 */
1898static inline int ib_post_srq_recv(struct ib_srq *srq,
1899				   struct ib_recv_wr *recv_wr,
1900				   struct ib_recv_wr **bad_recv_wr)
1901{
1902	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1903}
1904
/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

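/*
 * Example: a minimal sketch of moving a freshly created QP from the
 * RESET to the INIT state.  The port number and access flags shown
 * are illustrative; later transitions (INIT -> RTR -> RTS) require
 * additional, connection-specific attributes.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = port,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */
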
/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

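/*
 * Example: a minimal sketch of posting a signaled RDMA write on a
 * connected QP.  The addresses, keys, and lengths are illustrative
 * and must refer to previously registered memory.
 *
 *	struct ib_sge sge = {
 *		.addr   = local_dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = (u64) (uintptr_t) ctx,
 *		.opcode     = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.wr.rdma = {
 *			.remote_addr = remote_addr,
 *			.rkey        = remote_rkey,
 *		},
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */
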
/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < device->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies the moderation parameters of the CQ.
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

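/*
 * Example: a typical loop that drains a CQ one entry at a time; the
 * completion handling shown is a placeholder.
 *
 *	struct ib_wc wc;
 *	int n;
 *
 *	while ((n = ib_poll_cq(cq, 1, &wc)) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			... handle the failed work request ...
 *		else
 *			... complete the work request in wc.wr_id ...
 *	}
 *	if (n < 0)
 *		... error while polling ...
 */
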
/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}

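/*
 * Example: the re-arm idiom implied by the return values above.  The
 * CQ is drained, notification is requested, and the CQ is drained
 * again whenever the provider reports possibly missed events.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			... process wc ...
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
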
/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

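/*
 * Example: a minimal sketch of mapping a kernel buffer for device
 * access and checking the result; buf and len are illustrative.
 *
 *	u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	... post work requests referencing dma_addr ...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */
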
/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}

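/**
 * ib_dma_map_single_attrs - Map a kernel virtual address to DMA address
 *   with the given DMA attributes
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 * @attrs: Optional DMA attributes for the mapping
 *
 * Note that unlike ib_dma_map_single(), this helper always uses the
 * core DMA API and does not consult any device-specific dma_ops.
 */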
static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

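/**
 * ib_dma_unmap_single_attrs - Destroy a mapping created by
 *   ib_dma_map_single_attrs()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 * @attrs: The DMA attributes the region was mapped with
 */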
static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	dma_unmap_single_attrs(dev->dma_device, addr, size,
			       direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

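/*
 * Example: a minimal sketch of mapping a scatterlist and walking the
 * mapped entries; sgl and nents are illustrative, and scatterlist
 * setup is omitted.
 *
 *	struct scatterlist *sg;
 *	int i, n;
 *
 *	n = ib_dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, n, i)
 *		... use ib_sg_dma_address(dev, sg) and
 *		    ib_sg_dma_len(dev, sg) ...
 *	ib_dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
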
/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

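/**
 * ib_dma_map_sg_attrs - Map a scatter/gather list to DMA addresses
 *   with the given DMA attributes
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 * @attrs: Optional DMA attributes for the mapping
 *
 * Note that unlike ib_dma_map_sg(), this helper always uses the core
 * DMA API and does not consult any device-specific dma_ops.
 */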
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

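/**
 * ib_dma_unmap_sg_attrs - Unmap a scatter/gather list mapped by
 *   ib_dma_map_sg_attrs()
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 * @attrs: The DMA attributes the list was mapped with
 */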
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

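/*
 * Example: a minimal sketch of allocating a coherent buffer for a
 * queue shared with the device; size is illustrative.
 *
 *	u64 dma_handle;
 *	void *buf = ib_dma_alloc_coherent(dev, size, &dma_handle,
 *					  GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... hand dma_handle to the device, access buf from the CPU ...
 *	ib_dma_free_coherent(dev, size, buf, dma_handle);
 */
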
/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister of the memory region
 *   followed by a register of the physical memory region.  Where possible,
 *   resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

2435
2436/**
2437 * ib_query_mr - Retrieves information about a specific memory region.
2438 * @mr: The memory region to retrieve information about.
2439 * @mr_attr: The attributes of the specified memory region.
2440 */
2441int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2442
2443/**
2444 * ib_dereg_mr - Deregisters a memory region and removes it from the
2445 *   HCA translation table.
2446 * @mr: The memory region to deregister.
2447 *
2448 * This function can fail, if the memory region has memory windows bound to it.
2449 */
2450int ib_dereg_mr(struct ib_mr *mr);
2451
2452
2453/**
2454 * ib_create_mr - Allocates a memory region that may be used for
2455 *     signature handover operations.
2456 * @pd: The protection domain associated with the region.
2457 * @mr_init_attr: memory region init attributes.
2458 */
2459struct ib_mr *ib_create_mr(struct ib_pd *pd,
2460			   struct ib_mr_init_attr *mr_init_attr);
2461
2462/**
2463 * ib_destroy_mr - Destroys a memory region that was created using
2464 *     ib_create_mr and removes it from HW translation tables.
2465 * @mr: The memory region to destroy.
2466 *
2467 * This function can fail, if the memory region has memory windows bound to it.
2468 */
2469int ib_destroy_mr(struct ib_mr *mr);
2470
2471/**
2472 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
2473 *   IB_WR_FAST_REG_MR send work request.
2474 * @pd: The protection domain associated with the region.
2475 * @max_page_list_len: requested max physical buffer list length to be
2476 *   used with fast register work requests for this MR.
2477 */
2478struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
2479
2480/**
2481 * ib_alloc_fast_reg_page_list - Allocates a page list array
2482 * @device - ib device pointer.
2483 * @page_list_len - size of the page list array to be allocated.
2484 *
2485 * This allocates and returns a struct ib_fast_reg_page_list * and a
2486 * page_list array that is at least page_list_len in size.  The actual
2487 * size is returned in max_page_list_len.  The caller is responsible
2488 * for initializing the contents of the page_list array before posting
2489 * a send work request with the IB_WC_FAST_REG_MR opcode.
2490 *
2491 * The page_list array entries must be translated using one of the
2492 * ib_dma_*() functions just like the addresses passed to
2493 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
2494 * ib_fast_reg_page_list must not be modified by the caller until the
2495 * IB_WC_FAST_REG_MR work request completes.
2496 */
2497struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
2498				struct ib_device *device, int page_list_len);
2499
2500/**
2501 * ib_free_fast_reg_page_list - Deallocates a previously allocated
2502 *   page list array.
2503 * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
2504 */
2505void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
2506
2507/**
2508 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
2509 *   R_Key and L_Key.
2510 * @mr - struct ib_mr pointer to be updated.
2511 * @newkey - new key to be used.
2512 */
2513static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2514{
2515	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2516	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2517}
2518
2519/**
2520 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
2521 * for calculating a new rkey for type 2 memory windows.
2522 * @rkey - the rkey to increment.
2523 */
2524static inline u32 ib_inc_rkey(u32 rkey)
2525{
2526	const u32 mask = 0x000000ff;
2527	return ((rkey + 1) & mask) | (rkey & ~mask);
2528}
2529
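/*
 * Example: one common pattern for producing a fresh key byte before
 * re-registering a fast_reg MR, so that stale remote references no
 * longer match the new registration.
 *
 *	u8 key = mr->rkey & 0x000000ff;
 *
 *	ib_update_fast_reg_key(mr, ++key);
 */
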
/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 * @type: The type of the memory window (1 or 2).
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 *
 * If there is no immediate error, the function will update the rkey member
 * of the mw parameter to its new value. The bind operation can still fail
 * asynchronously.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

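/*
 * Example: a minimal sketch of the FMR map/unmap cycle.  page_list,
 * npages, iova, and fmr_list are illustrative; page_list holds DMA
 * addresses produced by the ib_dma_*() helpers.
 *
 *	int ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *
 *	if (ret)
 *		return ret;
 *	... use fmr->lkey / fmr->rkey in work requests ...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 */
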
/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}

/**
 * ib_check_mr_status - lightweight check of MR status.
 *   This routine may provide status checks on a selected
 *   ib_mr.  The first use is for signature status checks.
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *   ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *   Failed checks will be indicated in the status bitmask
 *   and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

#endif /* IB_VERBS_H */