1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_DRIVER_H
34#define MLX5_DRIVER_H
35
36#include <linux/kernel.h>
37#include <linux/completion.h>
38#include <linux/pci.h>
39#include <linux/spinlock_types.h>
40#include <linux/semaphore.h>
41#include <linux/slab.h>
42#include <linux/vmalloc.h>
43#include <linux/radix-tree.h>
44
45#include <linux/mlx5/device.h>
46#include <linux/mlx5/doorbell.h>
47#include <linux/mlx5/mlx5_ifc.h>
48
/* Fixed buffer-length limits used for identification strings. */
enum {
	MLX5_BOARD_ID_LEN = 64,	/* size of mlx5_core_dev.board_id */
	MLX5_MAX_NAME_LEN = 16,	/* size of mlx5_priv.name */
};
53
enum {
	/* Command completion timeout.  Historically documented as "one
	 * minute for the sake of bringup", but the value below is actually
	 * 7200 * 1000 msec = two hours.  Commands must always complete;
	 * revisit this value once bringup stabilizes.
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 7200 * 1000,
	/* size of the mlx5_cmd.wq_name buffer */
	MLX5_CMD_WQ_MAX_NAME	= 32,
};
61
/* Command-interface ownership tokens and the success status code.
 * NOTE(review): presumably CMD_OWNER_* is written to the command entry
 * ownership bit to hand it between driver and device - confirm against
 * the command-interface spec.
 */
enum {
	CMD_OWNER_SW		= 0x0,
	CMD_OWNER_HW		= 0x1,
	CMD_STATUS_SUCCESS	= 0,
};
67
/* Special QP types/indices. */
enum mlx5_sqp_t {
	MLX5_SQP_SMI		= 0,	/* subnet management interface QP */
	MLX5_SQP_GSI		= 1,	/* general services interface QP */
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SNIFFER	= 3,
	MLX5_SQP_SYNC_UMR	= 4,
};
75
/* Maximum number of physical ports a device can expose; sizes the
 * per-port arrays in struct mlx5_general_caps.
 */
enum {
	MLX5_MAX_PORTS	= 2,
};
79
/* Fixed interrupt-vector assignments; completion EQ vectors start at
 * MLX5_EQ_VEC_COMP_BASE.
 */
enum {
	MLX5_EQ_VEC_PAGES	 = 0,	/* page-request events */
	MLX5_EQ_VEC_CMD		 = 1,	/* command completions */
	MLX5_EQ_VEC_ASYNC	 = 2,	/* async events */
	MLX5_EQ_VEC_COMP_BASE,		/* first completion vector */
};
86
/* Size of the mlx5_eq.name buffer. */
enum {
	MLX5_MAX_EQ_NAME	= 32
};
90
/* Atomic operation modes.  The mode value is encoded shifted left by 16,
 * presumably to match its bit position in the QP context - confirm
 * against the HW layout.
 */
enum {
	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
	MLX5_ATOMIC_MODE_CX		= 2 << 16,
	MLX5_ATOMIC_MODE_8B		= 3 << 16,
	MLX5_ATOMIC_MODE_16B		= 4 << 16,
	MLX5_ATOMIC_MODE_32B		= 5 << 16,
	MLX5_ATOMIC_MODE_64B		= 6 << 16,
	MLX5_ATOMIC_MODE_128B		= 7 << 16,
	MLX5_ATOMIC_MODE_256B		= 8 << 16,
};
101
/* Access-register IDs, passed as reg_num to mlx5_core_access_reg(). */
enum {
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PMLP		 = 0, /* TBD */
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
};
115
/* Flag bits for the page-fault resume command (on-demand paging). */
enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,	/* fault was on the requestor side */
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,	/* faulting access was a write */
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,	/* fault caused by an RDMA operation */
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,	/* resolution failed; report error */
};
122
/* Kind of resource exposed through a struct mlx5_rsc_debug. */
enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};
128
/* One debugfs-exposed field of a resource: its dentry and an index
 * identifying which field it is.
 */
struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};
133
134struct mlx5_rsc_debug {
135	struct mlx5_core_dev   *dev;
136	void		       *object;
137	enum dbg_rsc_type	type;
138	struct dentry	       *root;
139	struct mlx5_field_desc	fields[0];
140};
141
/* Events delivered to registered interfaces via the ->event() callback. */
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
};
152
/* Bookkeeping for micro UAR (uuar) allocation across the UAR pages. */
struct mlx5_uuar_info {
	struct mlx5_uar	       *uars;		/* array of num_uars UARs */
	int			num_uars;
	int			num_low_latency_uuars;
	unsigned long	       *bitmap;		/* allocation bitmap for uuars */
	unsigned int	       *count;		/* per-uuar usage counters */
	struct mlx5_bf	       *bfs;		/* blue-flame state per uuar */

	/*
	 * protect uuar allocation data structs
	 */
	struct mutex		lock;
	u32			ver;
};
167
/* Blue-flame (low-latency doorbell) register state for one uuar. */
struct mlx5_bf {
	void __iomem	       *reg;
	void __iomem	       *regreg;
	int			buf_size;
	struct mlx5_uar	       *uar;	/* UAR page this BF register lives in */
	unsigned long		offset;
	int			need_lock;	/* nonzero when the uuar is shared */
	/* protect blue flame buffer selection when needed
	 */
	spinlock_t		lock;

	/* serialize 64 bit writes when done as two 32 bit accesses
	 */
	spinlock_t		lock32;
	int			uuarn;	/* uuar index this BF belongs to */
};
184
/* First 16 bytes of a command message, kept inline (big-endian words). */
struct mlx5_cmd_first {
	__be32		data[4];
};
188
/* A command message: inline first block plus a chain of mailboxes for
 * the remainder.  May come from (and return to) a size-based cache.
 */
struct mlx5_cmd_msg {
	struct list_head		list;	/* linkage on the cache free list */
	struct cache_ent	       *cache;	/* owning cache entry, or NULL */
	u32				len;	/* total message length in bytes */
	struct mlx5_cmd_first		first;	/* inline leading bytes */
	struct mlx5_cmd_mailbox	       *next;	/* mailbox chain for the rest */
};
196
/* Debugfs hooks for injecting and inspecting raw commands. */
struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;	/* directory holding the files below */
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
	void		       *in_msg;		/* staged input buffer */
	void		       *out_msg;	/* staged output buffer */
	u8			status;		/* status of the last run command */
	u16			inlen;
	u16			outlen;
};
210
/* One free-list of cached command messages of a given size class. */
struct cache_ent {
	/* protect block chain allocations
	 */
	spinlock_t		lock;
	struct list_head	head;
};
217
/* Command-message cache, split into two size classes. */
struct cmd_msg_cache {
	struct cache_ent	large;
	struct cache_ent	med;

};
223
/* Per-opcode command statistics (sum of execution times over n runs),
 * exposed through debugfs.
 */
struct mlx5_cmd_stats {
	u64		sum;	/* accumulated time - units per implementation */
	u64		n;	/* number of samples accumulated */
	struct dentry  *root;
	struct dentry  *avg;
	struct dentry  *count;
	/* protect command average calculations */
	spinlock_t	lock;
};
233
/* State of the command interface: the command queue DMA buffers, entry
 * allocation bookkeeping, token generation, completion workqueue and
 * message caches.
 */
struct mlx5_cmd {
	void	       *cmd_alloc_buf;	/* raw allocation backing the queue */
	dma_addr_t	alloc_dma;
	int		alloc_size;
	void	       *cmd_buf;	/* aligned command queue buffer */
	dma_addr_t	dma;
	u16		cmdif_rev;	/* command interface revision in use */
	u8		log_sz;		/* log2 of number of queue entries */
	u8		log_stride;	/* log2 of entry stride */
	int		max_reg_cmds;	/* entries usable for regular commands */
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations
	 */
	spinlock_t	alloc_lock;

	/* protect token allocations
	 */
	spinlock_t	token_lock;
	u8		token;		/* last token handed out */
	unsigned long	bitmask;	/* free-entry bitmap */
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;	/* completion handling workqueue */
	struct semaphore sem;		/* throttles regular command slots */
	struct semaphore pages_sem;	/* throttles the page-command slot */
	int	mode;			/* polling vs. events completion mode */
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct pci_pool *pool;		/* DMA pool for mailboxes */
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache;
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];	/* indexed by opcode */
};
268
/* Per-port table-size capabilities. */
struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
};
273
/* General device capabilities/limits as reported by the device.
 * The log_max_* fields are presumably log2 of the corresponding maxima
 * (as reported by QUERY_HCA_CAP) - confirm against mlx5_ifc layouts.
 */
struct mlx5_general_caps {
	u8	log_max_eq;
	u8	log_max_cq;
	u8	log_max_qp;
	u8	log_max_mkey;
	u8	log_max_pd;
	u8	log_max_srq;
	u8	log_max_strq;
	u8	log_max_mrw_sz;
	u8	log_max_bsf_list_size;
	u8	log_max_klm_list_size;
	u32	max_cqes;
	int	max_wqes;
	u32	max_eqes;
	u32	max_indirection;
	int	max_sq_desc_sz;
	int	max_rq_desc_sz;
	int	max_dc_sq_desc_sz;
	u64	flags;
	u16	stat_rate_support;
	int	log_max_msg;
	int	num_ports;
	u8	log_max_ra_res_qp;
	u8	log_max_ra_req_qp;
	int	max_srq_wqes;
	int	bf_reg_size;
	int	bf_regs_per_page;
	struct mlx5_port_caps	port[MLX5_MAX_PORTS];	/* per-port limits */
	u8			ext_port_cap[MLX5_MAX_PORTS];
	int	max_vf;
	u32	reserved_lkey;
	u8	local_ca_ack_delay;
	u8	log_max_mcg;
	u32	max_qp_mcg;
	int	min_page_sz;
	int	pd_cap;
	u32	max_qp_counters;
	u32	pkey_table_size;
	u8	log_max_ra_req_dc;
	u8	log_max_ra_res_dc;
	u32	uar_sz;
	u8	min_log_pg_sz;
	u8	log_max_xrcd;
	u16	log_uar_page_sz;
};
319
/* Container for all capability groups (currently only the general set). */
struct mlx5_caps {
	struct mlx5_general_caps gen;
};
323
/* One DMA mailbox in a command message chain. */
struct mlx5_cmd_mailbox {
	void	       *buf;	/* CPU address of the mailbox data */
	dma_addr_t	dma;	/* bus address handed to the device */
	struct mlx5_cmd_mailbox *next;	/* next mailbox in the chain */
};
329
/* CPU/DMA address pair for one buffer fragment. */
struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};
334
/* DMA buffer that is either one contiguous allocation (direct) or a
 * list of page-sized fragments (page_list); see mlx5_buf_offset().
 */
struct mlx5_buf {
	struct mlx5_buf_list	direct;		/* valid when contiguous */
	struct mlx5_buf_list   *page_list;	/* valid when fragmented */
	int			nbufs;		/* number of fragments */
	int			npages;
	int			size;		/* total size in bytes */
	u8			page_shift;
};
343
/* An event queue and its driver-side state. */
struct mlx5_eq {
	struct mlx5_core_dev   *dev;
	__be32 __iomem	       *doorbell;
	u32			cons_index;	/* consumer index */
	struct mlx5_buf		buf;		/* EQE ring buffer */
	int			size;
	u8			irqn;		/* interrupt vector number */
	u8			eqn;		/* EQ number assigned by FW */
	int			nent;		/* number of entries */
	u64			mask;		/* event mask this EQ is armed for */
	char			name[MLX5_MAX_EQ_NAME];
	struct list_head	list;		/* linkage on comp_eqs_list */
	int			index;
	struct mlx5_rsc_debug	*dbg;
};
359
/* Protection and Signature Value (PSV) object, used by signature MRs. */
struct mlx5_core_psv {
	u32	psv_idx;	/* index returned by PSV creation */
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;		/* block guard */
		u16	app_tag;
		u32	ref_tag;
	} psv;
};
371
/* Signature (T10-DIF style) context: a PSV pair plus error tracking. */
struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;	/* memory-domain PSV */
	struct mlx5_core_psv	psv_wire;	/* wire-domain PSV */
	struct ib_sig_err       err_item;	/* last reported signature error */
	bool			sig_status_checked;	/* user fetched status since last error */
	bool			sig_err_exists;
	u32			sigerr_count;
};
380
/* Core memory region: virtual range, key, and owning PD. */
struct mlx5_core_mr {
	u64			iova;	/* starting virtual address */
	u64			size;	/* length in bytes */
	u32			key;	/* full mkey (base | variant byte) */
	u32			pd;	/* protection domain number */
};
387
/* Type tag for resources sharing struct mlx5_core_rsc_common. */
enum mlx5_res_type {
	MLX5_RES_QP,
};
391
/* Common refcounted header for resources looked up by number; the last
 * mlx5_core_put_rsc() completes @free.
 */
struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	atomic_t		refcount;
	struct completion	free;	/* signalled when refcount drops to zero */
};
397
/* Core shared receive queue state. */
struct mlx5_core_srq {
	u32		srqn;		/* SRQ number assigned by FW */
	int		max;
	int		max_gs;		/* max scatter entries per WQE */
	int		max_avail_gather;
	int		wqe_shift;	/* log2 of WQE stride */
	void (*event)	(struct mlx5_core_srq *, enum mlx5_event);	/* async event handler */

	atomic_t		refcount;
	struct completion	free;	/* signalled when refcount drops to zero */
};
409
/* All event queues of a device: the three service EQs plus a list of
 * per-vector completion EQs.
 */
struct mlx5_eq_table {
	void __iomem	       *update_ci;
	void __iomem	       *update_arm_ci;
	struct list_head	comp_eqs_list;	/* completion EQs */
	struct mlx5_eq		pages_eq;	/* page-request events */
	struct mlx5_eq		async_eq;	/* async events */
	struct mlx5_eq		cmd_eq;		/* command completions */
	struct msix_entry	*msix_arr;
	int			num_comp_vectors;
	/* protect EQs list
	 */
	spinlock_t		lock;
};
423
/* A mapped User Access Region page. */
struct mlx5_uar {
	u32			index;		/* UAR index from the firmware */
	struct list_head	bf_list;
	unsigned		free_bf_bmap;	/* bitmap of free BF registers */
	void __iomem	       *wc_map;		/* write-combining mapping */
	void __iomem	       *map;		/* regular (uncached) mapping */
};
431
432
/* Periodic health-check state: polls the device health buffer and
 * counts missed counter updates.
 */
struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;		/* poll timer */
	struct list_head		list;
	u32				prev;		/* last counter value seen */
	int				miss_counter;	/* consecutive missed updates */
};
441
/* CQN -> CQ lookup table. */
struct mlx5_cq_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};
448
/* QPN -> QP lookup table. */
struct mlx5_qp_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};
455
/* SRQN -> SRQ lookup table. */
struct mlx5_srq_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};
462
/* MKey-index -> MR lookup table.  Uses an rwlock (unlike the other
 * tables) since lookups dominate.
 */
struct mlx5_mr_table {
	/* protect radix tree
	 */
	rwlock_t		lock;
	struct radix_tree_root	tree;
};
469
/* Private (core-internal) per-device state: resource tables, page
 * bookkeeping, debugfs roots and interface registration lists.
 */
struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table	eq_table;
	struct mlx5_uuar_info	uuari;
	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

	/* pages stuff */
	struct workqueue_struct *pg_wq;		/* page-request work */
	struct rb_root		page_root;	/* pages given to FW, by func id */
	int			fw_pages;	/* pages currently held by FW */
	atomic_t		reg_pages;
	struct list_head	free_list;

	struct mlx5_core_health health;

	struct mlx5_srq_table	srq_table;

	/* start: qp staff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp staff */

	/* start: cq staff */
	struct mlx5_cq_table	cq_table;
	/* end: cq staff */

	/* start: mr staff */
	struct mlx5_mr_table	mr_table;
	/* end: mr staff */

	/* start: alloc staff */
	struct mutex            pgdir_mutex;
	struct list_head        pgdir_list;	/* doorbell page directories */
	/* end: alloc staff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;
	u8			mkey_key;	/* next mkey variant byte */

	struct list_head        dev_list;	/* linkage on the global device list */
	struct list_head        ctx_list;	/* per-interface contexts */
	spinlock_t              ctx_lock;
};
517
/* The core device: PCI handle, command interface, capabilities, the
 * mapped initialization segment and all private state.
 */
struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_caps	caps;
	phys_addr_t		iseg_base;	/* physical address of the init segment */
	struct mlx5_init_seg __iomem *iseg;	/* mapped init segment */
	void			(*event) (struct mlx5_core_dev *dev,
					  enum mlx5_dev_event event,
					  unsigned long param);	/* async event dispatch */
	struct mlx5_priv	priv;
	struct mlx5_profile	*profile;
	atomic_t		num_qps;
};
533
/* A doorbell record, allocated either from a kernel page directory or
 * from a userspace-provided page.
 */
struct mlx5_db {
	__be32			*db;	/* CPU address of the doorbell record */
	union {
		struct mlx5_db_pgdir		*pgdir;		/* kernel allocation */
		struct mlx5_ib_user_db_page	*user_page;	/* userspace allocation */
	}			u;
	dma_addr_t		dma;
	int			index;	/* slot index within the page */
};
543
/* Doorbell records per page: one per cacheline to avoid false sharing. */
enum {
	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
};
547
/* Default number of entries in a completion EQ. */
enum {
	MLX5_COMP_EQ_SIZE = 1024,
};
551
/* One page of doorbell records with an allocation bitmap. */
struct mlx5_db_pgdir {
	struct list_head	list;	/* linkage on priv->pgdir_list */
	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);	/* free-slot bitmap */
	__be32		       *db_page;
	dma_addr_t		db_dma;
};
558
/* Completion callback type for asynchronously executed commands. */
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

/* Tracking state for one in-flight command. */
struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg    *in;		/* input message */
	struct mlx5_cmd_msg    *out;		/* output message */
	void		       *uout;		/* caller's output buffer */
	int			uout_size;
	mlx5_cmd_cbk_t		callback;	/* async completion callback, or NULL */
	void		       *context;	/* opaque argument for @callback */
	int			idx;		/* command queue entry index */
	struct completion	done;		/* signalled on completion (sync path) */
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;		/* HW layout of the queue entry */
	int			ret;
	int			page_queue;	/* uses the dedicated page-command slot */
	u8			status;		/* status returned by the device */
	u8			token;
	u64			ts1;		/* start/end timestamps - presumably feed
						 * mlx5_cmd_stats; confirm in cmd.c */
	u64			ts2;
	u16			op;		/* command opcode */
};
581
/* A physical address plus the log2 size of the region it maps. */
struct mlx5_pas {
	u64	pa;
	u8	log_sz;
};
586
587static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
588{
589	if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
590		return buf->direct.buf + offset;
591	else
592		return buf->page_list[offset >> PAGE_SHIFT].buf +
593			(offset & (PAGE_SIZE - 1));
594}
595
extern struct workqueue_struct *mlx5_core_wq;

/* Designated-initializer helper for ib_field tables: fills in the byte
 * offset and size of @field within struct ib_unpacked_<header>.
 */
#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field

/* Describes one field for generic header pack/unpack. */
struct ib_field {
	size_t struct_offset_bytes;	/* offset within the host (unpacked) struct */
	size_t struct_size_bytes;	/* size within the host (unpacked) struct */
	int    offset_bits;		/* presumably bit offset in the packed form - confirm */
	int    size_bits;		/* presumably bit width in the packed form - confirm */
};
608
/* Retrieve the mlx5 core device stored in the pci_dev's driver data. */
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

/* Root of the mlx5 debugfs hierarchy. */
extern struct dentry *mlx5_debugfs_root;
615
/* Firmware major revision: low 16 bits of the init segment's fw_rev word. */
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}
620
/* Firmware minor revision: high 16 bits of the init segment's fw_rev word. */
static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}
625
/* Firmware sub-minor revision: low 16 bits of cmdif_rev_fw_sub. */
static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}
630
/* Command interface revision: high 16 bits of cmdif_rev_fw_sub. */
static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
635
636static inline void *mlx5_vzalloc(unsigned long size)
637{
638	void *rtn;
639
640	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
641	if (!rtn)
642		rtn = vzalloc(size);
643	return rtn;
644}
645
646static inline u32 mlx5_base_mkey(const u32 key)
647{
648	return key & 0xffffff00u;
649}
650
/* Command interface setup and execution */
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
		       u16 opmod);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);

/* UAR / uuar allocation */
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);

/* Health monitoring */
void mlx5_health_cleanup(void);
void  __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);

/* DMA buffers and command mailboxes */
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
		   struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);

/* SRQs */
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_create_srq_mbox_in *in, int inlen);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_query_srq_mbox_out *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);

/* Memory keys / regions */
void mlx5_init_mr_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			  struct mlx5_create_mkey_mbox_in *in, int inlen,
			  mlx5_cmd_cbk_t callback, void *context,
			  struct mlx5_create_mkey_mbox_out *out);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			 struct mlx5_query_mkey_mbox_out *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			     u32 *mkey);

/* Protection domains and MADs */
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
		      u16 opmod, u8 port);

/* Firmware page management */
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);

/* Debugfs registration */
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

/* Event queues and event dispatch */
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);

/* Multicast groups */
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);

/* Access registers (see MLX5_REG_*) and port capabilities */
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);

/* Per-resource debugfs and doorbell records */
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       struct mlx5_query_eq_mbox_out *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

/* Command introspection, PSVs, resource refcounting, ODP caps */
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
759
/* Convert a full mkey to its table index by dropping the 8 variant bits. */
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}
764
/* Convert a table index back to an mkey with a zero variant byte. */
static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}
769
/* Extract the low 8 variant bits of an mkey. */
static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
774
/* Bits of mlx5_profile.mask selecting which profile fields are valid. */
enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};
779
/* Number of size classes in the MR cache (sizes mlx5_profile.mr_cache). */
enum {
	MAX_MR_CACHE_ENTRIES    = 16,
};
783
/* Protocol identifiers used by mlx5_interface and mlx5_get_protocol_dev(). */
enum {
	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};
788
/* An upper-layer protocol driver registered with the core.  The core
 * calls ->add()/->remove() on device attach/detach and ->event() for
 * async events; ->get_dev() maps a context back to its protocol device.
 */
struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	void			(*event)(struct mlx5_core_dev *dev, void *context,
					 enum mlx5_dev_event event, unsigned long param);
	void *                  (*get_dev)(void *context);
	int			protocol;	/* MLX5_INTERFACE_PROTOCOL_* */
	struct list_head	list;		/* linkage on the interface list */
};
798
/* Interface (protocol driver) lookup and registration. */
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
802
/* Tunable driver profile; @mask (MLX5_PROF_MASK_*) selects which of the
 * remaining fields are honored.
 */
struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;	/* valid when MLX5_PROF_MASK_QP_SIZE is set */
	struct {
		int	size;	/* entries to pre-populate */
		int	limit;	/* low-water mark */
	} mr_cache[MAX_MR_CACHE_ENTRIES];	/* valid when MLX5_PROF_MASK_MR_CACHE is set */
};
811
812#endif /* MLX5_DRIVER_H */
813